localrepo: add some basic comment for block in __getitem__...
marmoute
r44189:9c83d287 default
@@ -1,3720 +1,3725 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 )
26 26 from .pycompat import (
27 27 delattr,
28 28 getattr,
29 29 )
30 30 from . import (
31 31 bookmarks,
32 32 branchmap,
33 33 bundle2,
34 34 changegroup,
35 35 color,
36 36 context,
37 37 dirstate,
38 38 dirstateguard,
39 39 discovery,
40 40 encoding,
41 41 error,
42 42 exchange,
43 43 extensions,
44 44 filelog,
45 45 hook,
46 46 lock as lockmod,
47 47 match as matchmod,
48 48 merge as mergemod,
49 49 mergeutil,
50 50 namespaces,
51 51 narrowspec,
52 52 obsolete,
53 53 pathutil,
54 54 phases,
55 55 pushkey,
56 56 pycompat,
57 57 repoview,
58 58 revset,
59 59 revsetlang,
60 60 scmutil,
61 61 sparse,
62 62 store as storemod,
63 63 subrepoutil,
64 64 tags as tagsmod,
65 65 transaction,
66 66 txnutil,
67 67 util,
68 68 vfs as vfsmod,
69 69 )
70 70
71 71 from .interfaces import (
72 72 repository,
73 73 util as interfaceutil,
74 74 )
75 75
76 76 from .utils import (
77 77 procutil,
78 78 stringutil,
79 79 )
80 80
81 81 from .revlogutils import constants as revlogconst
82 82
83 83 release = lockmod.release
84 84 urlerr = util.urlerr
85 85 urlreq = util.urlreq
86 86
87 87 # set of (path, vfs-location) tuples. vfs-location is:
88 88 # - 'plain' for vfs relative paths
89 89 # - '' for svfs relative paths
90 90 _cachedfiles = set()
91 91
92 92
93 93 class _basefilecache(scmutil.filecache):
94 94 """All filecache usage on repo are done for logic that should be unfiltered
95 95 """
96 96
97 97 def __get__(self, repo, type=None):
98 98 if repo is None:
99 99 return self
100 100 # proxy to unfiltered __dict__ since filtered repo has no entry
101 101 unfi = repo.unfiltered()
102 102 try:
103 103 return unfi.__dict__[self.sname]
104 104 except KeyError:
105 105 pass
106 106 return super(_basefilecache, self).__get__(unfi, type)
107 107
108 108 def set(self, repo, value):
109 109 return super(_basefilecache, self).set(repo.unfiltered(), value)
110 110
111 111
112 112 class repofilecache(_basefilecache):
113 113 """filecache for files in .hg but outside of .hg/store"""
114 114
115 115 def __init__(self, *paths):
116 116 super(repofilecache, self).__init__(*paths)
117 117 for path in paths:
118 118 _cachedfiles.add((path, b'plain'))
119 119
120 120 def join(self, obj, fname):
121 121 return obj.vfs.join(fname)
122 122
123 123
124 124 class storecache(_basefilecache):
125 125 """filecache for files in the store"""
126 126
127 127 def __init__(self, *paths):
128 128 super(storecache, self).__init__(*paths)
129 129 for path in paths:
130 130 _cachedfiles.add((path, b''))
131 131
132 132 def join(self, obj, fname):
133 133 return obj.sjoin(fname)
134 134
135 135
136 136 class mixedrepostorecache(_basefilecache):
137 137 """filecache for a mix files in .hg/store and outside"""
138 138
139 139 def __init__(self, *pathsandlocations):
140 140 # scmutil.filecache only uses the path for passing back into our
141 141 # join(), so we can safely pass a list of paths and locations
142 142 super(mixedrepostorecache, self).__init__(*pathsandlocations)
143 143 _cachedfiles.update(pathsandlocations)
144 144
145 145 def join(self, obj, fnameandlocation):
146 146 fname, location = fnameandlocation
147 147 if location == b'plain':
148 148 return obj.vfs.join(fname)
149 149 else:
150 150 if location != b'':
151 151 raise error.ProgrammingError(
152 152 b'unexpected location: %s' % location
153 153 )
154 154 return obj.sjoin(fname)
155 155
156 156
157 157 def isfilecached(repo, name):
158 158 """check if a repo has already cached "name" filecache-ed property
159 159
160 160 This returns (cachedobj-or-None, iscached) tuple.
161 161 """
162 162 cacheentry = repo.unfiltered()._filecache.get(name, None)
163 163 if not cacheentry:
164 164 return None, False
165 165 return cacheentry.obj, True
166 166
167 167
168 168 class unfilteredpropertycache(util.propertycache):
169 169 """propertycache that apply to unfiltered repo only"""
170 170
171 171 def __get__(self, repo, type=None):
172 172 unfi = repo.unfiltered()
173 173 if unfi is repo:
174 174 return super(unfilteredpropertycache, self).__get__(unfi)
175 175 return getattr(unfi, self.name)
176 176
177 177
178 178 class filteredpropertycache(util.propertycache):
179 179 """propertycache that must take filtering in account"""
180 180
181 181 def cachevalue(self, obj, value):
182 182 object.__setattr__(obj, self.name, value)
183 183
184 184
185 185 def hasunfilteredcache(repo, name):
186 186 """check if a repo has an unfilteredpropertycache value for <name>"""
187 187 return name in vars(repo.unfiltered())
188 188
189 189
190 190 def unfilteredmethod(orig):
191 191 """decorate method that always need to be run on unfiltered version"""
192 192
193 193 def wrapper(repo, *args, **kwargs):
194 194 return orig(repo.unfiltered(), *args, **kwargs)
195 195
196 196 return wrapper
197 197
198 198
199 199 moderncaps = {
200 200 b'lookup',
201 201 b'branchmap',
202 202 b'pushkey',
203 203 b'known',
204 204 b'getbundle',
205 205 b'unbundle',
206 206 }
207 207 legacycaps = moderncaps.union({b'changegroupsubset'})
208 208
209 209
210 210 @interfaceutil.implementer(repository.ipeercommandexecutor)
211 211 class localcommandexecutor(object):
212 212 def __init__(self, peer):
213 213 self._peer = peer
214 214 self._sent = False
215 215 self._closed = False
216 216
217 217 def __enter__(self):
218 218 return self
219 219
220 220 def __exit__(self, exctype, excvalue, exctb):
221 221 self.close()
222 222
223 223 def callcommand(self, command, args):
224 224 if self._sent:
225 225 raise error.ProgrammingError(
226 226 b'callcommand() cannot be used after sendcommands()'
227 227 )
228 228
229 229 if self._closed:
230 230 raise error.ProgrammingError(
231 231 b'callcommand() cannot be used after close()'
232 232 )
233 233
234 234 # We don't need to support anything fancy. Just call the named
235 235 # method on the peer and return a resolved future.
236 236 fn = getattr(self._peer, pycompat.sysstr(command))
237 237
238 238 f = pycompat.futures.Future()
239 239
240 240 try:
241 241 result = fn(**pycompat.strkwargs(args))
242 242 except Exception:
243 243 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
244 244 else:
245 245 f.set_result(result)
246 246
247 247 return f
248 248
249 249 def sendcommands(self):
250 250 self._sent = True
251 251
252 252 def close(self):
253 253 self._closed = True
254 254
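For context, a sketch of how callers consume an executor (a usage pattern, not code from this change); with a local peer, each returned future is already resolved when ``callcommand()`` returns:

    with peer.commandexecutor() as executor:
        fheads = executor.callcommand(b'heads', {})
        fknown = executor.callcommand(b'known', {b'nodes': []})
    heads = fheads.result()
    known = fknown.result()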
255 255
256 256 @interfaceutil.implementer(repository.ipeercommands)
257 257 class localpeer(repository.peer):
258 258 '''peer for a local repo; reflects only the most recent API'''
259 259
260 260 def __init__(self, repo, caps=None):
261 261 super(localpeer, self).__init__()
262 262
263 263 if caps is None:
264 264 caps = moderncaps.copy()
265 265 self._repo = repo.filtered(b'served')
266 266 self.ui = repo.ui
267 267 self._caps = repo._restrictcapabilities(caps)
268 268
269 269 # Begin of _basepeer interface.
270 270
271 271 def url(self):
272 272 return self._repo.url()
273 273
274 274 def local(self):
275 275 return self._repo
276 276
277 277 def peer(self):
278 278 return self
279 279
280 280 def canpush(self):
281 281 return True
282 282
283 283 def close(self):
284 284 self._repo.close()
285 285
286 286 # End of _basepeer interface.
287 287
288 288 # Begin of _basewirecommands interface.
289 289
290 290 def branchmap(self):
291 291 return self._repo.branchmap()
292 292
293 293 def capabilities(self):
294 294 return self._caps
295 295
296 296 def clonebundles(self):
297 297 return self._repo.tryread(b'clonebundles.manifest')
298 298
299 299 def debugwireargs(self, one, two, three=None, four=None, five=None):
300 300 """Used to test argument passing over the wire"""
301 301 return b"%s %s %s %s %s" % (
302 302 one,
303 303 two,
304 304 pycompat.bytestr(three),
305 305 pycompat.bytestr(four),
306 306 pycompat.bytestr(five),
307 307 )
308 308
309 309 def getbundle(
310 310 self, source, heads=None, common=None, bundlecaps=None, **kwargs
311 311 ):
312 312 chunks = exchange.getbundlechunks(
313 313 self._repo,
314 314 source,
315 315 heads=heads,
316 316 common=common,
317 317 bundlecaps=bundlecaps,
318 318 **kwargs
319 319 )[1]
320 320 cb = util.chunkbuffer(chunks)
321 321
322 322 if exchange.bundle2requested(bundlecaps):
323 323 # When requesting a bundle2, getbundle returns a stream to make the
324 324 # wire level function happier. We need to build a proper object
325 325 # from it in the local peer.
326 326 return bundle2.getunbundler(self.ui, cb)
327 327 else:
328 328 return changegroup.getunbundler(b'01', cb, None)
329 329
330 330 def heads(self):
331 331 return self._repo.heads()
332 332
333 333 def known(self, nodes):
334 334 return self._repo.known(nodes)
335 335
336 336 def listkeys(self, namespace):
337 337 return self._repo.listkeys(namespace)
338 338
339 339 def lookup(self, key):
340 340 return self._repo.lookup(key)
341 341
342 342 def pushkey(self, namespace, key, old, new):
343 343 return self._repo.pushkey(namespace, key, old, new)
344 344
345 345 def stream_out(self):
346 346 raise error.Abort(_(b'cannot perform stream clone against local peer'))
347 347
348 348 def unbundle(self, bundle, heads, url):
349 349 """apply a bundle on a repo
350 350
351 351 This function handles the repo locking itself."""
352 352 try:
353 353 try:
354 354 bundle = exchange.readbundle(self.ui, bundle, None)
355 355 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
356 356 if util.safehasattr(ret, b'getchunks'):
357 357 # This is a bundle20 object, turn it into an unbundler.
358 358 # This little dance should be dropped eventually when the
359 359 # API is finally improved.
360 360 stream = util.chunkbuffer(ret.getchunks())
361 361 ret = bundle2.getunbundler(self.ui, stream)
362 362 return ret
363 363 except Exception as exc:
364 364 # If the exception contains output salvaged from a bundle2
365 365 # reply, we need to make sure it is printed before continuing
366 366 # to fail. So we build a bundle2 with such output and consume
367 367 # it directly.
368 368 #
369 369 # This is not very elegant but allows a "simple" solution for
370 370 # issue4594
371 371 output = getattr(exc, '_bundle2salvagedoutput', ())
372 372 if output:
373 373 bundler = bundle2.bundle20(self._repo.ui)
374 374 for out in output:
375 375 bundler.addpart(out)
376 376 stream = util.chunkbuffer(bundler.getchunks())
377 377 b = bundle2.getunbundler(self.ui, stream)
378 378 bundle2.processbundle(self._repo, b)
379 379 raise
380 380 except error.PushRaced as exc:
381 381 raise error.ResponseError(
382 382 _(b'push failed:'), stringutil.forcebytestr(exc)
383 383 )
384 384
385 385 # End of _basewirecommands interface.
386 386
387 387 # Begin of peer interface.
388 388
389 389 def commandexecutor(self):
390 390 return localcommandexecutor(self)
391 391
392 392 # End of peer interface.
393 393
394 394
395 395 @interfaceutil.implementer(repository.ipeerlegacycommands)
396 396 class locallegacypeer(localpeer):
397 397 '''peer extension which implements legacy methods too; used for tests with
398 398 restricted capabilities'''
399 399
400 400 def __init__(self, repo):
401 401 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
402 402
403 403 # Begin of baselegacywirecommands interface.
404 404
405 405 def between(self, pairs):
406 406 return self._repo.between(pairs)
407 407
408 408 def branches(self, nodes):
409 409 return self._repo.branches(nodes)
410 410
411 411 def changegroup(self, nodes, source):
412 412 outgoing = discovery.outgoing(
413 413 self._repo, missingroots=nodes, missingheads=self._repo.heads()
414 414 )
415 415 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
416 416
417 417 def changegroupsubset(self, bases, heads, source):
418 418 outgoing = discovery.outgoing(
419 419 self._repo, missingroots=bases, missingheads=heads
420 420 )
421 421 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
422 422
423 423 # End of baselegacywirecommands interface.
424 424
425 425
426 426 # Increment the sub-version when the revlog v2 format changes to lock out old
427 427 # clients.
428 428 REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
429 429
430 430 # A repository with the sparserevlog feature will have delta chains that
431 431 # can spread over a larger span. Sparse reading cuts these large spans into
432 432 # pieces, so that each piece isn't too big.
433 433 # Without the sparserevlog capability, reading from the repository could use
434 434 # huge amounts of memory, because the whole span would be read at once,
435 435 # including all the intermediate revisions that aren't pertinent for the chain.
436 436 # This is why once a repository has enabled sparse-read, it becomes required.
437 437 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
438 438
439 439 # A repository with the sidedataflag requirement will allow storing extra
440 440 # information for revisions without altering their original hashes.
441 441 SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
442 442
443 443 # A repository with the copies-sidedata-changeset requirement will store
444 444 # copies-related information in the changeset's sidedata.
445 445 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
446 446
447 447 # Functions receiving (ui, features) that extensions can register to impact
448 448 # the ability to load repositories with custom requirements. Only
449 449 # functions defined in loaded extensions are called.
450 450 #
451 451 # The function receives a set of requirement strings that the repository
452 452 # is capable of opening. Functions will typically add elements to the
453 453 # set to reflect that the extension knows how to handle those requirements.
454 454 featuresetupfuncs = set()
455 455
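A sketch of the registration described above, as an extension module might perform it (``exp-myfeature`` is an invented requirement name):

    from mercurial import localrepo

    def featuresetup(ui, features):
        # advertise that this extension can handle the requirement
        features.add(b'exp-myfeature')

    def uisetup(ui):
        localrepo.featuresetupfuncs.add(featuresetup)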
456 456
457 457 def makelocalrepository(baseui, path, intents=None):
458 458 """Create a local repository object.
459 459
460 460 Given arguments needed to construct a local repository, this function
461 461 performs various early repository loading functionality (such as
462 462 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
463 463 the repository can be opened, derives a type suitable for representing
464 464 that repository, and returns an instance of it.
465 465
466 466 The returned object conforms to the ``repository.completelocalrepository``
467 467 interface.
468 468
469 469 The repository type is derived by calling a series of factory functions
470 470 for each aspect/interface of the final repository. These are defined by
471 471 ``REPO_INTERFACES``.
472 472
473 473 Each factory function is called to produce a type implementing a specific
474 474 interface. The cumulative list of returned types will be combined into a
475 475 new type and that type will be instantiated to represent the local
476 476 repository.
477 477
478 478 The factory functions each receive various state that may be consulted
479 479 as part of deriving a type.
480 480
481 481 Extensions should wrap these factory functions to customize repository type
482 482 creation. Note that an extension's wrapped function may be called even if
483 483 that extension is not loaded for the repo being constructed. Extensions
484 484 should check if their ``__name__`` appears in the
485 485 ``extensionmodulenames`` set passed to the factory function and no-op if
486 486 not.
487 487 """
488 488 ui = baseui.copy()
489 489 # Prevent copying repo configuration.
490 490 ui.copy = baseui.copy
491 491
492 492 # Working directory VFS rooted at repository root.
493 493 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
494 494
495 495 # Main VFS for .hg/ directory.
496 496 hgpath = wdirvfs.join(b'.hg')
497 497 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
498 498
499 499 # The .hg/ path should exist and should be a directory. All other
500 500 # cases are errors.
501 501 if not hgvfs.isdir():
502 502 try:
503 503 hgvfs.stat()
504 504 except OSError as e:
505 505 if e.errno != errno.ENOENT:
506 506 raise
507 507
508 508 raise error.RepoError(_(b'repository %s not found') % path)
509 509
510 510 # .hg/requires file contains a newline-delimited list of
511 511 # features/capabilities the opener (us) must have in order to use
512 512 # the repository. This file was introduced in Mercurial 0.9.2,
513 513 # which means very old repositories may not have one. We assume
514 514 # a missing file translates to no requirements.
515 515 try:
516 516 requirements = set(hgvfs.read(b'requires').splitlines())
517 517 except IOError as e:
518 518 if e.errno != errno.ENOENT:
519 519 raise
520 520 requirements = set()
521 521
522 522 # The .hg/hgrc file may load extensions or contain config options
523 523 # that influence repository construction. Attempt to load it and
524 524 # process any new extensions that it may have pulled in.
525 525 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
526 526 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
527 527 extensions.loadall(ui)
528 528 extensions.populateui(ui)
529 529
530 530 # Set of module names of extensions loaded for this repository.
531 531 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
532 532
533 533 supportedrequirements = gathersupportedrequirements(ui)
534 534
535 535 # We first validate the requirements are known.
536 536 ensurerequirementsrecognized(requirements, supportedrequirements)
537 537
538 538 # Then we validate that the known set is reasonable to use together.
539 539 ensurerequirementscompatible(ui, requirements)
540 540
541 541 # TODO there are unhandled edge cases related to opening repositories with
542 542 # shared storage. If storage is shared, we should also test for requirements
543 543 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
544 544 # that repo, as that repo may load extensions needed to open it. This is a
545 545 # bit complicated because we don't want the other hgrc to overwrite settings
546 546 # in this hgrc.
547 547 #
548 548 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
549 549 # file when sharing repos. But if a requirement is added after the share is
550 550 # performed, thereby introducing a new requirement for the opener, we will
551 551 # not see that and could encounter a run-time error interacting with
552 552 # that shared store since it has an unknown-to-us requirement.
553 553
554 554 # At this point, we know we should be capable of opening the repository.
555 555 # Now get on with doing that.
556 556
557 557 features = set()
558 558
559 559 # The "store" part of the repository holds versioned data. How it is
560 560 # accessed is determined by various requirements. The ``shared`` or
561 561 # ``relshared`` requirements indicate the store lives in the path contained
562 562 # in the ``.hg/sharedpath`` file. This is an absolute path for
563 563 # ``shared`` and relative to ``.hg/`` for ``relshared``.
564 564 if b'shared' in requirements or b'relshared' in requirements:
565 565 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
566 566 if b'relshared' in requirements:
567 567 sharedpath = hgvfs.join(sharedpath)
568 568
569 569 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
570 570
571 571 if not sharedvfs.exists():
572 572 raise error.RepoError(
573 573 _(b'.hg/sharedpath points to nonexistent directory %s')
574 574 % sharedvfs.base
575 575 )
576 576
577 577 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
578 578
579 579 storebasepath = sharedvfs.base
580 580 cachepath = sharedvfs.join(b'cache')
581 581 else:
582 582 storebasepath = hgvfs.base
583 583 cachepath = hgvfs.join(b'cache')
584 584 wcachepath = hgvfs.join(b'wcache')
585 585
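A worked example of the resolution above, with hypothetical paths:

    # 'shared':    .hg/sharedpath holds an absolute path, used as-is, e.g.
    #              b'/srv/main/.hg' -> storebasepath = b'/srv/main/.hg'
    # 'relshared': the stored path is joined relative to this repo's .hg/,
    #              e.g. b'../main/.hg' -> hgvfs.join(b'../main/.hg')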
586 586 # The store has changed over time and the exact layout is dictated by
587 587 # requirements. The store interface abstracts differences across all
588 588 # of them.
589 589 store = makestore(
590 590 requirements,
591 591 storebasepath,
592 592 lambda base: vfsmod.vfs(base, cacheaudited=True),
593 593 )
594 594 hgvfs.createmode = store.createmode
595 595
596 596 storevfs = store.vfs
597 597 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
598 598
599 599 # The cache vfs is used to manage cache files.
600 600 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
601 601 cachevfs.createmode = store.createmode
602 602 # The cache vfs is used to manage cache files related to the working copy
603 603 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
604 604 wcachevfs.createmode = store.createmode
605 605
606 606 # Now resolve the type for the repository object. We do this by repeatedly
607 607 # calling a factory function to produce types for specific aspects of the
608 608 # repo's operation. The aggregate returned types are used as base classes
609 609 # for a dynamically-derived type, which will represent our new repository.
610 610
611 611 bases = []
612 612 extrastate = {}
613 613
614 614 for iface, fn in REPO_INTERFACES:
615 615 # We pass all potentially useful state to give extensions tons of
616 616 # flexibility.
617 617 typ = fn()(
618 618 ui=ui,
619 619 intents=intents,
620 620 requirements=requirements,
621 621 features=features,
622 622 wdirvfs=wdirvfs,
623 623 hgvfs=hgvfs,
624 624 store=store,
625 625 storevfs=storevfs,
626 626 storeoptions=storevfs.options,
627 627 cachevfs=cachevfs,
628 628 wcachevfs=wcachevfs,
629 629 extensionmodulenames=extensionmodulenames,
630 630 extrastate=extrastate,
631 631 baseclasses=bases,
632 632 )
633 633
634 634 if not isinstance(typ, type):
635 635 raise error.ProgrammingError(
636 636 b'unable to construct type for %s' % iface
637 637 )
638 638
639 639 bases.append(typ)
640 640
641 641 # type() allows you to use characters in type names that wouldn't be
642 642 # recognized as Python symbols in source code. We abuse that to add
643 643 # rich information about our constructed repo.
644 644 name = pycompat.sysstr(
645 645 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
646 646 )
647 647
648 648 cls = type(name, tuple(bases), {})
649 649
650 650 return cls(
651 651 baseui=baseui,
652 652 ui=ui,
653 653 origroot=path,
654 654 wdirvfs=wdirvfs,
655 655 hgvfs=hgvfs,
656 656 requirements=requirements,
657 657 supportedrequirements=supportedrequirements,
658 658 sharedpath=storebasepath,
659 659 store=store,
660 660 cachevfs=cachevfs,
661 661 wcachevfs=wcachevfs,
662 662 features=features,
663 663 intents=intents,
664 664 )
665 665
666 666
667 667 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
668 668 """Load hgrc files/content into a ui instance.
669 669
670 670 This is called during repository opening to load any additional
671 671 config files or settings relevant to the current repository.
672 672
673 673 Returns a bool indicating whether any additional configs were loaded.
674 674
675 675 Extensions should monkeypatch this function to modify how per-repo
676 676 configs are loaded. For example, an extension may wish to pull in
677 677 configs from alternate files or sources.
678 678 """
679 679 try:
680 680 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
681 681 return True
682 682 except IOError:
683 683 return False
684 684
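A sketch of the monkeypatching suggested above (hypothetical extension; ``hgrc-extra`` is an invented file name):

    from mercurial import extensions, localrepo

    def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
        loaded = orig(ui, wdirvfs, hgvfs, requirements)
        try:
            # pull configs from an additional, extension-defined file
            ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
            return True
        except IOError:
            return loaded

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)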
685 685
686 686 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
687 687 """Perform additional actions after .hg/hgrc is loaded.
688 688
689 689 This function is called during repository loading immediately after
690 690 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
691 691
692 692 The function can be used to validate configs, automatically add
693 693 options (including extensions) based on requirements, etc.
694 694 """
695 695
696 696 # Map of requirements to list of extensions to load automatically when
697 697 # requirement is present.
698 698 autoextensions = {
699 699 b'largefiles': [b'largefiles'],
700 700 b'lfs': [b'lfs'],
701 701 }
702 702
703 703 for requirement, names in sorted(autoextensions.items()):
704 704 if requirement not in requirements:
705 705 continue
706 706
707 707 for name in names:
708 708 if not ui.hasconfig(b'extensions', name):
709 709 ui.setconfig(b'extensions', name, b'', source=b'autoload')
710 710
711 711
712 712 def gathersupportedrequirements(ui):
713 713 """Determine the complete set of recognized requirements."""
714 714 # Start with all requirements supported by this file.
715 715 supported = set(localrepository._basesupported)
716 716
717 717 # Execute ``featuresetupfuncs`` entries if they belong to an extension
718 718 # relevant to this ui instance.
719 719 modules = {m.__name__ for n, m in extensions.extensions(ui)}
720 720
721 721 for fn in featuresetupfuncs:
722 722 if fn.__module__ in modules:
723 723 fn(ui, supported)
724 724
725 725 # Add derived requirements from registered compression engines.
726 726 for name in util.compengines:
727 727 engine = util.compengines[name]
728 728 if engine.available() and engine.revlogheader():
729 729 supported.add(b'exp-compression-%s' % name)
730 730 if engine.name() == b'zstd':
731 731 supported.add(b'revlog-compression-zstd')
732 732
733 733 return supported
734 734
735 735
736 736 def ensurerequirementsrecognized(requirements, supported):
737 737 """Validate that a set of local requirements is recognized.
738 738
739 739 Receives a set of requirements. Raises an ``error.RepoError`` if there
740 740 exists any requirement in that set that currently loaded code doesn't
741 741 recognize.
742 742
743 743 Returns nothing on success.
744 744 """
745 745 missing = set()
746 746
747 747 for requirement in requirements:
748 748 if requirement in supported:
749 749 continue
750 750
751 751 if not requirement or not requirement[0:1].isalnum():
752 752 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
753 753
754 754 missing.add(requirement)
755 755
756 756 if missing:
757 757 raise error.RequirementError(
758 758 _(b'repository requires features unknown to this Mercurial: %s')
759 759 % b' '.join(sorted(missing)),
760 760 hint=_(
761 761 b'see https://mercurial-scm.org/wiki/MissingRequirement '
762 762 b'for more information'
763 763 ),
764 764 )
765 765
766 766
767 767 def ensurerequirementscompatible(ui, requirements):
768 768 """Validates that a set of recognized requirements is mutually compatible.
769 769
770 770 Some requirements may not be compatible with others or require
771 771 config options that aren't enabled. This function is called during
772 772 repository opening to ensure that the set of requirements needed
773 773 to open a repository is sane and compatible with config options.
774 774
775 775 Extensions can monkeypatch this function to perform additional
776 776 checking.
777 777
778 778 ``error.RepoError`` should be raised on failure.
779 779 """
780 780 if b'exp-sparse' in requirements and not sparse.enabled:
781 781 raise error.RepoError(
782 782 _(
783 783 b'repository is using sparse feature but '
784 784 b'sparse is not enabled; enable the '
785 785 b'"sparse" extensions to access'
786 786 )
787 787 )
788 788
789 789
790 790 def makestore(requirements, path, vfstype):
791 791 """Construct a storage object for a repository."""
792 792 if b'store' in requirements:
793 793 if b'fncache' in requirements:
794 794 return storemod.fncachestore(
795 795 path, vfstype, b'dotencode' in requirements
796 796 )
797 797
798 798 return storemod.encodedstore(path, vfstype)
799 799
800 800 return storemod.basicstore(path, vfstype)
801 801
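Summarizing the requirement-to-store mapping above (a sketch of the outcomes only):

    # {b'store', b'fncache', b'dotencode'} -> fncachestore (dotencode=True)
    # {b'store', b'fncache'}               -> fncachestore (dotencode=False)
    # {b'store'}                           -> encodedstore
    # set()  (very old repositories)       -> basicstore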
802 802
803 803 def resolvestorevfsoptions(ui, requirements, features):
804 804 """Resolve the options to pass to the store vfs opener.
805 805
806 806 The returned dict is used to influence behavior of the storage layer.
807 807 """
808 808 options = {}
809 809
810 810 if b'treemanifest' in requirements:
811 811 options[b'treemanifest'] = True
812 812
813 813 # experimental config: format.manifestcachesize
814 814 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
815 815 if manifestcachesize is not None:
816 816 options[b'manifestcachesize'] = manifestcachesize
817 817
818 818 # In the absence of another requirement superseding a revlog-related
819 819 # requirement, we have to assume the repo is using revlog version 0.
820 820 # This revlog format is super old and we don't bother trying to parse
821 821 # opener options for it because those options wouldn't do anything
822 822 # meaningful on such old repos.
823 823 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
824 824 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
825 825 else: # explicitly mark repo as using revlogv0
826 826 options[b'revlogv0'] = True
827 827
828 828 if COPIESSDC_REQUIREMENT in requirements:
829 829 options[b'copies-storage'] = b'changeset-sidedata'
830 830 else:
831 831 writecopiesto = ui.config(b'experimental', b'copies.write-to')
832 832 copiesextramode = (b'changeset-only', b'compatibility')
833 833 if writecopiesto in copiesextramode:
834 834 options[b'copies-storage'] = b'extra'
835 835
836 836 return options
837 837
838 838
839 839 def resolverevlogstorevfsoptions(ui, requirements, features):
840 840 """Resolve opener options specific to revlogs."""
841 841
842 842 options = {}
843 843 options[b'flagprocessors'] = {}
844 844
845 845 if b'revlogv1' in requirements:
846 846 options[b'revlogv1'] = True
847 847 if REVLOGV2_REQUIREMENT in requirements:
848 848 options[b'revlogv2'] = True
849 849
850 850 if b'generaldelta' in requirements:
851 851 options[b'generaldelta'] = True
852 852
853 853 # experimental config: format.chunkcachesize
854 854 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
855 855 if chunkcachesize is not None:
856 856 options[b'chunkcachesize'] = chunkcachesize
857 857
858 858 deltabothparents = ui.configbool(
859 859 b'storage', b'revlog.optimize-delta-parent-choice'
860 860 )
861 861 options[b'deltabothparents'] = deltabothparents
862 862
863 863 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
864 864 lazydeltabase = False
865 865 if lazydelta:
866 866 lazydeltabase = ui.configbool(
867 867 b'storage', b'revlog.reuse-external-delta-parent'
868 868 )
869 869 if lazydeltabase is None:
870 870 lazydeltabase = not scmutil.gddeltaconfig(ui)
871 871 options[b'lazydelta'] = lazydelta
872 872 options[b'lazydeltabase'] = lazydeltabase
873 873
874 874 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
875 875 if 0 <= chainspan:
876 876 options[b'maxdeltachainspan'] = chainspan
877 877
878 878 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
879 879 if mmapindexthreshold is not None:
880 880 options[b'mmapindexthreshold'] = mmapindexthreshold
881 881
882 882 withsparseread = ui.configbool(b'experimental', b'sparse-read')
883 883 srdensitythres = float(
884 884 ui.config(b'experimental', b'sparse-read.density-threshold')
885 885 )
886 886 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
887 887 options[b'with-sparse-read'] = withsparseread
888 888 options[b'sparse-read-density-threshold'] = srdensitythres
889 889 options[b'sparse-read-min-gap-size'] = srmingapsize
890 890
891 891 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
892 892 options[b'sparse-revlog'] = sparserevlog
893 893 if sparserevlog:
894 894 options[b'generaldelta'] = True
895 895
896 896 sidedata = SIDEDATA_REQUIREMENT in requirements
897 897 options[b'side-data'] = sidedata
898 898
899 899 maxchainlen = None
900 900 if sparserevlog:
901 901 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
902 902 # experimental config: format.maxchainlen
903 903 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
904 904 if maxchainlen is not None:
905 905 options[b'maxchainlen'] = maxchainlen
906 906
907 907 for r in requirements:
908 908 # we allow multiple compression engine requirements to co-exist because,
909 909 # strictly speaking, revlog seems to support mixed compression styles.
910 910 #
911 911 # The compression used for new entries will be "the last one"
912 912 prefix = r.startswith
913 913 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
914 914 options[b'compengine'] = r.split(b'-', 2)[2]
915 915
916 916 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
917 917 if options[b'zlib.level'] is not None:
918 918 if not (0 <= options[b'zlib.level'] <= 9):
919 919 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
920 920 raise error.Abort(msg % options[b'zlib.level'])
921 921 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
922 922 if options[b'zstd.level'] is not None:
923 923 if not (0 <= options[b'zstd.level'] <= 22):
924 924 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
925 925 raise error.Abort(msg % options[b'zstd.level'])
926 926
927 927 if repository.NARROW_REQUIREMENT in requirements:
928 928 options[b'enableellipsis'] = True
929 929
930 930 return options
931 931
932 932
933 933 def makemain(**kwargs):
934 934 """Produce a type conforming to ``ilocalrepositorymain``."""
935 935 return localrepository
936 936
937 937
938 938 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
939 939 class revlogfilestorage(object):
940 940 """File storage when using revlogs."""
941 941
942 942 def file(self, path):
943 943 if path[0] == b'/':
944 944 path = path[1:]
945 945
946 946 return filelog.filelog(self.svfs, path)
947 947
948 948
949 949 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
950 950 class revlognarrowfilestorage(object):
951 951 """File storage when using revlogs and narrow files."""
952 952
953 953 def file(self, path):
954 954 if path[0] == b'/':
955 955 path = path[1:]
956 956
957 957 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
958 958
959 959
960 960 def makefilestorage(requirements, features, **kwargs):
961 961 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
962 962 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
963 963 features.add(repository.REPO_FEATURE_STREAM_CLONE)
964 964
965 965 if repository.NARROW_REQUIREMENT in requirements:
966 966 return revlognarrowfilestorage
967 967 else:
968 968 return revlogfilestorage
969 969
970 970
971 971 # List of repository interfaces and factory functions for them. Each
972 972 # will be called in order during ``makelocalrepository()`` to iteratively
973 973 # derive the final type for a local repository instance. We capture the
974 974 # function as a lambda so we don't hold a reference and the module-level
975 975 # functions can be wrapped.
976 976 REPO_INTERFACES = [
977 977 (repository.ilocalrepositorymain, lambda: makemain),
978 978 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
979 979 ]
980 980
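As an illustration of the wrapping this enables, a hypothetical extension mixing an extra base class into every derived repo type (all names below are invented for the example):

    from mercurial import extensions, localrepo

    class _extramixin(object):
        def examplesummary(self):
            self.ui.write(b'%d heads\n' % len(self.heads()))

    def _makemain(orig, **kwargs):
        # extend the main repository type returned by the factory
        return type('extmain', (_extramixin, orig(**kwargs)), {})

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'makemain', _makemain)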
981 981
982 982 @interfaceutil.implementer(repository.ilocalrepositorymain)
983 983 class localrepository(object):
984 984 """Main class for representing local repositories.
985 985
986 986 All local repositories are instances of this class.
987 987
988 988 Constructed on its own, instances of this class are not usable as
989 989 repository objects. To obtain a usable repository object, call
990 990 ``hg.repository()``, ``localrepo.instance()``, or
991 991 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
992 992 ``instance()`` adds support for creating new repositories.
993 993 ``hg.repository()`` adds more extension integration, including calling
994 994 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
995 995 used.
996 996 """
997 997
998 998 # obsolete experimental requirements:
999 999 # - manifestv2: An experimental new manifest format that allowed
1000 1000 # for stem compression of long paths. The experiment ended up not
1001 1001 # being successful (repository sizes went up due to worse delta
1002 1002 # chains), and the code was deleted in 4.6.
1003 1003 supportedformats = {
1004 1004 b'revlogv1',
1005 1005 b'generaldelta',
1006 1006 b'treemanifest',
1007 1007 COPIESSDC_REQUIREMENT,
1008 1008 REVLOGV2_REQUIREMENT,
1009 1009 SIDEDATA_REQUIREMENT,
1010 1010 SPARSEREVLOG_REQUIREMENT,
1011 1011 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1012 1012 }
1013 1013 _basesupported = supportedformats | {
1014 1014 b'store',
1015 1015 b'fncache',
1016 1016 b'shared',
1017 1017 b'relshared',
1018 1018 b'dotencode',
1019 1019 b'exp-sparse',
1020 1020 b'internal-phase',
1021 1021 }
1022 1022
1023 1023 # list of prefixes for files which can be written without 'wlock'
1024 1024 # Extensions should extend this list when needed
1025 1025 _wlockfreeprefix = {
1026 1026 # We might consider requiring 'wlock' for the next
1027 1027 # two, but pretty much all the existing code assumes
1028 1028 # wlock is not needed so we keep them excluded for
1029 1029 # now.
1030 1030 b'hgrc',
1031 1031 b'requires',
1032 1032 # XXX cache is a complicated business; someone
1033 1033 # should investigate this in depth at some point
1034 1034 b'cache/',
1035 1035 # XXX shouldn't dirstate be covered by the wlock?
1036 1036 b'dirstate',
1037 1037 # XXX bisect was still a bit too messy at the time
1038 1038 # this changeset was introduced. Someone should fix
1039 1039 # the remaining bit and drop this line
1040 1040 b'bisect.state',
1041 1041 }
1042 1042
1043 1043 def __init__(
1044 1044 self,
1045 1045 baseui,
1046 1046 ui,
1047 1047 origroot,
1048 1048 wdirvfs,
1049 1049 hgvfs,
1050 1050 requirements,
1051 1051 supportedrequirements,
1052 1052 sharedpath,
1053 1053 store,
1054 1054 cachevfs,
1055 1055 wcachevfs,
1056 1056 features,
1057 1057 intents=None,
1058 1058 ):
1059 1059 """Create a new local repository instance.
1060 1060
1061 1061 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1062 1062 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1063 1063 object.
1064 1064
1065 1065 Arguments:
1066 1066
1067 1067 baseui
1068 1068 ``ui.ui`` instance that ``ui`` argument was based off of.
1069 1069
1070 1070 ui
1071 1071 ``ui.ui`` instance for use by the repository.
1072 1072
1073 1073 origroot
1074 1074 ``bytes`` path to working directory root of this repository.
1075 1075
1076 1076 wdirvfs
1077 1077 ``vfs.vfs`` rooted at the working directory.
1078 1078
1079 1079 hgvfs
1080 1080 ``vfs.vfs`` rooted at .hg/
1081 1081
1082 1082 requirements
1083 1083 ``set`` of bytestrings representing repository opening requirements.
1084 1084
1085 1085 supportedrequirements
1086 1086 ``set`` of bytestrings representing repository requirements that we
1087 1087 know how to open. May be a superset of ``requirements``.
1088 1088
1089 1089 sharedpath
1090 1090 ``bytes`` Defining path to storage base directory. Points to a
1091 1091 ``.hg/`` directory somewhere.
1092 1092
1093 1093 store
1094 1094 ``store.basicstore`` (or derived) instance providing access to
1095 1095 versioned storage.
1096 1096
1097 1097 cachevfs
1098 1098 ``vfs.vfs`` used for cache files.
1099 1099
1100 1100 wcachevfs
1101 1101 ``vfs.vfs`` used for cache files related to the working copy.
1102 1102
1103 1103 features
1104 1104 ``set`` of bytestrings defining features/capabilities of this
1105 1105 instance.
1106 1106
1107 1107 intents
1108 1108 ``set`` of system strings indicating what this repo will be used
1109 1109 for.
1110 1110 """
1111 1111 self.baseui = baseui
1112 1112 self.ui = ui
1113 1113 self.origroot = origroot
1114 1114 # vfs rooted at working directory.
1115 1115 self.wvfs = wdirvfs
1116 1116 self.root = wdirvfs.base
1117 1117 # vfs rooted at .hg/. Used to access most non-store paths.
1118 1118 self.vfs = hgvfs
1119 1119 self.path = hgvfs.base
1120 1120 self.requirements = requirements
1121 1121 self.supported = supportedrequirements
1122 1122 self.sharedpath = sharedpath
1123 1123 self.store = store
1124 1124 self.cachevfs = cachevfs
1125 1125 self.wcachevfs = wcachevfs
1126 1126 self.features = features
1127 1127
1128 1128 self.filtername = None
1129 1129
1130 1130 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1131 1131 b'devel', b'check-locks'
1132 1132 ):
1133 1133 self.vfs.audit = self._getvfsward(self.vfs.audit)
1134 1134 # A list of callbacks to shape the phase if no data were found.
1135 1135 # Callbacks are in the form: func(repo, roots) --> processed root.
1136 1136 # This list is to be filled by extensions during repo setup
1137 1137 self._phasedefaults = []
1138 1138
1139 1139 color.setup(self.ui)
1140 1140
1141 1141 self.spath = self.store.path
1142 1142 self.svfs = self.store.vfs
1143 1143 self.sjoin = self.store.join
1144 1144 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1145 1145 b'devel', b'check-locks'
1146 1146 ):
1147 1147 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1148 1148 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1149 1149 else: # standard vfs
1150 1150 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1151 1151
1152 1152 self._dirstatevalidatewarned = False
1153 1153
1154 1154 self._branchcaches = branchmap.BranchMapCache()
1155 1155 self._revbranchcache = None
1156 1156 self._filterpats = {}
1157 1157 self._datafilters = {}
1158 1158 self._transref = self._lockref = self._wlockref = None
1159 1159
1160 1160 # A cache for various files under .hg/ that tracks file changes,
1161 1161 # (used by the filecache decorator)
1162 1162 #
1163 1163 # Maps a property name to its util.filecacheentry
1164 1164 self._filecache = {}
1165 1165
1166 1166 # hold sets of revisions to be filtered
1167 1167 # should be cleared when something might have changed the filter value:
1168 1168 # - new changesets,
1169 1169 # - phase change,
1170 1170 # - new obsolescence marker,
1171 1171 # - working directory parent change,
1172 1172 # - bookmark changes
1173 1173 self.filteredrevcache = {}
1174 1174
1175 1175 # post-dirstate-status hooks
1176 1176 self._postdsstatus = []
1177 1177
1178 1178 # generic mapping between names and nodes
1179 1179 self.names = namespaces.namespaces()
1180 1180
1181 1181 # Key to signature value.
1182 1182 self._sparsesignaturecache = {}
1183 1183 # Signature to cached matcher instance.
1184 1184 self._sparsematchercache = {}
1185 1185
1186 1186 self._extrafilterid = repoview.extrafilter(ui)
1187 1187
1188 1188 self.filecopiesmode = None
1189 1189 if COPIESSDC_REQUIREMENT in self.requirements:
1190 1190 self.filecopiesmode = b'changeset-sidedata'
1191 1191
1192 1192 def _getvfsward(self, origfunc):
1193 1193 """build a ward for self.vfs"""
1194 1194 rref = weakref.ref(self)
1195 1195
1196 1196 def checkvfs(path, mode=None):
1197 1197 ret = origfunc(path, mode=mode)
1198 1198 repo = rref()
1199 1199 if (
1200 1200 repo is None
1201 1201 or not util.safehasattr(repo, b'_wlockref')
1202 1202 or not util.safehasattr(repo, b'_lockref')
1203 1203 ):
1204 1204 return
1205 1205 if mode in (None, b'r', b'rb'):
1206 1206 return
1207 1207 if path.startswith(repo.path):
1208 1208 # truncate name relative to the repository (.hg)
1209 1209 path = path[len(repo.path) + 1 :]
1210 1210 if path.startswith(b'cache/'):
1211 1211 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1212 1212 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1213 1213 if path.startswith(b'journal.') or path.startswith(b'undo.'):
1214 1214 # journal is covered by 'lock'
1215 1215 if repo._currentlock(repo._lockref) is None:
1216 1216 repo.ui.develwarn(
1217 1217 b'write with no lock: "%s"' % path,
1218 1218 stacklevel=3,
1219 1219 config=b'check-locks',
1220 1220 )
1221 1221 elif repo._currentlock(repo._wlockref) is None:
1222 1222 # rest of vfs files are covered by 'wlock'
1223 1223 #
1224 1224 # exclude special files
1225 1225 for prefix in self._wlockfreeprefix:
1226 1226 if path.startswith(prefix):
1227 1227 return
1228 1228 repo.ui.develwarn(
1229 1229 b'write with no wlock: "%s"' % path,
1230 1230 stacklevel=3,
1231 1231 config=b'check-locks',
1232 1232 )
1233 1233 return ret
1234 1234
1235 1235 return checkvfs
1236 1236
1237 1237 def _getsvfsward(self, origfunc):
1238 1238 """build a ward for self.svfs"""
1239 1239 rref = weakref.ref(self)
1240 1240
1241 1241 def checksvfs(path, mode=None):
1242 1242 ret = origfunc(path, mode=mode)
1243 1243 repo = rref()
1244 1244 if repo is None or not util.safehasattr(repo, b'_lockref'):
1245 1245 return
1246 1246 if mode in (None, b'r', b'rb'):
1247 1247 return
1248 1248 if path.startswith(repo.sharedpath):
1249 1249 # truncate name relative to the repository (.hg)
1250 1250 path = path[len(repo.sharedpath) + 1 :]
1251 1251 if repo._currentlock(repo._lockref) is None:
1252 1252 repo.ui.develwarn(
1253 1253 b'write with no lock: "%s"' % path, stacklevel=4
1254 1254 )
1255 1255 return ret
1256 1256
1257 1257 return checksvfs
1258 1258
1259 1259 def close(self):
1260 1260 self._writecaches()
1261 1261
1262 1262 def _writecaches(self):
1263 1263 if self._revbranchcache:
1264 1264 self._revbranchcache.write()
1265 1265
1266 1266 def _restrictcapabilities(self, caps):
1267 1267 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1268 1268 caps = set(caps)
1269 1269 capsblob = bundle2.encodecaps(
1270 1270 bundle2.getrepocaps(self, role=b'client')
1271 1271 )
1272 1272 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1273 1273 return caps
1274 1274
1275 1275 def _writerequirements(self):
1276 1276 scmutil.writerequires(self.vfs, self.requirements)
1277 1277
1278 1278 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
1279 1279 # self -> auditor -> self._checknested -> self
1280 1280
1281 1281 @property
1282 1282 def auditor(self):
1283 1283 # This is only used by context.workingctx.match in order to
1284 1284 # detect files in subrepos.
1285 1285 return pathutil.pathauditor(self.root, callback=self._checknested)
1286 1286
1287 1287 @property
1288 1288 def nofsauditor(self):
1289 1289 # This is only used by context.basectx.match in order to detect
1290 1290 # files in subrepos.
1291 1291 return pathutil.pathauditor(
1292 1292 self.root, callback=self._checknested, realfs=False, cached=True
1293 1293 )
1294 1294
1295 1295 def _checknested(self, path):
1296 1296 """Determine if path is a legal nested repository."""
1297 1297 if not path.startswith(self.root):
1298 1298 return False
1299 1299 subpath = path[len(self.root) + 1 :]
1300 1300 normsubpath = util.pconvert(subpath)
1301 1301
1302 1302 # XXX: Checking against the current working copy is wrong in
1303 1303 # the sense that it can reject things like
1304 1304 #
1305 1305 # $ hg cat -r 10 sub/x.txt
1306 1306 #
1307 1307 # if sub/ is no longer a subrepository in the working copy
1308 1308 # parent revision.
1309 1309 #
1310 1310 # However, it can of course also allow things that would have
1311 1311 # been rejected before, such as the above cat command if sub/
1312 1312 # is a subrepository now, but was a normal directory before.
1313 1313 # The old path auditor would have rejected by mistake since it
1314 1314 # panics when it sees sub/.hg/.
1315 1315 #
1316 1316 # All in all, checking against the working copy seems sensible
1317 1317 # since we want to prevent access to nested repositories on
1318 1318 # the filesystem *now*.
1319 1319 ctx = self[None]
1320 1320 parts = util.splitpath(subpath)
1321 1321 while parts:
1322 1322 prefix = b'/'.join(parts)
1323 1323 if prefix in ctx.substate:
1324 1324 if prefix == normsubpath:
1325 1325 return True
1326 1326 else:
1327 1327 sub = ctx.sub(prefix)
1328 1328 return sub.checknested(subpath[len(prefix) + 1 :])
1329 1329 else:
1330 1330 parts.pop()
1331 1331 return False
1332 1332
1333 1333 def peer(self):
1334 1334 return localpeer(self) # not cached to avoid reference cycle
1335 1335
1336 1336 def unfiltered(self):
1337 1337 """Return unfiltered version of the repository
1338 1338
1339 1339 Intended to be overwritten by filtered repo."""
1340 1340 return self
1341 1341
1342 1342 def filtered(self, name, visibilityexceptions=None):
1343 1343 """Return a filtered version of a repository
1344 1344
1345 1345 The `name` parameter is the identifier of the requested view. This
1346 1346 will return a repoview object set "exactly" to the specified view.
1347 1347
1348 1348 This function does not apply recursive filtering to a repository. For
1349 1349 example calling `repo.filtered("served")` will return a repoview using
1350 1350 the "served" view, regardless of the initial view used by `repo`.
1351 1351
1352 1352 In other words, there is always only one level of `repoview` "filtering".
1353 1353 """
1354 1354 if self._extrafilterid is not None and b'%' not in name:
1355 1355 name = name + b'%' + self._extrafilterid
1356 1356
1357 1357 cls = repoview.newtype(self.unfiltered().__class__)
1358 1358 return cls(self, name, visibilityexceptions)
1359 1359
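For illustration, the non-recursive behavior described in the docstring:

    served = repo.filtered(b'served')
    # still one level of filtering: a 'visible' view of the base repo,
    # not a 'visible' view stacked on top of 'served'
    visible = served.filtered(b'visible')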
1360 1360 @mixedrepostorecache(
1361 1361 (b'bookmarks', b'plain'),
1362 1362 (b'bookmarks.current', b'plain'),
1363 1363 (b'bookmarks', b''),
1364 1364 (b'00changelog.i', b''),
1365 1365 )
1366 1366 def _bookmarks(self):
1367 1367 # Since the multiple files involved in the transaction cannot be
1368 1368 # written atomically (with current repository format), there is a race
1369 1369 # condition here.
1370 1370 #
1371 1371 # 1) changelog content A is read
1372 1372 # 2) outside transaction update changelog to content B
1373 1373 # 3) outside transaction update bookmark file referring to content B
1374 1374 # 4) bookmarks file content is read and filtered against changelog-A
1375 1375 #
1376 1376 # When this happens, bookmarks against nodes missing from A are dropped.
1377 1377 #
1378 1378 # Having this happen during read is not great, but it becomes worse
1379 1379 # when this happens during write, because the bookmarks to the "unknown"
1380 1380 # nodes will be dropped for good. However, writes happen within locks.
1381 1381 # This locking makes it possible to have a race-free consistent read.
1382 1382 # For this purpose, data read from disk before locking is
1383 1383 # "invalidated" right after the locks are taken. These invalidations are
1384 1384 # "light": the `filecache` mechanism keeps the data in memory and will
1385 1385 # reuse it if the underlying files did not change. Not parsing the
1386 1386 # same data multiple times helps performance.
1387 1387 #
1388 1388 # Unfortunately in the case described above, the files tracked by the
1389 1389 # bookmarks file cache might not have changed, but the in-memory
1390 1390 # content is still "wrong" because we used an older changelog content
1391 1391 # to process the on-disk data. So after locking, the changelog would be
1392 1392 # refreshed but `_bookmarks` would be preserved.
1393 1393 # Adding `00changelog.i` to the list of tracked files is not
1394 1394 # enough, because at the time we build the content for `_bookmarks` in
1395 1395 # (4), the changelog file has already diverged from the content used
1396 1396 # for loading `changelog` in (1).
1397 1397 #
1398 1398 # To prevent the issue, we force the changelog to be explicitly
1399 1399 # reloaded while computing `_bookmarks`. The data race can still happen
1400 1400 # without the lock (with a narrower window), but it would no longer go
1401 1401 # undetected during the lock time refresh.
1402 1402 #
1403 1403 # The new schedule is as follows:
1404 1404 #
1405 1405 # 1) filecache logic detect that `_bookmarks` needs to be computed
1406 1406 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1407 1407 # 3) We force `changelog` filecache to be tested
1408 1408 # 4) cachestat for `changelog` are captured (for changelog)
1409 1409 # 5) `_bookmarks` is computed and cached
1410 1410 #
1411 1411 # The step in (3) ensures we have a changelog at least as recent as the
1412 1412 # cache stat computed in (1). As a result, at locking time:
1413 1413 # * if the changelog did not change since (1) -> we can reuse the data
1414 1414 # * otherwise -> the bookmarks get refreshed.
1415 1415 self._refreshchangelog()
1416 1416 return bookmarks.bmstore(self)
1417 1417
1418 1418 def _refreshchangelog(self):
1419 1419 """make sure the in memory changelog match the on-disk one"""
1420 1420 if 'changelog' in vars(self) and self.currenttransaction() is None:
1421 1421 del self.changelog
1422 1422
1423 1423 @property
1424 1424 def _activebookmark(self):
1425 1425 return self._bookmarks.active
1426 1426
1427 1427 # _phasesets depend on changelog. What we need is to call
1428 1428 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1429 1429 # can't be easily expressed in the filecache mechanism.
1430 1430 @storecache(b'phaseroots', b'00changelog.i')
1431 1431 def _phasecache(self):
1432 1432 return phases.phasecache(self, self._phasedefaults)
1433 1433
1434 1434 @storecache(b'obsstore')
1435 1435 def obsstore(self):
1436 1436 return obsolete.makestore(self.ui, self)
1437 1437
1438 1438 @storecache(b'00changelog.i')
1439 1439 def changelog(self):
1440 1440 return self.store.changelog(txnutil.mayhavepending(self.root))
1441 1441
1442 1442 @storecache(b'00manifest.i')
1443 1443 def manifestlog(self):
1444 1444 return self.store.manifestlog(self, self._storenarrowmatch)
1445 1445
1446 1446 @repofilecache(b'dirstate')
1447 1447 def dirstate(self):
1448 1448 return self._makedirstate()
1449 1449
1450 1450 def _makedirstate(self):
1451 1451 """Extension point for wrapping the dirstate per-repo."""
1452 1452 sparsematchfn = lambda: sparse.matcher(self)
1453 1453
1454 1454 return dirstate.dirstate(
1455 1455 self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
1456 1456 )
1457 1457
1458 1458 def _dirstatevalidate(self, node):
1459 1459 try:
1460 1460 self.changelog.rev(node)
1461 1461 return node
1462 1462 except error.LookupError:
1463 1463 if not self._dirstatevalidatewarned:
1464 1464 self._dirstatevalidatewarned = True
1465 1465 self.ui.warn(
1466 1466 _(b"warning: ignoring unknown working parent %s!\n")
1467 1467 % short(node)
1468 1468 )
1469 1469 return nullid
1470 1470
1471 1471 @storecache(narrowspec.FILENAME)
1472 1472 def narrowpats(self):
1473 1473 """matcher patterns for this repository's narrowspec
1474 1474
1475 1475 A tuple of (includes, excludes).
1476 1476 """
1477 1477 return narrowspec.load(self)
1478 1478
1479 1479 @storecache(narrowspec.FILENAME)
1480 1480 def _storenarrowmatch(self):
1481 1481 if repository.NARROW_REQUIREMENT not in self.requirements:
1482 1482 return matchmod.always()
1483 1483 include, exclude = self.narrowpats
1484 1484 return narrowspec.match(self.root, include=include, exclude=exclude)
1485 1485
1486 1486 @storecache(narrowspec.FILENAME)
1487 1487 def _narrowmatch(self):
1488 1488 if repository.NARROW_REQUIREMENT not in self.requirements:
1489 1489 return matchmod.always()
1490 1490 narrowspec.checkworkingcopynarrowspec(self)
1491 1491 include, exclude = self.narrowpats
1492 1492 return narrowspec.match(self.root, include=include, exclude=exclude)
1493 1493
1494 1494 def narrowmatch(self, match=None, includeexact=False):
1495 1495 """matcher corresponding the the repo's narrowspec
1496 1496
1497 1497 If `match` is given, then that will be intersected with the narrow
1498 1498 matcher.
1499 1499
1500 1500 If `includeexact` is True, then any exact matches from `match` will
1501 1501 be included even if they're outside the narrowspec.
1502 1502 """
1503 1503 if match:
1504 1504 if includeexact and not self._narrowmatch.always():
1505 1505 # do not exclude explicitly-specified paths so that they can
1506 1506 # be warned later on
1507 1507 em = matchmod.exact(match.files())
1508 1508 nm = matchmod.unionmatcher([self._narrowmatch, em])
1509 1509 return matchmod.intersectmatchers(match, nm)
1510 1510 return matchmod.intersectmatchers(match, self._narrowmatch)
1511 1511 return self._narrowmatch
1512 1512
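A usage sketch (the pattern list is hypothetical):

    m = matchmod.match(repo.root, b'', [b'path:src/a.c', b'glob:doc/**'])
    # intersect with the narrowspec, keeping the exact paths so callers
    # can warn about files falling outside the narrowspec
    nm = repo.narrowmatch(m, includeexact=True)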
1513 1513 def setnarrowpats(self, newincludes, newexcludes):
1514 1514 narrowspec.save(self, newincludes, newexcludes)
1515 1515 self.invalidate(clearfilecache=True)
1516 1516
1517 1517 def __getitem__(self, changeid):
1518 # dealing with special cases
1518 1519 if changeid is None:
1519 1520 return context.workingctx(self)
1520 1521 if isinstance(changeid, context.basectx):
1521 1522 return changeid
1523
1524 # dealing with multiple revisions
1522 1525 if isinstance(changeid, slice):
1523 1526 # wdirrev isn't contiguous so the slice shouldn't include it
1524 1527 return [
1525 1528 self[i]
1526 1529 for i in pycompat.xrange(*changeid.indices(len(self)))
1527 1530 if i not in self.changelog.filteredrevs
1528 1531 ]
1532
1533 # dealing with arbitrary values
1529 1534 try:
1530 1535 if isinstance(changeid, int):
1531 1536 node = self.changelog.node(changeid)
1532 1537 rev = changeid
1533 1538 elif changeid == b'null':
1534 1539 node = nullid
1535 1540 rev = nullrev
1536 1541 elif changeid == b'tip':
1537 1542 node = self.changelog.tip()
1538 1543 rev = self.changelog.rev(node)
1539 1544 elif changeid == b'.':
1540 1545 # this is a hack to delay/avoid loading obsmarkers
1541 1546 # when we know that '.' won't be hidden
1542 1547 node = self.dirstate.p1()
1543 1548 rev = self.unfiltered().changelog.rev(node)
1544 1549 elif len(changeid) == 20:
1545 1550 try:
1546 1551 node = changeid
1547 1552 rev = self.changelog.rev(changeid)
1548 1553 except error.FilteredLookupError:
1549 1554 changeid = hex(changeid) # for the error message
1550 1555 raise
1551 1556 except LookupError:
1552 1557 # check if it might have come from damaged dirstate
1553 1558 #
1554 1559 # XXX we could avoid the unfiltered if we had a recognizable
1555 1560 # exception for filtered changeset access
1556 1561 if (
1557 1562 self.local()
1558 1563 and changeid in self.unfiltered().dirstate.parents()
1559 1564 ):
1560 1565 msg = _(b"working directory has unknown parent '%s'!")
1561 1566 raise error.Abort(msg % short(changeid))
1562 1567 changeid = hex(changeid) # for the error message
1563 1568 raise
1564 1569
1565 1570 elif len(changeid) == 40:
1566 1571 node = bin(changeid)
1567 1572 rev = self.changelog.rev(node)
1568 1573 else:
1569 1574 raise error.ProgrammingError(
1570 1575 b"unsupported changeid '%s' of type %s"
1571 1576 % (changeid, pycompat.bytestr(type(changeid)))
1572 1577 )
1573 1578
1574 1579 return context.changectx(self, rev, node)
1575 1580
1576 1581 except (error.FilteredIndexError, error.FilteredLookupError):
1577 1582 raise error.FilteredRepoLookupError(
1578 1583 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1579 1584 )
1580 1585 except (IndexError, LookupError):
1581 1586 raise error.RepoLookupError(
1582 1587 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1583 1588 )
1584 1589 except error.WdirUnsupported:
1585 1590 return context.workingctx(self)
1586 1591
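# Usage sketch (illustrative, mirroring the branches above): repo[None]
# is the working context, repo[b'tip'] the tip changeset, and repo[0:5]
# a list of changectxs with filtered revisions skipped; integers,
# 20-byte binary nodes and 40-character hex strings are accepted too.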
1587 1592 def __contains__(self, changeid):
1588 1593 """True if the given changeid exists
1589 1594
1590 1595 error.AmbiguousPrefixLookupError is raised if an ambiguous node is
1591 1596 specified.
1592 1597 """
1593 1598 try:
1594 1599 self[changeid]
1595 1600 return True
1596 1601 except error.RepoLookupError:
1597 1602 return False
1598 1603
1599 1604 def __nonzero__(self):
1600 1605 return True
1601 1606
1602 1607 __bool__ = __nonzero__
1603 1608
1604 1609 def __len__(self):
1605 1610 # no need to pay the cost of repoview.changelog
1606 1611 unfi = self.unfiltered()
1607 1612 return len(unfi.changelog)
1608 1613
1609 1614 def __iter__(self):
1610 1615 return iter(self.changelog)
1611 1616
1612 1617 def revs(self, expr, *args):
1613 1618 '''Find revisions matching a revset.
1614 1619
1615 1620 The revset is specified as a string ``expr`` that may contain
1616 1621 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1617 1622
1618 1623 Revset aliases from the configuration are not expanded. To expand
1619 1624 user aliases, consider calling ``scmutil.revrange()`` or
1620 1625 ``repo.anyrevs([expr], user=True)``.
1621 1626
1622 1627 Returns a revset.abstractsmartset, which is a list-like interface
1623 1628 that contains integer revisions.
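
Example (illustrative)::

repo.revs(b'%ld and merge()', revlist)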
1624 1629 '''
1625 1630 tree = revsetlang.spectree(expr, *args)
1626 1631 return revset.makematcher(tree)(self)
1627 1632
1628 1633 def set(self, expr, *args):
1629 1634 '''Find revisions matching a revset and emit changectx instances.
1630 1635
1631 1636 This is a convenience wrapper around ``revs()`` that iterates the
1632 1637 result and is a generator of changectx instances.
1633 1638
1634 1639 Revset aliases from the configuration are not expanded. To expand
1635 1640 user aliases, consider calling ``scmutil.revrange()``.
1636 1641 '''
1637 1642 for r in self.revs(expr, *args):
1638 1643 yield self[r]
1639 1644
1640 1645 def anyrevs(self, specs, user=False, localalias=None):
1641 1646 '''Find revisions matching one of the given revsets.
1642 1647
1643 1648 Revset aliases from the configuration are not expanded by default. To
1644 1649 expand user aliases, specify ``user=True``. To provide some local
1645 1650 definitions overriding user aliases, set ``localalias`` to
1646 1651 ``{name: definitionstring}``.
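
A sketch with a local alias (illustrative)::

repo.anyrevs([b'master'], user=True, localalias={b'master': b'tip'})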
1647 1652 '''
1648 1653 if user:
1649 1654 m = revset.matchany(
1650 1655 self.ui,
1651 1656 specs,
1652 1657 lookup=revset.lookupfn(self),
1653 1658 localalias=localalias,
1654 1659 )
1655 1660 else:
1656 1661 m = revset.matchany(None, specs, localalias=localalias)
1657 1662 return m(self)
1658 1663
1659 1664 def url(self):
1660 1665 return b'file:' + self.root
1661 1666
1662 1667 def hook(self, name, throw=False, **args):
1663 1668 """Call a hook, passing this repo instance.
1664 1669
1665 1670 This is a convenience method to aid invoking hooks. Extensions likely
1666 1671 won't call this unless they have registered a custom hook or are
1667 1672 replacing code that is expected to call a hook.
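
For example (a sketch mirroring the transaction code below)::

repo.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)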
1668 1673 """
1669 1674 return hook.hook(self.ui, self, name, throw, **args)
1670 1675
1671 1676 @filteredpropertycache
1672 1677 def _tagscache(self):
1673 1678 '''Returns a tagscache object that contains various tags-related
1674 1679 caches.'''
1675 1680
1676 1681 # This simplifies its cache management by having one decorated
1677 1682 # function (this one) and the rest simply fetch things from it.
1678 1683 class tagscache(object):
1679 1684 def __init__(self):
1680 1685 # These two define the set of tags for this repository. tags
1681 1686 # maps tag name to node; tagtypes maps tag name to 'global' or
1682 1687 # 'local'. (Global tags are defined by .hgtags across all
1683 1688 # heads, and local tags are defined in .hg/localtags.)
1684 1689 # They constitute the in-memory cache of tags.
1685 1690 self.tags = self.tagtypes = None
1686 1691
1687 1692 self.nodetagscache = self.tagslist = None
1688 1693
1689 1694 cache = tagscache()
1690 1695 cache.tags, cache.tagtypes = self._findtags()
1691 1696
1692 1697 return cache
1693 1698
1694 1699 def tags(self):
1695 1700 '''return a mapping of tag to node'''
1696 1701 t = {}
1697 1702 if self.changelog.filteredrevs:
1698 1703 tags, tt = self._findtags()
1699 1704 else:
1700 1705 tags = self._tagscache.tags
1701 1706 rev = self.changelog.rev
1702 1707 for k, v in pycompat.iteritems(tags):
1703 1708 try:
1704 1709 # ignore tags to unknown nodes
1705 1710 rev(v)
1706 1711 t[k] = v
1707 1712 except (error.LookupError, ValueError):
1708 1713 pass
1709 1714 return t
1710 1715
1711 1716 def _findtags(self):
1712 1717 '''Do the hard work of finding tags. Return a pair of dicts
1713 1718 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1714 1719 maps tag name to a string like \'global\' or \'local\'.
1715 1720 Subclasses or extensions are free to add their own tags, but
1716 1721 should be aware that the returned dicts will be retained for the
1717 1722 duration of the localrepo object.'''
1718 1723
1719 1724 # XXX what tagtype should subclasses/extensions use? Currently
1720 1725 # mq and bookmarks add tags, but do not set the tagtype at all.
1721 1726 # Should each extension invent its own tag type? Should there
1722 1727 # be one tagtype for all such "virtual" tags? Or is the status
1723 1728 # quo fine?
1724 1729
1725 1730 # map tag name to (node, hist)
1726 1731 alltags = tagsmod.findglobaltags(self.ui, self)
1727 1732 # map tag name to tag type
1728 1733 tagtypes = dict((tag, b'global') for tag in alltags)
1729 1734
1730 1735 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1731 1736
1732 1737 # Build the return dicts. Have to re-encode tag names because
1733 1738 # the tags module always uses UTF-8 (in order not to lose info
1734 1739 # writing to the cache), but the rest of Mercurial wants them in
1735 1740 # local encoding.
1736 1741 tags = {}
1737 1742 for (name, (node, hist)) in pycompat.iteritems(alltags):
1738 1743 if node != nullid:
1739 1744 tags[encoding.tolocal(name)] = node
1740 1745 tags[b'tip'] = self.changelog.tip()
1741 1746 tagtypes = dict(
1742 1747 [
1743 1748 (encoding.tolocal(name), value)
1744 1749 for (name, value) in pycompat.iteritems(tagtypes)
1745 1750 ]
1746 1751 )
1747 1752 return (tags, tagtypes)
1748 1753
1749 1754 def tagtype(self, tagname):
1750 1755 '''
1751 1756 return the type of the given tag. result can be:
1752 1757
1753 1758 'local' : a local tag
1754 1759 'global' : a global tag
1755 1760 None : tag does not exist
1756 1761 '''
1757 1762
1758 1763 return self._tagscache.tagtypes.get(tagname)
1759 1764
1760 1765 def tagslist(self):
1761 1766 '''return a list of tags ordered by revision'''
1762 1767 if not self._tagscache.tagslist:
1763 1768 l = []
1764 1769 for t, n in pycompat.iteritems(self.tags()):
1765 1770 l.append((self.changelog.rev(n), t, n))
1766 1771 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1767 1772
1768 1773 return self._tagscache.tagslist
1769 1774
1770 1775 def nodetags(self, node):
1771 1776 '''return the tags associated with a node'''
1772 1777 if not self._tagscache.nodetagscache:
1773 1778 nodetagscache = {}
1774 1779 for t, n in pycompat.iteritems(self._tagscache.tags):
1775 1780 nodetagscache.setdefault(n, []).append(t)
1776 1781 for tags in pycompat.itervalues(nodetagscache):
1777 1782 tags.sort()
1778 1783 self._tagscache.nodetagscache = nodetagscache
1779 1784 return self._tagscache.nodetagscache.get(node, [])
1780 1785
1781 1786 def nodebookmarks(self, node):
1782 1787 """return the list of bookmarks pointing to the specified node"""
1783 1788 return self._bookmarks.names(node)
1784 1789
1785 1790 def branchmap(self):
1786 1791 '''returns a dictionary {branch: [branchheads]} with branchheads
1787 1792 ordered by increasing revision number'''
1788 1793 return self._branchcaches[self]
1789 1794
1790 1795 @unfilteredmethod
1791 1796 def revbranchcache(self):
1792 1797 if not self._revbranchcache:
1793 1798 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1794 1799 return self._revbranchcache
1795 1800
1796 1801 def branchtip(self, branch, ignoremissing=False):
1797 1802 '''return the tip node for a given branch
1798 1803
1799 1804 If ignoremissing is True, then this method will not raise an error.
1800 1805 This is helpful for callers that only expect None for a missing branch
1801 1806 (e.g. namespace).
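
For example (sketch), ``repo.branchtip(b'default', ignoremissing=True)``
returns None instead of raising when no such branch exists.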
1802 1807
1803 1808 '''
1804 1809 try:
1805 1810 return self.branchmap().branchtip(branch)
1806 1811 except KeyError:
1807 1812 if not ignoremissing:
1808 1813 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
1809 1814 else:
1810 1815 pass
1811 1816
1812 1817 def lookup(self, key):
1813 1818 node = scmutil.revsymbol(self, key).node()
1814 1819 if node is None:
1815 1820 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
1816 1821 return node
1817 1822
1818 1823 def lookupbranch(self, key):
1819 1824 if self.branchmap().hasbranch(key):
1820 1825 return key
1821 1826
1822 1827 return scmutil.revsymbol(self, key).branch()
1823 1828
1824 1829 def known(self, nodes):
1825 1830 cl = self.changelog
1826 1831 get_rev = cl.index.get_rev
1827 1832 filtered = cl.filteredrevs
1828 1833 result = []
1829 1834 for n in nodes:
1830 1835 r = get_rev(n)
1831 1836 resp = not (r is None or r in filtered)
1832 1837 result.append(resp)
1833 1838 return result
1834 1839
1835 1840 def local(self):
1836 1841 return self
1837 1842
1838 1843 def publishing(self):
1839 1844 # it's safe (and desirable) to trust the publish flag unconditionally
1840 1845 # so that we don't finalize changes shared between users via ssh or nfs
1841 1846 return self.ui.configbool(b'phases', b'publish', untrusted=True)
1842 1847
1843 1848 def cancopy(self):
1844 1849 # so statichttprepo's override of local() works
1845 1850 if not self.local():
1846 1851 return False
1847 1852 if not self.publishing():
1848 1853 return True
1849 1854 # if publishing we can't copy if there is filtered content
1850 1855 return not self.filtered(b'visible').changelog.filteredrevs
1851 1856
1852 1857 def shared(self):
1853 1858 '''the type of shared repository (None if not shared)'''
1854 1859 if self.sharedpath != self.path:
1855 1860 return b'store'
1856 1861 return None
1857 1862
1858 1863 def wjoin(self, f, *insidef):
1859 1864 return self.vfs.reljoin(self.root, f, *insidef)
1860 1865
1861 1866 def setparents(self, p1, p2=nullid):
1862 1867 with self.dirstate.parentchange():
1863 1868 copies = self.dirstate.setparents(p1, p2)
1864 1869 pctx = self[p1]
1865 1870 if copies:
1866 1871 # Adjust copy records; the dirstate cannot do it, as it
1867 1872 # requires access to the parents' manifests. Preserve them
1868 1873 # only for entries added to the first parent.
1869 1874 for f in copies:
1870 1875 if f not in pctx and copies[f] in pctx:
1871 1876 self.dirstate.copy(copies[f], f)
1872 1877 if p2 == nullid:
1873 1878 for f, s in sorted(self.dirstate.copies().items()):
1874 1879 if f not in pctx and s not in pctx:
1875 1880 self.dirstate.copy(None, f)
1876 1881
1877 1882 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1878 1883 """changeid must be a changeset revision, if specified.
1879 1884 fileid can be a file revision or node."""
1880 1885 return context.filectx(
1881 1886 self, path, changeid, fileid, changectx=changectx
1882 1887 )
1883 1888
1884 1889 def getcwd(self):
1885 1890 return self.dirstate.getcwd()
1886 1891
1887 1892 def pathto(self, f, cwd=None):
1888 1893 return self.dirstate.pathto(f, cwd)
1889 1894
1890 1895 def _loadfilter(self, filter):
1891 1896 if filter not in self._filterpats:
1892 1897 l = []
1893 1898 for pat, cmd in self.ui.configitems(filter):
1894 1899 if cmd == b'!':
1895 1900 continue
1896 1901 mf = matchmod.match(self.root, b'', [pat])
1897 1902 fn = None
1898 1903 params = cmd
1899 1904 for name, filterfn in pycompat.iteritems(self._datafilters):
1900 1905 if cmd.startswith(name):
1901 1906 fn = filterfn
1902 1907 params = cmd[len(name) :].lstrip()
1903 1908 break
1904 1909 if not fn:
1905 1910 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1906 1911 fn.__name__ = 'commandfilter'
1907 1912 # Wrap old filters not supporting keyword arguments
1908 1913 if not pycompat.getargspec(fn)[2]:
1909 1914 oldfn = fn
1910 1915 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
1911 1916 fn.__name__ = 'compat-' + oldfn.__name__
1912 1917 l.append((mf, fn, params))
1913 1918 self._filterpats[filter] = l
1914 1919 return self._filterpats[filter]
1915 1920
1916 1921 def _filter(self, filterpats, filename, data):
1917 1922 for mf, fn, cmd in filterpats:
1918 1923 if mf(filename):
1919 1924 self.ui.debug(
1920 1925 b"filtering %s through %s\n"
1921 1926 % (filename, cmd or pycompat.sysbytes(fn.__name__))
1922 1927 )
1923 1928 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1924 1929 break
1925 1930
1926 1931 return data
1927 1932
1928 1933 @unfilteredpropertycache
1929 1934 def _encodefilterpats(self):
1930 1935 return self._loadfilter(b'encode')
1931 1936
1932 1937 @unfilteredpropertycache
1933 1938 def _decodefilterpats(self):
1934 1939 return self._loadfilter(b'decode')
1935 1940
1936 1941 def adddatafilter(self, name, filter):
1937 1942 self._datafilters[name] = filter
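# Illustrative sketch (hypothetical extension code): a data filter takes
# the data and the config parameters and returns the transformed data.
#
#   def upper(s, params, **kwargs):
#       return s.upper()
#
#   repo.adddatafilter(b'upper:', upper)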
1938 1943
1939 1944 def wread(self, filename):
1940 1945 if self.wvfs.islink(filename):
1941 1946 data = self.wvfs.readlink(filename)
1942 1947 else:
1943 1948 data = self.wvfs.read(filename)
1944 1949 return self._filter(self._encodefilterpats, filename, data)
1945 1950
1946 1951 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1947 1952 """write ``data`` into ``filename`` in the working directory
1948 1953
1949 1954 This returns the length of the written (maybe decoded) data.
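
For example (sketch), ``repo.wwrite(b'a.sh', data, b'x')`` writes an
executable file, while flag ``b'l'`` writes a symlink with ``data`` as
its target instead.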
1950 1955 """
1951 1956 data = self._filter(self._decodefilterpats, filename, data)
1952 1957 if b'l' in flags:
1953 1958 self.wvfs.symlink(data, filename)
1954 1959 else:
1955 1960 self.wvfs.write(
1956 1961 filename, data, backgroundclose=backgroundclose, **kwargs
1957 1962 )
1958 1963 if b'x' in flags:
1959 1964 self.wvfs.setflags(filename, False, True)
1960 1965 else:
1961 1966 self.wvfs.setflags(filename, False, False)
1962 1967 return len(data)
1963 1968
1964 1969 def wwritedata(self, filename, data):
1965 1970 return self._filter(self._decodefilterpats, filename, data)
1966 1971
1967 1972 def currenttransaction(self):
1968 1973 """return the current transaction or None if non exists"""
1969 1974 if self._transref:
1970 1975 tr = self._transref()
1971 1976 else:
1972 1977 tr = None
1973 1978
1974 1979 if tr and tr.running():
1975 1980 return tr
1976 1981 return None
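# Illustrative usage (sketch): transaction() below nests into any
# running transaction, so callers typically write:
#
#   with repo.lock(), repo.transaction(b'my-operation') as tr:
#       ...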
1977 1982
1978 1983 def transaction(self, desc, report=None):
1979 1984 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1980 1985 b'devel', b'check-locks'
1981 1986 ):
1982 1987 if self._currentlock(self._lockref) is None:
1983 1988 raise error.ProgrammingError(b'transaction requires locking')
1984 1989 tr = self.currenttransaction()
1985 1990 if tr is not None:
1986 1991 return tr.nest(name=desc)
1987 1992
1988 1993 # abort here if the journal already exists
1989 1994 if self.svfs.exists(b"journal"):
1990 1995 raise error.RepoError(
1991 1996 _(b"abandoned transaction found"),
1992 1997 hint=_(b"run 'hg recover' to clean up transaction"),
1993 1998 )
1994 1999
1995 2000 idbase = b"%.40f#%f" % (random.random(), time.time())
1996 2001 ha = hex(hashlib.sha1(idbase).digest())
1997 2002 txnid = b'TXN:' + ha
1998 2003 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
1999 2004
2000 2005 self._writejournal(desc)
2001 2006 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2002 2007 if report:
2003 2008 rp = report
2004 2009 else:
2005 2010 rp = self.ui.warn
2006 2011 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2007 2012 # we must avoid a cyclic reference between repo and transaction.
2008 2013 reporef = weakref.ref(self)
2009 2014 # Code to track tag movement
2010 2015 #
2011 2016 # Since tags are all handled as file content, it is actually quite hard
2012 2017 # to track these movements from a code perspective. So we fall back to
2013 2018 # tracking at the repository level. One could envision tracking changes
2014 2019 # to the '.hgtags' file through changegroup application, but that fails
2015 2020 # to cope with cases where a transaction exposes new heads without a
2016 2021 # changegroup being involved (e.g. phase movement).
2017 2022 #
2018 2023 # For now, we gate the feature behind a flag since this likely comes
2019 2024 # with performance impacts. The current code runs more often than needed
2020 2025 # and does not use caches as much as it could. The current focus is on
2021 2026 # the behavior of the feature so we disable it by default. The flag
2022 2027 # will be removed when we are happy with the performance impact.
2023 2028 #
2024 2029 # Once this feature is no longer experimental move the following
2025 2030 # documentation to the appropriate help section:
2026 2031 #
2027 2032 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2028 2033 # tags (new or changed or deleted tags). In addition the details of
2029 2034 # these changes are made available in a file at:
2030 2035 # ``REPOROOT/.hg/changes/tags.changes``.
2031 2036 # Make sure you check for HG_TAG_MOVED before reading that file as it
2032 2037 # might exist from a previous transaction even if no tags were touched
2033 2038 # in this one. Changes are recorded in a line-based format::
2034 2039 #
2035 2040 # <action> <hex-node> <tag-name>\n
2036 2041 #
2037 2042 # Actions are defined as follows:
2038 2043 # "-R": tag is removed,
2039 2044 # "+A": tag is added,
2040 2045 # "-M": tag is moved (old value),
2041 2046 # "+M": tag is moved (new value),
2042 2047 tracktags = lambda x: None
2043 2048 # experimental config: experimental.hook-track-tags
2044 2049 shouldtracktags = self.ui.configbool(
2045 2050 b'experimental', b'hook-track-tags'
2046 2051 )
2047 2052 if desc != b'strip' and shouldtracktags:
2048 2053 oldheads = self.changelog.headrevs()
2049 2054
2050 2055 def tracktags(tr2):
2051 2056 repo = reporef()
2052 2057 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2053 2058 newheads = repo.changelog.headrevs()
2054 2059 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2055 2060 # note: we compare lists here;
2056 2061 # as we do it only once, building a set would not be cheaper
2057 2062 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2058 2063 if changes:
2059 2064 tr2.hookargs[b'tag_moved'] = b'1'
2060 2065 with repo.vfs(
2061 2066 b'changes/tags.changes', b'w', atomictemp=True
2062 2067 ) as changesfile:
2063 2068 # note: we do not register the file with the transaction
2064 2069 # because we need it to still exist when the transaction
2065 2070 # is closed (for txnclose hooks)
2066 2071 tagsmod.writediff(changesfile, changes)
2067 2072
2068 2073 def validate(tr2):
2069 2074 """will run pre-closing hooks"""
2070 2075 # XXX the transaction API is a bit lacking here so we take a hacky
2071 2076 # path for now
2072 2077 #
2073 2078 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2074 2079 # dict is copied before these run. In addition we need the data
2075 2080 # available to in-memory hooks too.
2076 2081 #
2077 2082 # Moreover, we also need to make sure this runs before txnclose
2078 2083 # hooks and there is no "pending" mechanism that would execute
2079 2084 # logic only if hooks are about to run.
2080 2085 #
2081 2086 # Fixing this limitation of the transaction is also needed to track
2082 2087 # other families of changes (bookmarks, phases, obsolescence).
2083 2088 #
2084 2089 # This will have to be fixed before we remove the experimental
2085 2090 # gating.
2086 2091 tracktags(tr2)
2087 2092 repo = reporef()
2088 2093
2089 2094 singleheadopt = (b'experimental', b'single-head-per-branch')
2090 2095 singlehead = repo.ui.configbool(*singleheadopt)
2091 2096 if singlehead:
2092 2097 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2093 2098 accountclosed = singleheadsub.get(
2094 2099 b"account-closed-heads", False
2095 2100 )
2096 2101 scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
2097 2102 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2098 2103 for name, (old, new) in sorted(
2099 2104 tr.changes[b'bookmarks'].items()
2100 2105 ):
2101 2106 args = tr.hookargs.copy()
2102 2107 args.update(bookmarks.preparehookargs(name, old, new))
2103 2108 repo.hook(
2104 2109 b'pretxnclose-bookmark',
2105 2110 throw=True,
2106 2111 **pycompat.strkwargs(args)
2107 2112 )
2108 2113 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2109 2114 cl = repo.unfiltered().changelog
2110 2115 for rev, (old, new) in tr.changes[b'phases'].items():
2111 2116 args = tr.hookargs.copy()
2112 2117 node = hex(cl.node(rev))
2113 2118 args.update(phases.preparehookargs(node, old, new))
2114 2119 repo.hook(
2115 2120 b'pretxnclose-phase',
2116 2121 throw=True,
2117 2122 **pycompat.strkwargs(args)
2118 2123 )
2119 2124
2120 2125 repo.hook(
2121 2126 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2122 2127 )
2123 2128
2124 2129 def releasefn(tr, success):
2125 2130 repo = reporef()
2126 2131 if repo is None:
2127 2132 # If the repo has been GC'd (and this release function is being
2128 2133 # called from transaction.__del__), there's not much we can do,
2129 2134 # so just leave the unfinished transaction there and let the
2130 2135 # user run `hg recover`.
2131 2136 return
2132 2137 if success:
2133 2138 # this should be explicitly invoked here, because
2134 2139 # in-memory changes aren't written out when closing the
2135 2140 # transaction if tr.addfilegenerator (via
2136 2141 # dirstate.write or so) isn't invoked while the
2137 2142 # transaction is running
2138 2143 repo.dirstate.write(None)
2139 2144 else:
2140 2145 # discard all changes (including ones already written
2141 2146 # out) in this transaction
2142 2147 narrowspec.restorebackup(self, b'journal.narrowspec')
2143 2148 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2144 2149 repo.dirstate.restorebackup(None, b'journal.dirstate')
2145 2150
2146 2151 repo.invalidate(clearfilecache=True)
2147 2152
2148 2153 tr = transaction.transaction(
2149 2154 rp,
2150 2155 self.svfs,
2151 2156 vfsmap,
2152 2157 b"journal",
2153 2158 b"undo",
2154 2159 aftertrans(renames),
2155 2160 self.store.createmode,
2156 2161 validator=validate,
2157 2162 releasefn=releasefn,
2158 2163 checkambigfiles=_cachedfiles,
2159 2164 name=desc,
2160 2165 )
2161 2166 tr.changes[b'origrepolen'] = len(self)
2162 2167 tr.changes[b'obsmarkers'] = set()
2163 2168 tr.changes[b'phases'] = {}
2164 2169 tr.changes[b'bookmarks'] = {}
2165 2170
2166 2171 tr.hookargs[b'txnid'] = txnid
2167 2172 tr.hookargs[b'txnname'] = desc
2168 2173 # note: writing the fncache only during finalize means that the file is
2169 2174 # outdated when running hooks. As fncache is used for streaming clone,
2170 2175 # this is not expected to break anything that happens during the hooks.
2171 2176 tr.addfinalize(b'flush-fncache', self.store.write)
2172 2177
2173 2178 def txnclosehook(tr2):
2174 2179 """To be run if transaction is successful, will schedule a hook run
2175 2180 """
2176 2181 # Don't reference tr2 in hook() so we don't hold a reference.
2177 2182 # This reduces memory consumption when there are multiple
2178 2183 # transactions per lock. This can likely go away if issue5045
2179 2184 # fixes the function accumulation.
2180 2185 hookargs = tr2.hookargs
2181 2186
2182 2187 def hookfunc():
2183 2188 repo = reporef()
2184 2189 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2185 2190 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2186 2191 for name, (old, new) in bmchanges:
2187 2192 args = tr.hookargs.copy()
2188 2193 args.update(bookmarks.preparehookargs(name, old, new))
2189 2194 repo.hook(
2190 2195 b'txnclose-bookmark',
2191 2196 throw=False,
2192 2197 **pycompat.strkwargs(args)
2193 2198 )
2194 2199
2195 2200 if hook.hashook(repo.ui, b'txnclose-phase'):
2196 2201 cl = repo.unfiltered().changelog
2197 2202 phasemv = sorted(tr.changes[b'phases'].items())
2198 2203 for rev, (old, new) in phasemv:
2199 2204 args = tr.hookargs.copy()
2200 2205 node = hex(cl.node(rev))
2201 2206 args.update(phases.preparehookargs(node, old, new))
2202 2207 repo.hook(
2203 2208 b'txnclose-phase',
2204 2209 throw=False,
2205 2210 **pycompat.strkwargs(args)
2206 2211 )
2207 2212
2208 2213 repo.hook(
2209 2214 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2210 2215 )
2211 2216
2212 2217 reporef()._afterlock(hookfunc)
2213 2218
2214 2219 tr.addfinalize(b'txnclose-hook', txnclosehook)
2215 2220 # Include a leading "-" to make it happen before the transaction summary
2216 2221 # reports registered via scmutil.registersummarycallback() whose names
2217 2222 # are 00-txnreport etc. That way, the caches will be warm when the
2218 2223 # callbacks run.
2219 2224 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2220 2225
2221 2226 def txnaborthook(tr2):
2222 2227 """To be run if transaction is aborted
2223 2228 """
2224 2229 reporef().hook(
2225 2230 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2226 2231 )
2227 2232
2228 2233 tr.addabort(b'txnabort-hook', txnaborthook)
2229 2234 # avoid eager cache invalidation. in-memory data should be identical
2230 2235 # to stored data if transaction has no error.
2231 2236 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2232 2237 self._transref = weakref.ref(tr)
2233 2238 scmutil.registersummarycallback(self, tr, desc)
2234 2239 return tr
2235 2240
2236 2241 def _journalfiles(self):
2237 2242 return (
2238 2243 (self.svfs, b'journal'),
2239 2244 (self.svfs, b'journal.narrowspec'),
2240 2245 (self.vfs, b'journal.narrowspec.dirstate'),
2241 2246 (self.vfs, b'journal.dirstate'),
2242 2247 (self.vfs, b'journal.branch'),
2243 2248 (self.vfs, b'journal.desc'),
2244 2249 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2245 2250 (self.svfs, b'journal.phaseroots'),
2246 2251 )
2247 2252
2248 2253 def undofiles(self):
2249 2254 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2250 2255
2251 2256 @unfilteredmethod
2252 2257 def _writejournal(self, desc):
2253 2258 self.dirstate.savebackup(None, b'journal.dirstate')
2254 2259 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2255 2260 narrowspec.savebackup(self, b'journal.narrowspec')
2256 2261 self.vfs.write(
2257 2262 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2258 2263 )
2259 2264 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2260 2265 bookmarksvfs = bookmarks.bookmarksvfs(self)
2261 2266 bookmarksvfs.write(
2262 2267 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2263 2268 )
2264 2269 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2265 2270
2266 2271 def recover(self):
2267 2272 with self.lock():
2268 2273 if self.svfs.exists(b"journal"):
2269 2274 self.ui.status(_(b"rolling back interrupted transaction\n"))
2270 2275 vfsmap = {
2271 2276 b'': self.svfs,
2272 2277 b'plain': self.vfs,
2273 2278 }
2274 2279 transaction.rollback(
2275 2280 self.svfs,
2276 2281 vfsmap,
2277 2282 b"journal",
2278 2283 self.ui.warn,
2279 2284 checkambigfiles=_cachedfiles,
2280 2285 )
2281 2286 self.invalidate()
2282 2287 return True
2283 2288 else:
2284 2289 self.ui.warn(_(b"no interrupted transaction available\n"))
2285 2290 return False
2286 2291
2287 2292 def rollback(self, dryrun=False, force=False):
2288 2293 wlock = lock = dsguard = None
2289 2294 try:
2290 2295 wlock = self.wlock()
2291 2296 lock = self.lock()
2292 2297 if self.svfs.exists(b"undo"):
2293 2298 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2294 2299
2295 2300 return self._rollback(dryrun, force, dsguard)
2296 2301 else:
2297 2302 self.ui.warn(_(b"no rollback information available\n"))
2298 2303 return 1
2299 2304 finally:
2300 2305 release(dsguard, lock, wlock)
2301 2306
2302 2307 @unfilteredmethod # Until we get smarter cache management
2303 2308 def _rollback(self, dryrun, force, dsguard):
2304 2309 ui = self.ui
2305 2310 try:
2306 2311 args = self.vfs.read(b'undo.desc').splitlines()
2307 2312 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2308 2313 if len(args) >= 3:
2309 2314 detail = args[2]
2310 2315 oldtip = oldlen - 1
2311 2316
2312 2317 if detail and ui.verbose:
2313 2318 msg = _(
2314 2319 b'repository tip rolled back to revision %d'
2315 2320 b' (undo %s: %s)\n'
2316 2321 ) % (oldtip, desc, detail)
2317 2322 else:
2318 2323 msg = _(
2319 2324 b'repository tip rolled back to revision %d (undo %s)\n'
2320 2325 ) % (oldtip, desc)
2321 2326 except IOError:
2322 2327 msg = _(b'rolling back unknown transaction\n')
2323 2328 desc = None
2324 2329
2325 2330 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2326 2331 raise error.Abort(
2327 2332 _(
2328 2333 b'rollback of last commit while not checked out '
2329 2334 b'may lose data'
2330 2335 ),
2331 2336 hint=_(b'use -f to force'),
2332 2337 )
2333 2338
2334 2339 ui.status(msg)
2335 2340 if dryrun:
2336 2341 return 0
2337 2342
2338 2343 parents = self.dirstate.parents()
2339 2344 self.destroying()
2340 2345 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2341 2346 transaction.rollback(
2342 2347 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2343 2348 )
2344 2349 bookmarksvfs = bookmarks.bookmarksvfs(self)
2345 2350 if bookmarksvfs.exists(b'undo.bookmarks'):
2346 2351 bookmarksvfs.rename(
2347 2352 b'undo.bookmarks', b'bookmarks', checkambig=True
2348 2353 )
2349 2354 if self.svfs.exists(b'undo.phaseroots'):
2350 2355 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2351 2356 self.invalidate()
2352 2357
2353 2358 has_node = self.changelog.index.has_node
2354 2359 parentgone = any(not has_node(p) for p in parents)
2355 2360 if parentgone:
2356 2361 # prevent dirstateguard from overwriting the already-restored one
2357 2362 dsguard.close()
2358 2363
2359 2364 narrowspec.restorebackup(self, b'undo.narrowspec')
2360 2365 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2361 2366 self.dirstate.restorebackup(None, b'undo.dirstate')
2362 2367 try:
2363 2368 branch = self.vfs.read(b'undo.branch')
2364 2369 self.dirstate.setbranch(encoding.tolocal(branch))
2365 2370 except IOError:
2366 2371 ui.warn(
2367 2372 _(
2368 2373 b'named branch could not be reset: '
2369 2374 b'current branch is still \'%s\'\n'
2370 2375 )
2371 2376 % self.dirstate.branch()
2372 2377 )
2373 2378
2374 2379 parents = tuple([p.rev() for p in self[None].parents()])
2375 2380 if len(parents) > 1:
2376 2381 ui.status(
2377 2382 _(
2378 2383 b'working directory now based on '
2379 2384 b'revisions %d and %d\n'
2380 2385 )
2381 2386 % parents
2382 2387 )
2383 2388 else:
2384 2389 ui.status(
2385 2390 _(b'working directory now based on revision %d\n') % parents
2386 2391 )
2387 2392 mergemod.mergestate.clean(self, self[b'.'].node())
2388 2393
2389 2394 # TODO: if we know which new heads may result from this rollback, pass
2390 2395 # them to destroy(), which will prevent the branchhead cache from being
2391 2396 # invalidated.
2392 2397 self.destroyed()
2393 2398 return 0
2394 2399
2395 2400 def _buildcacheupdater(self, newtransaction):
2396 2401 """called during transaction to build the callback updating cache
2397 2402
2398 2403 Lives on the repository to help extensions that might want to augment
2399 2404 this logic. For this purpose, the created transaction is passed to the
2400 2405 method.
2401 2406 """
2402 2407 # we must avoid a cyclic reference between repo and transaction.
2403 2408 reporef = weakref.ref(self)
2404 2409
2405 2410 def updater(tr):
2406 2411 repo = reporef()
2407 2412 repo.updatecaches(tr)
2408 2413
2409 2414 return updater
2410 2415
2411 2416 @unfilteredmethod
2412 2417 def updatecaches(self, tr=None, full=False):
2413 2418 """warm appropriate caches
2414 2419
2415 2420 If this function is called after a transaction closed, the transaction
2416 2421 will be available in the 'tr' argument. This can be used to selectively
2417 2422 update caches relevant to the changes in that transaction.
2418 2423
2419 2424 If 'full' is set, make sure all caches the function knows about have
2420 2425 up-to-date data, even the ones usually loaded more lazily.
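
A full warm-up (sketch, as e.g. debug commands may do)::

repo.updatecaches(full=True)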
2421 2426 """
2422 2427 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2423 2428 # During strip, many caches are invalid but
2424 2429 # a later call to `destroyed` will refresh them.
2425 2430 return
2426 2431
2427 2432 if tr is None or tr.changes[b'origrepolen'] < len(self):
2428 2433 # accessing the 'served' branchmap should refresh all the others,
2429 2434 self.ui.debug(b'updating the branch cache\n')
2430 2435 self.filtered(b'served').branchmap()
2431 2436 self.filtered(b'served.hidden').branchmap()
2432 2437
2433 2438 if full:
2434 2439 unfi = self.unfiltered()
2435 2440 rbc = unfi.revbranchcache()
2436 2441 for r in unfi.changelog:
2437 2442 rbc.branchinfo(r)
2438 2443 rbc.write()
2439 2444
2440 2445 # ensure the working copy parents are in the manifestfulltextcache
2441 2446 for ctx in self[b'.'].parents():
2442 2447 ctx.manifest() # accessing the manifest is enough
2443 2448
2444 2449 # accessing fnode cache warms the cache
2445 2450 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2446 2451 # accessing tags warms the cache
2447 2452 self.tags()
2448 2453 self.filtered(b'served').tags()
2449 2454
2450 2455 # The `full` arg is documented as updating even the lazily-loaded
2451 2456 # caches immediately, so we're forcing a write to cause these caches
2452 2457 # to be warmed up even if they haven't explicitly been requested
2453 2458 # yet (if they've never been used by hg, they won't ever have been
2454 2459 # written, even if they're a subset of another kind of cache that
2455 2460 # *has* been used).
2456 2461 for filt in repoview.filtertable.keys():
2457 2462 filtered = self.filtered(filt)
2458 2463 filtered.branchmap().write(filtered)
2459 2464
2460 2465 def invalidatecaches(self):
2461 2466
2462 2467 if '_tagscache' in vars(self):
2463 2468 # can't use delattr on proxy
2464 2469 del self.__dict__['_tagscache']
2465 2470
2466 2471 self._branchcaches.clear()
2467 2472 self.invalidatevolatilesets()
2468 2473 self._sparsesignaturecache.clear()
2469 2474
2470 2475 def invalidatevolatilesets(self):
2471 2476 self.filteredrevcache.clear()
2472 2477 obsolete.clearobscaches(self)
2473 2478
2474 2479 def invalidatedirstate(self):
2475 2480 '''Invalidates the dirstate, causing the next call to dirstate
2476 2481 to check if it was modified since the last time it was read,
2477 2482 rereading it if it has.
2478 2483
2479 2484 This is different from dirstate.invalidate() in that it doesn't always
2480 2485 reread the dirstate. Use dirstate.invalidate() if you want to
2481 2486 explicitly read the dirstate again (i.e. restoring it to a previous
2482 2487 known good state).'''
2483 2488 if hasunfilteredcache(self, 'dirstate'):
2484 2489 for k in self.dirstate._filecache:
2485 2490 try:
2486 2491 delattr(self.dirstate, k)
2487 2492 except AttributeError:
2488 2493 pass
2489 2494 delattr(self.unfiltered(), 'dirstate')
2490 2495
2491 2496 def invalidate(self, clearfilecache=False):
2492 2497 '''Invalidates both store and non-store parts other than dirstate
2493 2498
2494 2499 If a transaction is running, invalidation of store is omitted,
2495 2500 because discarding in-memory changes might cause inconsistency
2496 2501 (e.g. incomplete fncache causes unintentional failure, but
2497 2502 redundant one doesn't).
2498 2503 '''
2499 2504 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2500 2505 for k in list(self._filecache.keys()):
2501 2506 # dirstate is invalidated separately in invalidatedirstate()
2502 2507 if k == b'dirstate':
2503 2508 continue
2504 2509 if (
2505 2510 k == b'changelog'
2506 2511 and self.currenttransaction()
2507 2512 and self.changelog._delayed
2508 2513 ):
2509 2514 # The changelog object may store unwritten revisions. We don't
2510 2515 # want to lose them.
2511 2516 # TODO: Solve the problem instead of working around it.
2512 2517 continue
2513 2518
2514 2519 if clearfilecache:
2515 2520 del self._filecache[k]
2516 2521 try:
2517 2522 delattr(unfiltered, k)
2518 2523 except AttributeError:
2519 2524 pass
2520 2525 self.invalidatecaches()
2521 2526 if not self.currenttransaction():
2522 2527 # TODO: Changing contents of store outside transaction
2523 2528 # causes inconsistency. We should make in-memory store
2524 2529 # changes detectable, and abort if changed.
2525 2530 self.store.invalidatecaches()
2526 2531
2527 2532 def invalidateall(self):
2528 2533 '''Fully invalidates both store and non-store parts, causing the
2529 2534 subsequent operation to reread any outside changes.'''
2530 2535 # extensions should hook this to invalidate their caches
2531 2536 self.invalidate()
2532 2537 self.invalidatedirstate()
2533 2538
2534 2539 @unfilteredmethod
2535 2540 def _refreshfilecachestats(self, tr):
2536 2541 """Reload stats of cached files so that they are flagged as valid"""
2537 2542 for k, ce in self._filecache.items():
2538 2543 k = pycompat.sysstr(k)
2539 2544 if k == 'dirstate' or k not in self.__dict__:
2540 2545 continue
2541 2546 ce.refresh()
2542 2547
2543 2548 def _lock(
2544 2549 self,
2545 2550 vfs,
2546 2551 lockname,
2547 2552 wait,
2548 2553 releasefn,
2549 2554 acquirefn,
2550 2555 desc,
2551 2556 inheritchecker=None,
2552 2557 parentenvvar=None,
2553 2558 ):
2554 2559 parentlock = None
2555 2560 # the contents of parentenvvar are used by the underlying lock to
2556 2561 # determine whether it can be inherited
2557 2562 if parentenvvar is not None:
2558 2563 parentlock = encoding.environ.get(parentenvvar)
2559 2564
2560 2565 timeout = 0
2561 2566 warntimeout = 0
2562 2567 if wait:
2563 2568 timeout = self.ui.configint(b"ui", b"timeout")
2564 2569 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2565 2570 # internal config: ui.signal-safe-lock
2566 2571 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2567 2572
2568 2573 l = lockmod.trylock(
2569 2574 self.ui,
2570 2575 vfs,
2571 2576 lockname,
2572 2577 timeout,
2573 2578 warntimeout,
2574 2579 releasefn=releasefn,
2575 2580 acquirefn=acquirefn,
2576 2581 desc=desc,
2577 2582 inheritchecker=inheritchecker,
2578 2583 parentlock=parentlock,
2579 2584 signalsafe=signalsafe,
2580 2585 )
2581 2586 return l
2582 2587
2583 2588 def _afterlock(self, callback):
2584 2589 """add a callback to be run when the repository is fully unlocked
2585 2590
2586 2591 The callback will be executed when the outermost lock is released
2587 2592 (with wlock being higher level than 'lock')."""
2588 2593 for ref in (self._wlockref, self._lockref):
2589 2594 l = ref and ref()
2590 2595 if l and l.held:
2591 2596 l.postrelease.append(callback)
2592 2597 break
2593 2598 else: # no lock has been found.
2594 2599 callback()
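# Sketch: commit() below registers its 'commit' hook runner this way,
# e.g. self._afterlock(commithook), so the hook only fires once the
# outermost lock is released.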
2595 2600
2596 2601 def lock(self, wait=True):
2597 2602 '''Lock the repository store (.hg/store) and return a weak reference
2598 2603 to the lock. Use this before modifying the store (e.g. committing or
2599 2604 stripping). If you are opening a transaction, get a lock as well.
2600 2605
2601 2606 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2602 2607 'wlock' first to avoid a dead-lock hazard.'''
2603 2608 l = self._currentlock(self._lockref)
2604 2609 if l is not None:
2605 2610 l.lock()
2606 2611 return l
2607 2612
2608 2613 l = self._lock(
2609 2614 vfs=self.svfs,
2610 2615 lockname=b"lock",
2611 2616 wait=wait,
2612 2617 releasefn=None,
2613 2618 acquirefn=self.invalidate,
2614 2619 desc=_(b'repository %s') % self.origroot,
2615 2620 )
2616 2621 self._lockref = weakref.ref(l)
2617 2622 return l
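# Correct acquisition order (sketch, as commit() below does):
#
#   with repo.wlock(), repo.lock():
#       ...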
2618 2623
2619 2624 def _wlockchecktransaction(self):
2620 2625 if self.currenttransaction() is not None:
2621 2626 raise error.LockInheritanceContractViolation(
2622 2627 b'wlock cannot be inherited in the middle of a transaction'
2623 2628 )
2624 2629
2625 2630 def wlock(self, wait=True):
2626 2631 '''Lock the non-store parts of the repository (everything under
2627 2632 .hg except .hg/store) and return a weak reference to the lock.
2628 2633
2629 2634 Use this before modifying files in .hg.
2630 2635
2631 2636 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2632 2637 'wlock' first to avoid a dead-lock hazard.'''
2633 2638 l = self._wlockref and self._wlockref()
2634 2639 if l is not None and l.held:
2635 2640 l.lock()
2636 2641 return l
2637 2642
2638 2643 # We do not need to check for non-waiting lock acquisition. Such
2639 2644 # an acquisition would not cause a dead-lock, as it would just fail.
2640 2645 if wait and (
2641 2646 self.ui.configbool(b'devel', b'all-warnings')
2642 2647 or self.ui.configbool(b'devel', b'check-locks')
2643 2648 ):
2644 2649 if self._currentlock(self._lockref) is not None:
2645 2650 self.ui.develwarn(b'"wlock" acquired after "lock"')
2646 2651
2647 2652 def unlock():
2648 2653 if self.dirstate.pendingparentchange():
2649 2654 self.dirstate.invalidate()
2650 2655 else:
2651 2656 self.dirstate.write(None)
2652 2657
2653 2658 self._filecache[b'dirstate'].refresh()
2654 2659
2655 2660 l = self._lock(
2656 2661 self.vfs,
2657 2662 b"wlock",
2658 2663 wait,
2659 2664 unlock,
2660 2665 self.invalidatedirstate,
2661 2666 _(b'working directory of %s') % self.origroot,
2662 2667 inheritchecker=self._wlockchecktransaction,
2663 2668 parentenvvar=b'HG_WLOCK_LOCKER',
2664 2669 )
2665 2670 self._wlockref = weakref.ref(l)
2666 2671 return l
2667 2672
2668 2673 def _currentlock(self, lockref):
2669 2674 """Returns the lock if it's held, or None if it's not."""
2670 2675 if lockref is None:
2671 2676 return None
2672 2677 l = lockref()
2673 2678 if l is None or not l.held:
2674 2679 return None
2675 2680 return l
2676 2681
2677 2682 def currentwlock(self):
2678 2683 """Returns the wlock if it's held, or None if it's not."""
2679 2684 return self._currentlock(self._wlockref)
2680 2685
2681 2686 def _filecommit(
2682 2687 self,
2683 2688 fctx,
2684 2689 manifest1,
2685 2690 manifest2,
2686 2691 linkrev,
2687 2692 tr,
2688 2693 changelist,
2689 2694 includecopymeta,
2690 2695 ):
2691 2696 """
2692 2697 commit an individual file as part of a larger transaction
2693 2698 """
2694 2699
2695 2700 fname = fctx.path()
2696 2701 fparent1 = manifest1.get(fname, nullid)
2697 2702 fparent2 = manifest2.get(fname, nullid)
2698 2703 if isinstance(fctx, context.filectx):
2699 2704 node = fctx.filenode()
2700 2705 if node in [fparent1, fparent2]:
2701 2706 self.ui.debug(b'reusing %s filelog entry\n' % fname)
2702 2707 if (
2703 2708 fparent1 != nullid
2704 2709 and manifest1.flags(fname) != fctx.flags()
2705 2710 ) or (
2706 2711 fparent2 != nullid
2707 2712 and manifest2.flags(fname) != fctx.flags()
2708 2713 ):
2709 2714 changelist.append(fname)
2710 2715 return node
2711 2716
2712 2717 flog = self.file(fname)
2713 2718 meta = {}
2714 2719 cfname = fctx.copysource()
2715 2720 if cfname and cfname != fname:
2716 2721 # Mark the new revision of this file as a copy of another
2717 2722 # file. This copy data will effectively act as a parent
2718 2723 # of this new revision. If this is a merge, the first
2719 2724 # parent will be the nullid (meaning "look up the copy data")
2720 2725 # and the second one will be the other parent. For example:
2721 2726 #
2722 2727 # 0 --- 1 --- 3 rev1 changes file foo
2723 2728 # \ / rev2 renames foo to bar and changes it
2724 2729 # \- 2 -/ rev3 should have bar with all changes and
2725 2730 # should record that bar descends from
2726 2731 # bar in rev2 and foo in rev1
2727 2732 #
2728 2733 # this allows this merge to succeed:
2729 2734 #
2730 2735 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
2731 2736 # \ / merging rev3 and rev4 should use bar@rev2
2732 2737 # \- 2 --- 4 as the merge base
2733 2738 #
2734 2739
2735 2740 cnode = manifest1.get(cfname)
2736 2741 newfparent = fparent2
2737 2742
2738 2743 if manifest2: # branch merge
2739 2744 if fparent2 == nullid or cnode is None: # copied on remote side
2740 2745 if cfname in manifest2:
2741 2746 cnode = manifest2[cfname]
2742 2747 newfparent = fparent1
2743 2748
2744 2749 # Here, we used to search backwards through history to try to find
2745 2750 # where the file copy came from if the source of a copy was not in
2746 2751 # the parent directory. However, this doesn't actually make sense to
2747 2752 # do (what does a copy from something not in your working copy even
2748 2753 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
2749 2754 # the user that copy information was dropped, so if they didn't
2750 2755 # expect this outcome it can be fixed, but this is the correct
2751 2756 # behavior in this circumstance.
2752 2757
2753 2758 if cnode:
2754 2759 self.ui.debug(
2755 2760 b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))
2756 2761 )
2757 2762 if includecopymeta:
2758 2763 meta[b"copy"] = cfname
2759 2764 meta[b"copyrev"] = hex(cnode)
2760 2765 fparent1, fparent2 = nullid, newfparent
2761 2766 else:
2762 2767 self.ui.warn(
2763 2768 _(
2764 2769 b"warning: can't find ancestor for '%s' "
2765 2770 b"copied from '%s'!\n"
2766 2771 )
2767 2772 % (fname, cfname)
2768 2773 )
2769 2774
2770 2775 elif fparent1 == nullid:
2771 2776 fparent1, fparent2 = fparent2, nullid
2772 2777 elif fparent2 != nullid:
2773 2778 # is one parent an ancestor of the other?
2774 2779 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2775 2780 if fparent1 in fparentancestors:
2776 2781 fparent1, fparent2 = fparent2, nullid
2777 2782 elif fparent2 in fparentancestors:
2778 2783 fparent2 = nullid
2779 2784
2780 2785 # is the file changed?
2781 2786 text = fctx.data()
2782 2787 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
2783 2788 changelist.append(fname)
2784 2789 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2785 2790 # are just the flags changed during merge?
2786 2791 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2787 2792 changelist.append(fname)
2788 2793
2789 2794 return fparent1
2790 2795
2791 2796 def checkcommitpatterns(self, wctx, match, status, fail):
2792 2797 """check for commit arguments that aren't committable"""
2793 2798 if match.isexact() or match.prefix():
2794 2799 matched = set(status.modified + status.added + status.removed)
2795 2800
2796 2801 for f in match.files():
2797 2802 f = self.dirstate.normalize(f)
2798 2803 if f == b'.' or f in matched or f in wctx.substate:
2799 2804 continue
2800 2805 if f in status.deleted:
2801 2806 fail(f, _(b'file not found!'))
2802 2807 # Is it a directory that exists or used to exist?
2803 2808 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2804 2809 d = f + b'/'
2805 2810 for mf in matched:
2806 2811 if mf.startswith(d):
2807 2812 break
2808 2813 else:
2809 2814 fail(f, _(b"no match under directory!"))
2810 2815 elif f not in self.dirstate:
2811 2816 fail(f, _(b"file not tracked!"))
2812 2817
2813 2818 @unfilteredmethod
2814 2819 def commit(
2815 2820 self,
2816 2821 text=b"",
2817 2822 user=None,
2818 2823 date=None,
2819 2824 match=None,
2820 2825 force=False,
2821 2826 editor=False,
2822 2827 extra=None,
2823 2828 ):
2824 2829 """Add a new revision to current repository.
2825 2830
2826 2831 Revision information is gathered from the working directory,
2827 2832 match can be used to filter the committed files. If editor is
2828 2833 supplied, it is called to get a commit message.
2829 2834 """
2830 2835 if extra is None:
2831 2836 extra = {}
2832 2837
2833 2838 def fail(f, msg):
2834 2839 raise error.Abort(b'%s: %s' % (f, msg))
2835 2840
2836 2841 if not match:
2837 2842 match = matchmod.always()
2838 2843
2839 2844 if not force:
2840 2845 match.bad = fail
2841 2846
2842 2847 # lock() for recent changelog (see issue4368)
2843 2848 with self.wlock(), self.lock():
2844 2849 wctx = self[None]
2845 2850 merge = len(wctx.parents()) > 1
2846 2851
2847 2852 if not force and merge and not match.always():
2848 2853 raise error.Abort(
2849 2854 _(
2850 2855 b'cannot partially commit a merge '
2851 2856 b'(do not specify files or patterns)'
2852 2857 )
2853 2858 )
2854 2859
2855 2860 status = self.status(match=match, clean=force)
2856 2861 if force:
2857 2862 status.modified.extend(
2858 2863 status.clean
2859 2864 ) # mq may commit clean files
2860 2865
2861 2866 # check subrepos
2862 2867 subs, commitsubs, newstate = subrepoutil.precommit(
2863 2868 self.ui, wctx, status, match, force=force
2864 2869 )
2865 2870
2866 2871 # make sure all explicit patterns are matched
2867 2872 if not force:
2868 2873 self.checkcommitpatterns(wctx, match, status, fail)
2869 2874
2870 2875 cctx = context.workingcommitctx(
2871 2876 self, status, text, user, date, extra
2872 2877 )
2873 2878
2874 2879 # internal config: ui.allowemptycommit
2875 2880 allowemptycommit = (
2876 2881 wctx.branch() != wctx.p1().branch()
2877 2882 or extra.get(b'close')
2878 2883 or merge
2879 2884 or cctx.files()
2880 2885 or self.ui.configbool(b'ui', b'allowemptycommit')
2881 2886 )
2882 2887 if not allowemptycommit:
2883 2888 return None
2884 2889
2885 2890 if merge and cctx.deleted():
2886 2891 raise error.Abort(_(b"cannot commit merge with missing files"))
2887 2892
2888 2893 ms = mergemod.mergestate.read(self)
2889 2894 mergeutil.checkunresolved(ms)
2890 2895
2891 2896 if editor:
2892 2897 cctx._text = editor(self, cctx, subs)
2893 2898 edited = text != cctx._text
2894 2899
2895 2900 # Save commit message in case this transaction gets rolled back
2896 2901 # (e.g. by a pretxncommit hook). Leave the content alone on
2897 2902 # the assumption that the user will use the same editor again.
2898 2903 msgfn = self.savecommitmessage(cctx._text)
2899 2904
2900 2905 # commit subs and write new state
2901 2906 if subs:
2902 2907 uipathfn = scmutil.getuipathfn(self)
2903 2908 for s in sorted(commitsubs):
2904 2909 sub = wctx.sub(s)
2905 2910 self.ui.status(
2906 2911 _(b'committing subrepository %s\n')
2907 2912 % uipathfn(subrepoutil.subrelpath(sub))
2908 2913 )
2909 2914 sr = sub.commit(cctx._text, user, date)
2910 2915 newstate[s] = (newstate[s][0], sr)
2911 2916 subrepoutil.writestate(self, newstate)
2912 2917
2913 2918 p1, p2 = self.dirstate.parents()
2914 2919 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
2915 2920 try:
2916 2921 self.hook(
2917 2922 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
2918 2923 )
2919 2924 with self.transaction(b'commit'):
2920 2925 ret = self.commitctx(cctx, True)
2921 2926 # update bookmarks, dirstate and mergestate
2922 2927 bookmarks.update(self, [p1, p2], ret)
2923 2928 cctx.markcommitted(ret)
2924 2929 ms.reset()
2925 2930 except: # re-raises
2926 2931 if edited:
2927 2932 self.ui.write(
2928 2933 _(b'note: commit message saved in %s\n') % msgfn
2929 2934 )
2930 2935 raise
2931 2936
2932 2937 def commithook():
2933 2938 # hack for commands that use a temporary commit (e.g. histedit):
2934 2939 # the temporary commit may have been stripped before the hook runs
2935 2940 if self.changelog.hasnode(ret):
2936 2941 self.hook(
2937 2942 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
2938 2943 )
2939 2944
2940 2945 self._afterlock(commithook)
2941 2946 return ret
2942 2947
2943 2948 @unfilteredmethod
2944 2949 def commitctx(self, ctx, error=False, origctx=None):
2945 2950 """Add a new revision to current repository.
2946 2951 Revision information is passed via the context argument.
2947 2952
2948 2953 ctx.files() should list all files involved in this commit, i.e.
2949 2954 modified/added/removed files. On merge, it may be wider than the
2950 2955 ctx.files() to be committed, since any file nodes derived directly
2951 2956 from p1 or p2 are excluded from the committed ctx.files().
2952 2957
2953 2958 origctx is for convert to work around the problem that bug
2954 2959 fixes to the files list in changesets change hashes. For
2955 2960 convert to be the identity, it can pass an origctx and this
2956 2961 function will use the same files list when it makes sense to
2957 2962 do so.
2958 2963 """

        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        if self.filecopiesmode == b'changeset-sidedata':
            writechangesetcopy = True
            writefilecopymeta = True
            writecopiesto = None
        else:
            writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
            writefilecopymeta = writecopiesto != b'changeset-only'
            writechangesetcopy = writecopiesto in (
                b'changeset-only',
                b'compatibility',
            )
        p1copies, p2copies = None, None
        if writechangesetcopy:
            p1copies = ctx.p1copies()
            p2copies = ctx.p2copies()
        filesadded, filesremoved = None, None
        with self.lock(), self.transaction(b"commit") as tr:
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug(b'reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
                if writechangesetcopy:
                    filesadded = ctx.filesadded()
                    filesremoved = ctx.filesremoved()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_(b"committing files:\n"))
                uipathfn = scmutil.getuipathfn(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(uipathfn(f) + b"\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(
                                fctx,
                                m1,
                                m2,
                                linkrev,
                                trp,
                                changed,
                                writefilecopymeta,
                            )
                            m.setflag(f, fctx.flags())
                    except OSError:
                        self.ui.warn(
                            _(b"trouble committing %s!\n") % uipathfn(f)
                        )
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(
                                _(b"trouble committing %s!\n") % uipathfn(f)
                            )
                        raise

                # update manifest
                removed = [f for f in removed if f in m1 or f in m2]
                drop = sorted([f for f in removed if f in m])
                for f in drop:
                    del m[f]
                if p2.rev() != nullrev:

                    @util.cachefunc
                    def mas():
                        p1n = p1.node()
                        p2n = p2.node()
                        cahs = self.changelog.commonancestorsheads(p1n, p2n)
                        if not cahs:
                            cahs = [nullrev]
                        return [self[r].manifest() for r in cahs]

                    def deletionfromparent(f):
                        # When a file is removed relative to p1 in a merge,
                        # this function determines whether the absence is due
                        # to a deletion from a parent, or whether the merge
                        # commit itself deletes the file. We decide this by
                        # doing a simplified three way merge of the manifest
                        # entry for the file. There are two ways we decide the
                        # merge itself didn't delete a file:
                        # - neither parent (nor the merge) contains the file
                        # - exactly one parent contains the file, and that
                        #   parent has the same filelog entry as the merge
                        #   ancestor (or all of them if there are two). In
                        #   other words, that parent left the file unchanged
                        #   while the other one deleted it.
                        # One way to think about this is that deleting a file
                        # is similar to emptying it, so the list of changed
                        # files should be similar either way. The computation
                        # described above is not done directly in _filecommit
                        # when creating the list of changed files, however it
                        # does something very similar by comparing filelog
                        # nodes.
                        if f in m1:
                            return f not in m2 and all(
                                f in ma and ma.find(f) == m1.find(f)
                                for ma in mas()
                            )
                        elif f in m2:
                            return all(
                                f in ma and ma.find(f) == m2.find(f)
                                for ma in mas()
                            )
                        else:
                            return True
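
                    # Worked example: if f is present in p1 with the same
                    # filelog node as in the merge ancestor but absent from
                    # p2, then its absence from the merge is inherited from
                    # p2, deletionfromparent(f) is True, and f is filtered
                    # out of the removed list below. If both parents still
                    # contain f, the merge itself deleted it and f stays
                    # listed as removed.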

                    removed = [f for f in removed if not deletionfromparent(f)]

                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect an unmodified manifest entry so that
                    # the exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                    if not files and md:
                        self.ui.debug(
                            b'not reusing manifest (no file change in '
                            b'changelog, but manifest differs)\n'
                        )
                if files or md:
                    self.ui.note(_(b"committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(
                        trp,
                        linkrev,
                        p1.manifestnode(),
                        p2.manifestnode(),
                        added,
                        drop,
                        match=self.narrowmatch(),
                    )

                    if writechangesetcopy:
                        filesadded = [
                            f for f in changed if not (f in m1 or f in m2)
                        ]
                        filesremoved = removed
                else:
                    self.ui.debug(
                        b'reusing manifest from p1 (listed files '
                        b'actually unchanged)\n'
                    )
                    mn = p1.manifestnode()
            else:
                self.ui.debug(b'reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            if writecopiesto == b'changeset-only':
                # If writing only to changeset extras, use None to indicate
                # that no entry should be written. If writing to both, write
                # an empty entry to prevent the reader from falling back to
                # reading filelogs.
                p1copies = p1copies or None
                p2copies = p2copies or None
                filesadded = filesadded or None
                filesremoved = filesremoved or None

            if origctx and origctx.manifestnode() == mn:
                files = origctx.files()

            # update changelog
            self.ui.note(_(b"committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(
                mn,
                files,
                ctx.description(),
                trp,
                p1.node(),
                p2.node(),
                user,
                ctx.date(),
                ctx.extra().copy(),
                p1copies,
                p2copies,
                filesadded,
                filesremoved,
            )
            xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
            self.hook(
                b'pretxncommit',
                throw=True,
                node=hex(n),
                parent1=xp1,
                parent2=xp2,
            )
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the phase boundary does not alter the parent
                # changesets: if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase was 0, we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            return n

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to either stay in memory (waiting for the next unlock) or
        vanish completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the
        wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)
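
    # Illustrative sketch: an extension (fsmonitor does something along these
    # lines) registers a fixup callback before each dirstate.status call; the
    # callback name below is hypothetical:
    #
    #   def poststatusfixup(wctx, status):
    #       # runs under wlock; use wctx.repo().dirstate, not a cached copy
    #       ...
    #
    #   repo.addpostdsstatus(poststatusfixup)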

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads
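
    # Illustrative example: repo.branchheads(b'default') returns the node ids
    # of the open heads of the "default" branch, newest first; with
    # closed=True, heads that close the branch are included as well.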

    def branches(self, nodes):
        '''return the linear branch segments that contain the given nodes

        For each node, follow first parents until a merge or the root is
        reached. Each result tuple is (segment head, segment tail, tail's
        first parent, tail's second parent).
        '''
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        '''return nodes between top and bottom, sampled at power-of-two steps

        For each (top, bottom) pair, walk first parents from top towards
        bottom and collect the nodes at distances 1, 2, 4, 8, ... from top,
        stopping when bottom (or the null revision) is reached.
        '''
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
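
    # Illustrative example: with a purely linear history
    # n0 <- n1 <- ... <- n10, repo.between([(n10, n0)]) returns
    # [[n9, n8, n6, n2]] -- the nodes at distances 1, 2, 4 and 8 from n10.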

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance whose hooks are called with a pushop
        (carrying repo, remote, and outgoing attributes) before changesets
        are pushed.
        """
        return util.hooks()
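
    # Illustrative sketch: an extension can register an outgoing check (the
    # names below are hypothetical):
    #
    #   def checkoutgoing(pushop):
    #       for node in pushop.outgoing.missing:
    #           ...  # inspect pushop.repo[node]; raise error.Abort to veto
    #
    #   repo.prepushoutgoinghooks.add(b'myextension', checkoutgoing)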

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook():
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret
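
    # Illustrative note: pushkey is the mechanism behind bookmark and phase
    # exchange; moving a remote bookmark, for instance, boils down to roughly
    # (variable names are placeholders, old/new are hex node strings):
    #
    #   repo.pushkey(b'bookmarks', b'@', oldhexnode, newhexnode)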

    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])


# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:  # journal file does not yet exist
                pass

    return a


def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))
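

# Illustrative example: undoname(b'.hg/store/journal') returns
# b'.hg/store/undo', mirroring the journal -> undo renames scheduled
# through aftertrans().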


def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)


def islocal(path):
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if b'sharedrepo' in createopts:
        requirements = set(createopts[b'sharedrepo'].requirements)
        if createopts.get(b'sharedrelative'):
            requirements.add(b'relshared')
        else:
            requirements.add(b'shared')

        return requirements

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {b'revlogv1'}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(b'store')
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(b'fncache')
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(b'dotencode')

    compengine = ui.config(b'format', b'revlog-compression')
    if compengine not in util.compengines:
        raise error.Abort(
            _(
                b'compression engine %s defined by '
                b'format.revlog-compression not available'
            )
            % compengine,
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    elif compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(b'generaldelta')
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.exp-use-side-data
    if ui.configbool(b'format', b'exp-use-side-data'):
        requirements.add(SIDEDATA_REQUIREMENT)
    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(SIDEDATA_REQUIREMENT)
        requirements.add(COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(b'treemanifest')

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.remove(b'revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard(b'generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'internal-phase'):
        requirements.add(b'internal-phase')

    if createopts.get(b'narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    return requirements
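

# Illustrative note: with stock configuration defaults at the time of this
# code, a plain `hg init` ends up with a requirements set along the lines of
# {b'revlogv1', b'store', b'fncache', b'dotencode', b'generaldelta',
# b'sparserevlog'}; the experimental and opt-in options above only add to it.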


def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}
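

# Illustrative sketch: an extension introducing its own creation option would
# wrap this function and swallow the key it understands (the option name below
# is hypothetical):
#
#   def _filterknowncreateopts(orig, ui, createopts):
#       unknown = orig(ui, createopts)
#       unknown.pop(b'myext-option', None)
#       return unknown
#
#   extensions.wrapfunction(
#       localrepo, 'filterknowncreateopts', _filterknowncreateopts
#   )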


def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
        The storage backend to use.
    lfs
        Repository will be created with ``lfs`` requirement. The lfs extension
        will automatically be loaded when the repository is accessed.
    narrowfiles
        Set up repository to support narrow file storage.
    sharedrepo
        Repository object from which storage should be shared.
    sharedrelative
        Boolean indicating if the path to the shared repo should be
        stored as relative. By default, the pointer to the "parent" repo
        is stored as an absolute path.
    shareditems
        Set of items to share to the new repository (in addition to storage).
    shallowfilestore
        Indicates that storage for files should be shallow (not all ancestor
        revisions are known).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError(
            b'filterknowncreateopts() did not return a dict'
        )

    if unknownopts:
        raise error.Abort(
            _(
                b'unable to create repository because of unknown '
                b'creation option: %s'
            )
            % b', '.join(sorted(unknownopts)),
            hint=_(b'is a required extension not loaded?'),
        )

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    if b'store' in requirements and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\0\2 dummy changelog to prevent using the old repo '
            b'layout',
        )

    scmutil.writerequires(hgvfs, requirements)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)
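

# Illustrative sketch, assuming a ui object is already at hand:
#
#   createrepository(ui, b'/tmp/newrepo')
#   repo = instance(ui, b'/tmp/newrepo', create=False)
#
# which is roughly what `hg init /tmp/newrepo` does via
# instance(..., create=True).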


def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one where
    # all attribute lookups result in an error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)