files: extract code for extra filtering of the `removed` entry into copies...
marmoute
r45467:edd08aa1 default
@@ -1,3831 +1,3789 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import random
13 13 import sys
14 14 import time
15 15 import weakref
16 16
17 17 from .i18n import _
18 18 from .node import (
19 19 bin,
20 20 hex,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 )
25 25 from .pycompat import (
26 26 delattr,
27 27 getattr,
28 28 )
29 29 from . import (
30 30 bookmarks,
31 31 branchmap,
32 32 bundle2,
33 33 changegroup,
34 34 color,
35 35 context,
36 36 dirstate,
37 37 dirstateguard,
38 38 discovery,
39 39 encoding,
40 40 error,
41 41 exchange,
42 42 extensions,
43 43 filelog,
44 44 hook,
45 45 lock as lockmod,
46 46 match as matchmod,
47 47 mergestate as mergestatemod,
48 48 mergeutil,
49 metadata,
49 50 namespaces,
50 51 narrowspec,
51 52 obsolete,
52 53 pathutil,
53 54 phases,
54 55 pushkey,
55 56 pycompat,
56 57 rcutil,
57 58 repoview,
58 59 revset,
59 60 revsetlang,
60 61 scmutil,
61 62 sparse,
62 63 store as storemod,
63 64 subrepoutil,
64 65 tags as tagsmod,
65 66 transaction,
66 67 txnutil,
67 68 util,
68 69 vfs as vfsmod,
69 70 )
70 71
71 72 from .interfaces import (
72 73 repository,
73 74 util as interfaceutil,
74 75 )
75 76
76 77 from .utils import (
77 78 hashutil,
78 79 procutil,
79 80 stringutil,
80 81 )
81 82
82 83 from .revlogutils import constants as revlogconst
83 84
84 85 release = lockmod.release
85 86 urlerr = util.urlerr
86 87 urlreq = util.urlreq
87 88
88 89 # set of (path, vfs-location) tuples. vfs-location is:
89 90 # - 'plain' for vfs relative paths
90 91 # - '' for svfs relative paths
91 92 _cachedfiles = set()
92 93
93 94
94 95 class _basefilecache(scmutil.filecache):
95 96 """All filecache usage on repo are done for logic that should be unfiltered
96 97 """
97 98
98 99 def __get__(self, repo, type=None):
99 100 if repo is None:
100 101 return self
101 102 # proxy to unfiltered __dict__ since filtered repo has no entry
102 103 unfi = repo.unfiltered()
103 104 try:
104 105 return unfi.__dict__[self.sname]
105 106 except KeyError:
106 107 pass
107 108 return super(_basefilecache, self).__get__(unfi, type)
108 109
109 110 def set(self, repo, value):
110 111 return super(_basefilecache, self).set(repo.unfiltered(), value)
111 112
112 113
113 114 class repofilecache(_basefilecache):
114 115 """filecache for files in .hg but outside of .hg/store"""
115 116
116 117 def __init__(self, *paths):
117 118 super(repofilecache, self).__init__(*paths)
118 119 for path in paths:
119 120 _cachedfiles.add((path, b'plain'))
120 121
121 122 def join(self, obj, fname):
122 123 return obj.vfs.join(fname)
123 124
124 125
125 126 class storecache(_basefilecache):
126 127 """filecache for files in the store"""
127 128
128 129 def __init__(self, *paths):
129 130 super(storecache, self).__init__(*paths)
130 131 for path in paths:
131 132 _cachedfiles.add((path, b''))
132 133
133 134 def join(self, obj, fname):
134 135 return obj.sjoin(fname)
135 136
136 137
137 138 class mixedrepostorecache(_basefilecache):
138 139 """filecache for a mix files in .hg/store and outside"""
139 140
140 141 def __init__(self, *pathsandlocations):
141 142 # scmutil.filecache only uses the path for passing back into our
142 143 # join(), so we can safely pass a list of paths and locations
143 144 super(mixedrepostorecache, self).__init__(*pathsandlocations)
144 145 _cachedfiles.update(pathsandlocations)
145 146
146 147 def join(self, obj, fnameandlocation):
147 148 fname, location = fnameandlocation
148 149 if location == b'plain':
149 150 return obj.vfs.join(fname)
150 151 else:
151 152 if location != b'':
152 153 raise error.ProgrammingError(
153 154 b'unexpected location: %s' % location
154 155 )
155 156 return obj.sjoin(fname)
156 157
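# Usage sketch for the three decorator flavors above (the property names
# below are illustrative): the decorator arguments name the files whose
# stat changes invalidate the cached value, and the flavor picks the vfs
# used to resolve them.
#
#   class somerepo(object):
#       @repofilecache(b'dirstate')                # .hg/dirstate
#       def example_plain(self):
#           pass
#
#       @storecache(b'00changelog.i')              # .hg/store/00changelog.i
#       def example_store(self):
#           pass
#
#       @mixedrepostorecache((b'bookmarks', b'plain'), (b'bookmarks', b''))
#       def example_mixed(self):
#           pass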
157 158
158 159 def isfilecached(repo, name):
159 160 """check if a repo has already cached "name" filecache-ed property
160 161
161 162 This returns a (cachedobj-or-None, iscached) tuple.
162 163 """
163 164 cacheentry = repo.unfiltered()._filecache.get(name, None)
164 165 if not cacheentry:
165 166 return None, False
166 167 return cacheentry.obj, True
167 168
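# Usage sketch for isfilecached() above (the property name is
# illustrative):
#
#   obj, cached = isfilecached(repo, 'changelog')
#   if cached:
#       pass  # reuse `obj` without triggering a (re)load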
168 169
169 170 class unfilteredpropertycache(util.propertycache):
170 171 """propertycache that apply to unfiltered repo only"""
171 172
172 173 def __get__(self, repo, type=None):
173 174 unfi = repo.unfiltered()
174 175 if unfi is repo:
175 176 return super(unfilteredpropertycache, self).__get__(unfi)
176 177 return getattr(unfi, self.name)
177 178
178 179
179 180 class filteredpropertycache(util.propertycache):
180 181 """propertycache that must take filtering in account"""
181 182
182 183 def cachevalue(self, obj, value):
183 184 object.__setattr__(obj, self.name, value)
184 185
185 186
186 187 def hasunfilteredcache(repo, name):
187 188 """check if a repo has an unfilteredpropertycache value for <name>"""
188 189 return name in vars(repo.unfiltered())
189 190
190 191
191 192 def unfilteredmethod(orig):
192 193 """decorate method that always need to be run on unfiltered version"""
193 194
194 195 def wrapper(repo, *args, **kwargs):
195 196 return orig(repo.unfiltered(), *args, **kwargs)
196 197
197 198 return wrapper
198 199
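# Application sketch for the unfiltered-repo helpers above (the method
# name is illustrative):
#
#   class somerepo(localrepository):
#       @unfilteredmethod
#       def destroying(self):
#           pass  # `self` here is always the unfiltered repository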
199 200
200 201 moderncaps = {
201 202 b'lookup',
202 203 b'branchmap',
203 204 b'pushkey',
204 205 b'known',
205 206 b'getbundle',
206 207 b'unbundle',
207 208 }
208 209 legacycaps = moderncaps.union({b'changegroupsubset'})
209 210
210 211
211 212 @interfaceutil.implementer(repository.ipeercommandexecutor)
212 213 class localcommandexecutor(object):
213 214 def __init__(self, peer):
214 215 self._peer = peer
215 216 self._sent = False
216 217 self._closed = False
217 218
218 219 def __enter__(self):
219 220 return self
220 221
221 222 def __exit__(self, exctype, excvalue, exctb):
222 223 self.close()
223 224
224 225 def callcommand(self, command, args):
225 226 if self._sent:
226 227 raise error.ProgrammingError(
227 228 b'callcommand() cannot be used after sendcommands()'
228 229 )
229 230
230 231 if self._closed:
231 232 raise error.ProgrammingError(
232 233 b'callcommand() cannot be used after close()'
233 234 )
234 235
235 236 # We don't need to support anything fancy. Just call the named
236 237 # method on the peer and return a resolved future.
237 238 fn = getattr(self._peer, pycompat.sysstr(command))
238 239
239 240 f = pycompat.futures.Future()
240 241
241 242 try:
242 243 result = fn(**pycompat.strkwargs(args))
243 244 except Exception:
244 245 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
245 246 else:
246 247 f.set_result(result)
247 248
248 249 return f
249 250
250 251 def sendcommands(self):
251 252 self._sent = True
252 253
253 254 def close(self):
254 255 self._closed = True
255 256
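# Usage sketch for the executor above, assuming `peer` is a localpeer
# (the command name and arguments are illustrative):
#
#   with peer.commandexecutor() as e:
#       f = e.callcommand(b'lookup', {b'key': b'tip'})
#       e.sendcommands()
#       node = f.result()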
256 257
257 258 @interfaceutil.implementer(repository.ipeercommands)
258 259 class localpeer(repository.peer):
259 260 '''peer for a local repo; reflects only the most recent API'''
260 261
261 262 def __init__(self, repo, caps=None):
262 263 super(localpeer, self).__init__()
263 264
264 265 if caps is None:
265 266 caps = moderncaps.copy()
266 267 self._repo = repo.filtered(b'served')
267 268 self.ui = repo.ui
268 269 self._caps = repo._restrictcapabilities(caps)
269 270
270 271 # Begin of _basepeer interface.
271 272
272 273 def url(self):
273 274 return self._repo.url()
274 275
275 276 def local(self):
276 277 return self._repo
277 278
278 279 def peer(self):
279 280 return self
280 281
281 282 def canpush(self):
282 283 return True
283 284
284 285 def close(self):
285 286 self._repo.close()
286 287
287 288 # End of _basepeer interface.
288 289
289 290 # Begin of _basewirecommands interface.
290 291
291 292 def branchmap(self):
292 293 return self._repo.branchmap()
293 294
294 295 def capabilities(self):
295 296 return self._caps
296 297
297 298 def clonebundles(self):
298 299 return self._repo.tryread(b'clonebundles.manifest')
299 300
300 301 def debugwireargs(self, one, two, three=None, four=None, five=None):
301 302 """Used to test argument passing over the wire"""
302 303 return b"%s %s %s %s %s" % (
303 304 one,
304 305 two,
305 306 pycompat.bytestr(three),
306 307 pycompat.bytestr(four),
307 308 pycompat.bytestr(five),
308 309 )
309 310
310 311 def getbundle(
311 312 self, source, heads=None, common=None, bundlecaps=None, **kwargs
312 313 ):
313 314 chunks = exchange.getbundlechunks(
314 315 self._repo,
315 316 source,
316 317 heads=heads,
317 318 common=common,
318 319 bundlecaps=bundlecaps,
319 320 **kwargs
320 321 )[1]
321 322 cb = util.chunkbuffer(chunks)
322 323
323 324 if exchange.bundle2requested(bundlecaps):
324 325 # When requesting a bundle2, getbundle returns a stream to make the
325 326 # wire level function happier. We need to build a proper object
326 327 # from it in local peer.
327 328 return bundle2.getunbundler(self.ui, cb)
328 329 else:
329 330 return changegroup.getunbundler(b'01', cb, None)
330 331
331 332 def heads(self):
332 333 return self._repo.heads()
333 334
334 335 def known(self, nodes):
335 336 return self._repo.known(nodes)
336 337
337 338 def listkeys(self, namespace):
338 339 return self._repo.listkeys(namespace)
339 340
340 341 def lookup(self, key):
341 342 return self._repo.lookup(key)
342 343
343 344 def pushkey(self, namespace, key, old, new):
344 345 return self._repo.pushkey(namespace, key, old, new)
345 346
346 347 def stream_out(self):
347 348 raise error.Abort(_(b'cannot perform stream clone against local peer'))
348 349
349 350 def unbundle(self, bundle, heads, url):
350 351 """apply a bundle on a repo
351 352
352 353 This function handles the repo locking itself."""
353 354 try:
354 355 try:
355 356 bundle = exchange.readbundle(self.ui, bundle, None)
356 357 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
357 358 if util.safehasattr(ret, b'getchunks'):
358 359 # This is a bundle20 object, turn it into an unbundler.
359 360 # This little dance should be dropped eventually when the
360 361 # API is finally improved.
361 362 stream = util.chunkbuffer(ret.getchunks())
362 363 ret = bundle2.getunbundler(self.ui, stream)
363 364 return ret
364 365 except Exception as exc:
365 366 # If the exception contains output salvaged from a bundle2
366 367 # reply, we need to make sure it is printed before continuing
367 368 # to fail. So we build a bundle2 with such output and consume
368 369 # it directly.
369 370 #
370 371 # This is not very elegant but allows a "simple" solution for
371 372 # issue4594
372 373 output = getattr(exc, '_bundle2salvagedoutput', ())
373 374 if output:
374 375 bundler = bundle2.bundle20(self._repo.ui)
375 376 for out in output:
376 377 bundler.addpart(out)
377 378 stream = util.chunkbuffer(bundler.getchunks())
378 379 b = bundle2.getunbundler(self.ui, stream)
379 380 bundle2.processbundle(self._repo, b)
380 381 raise
381 382 except error.PushRaced as exc:
382 383 raise error.ResponseError(
383 384 _(b'push failed:'), stringutil.forcebytestr(exc)
384 385 )
385 386
386 387 # End of _basewirecommands interface.
387 388
388 389 # Begin of peer interface.
389 390
390 391 def commandexecutor(self):
391 392 return localcommandexecutor(self)
392 393
393 394 # End of peer interface.
394 395
395 396
396 397 @interfaceutil.implementer(repository.ipeerlegacycommands)
397 398 class locallegacypeer(localpeer):
398 399 '''peer extension which implements legacy methods too; used for tests with
399 400 restricted capabilities'''
400 401
401 402 def __init__(self, repo):
402 403 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
403 404
404 405 # Begin of baselegacywirecommands interface.
405 406
406 407 def between(self, pairs):
407 408 return self._repo.between(pairs)
408 409
409 410 def branches(self, nodes):
410 411 return self._repo.branches(nodes)
411 412
412 413 def changegroup(self, nodes, source):
413 414 outgoing = discovery.outgoing(
414 415 self._repo, missingroots=nodes, missingheads=self._repo.heads()
415 416 )
416 417 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
417 418
418 419 def changegroupsubset(self, bases, heads, source):
419 420 outgoing = discovery.outgoing(
420 421 self._repo, missingroots=bases, missingheads=heads
421 422 )
422 423 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
423 424
424 425 # End of baselegacywirecommands interface.
425 426
426 427
427 428 # Increment the sub-version when the revlog v2 format changes to lock out old
428 429 # clients.
429 430 REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
430 431
431 432 # A repository with the sparserevlog feature will have delta chains that
432 433 # can spread over a larger span. Sparse reading cuts these large spans into
433 434 # pieces, so that each piece isn't too big.
434 435 # Without the sparserevlog capability, reading from the repository could use
435 436 # huge amounts of memory, because the whole span would be read at once,
436 437 # including all the intermediate revisions that aren't pertinent for the chain.
437 438 # This is why once a repository has enabled sparse-read, it becomes required.
438 439 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
439 440
440 441 # A repository with the sidedataflag requirement will allow storing extra
441 442 # information for revisions without altering their original hashes.
442 443 SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
443 444
444 445 # A repository with the copies-sidedata-changeset requirement will store
445 446 # copies related information in changeset's sidedata.
446 447 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
447 448
448 449 # The repository uses a persistent nodemap for the changelog and the manifest.
449 450 NODEMAP_REQUIREMENT = b'persistent-nodemap'
450 451
451 452 # Functions receiving (ui, features) that extensions can register to impact
452 453 # the ability to load repositories with custom requirements. Only
453 454 # functions defined in loaded extensions are called.
454 455 #
455 456 # The function receives a set of requirement strings that the repository
456 457 # is capable of opening. Functions will typically add elements to the
457 458 # set to reflect that the extension knows how to handle those requirements.
458 459 featuresetupfuncs = set()
459 460
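# Registration sketch for an extension (the feature and function names
# are illustrative); only callbacks whose defining module is a loaded
# extension are invoked, see gathersupportedrequirements() below:
#
#   def featuresetup(ui, supported):
#       supported.add(b'exp-myext-feature')
#
#   def extsetup(ui):
#       localrepo.featuresetupfuncs.add(featuresetup)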
460 461
461 462 def makelocalrepository(baseui, path, intents=None):
462 463 """Create a local repository object.
463 464
464 465 Given arguments needed to construct a local repository, this function
465 466 performs various early repository loading functionality (such as
466 467 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
467 468 the repository can be opened, derives a type suitable for representing
468 469 that repository, and returns an instance of it.
469 470
470 471 The returned object conforms to the ``repository.completelocalrepository``
471 472 interface.
472 473
473 474 The repository type is derived by calling a series of factory functions
474 475 for each aspect/interface of the final repository. These are defined by
475 476 ``REPO_INTERFACES``.
476 477
477 478 Each factory function is called to produce a type implementing a specific
478 479 interface. The cumulative list of returned types will be combined into a
479 480 new type and that type will be instantiated to represent the local
480 481 repository.
481 482
482 483 The factory functions each receive various state that may be consulted
483 484 as part of deriving a type.
484 485
485 486 Extensions should wrap these factory functions to customize repository type
486 487 creation. Note that an extension's wrapped function may be called even if
487 488 that extension is not loaded for the repo being constructed. Extensions
488 489 should check if their ``__name__`` appears in the
489 490 ``extensionmodulenames`` set passed to the factory function and no-op if
490 491 not.
491 492 """
492 493 ui = baseui.copy()
493 494 # Prevent copying repo configuration.
494 495 ui.copy = baseui.copy
495 496
496 497 # Working directory VFS rooted at repository root.
497 498 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
498 499
499 500 # Main VFS for .hg/ directory.
500 501 hgpath = wdirvfs.join(b'.hg')
501 502 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
502 503
503 504 # The .hg/ path should exist and should be a directory. All other
504 505 # cases are errors.
505 506 if not hgvfs.isdir():
506 507 try:
507 508 hgvfs.stat()
508 509 except OSError as e:
509 510 if e.errno != errno.ENOENT:
510 511 raise
511 512
512 513 raise error.RepoError(_(b'repository %s not found') % path)
513 514
514 515 # .hg/requires file contains a newline-delimited list of
515 516 # features/capabilities the opener (us) must have in order to use
516 517 # the repository. This file was introduced in Mercurial 0.9.2,
517 518 # which means very old repositories may not have one. We assume
518 519 # a missing file translates to no requirements.
519 520 try:
520 521 requirements = set(hgvfs.read(b'requires').splitlines())
521 522 except IOError as e:
522 523 if e.errno != errno.ENOENT:
523 524 raise
524 525 requirements = set()
525 526
526 527 # The .hg/hgrc file may load extensions or contain config options
527 528 # that influence repository construction. Attempt to load it and
528 529 # process any new extensions that it may have pulled in.
529 530 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
530 531 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
531 532 extensions.loadall(ui)
532 533 extensions.populateui(ui)
533 534
534 535 # Set of module names of extensions loaded for this repository.
535 536 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
536 537
537 538 supportedrequirements = gathersupportedrequirements(ui)
538 539
539 540 # We first validate that the requirements are known.
540 541 ensurerequirementsrecognized(requirements, supportedrequirements)
541 542
542 543 # Then we validate that the known set is reasonable to use together.
543 544 ensurerequirementscompatible(ui, requirements)
544 545
545 546 # TODO there are unhandled edge cases related to opening repositories with
546 547 # shared storage. If storage is shared, we should also test for requirements
547 548 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
548 549 # that repo, as that repo may load extensions needed to open it. This is a
549 550 # bit complicated because we don't want the other hgrc to overwrite settings
550 551 # in this hgrc.
551 552 #
552 553 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
553 554 # file when sharing repos. But if a requirement is added after the share is
554 555 # performed, thereby introducing a new requirement for the opener, we may
555 556 # not see that and could encounter a run-time error interacting with
556 557 # that shared store since it has an unknown-to-us requirement.
557 558
558 559 # At this point, we know we should be capable of opening the repository.
559 560 # Now get on with doing that.
560 561
561 562 features = set()
562 563
563 564 # The "store" part of the repository holds versioned data. How it is
564 565 # accessed is determined by various requirements. The ``shared`` or
565 566 # ``relshared`` requirements indicate the store lives in the path contained
566 567 # in the ``.hg/sharedpath`` file. This is an absolute path for
567 568 # ``shared`` and relative to ``.hg/`` for ``relshared``.
568 569 if b'shared' in requirements or b'relshared' in requirements:
569 570 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
570 571 if b'relshared' in requirements:
571 572 sharedpath = hgvfs.join(sharedpath)
572 573
573 574 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
574 575
575 576 if not sharedvfs.exists():
576 577 raise error.RepoError(
577 578 _(b'.hg/sharedpath points to nonexistent directory %s')
578 579 % sharedvfs.base
579 580 )
580 581
581 582 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
582 583
583 584 storebasepath = sharedvfs.base
584 585 cachepath = sharedvfs.join(b'cache')
585 586 else:
586 587 storebasepath = hgvfs.base
587 588 cachepath = hgvfs.join(b'cache')
588 589 wcachepath = hgvfs.join(b'wcache')
589 590
590 591 # The store has changed over time and the exact layout is dictated by
591 592 # requirements. The store interface abstracts differences across all
592 593 # of them.
593 594 store = makestore(
594 595 requirements,
595 596 storebasepath,
596 597 lambda base: vfsmod.vfs(base, cacheaudited=True),
597 598 )
598 599 hgvfs.createmode = store.createmode
599 600
600 601 storevfs = store.vfs
601 602 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
602 603
603 604 # The cache vfs is used to manage cache files.
604 605 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
605 606 cachevfs.createmode = store.createmode
606 607 # The cache vfs is used to manage cache files related to the working copy
607 608 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
608 609 wcachevfs.createmode = store.createmode
609 610
610 611 # Now resolve the type for the repository object. We do this by repeatedly
611 612 # calling a factory function to produce types for specific aspects of the
612 613 # repo's operation. The aggregate returned types are used as base classes
613 614 # for a dynamically-derived type, which will represent our new repository.
614 615
615 616 bases = []
616 617 extrastate = {}
617 618
618 619 for iface, fn in REPO_INTERFACES:
619 620 # We pass all potentially useful state to give extensions tons of
620 621 # flexibility.
621 622 typ = fn()(
622 623 ui=ui,
623 624 intents=intents,
624 625 requirements=requirements,
625 626 features=features,
626 627 wdirvfs=wdirvfs,
627 628 hgvfs=hgvfs,
628 629 store=store,
629 630 storevfs=storevfs,
630 631 storeoptions=storevfs.options,
631 632 cachevfs=cachevfs,
632 633 wcachevfs=wcachevfs,
633 634 extensionmodulenames=extensionmodulenames,
634 635 extrastate=extrastate,
635 636 baseclasses=bases,
636 637 )
637 638
638 639 if not isinstance(typ, type):
639 640 raise error.ProgrammingError(
640 641 b'unable to construct type for %s' % iface
641 642 )
642 643
643 644 bases.append(typ)
644 645
645 646 # type() allows you to use characters in type names that wouldn't be
646 647 # recognized as Python symbols in source code. We abuse that to add
647 648 # rich information about our constructed repo.
648 649 name = pycompat.sysstr(
649 650 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
650 651 )
651 652
652 653 cls = type(name, tuple(bases), {})
653 654
654 655 return cls(
655 656 baseui=baseui,
656 657 ui=ui,
657 658 origroot=path,
658 659 wdirvfs=wdirvfs,
659 660 hgvfs=hgvfs,
660 661 requirements=requirements,
661 662 supportedrequirements=supportedrequirements,
662 663 sharedpath=storebasepath,
663 664 store=store,
664 665 cachevfs=cachevfs,
665 666 wcachevfs=wcachevfs,
666 667 features=features,
667 668 intents=intents,
668 669 )
669 670
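# Wrapping sketch for the factory functions listed in REPO_INTERFACES
# (the extension and class names are illustrative); note the
# `extensionmodulenames` no-op check recommended in the docstring above:
#
#   def wrapped_makemain(orig, **kwargs):
#       cls = orig(**kwargs)
#       if 'myext' not in kwargs['extensionmodulenames']:
#           return cls
#       class myextrepo(cls):
#           pass
#       return myextrepo
#
#   def uisetup(ui):
#       extensions.wrapfunction(localrepo, 'makemain', wrapped_makemain)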
670 671
671 672 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
672 673 """Load hgrc files/content into a ui instance.
673 674
674 675 This is called during repository opening to load any additional
675 676 config files or settings relevant to the current repository.
676 677
677 678 Returns a bool indicating whether any additional configs were loaded.
678 679
679 680 Extensions should monkeypatch this function to modify how per-repo
680 681 configs are loaded. For example, an extension may wish to pull in
681 682 configs from alternate files or sources.
682 683 """
683 684 if not rcutil.use_repo_hgrc():
684 685 return False
685 686 try:
686 687 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
687 688 return True
688 689 except IOError:
689 690 return False
690 691
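# Monkeypatching sketch per the docstring above (the extra file name
# b'hgrc-ext' is purely illustrative):
#
#   def wrapped_loadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
#       loaded = orig(ui, wdirvfs, hgvfs, requirements)
#       try:
#           ui.readconfig(hgvfs.join(b'hgrc-ext'), root=wdirvfs.base)
#           return True
#       except IOError:
#           return loaded
#
#   extensions.wrapfunction(localrepo, 'loadhgrc', wrapped_loadhgrc)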
691 692
692 693 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
693 694 """Perform additional actions after .hg/hgrc is loaded.
694 695
695 696 This function is called during repository loading immediately after
696 697 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
697 698
698 699 The function can be used to validate configs, automatically add
699 700 options (including extensions) based on requirements, etc.
700 701 """
701 702
702 703 # Map of requirements to list of extensions to load automatically when
703 704 # requirement is present.
704 705 autoextensions = {
705 706 b'git': [b'git'],
706 707 b'largefiles': [b'largefiles'],
707 708 b'lfs': [b'lfs'],
708 709 }
709 710
710 711 for requirement, names in sorted(autoextensions.items()):
711 712 if requirement not in requirements:
712 713 continue
713 714
714 715 for name in names:
715 716 if not ui.hasconfig(b'extensions', name):
716 717 ui.setconfig(b'extensions', name, b'', source=b'autoload')
717 718
718 719
719 720 def gathersupportedrequirements(ui):
720 721 """Determine the complete set of recognized requirements."""
721 722 # Start with all requirements supported by this file.
722 723 supported = set(localrepository._basesupported)
723 724
724 725 # Execute ``featuresetupfuncs`` entries if they belong to an extension
725 726 # relevant to this ui instance.
726 727 modules = {m.__name__ for n, m in extensions.extensions(ui)}
727 728
728 729 for fn in featuresetupfuncs:
729 730 if fn.__module__ in modules:
730 731 fn(ui, supported)
731 732
732 733 # Add derived requirements from registered compression engines.
733 734 for name in util.compengines:
734 735 engine = util.compengines[name]
735 736 if engine.available() and engine.revlogheader():
736 737 supported.add(b'exp-compression-%s' % name)
737 738 if engine.name() == b'zstd':
738 739 supported.add(b'revlog-compression-zstd')
739 740
740 741 return supported
741 742
742 743
743 744 def ensurerequirementsrecognized(requirements, supported):
744 745 """Validate that a set of local requirements is recognized.
745 746
746 747 Receives a set of requirements. Raises an ``error.RepoError`` if there
747 748 exists any requirement in that set that currently loaded code doesn't
748 749 recognize.
749 750
750 751 Returns a set of supported requirements.
751 752 """
752 753 missing = set()
753 754
754 755 for requirement in requirements:
755 756 if requirement in supported:
756 757 continue
757 758
758 759 if not requirement or not requirement[0:1].isalnum():
759 760 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
760 761
761 762 missing.add(requirement)
762 763
763 764 if missing:
764 765 raise error.RequirementError(
765 766 _(b'repository requires features unknown to this Mercurial: %s')
766 767 % b' '.join(sorted(missing)),
767 768 hint=_(
768 769 b'see https://mercurial-scm.org/wiki/MissingRequirement '
769 770 b'for more information'
770 771 ),
771 772 )
772 773
773 774
774 775 def ensurerequirementscompatible(ui, requirements):
775 776 """Validates that a set of recognized requirements is mutually compatible.
776 777
777 778 Some requirements may not be compatible with others or require
778 779 config options that aren't enabled. This function is called during
779 780 repository opening to ensure that the set of requirements needed
780 781 to open a repository is sane and compatible with config options.
781 782
782 783 Extensions can monkeypatch this function to perform additional
783 784 checking.
784 785
785 786 ``error.RepoError`` should be raised on failure.
786 787 """
787 788 if b'exp-sparse' in requirements and not sparse.enabled:
788 789 raise error.RepoError(
789 790 _(
790 791 b'repository is using sparse feature but '
791 792 b'sparse is not enabled; enable the '
792 793 b'"sparse" extensions to access'
793 794 )
794 795 )
795 796
796 797
797 798 def makestore(requirements, path, vfstype):
798 799 """Construct a storage object for a repository."""
799 800 if b'store' in requirements:
800 801 if b'fncache' in requirements:
801 802 return storemod.fncachestore(
802 803 path, vfstype, b'dotencode' in requirements
803 804 )
804 805
805 806 return storemod.encodedstore(path, vfstype)
806 807
807 808 return storemod.basicstore(path, vfstype)
808 809
809 810
810 811 def resolvestorevfsoptions(ui, requirements, features):
811 812 """Resolve the options to pass to the store vfs opener.
812 813
813 814 The returned dict is used to influence behavior of the storage layer.
814 815 """
815 816 options = {}
816 817
817 818 if b'treemanifest' in requirements:
818 819 options[b'treemanifest'] = True
819 820
820 821 # experimental config: format.manifestcachesize
821 822 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
822 823 if manifestcachesize is not None:
823 824 options[b'manifestcachesize'] = manifestcachesize
824 825
825 826 # In the absence of another requirement superseding a revlog-related
826 827 # requirement, we have to assume the repo is using revlog version 0.
827 828 # This revlog format is super old and we don't bother trying to parse
828 829 # opener options for it because those options wouldn't do anything
829 830 # meaningful on such old repos.
830 831 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
831 832 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
832 833 else: # explicitly mark repo as using revlogv0
833 834 options[b'revlogv0'] = True
834 835
835 836 if COPIESSDC_REQUIREMENT in requirements:
836 837 options[b'copies-storage'] = b'changeset-sidedata'
837 838 else:
838 839 writecopiesto = ui.config(b'experimental', b'copies.write-to')
839 840 copiesextramode = (b'changeset-only', b'compatibility')
840 841 if writecopiesto in copiesextramode:
841 842 options[b'copies-storage'] = b'extra'
842 843
843 844 return options
844 845
845 846
846 847 def resolverevlogstorevfsoptions(ui, requirements, features):
847 848 """Resolve opener options specific to revlogs."""
848 849
849 850 options = {}
850 851 options[b'flagprocessors'] = {}
851 852
852 853 if b'revlogv1' in requirements:
853 854 options[b'revlogv1'] = True
854 855 if REVLOGV2_REQUIREMENT in requirements:
855 856 options[b'revlogv2'] = True
856 857
857 858 if b'generaldelta' in requirements:
858 859 options[b'generaldelta'] = True
859 860
860 861 # experimental config: format.chunkcachesize
861 862 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
862 863 if chunkcachesize is not None:
863 864 options[b'chunkcachesize'] = chunkcachesize
864 865
865 866 deltabothparents = ui.configbool(
866 867 b'storage', b'revlog.optimize-delta-parent-choice'
867 868 )
868 869 options[b'deltabothparents'] = deltabothparents
869 870
870 871 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
871 872 lazydeltabase = False
872 873 if lazydelta:
873 874 lazydeltabase = ui.configbool(
874 875 b'storage', b'revlog.reuse-external-delta-parent'
875 876 )
876 877 if lazydeltabase is None:
877 878 lazydeltabase = not scmutil.gddeltaconfig(ui)
878 879 options[b'lazydelta'] = lazydelta
879 880 options[b'lazydeltabase'] = lazydeltabase
880 881
881 882 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
882 883 if 0 <= chainspan:
883 884 options[b'maxdeltachainspan'] = chainspan
884 885
885 886 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
886 887 if mmapindexthreshold is not None:
887 888 options[b'mmapindexthreshold'] = mmapindexthreshold
888 889
889 890 withsparseread = ui.configbool(b'experimental', b'sparse-read')
890 891 srdensitythres = float(
891 892 ui.config(b'experimental', b'sparse-read.density-threshold')
892 893 )
893 894 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
894 895 options[b'with-sparse-read'] = withsparseread
895 896 options[b'sparse-read-density-threshold'] = srdensitythres
896 897 options[b'sparse-read-min-gap-size'] = srmingapsize
897 898
898 899 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
899 900 options[b'sparse-revlog'] = sparserevlog
900 901 if sparserevlog:
901 902 options[b'generaldelta'] = True
902 903
903 904 sidedata = SIDEDATA_REQUIREMENT in requirements
904 905 options[b'side-data'] = sidedata
905 906
906 907 maxchainlen = None
907 908 if sparserevlog:
908 909 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
909 910 # experimental config: format.maxchainlen
910 911 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
911 912 if maxchainlen is not None:
912 913 options[b'maxchainlen'] = maxchainlen
913 914
914 915 for r in requirements:
915 916 # we allow multiple compression engine requirements to co-exist because,
916 917 # strictly speaking, revlogs seem to support mixed compression styles.
917 918 #
918 919 # The compression used for new entries will be "the last one"
919 920 prefix = r.startswith
920 921 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
921 922 options[b'compengine'] = r.split(b'-', 2)[2]
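# illustration: b'revlog-compression-zstd'.split(b'-', 2)[2] yields
# b'zstd', the engine name that will be used for new entries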
922 923
923 924 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
924 925 if options[b'zlib.level'] is not None:
925 926 if not (0 <= options[b'zlib.level'] <= 9):
926 927 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
927 928 raise error.Abort(msg % options[b'zlib.level'])
928 929 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
929 930 if options[b'zstd.level'] is not None:
930 931 if not (0 <= options[b'zstd.level'] <= 22):
931 932 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
932 933 raise error.Abort(msg % options[b'zstd.level'])
933 934
934 935 if repository.NARROW_REQUIREMENT in requirements:
935 936 options[b'enableellipsis'] = True
936 937
937 938 if ui.configbool(b'experimental', b'rust.index'):
938 939 options[b'rust.index'] = True
939 940 if NODEMAP_REQUIREMENT in requirements:
940 941 options[b'persistent-nodemap'] = True
941 942 if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
942 943 options[b'persistent-nodemap.mmap'] = True
943 944 epnm = ui.config(b'storage', b'revlog.nodemap.mode')
944 945 options[b'persistent-nodemap.mode'] = epnm
945 946 if ui.configbool(b'devel', b'persistent-nodemap'):
946 947 options[b'devel-force-nodemap'] = True
947 948
948 949 return options
949 950
950 951
951 952 def makemain(**kwargs):
952 953 """Produce a type conforming to ``ilocalrepositorymain``."""
953 954 return localrepository
954 955
955 956
956 957 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
957 958 class revlogfilestorage(object):
958 959 """File storage when using revlogs."""
959 960
960 961 def file(self, path):
961 962 if path[0:1] == b'/':
962 963 path = path[1:]
963 964
964 965 return filelog.filelog(self.svfs, path)
965 966
966 967
967 968 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
968 969 class revlognarrowfilestorage(object):
969 970 """File storage when using revlogs and narrow files."""
970 971
971 972 def file(self, path):
972 973 if path[0:1] == b'/':
973 974 path = path[1:]
974 975
975 976 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
976 977
977 978
978 979 def makefilestorage(requirements, features, **kwargs):
979 980 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
980 981 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
981 982 features.add(repository.REPO_FEATURE_STREAM_CLONE)
982 983
983 984 if repository.NARROW_REQUIREMENT in requirements:
984 985 return revlognarrowfilestorage
985 986 else:
986 987 return revlogfilestorage
987 988
988 989
989 990 # List of repository interfaces and factory functions for them. Each
990 991 # will be called in order during ``makelocalrepository()`` to iteratively
991 992 # derive the final type for a local repository instance. We capture the
992 993 # function as a lambda so we don't hold a reference and the module-level
993 994 # functions can be wrapped.
994 995 REPO_INTERFACES = [
995 996 (repository.ilocalrepositorymain, lambda: makemain),
996 997 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
997 998 ]
998 999
999 1000
1000 1001 @interfaceutil.implementer(repository.ilocalrepositorymain)
1001 1002 class localrepository(object):
1002 1003 """Main class for representing local repositories.
1003 1004
1004 1005 All local repositories are instances of this class.
1005 1006
1006 1007 Constructed on its own, instances of this class are not usable as
1007 1008 repository objects. To obtain a usable repository object, call
1008 1009 ``hg.repository()``, ``localrepo.instance()``, or
1009 1010 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1010 1011 ``instance()`` adds support for creating new repositories.
1011 1012 ``hg.repository()`` adds more extension integration, including calling
1012 1013 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1013 1014 used.
1014 1015 """
1015 1016
1016 1017 # obsolete experimental requirements:
1017 1018 # - manifestv2: An experimental new manifest format that allowed
1018 1019 # for stem compression of long paths. Experiment ended up not
1019 1020 # being successful (repository sizes went up due to worse delta
1020 1021 # chains), and the code was deleted in 4.6.
1021 1022 supportedformats = {
1022 1023 b'revlogv1',
1023 1024 b'generaldelta',
1024 1025 b'treemanifest',
1025 1026 COPIESSDC_REQUIREMENT,
1026 1027 REVLOGV2_REQUIREMENT,
1027 1028 SIDEDATA_REQUIREMENT,
1028 1029 SPARSEREVLOG_REQUIREMENT,
1029 1030 NODEMAP_REQUIREMENT,
1030 1031 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1031 1032 }
1032 1033 _basesupported = supportedformats | {
1033 1034 b'store',
1034 1035 b'fncache',
1035 1036 b'shared',
1036 1037 b'relshared',
1037 1038 b'dotencode',
1038 1039 b'exp-sparse',
1039 1040 b'internal-phase',
1040 1041 }
1041 1042
1042 1043 # list of prefixes for files which can be written without 'wlock'
1043 1044 # Extensions should extend this list when needed
1044 1045 _wlockfreeprefix = {
1045 1046 # We might consider requiring 'wlock' for the next
1046 1047 # two, but pretty much all the existing code assumes
1047 1048 # wlock is not needed so we keep them excluded for
1048 1049 # now.
1049 1050 b'hgrc',
1050 1051 b'requires',
1051 1052 # XXX cache is a complicated business; someone
1052 1053 # should investigate this in depth at some point
1053 1054 b'cache/',
1054 1055 # XXX shouldn't dirstate be covered by the wlock?
1055 1056 b'dirstate',
1056 1057 # XXX bisect was still a bit too messy at the time
1057 1058 # this changeset was introduced. Someone should fix
1058 1059 # the remaining bit and drop this line
1059 1060 b'bisect.state',
1060 1061 }
1061 1062
1062 1063 def __init__(
1063 1064 self,
1064 1065 baseui,
1065 1066 ui,
1066 1067 origroot,
1067 1068 wdirvfs,
1068 1069 hgvfs,
1069 1070 requirements,
1070 1071 supportedrequirements,
1071 1072 sharedpath,
1072 1073 store,
1073 1074 cachevfs,
1074 1075 wcachevfs,
1075 1076 features,
1076 1077 intents=None,
1077 1078 ):
1078 1079 """Create a new local repository instance.
1079 1080
1080 1081 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1081 1082 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1082 1083 object.
1083 1084
1084 1085 Arguments:
1085 1086
1086 1087 baseui
1087 1088 ``ui.ui`` instance that ``ui`` argument was based off of.
1088 1089
1089 1090 ui
1090 1091 ``ui.ui`` instance for use by the repository.
1091 1092
1092 1093 origroot
1093 1094 ``bytes`` path to working directory root of this repository.
1094 1095
1095 1096 wdirvfs
1096 1097 ``vfs.vfs`` rooted at the working directory.
1097 1098
1098 1099 hgvfs
1099 1100 ``vfs.vfs`` rooted at .hg/
1100 1101
1101 1102 requirements
1102 1103 ``set`` of bytestrings representing repository opening requirements.
1103 1104
1104 1105 supportedrequirements
1105 1106 ``set`` of bytestrings representing repository requirements that we
1106 1107 know how to open. May be a superset of ``requirements``.
1107 1108
1108 1109 sharedpath
1109 1110 ``bytes`` defining the path to the storage base directory. Points to a
1110 1111 ``.hg/`` directory somewhere.
1111 1112
1112 1113 store
1113 1114 ``store.basicstore`` (or derived) instance providing access to
1114 1115 versioned storage.
1115 1116
1116 1117 cachevfs
1117 1118 ``vfs.vfs`` used for cache files.
1118 1119
1119 1120 wcachevfs
1120 1121 ``vfs.vfs`` used for cache files related to the working copy.
1121 1122
1122 1123 features
1123 1124 ``set`` of bytestrings defining features/capabilities of this
1124 1125 instance.
1125 1126
1126 1127 intents
1127 1128 ``set`` of system strings indicating what this repo will be used
1128 1129 for.
1129 1130 """
1130 1131 self.baseui = baseui
1131 1132 self.ui = ui
1132 1133 self.origroot = origroot
1133 1134 # vfs rooted at working directory.
1134 1135 self.wvfs = wdirvfs
1135 1136 self.root = wdirvfs.base
1136 1137 # vfs rooted at .hg/. Used to access most non-store paths.
1137 1138 self.vfs = hgvfs
1138 1139 self.path = hgvfs.base
1139 1140 self.requirements = requirements
1140 1141 self.supported = supportedrequirements
1141 1142 self.sharedpath = sharedpath
1142 1143 self.store = store
1143 1144 self.cachevfs = cachevfs
1144 1145 self.wcachevfs = wcachevfs
1145 1146 self.features = features
1146 1147
1147 1148 self.filtername = None
1148 1149
1149 1150 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1150 1151 b'devel', b'check-locks'
1151 1152 ):
1152 1153 self.vfs.audit = self._getvfsward(self.vfs.audit)
1153 1154 # A list of callbacks to shape the phase if no data were found.
1154 1155 # Callbacks are in the form: func(repo, roots) --> processed root.
1155 1156 # This list is to be filled by extensions during repo setup
1156 1157 self._phasedefaults = []
1157 1158
1158 1159 color.setup(self.ui)
1159 1160
1160 1161 self.spath = self.store.path
1161 1162 self.svfs = self.store.vfs
1162 1163 self.sjoin = self.store.join
1163 1164 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1164 1165 b'devel', b'check-locks'
1165 1166 ):
1166 1167 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1167 1168 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1168 1169 else: # standard vfs
1169 1170 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1170 1171
1171 1172 self._dirstatevalidatewarned = False
1172 1173
1173 1174 self._branchcaches = branchmap.BranchMapCache()
1174 1175 self._revbranchcache = None
1175 1176 self._filterpats = {}
1176 1177 self._datafilters = {}
1177 1178 self._transref = self._lockref = self._wlockref = None
1178 1179
1179 1180 # A cache for various files under .hg/ that tracks file changes
1180 1181 # (used by the filecache decorator)
1181 1182 #
1182 1183 # Maps a property name to its util.filecacheentry
1183 1184 self._filecache = {}
1184 1185
1185 1186 # hold sets of revisions to be filtered
1186 1187 # should be cleared when something might have changed the filter value:
1187 1188 # - new changesets,
1188 1189 # - phase change,
1189 1190 # - new obsolescence marker,
1190 1191 # - working directory parent change,
1191 1192 # - bookmark changes
1192 1193 self.filteredrevcache = {}
1193 1194
1194 1195 # post-dirstate-status hooks
1195 1196 self._postdsstatus = []
1196 1197
1197 1198 # generic mapping between names and nodes
1198 1199 self.names = namespaces.namespaces()
1199 1200
1200 1201 # Key to signature value.
1201 1202 self._sparsesignaturecache = {}
1202 1203 # Signature to cached matcher instance.
1203 1204 self._sparsematchercache = {}
1204 1205
1205 1206 self._extrafilterid = repoview.extrafilter(ui)
1206 1207
1207 1208 self.filecopiesmode = None
1208 1209 if COPIESSDC_REQUIREMENT in self.requirements:
1209 1210 self.filecopiesmode = b'changeset-sidedata'
1210 1211
1211 1212 def _getvfsward(self, origfunc):
1212 1213 """build a ward for self.vfs"""
1213 1214 rref = weakref.ref(self)
1214 1215
1215 1216 def checkvfs(path, mode=None):
1216 1217 ret = origfunc(path, mode=mode)
1217 1218 repo = rref()
1218 1219 if (
1219 1220 repo is None
1220 1221 or not util.safehasattr(repo, b'_wlockref')
1221 1222 or not util.safehasattr(repo, b'_lockref')
1222 1223 ):
1223 1224 return
1224 1225 if mode in (None, b'r', b'rb'):
1225 1226 return
1226 1227 if path.startswith(repo.path):
1227 1228 # truncate name relative to the repository (.hg)
1228 1229 path = path[len(repo.path) + 1 :]
1229 1230 if path.startswith(b'cache/'):
1230 1231 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1231 1232 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1232 1233 if path.startswith(b'journal.') or path.startswith(b'undo.'):
1233 1234 # journal is covered by 'lock'
1234 1235 if repo._currentlock(repo._lockref) is None:
1235 1236 repo.ui.develwarn(
1236 1237 b'write with no lock: "%s"' % path,
1237 1238 stacklevel=3,
1238 1239 config=b'check-locks',
1239 1240 )
1240 1241 elif repo._currentlock(repo._wlockref) is None:
1241 1242 # rest of vfs files are covered by 'wlock'
1242 1243 #
1243 1244 # exclude special files
1244 1245 for prefix in self._wlockfreeprefix:
1245 1246 if path.startswith(prefix):
1246 1247 return
1247 1248 repo.ui.develwarn(
1248 1249 b'write with no wlock: "%s"' % path,
1249 1250 stacklevel=3,
1250 1251 config=b'check-locks',
1251 1252 )
1252 1253 return ret
1253 1254
1254 1255 return checkvfs
1255 1256
1256 1257 def _getsvfsward(self, origfunc):
1257 1258 """build a ward for self.svfs"""
1258 1259 rref = weakref.ref(self)
1259 1260
1260 1261 def checksvfs(path, mode=None):
1261 1262 ret = origfunc(path, mode=mode)
1262 1263 repo = rref()
1263 1264 if repo is None or not util.safehasattr(repo, b'_lockref'):
1264 1265 return
1265 1266 if mode in (None, b'r', b'rb'):
1266 1267 return
1267 1268 if path.startswith(repo.sharedpath):
1268 1269 # truncate name relative to the repository (.hg)
1269 1270 path = path[len(repo.sharedpath) + 1 :]
1270 1271 if repo._currentlock(repo._lockref) is None:
1271 1272 repo.ui.develwarn(
1272 1273 b'write with no lock: "%s"' % path, stacklevel=4
1273 1274 )
1274 1275 return ret
1275 1276
1276 1277 return checksvfs
1277 1278
1278 1279 def close(self):
1279 1280 self._writecaches()
1280 1281
1281 1282 def _writecaches(self):
1282 1283 if self._revbranchcache:
1283 1284 self._revbranchcache.write()
1284 1285
1285 1286 def _restrictcapabilities(self, caps):
1286 1287 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1287 1288 caps = set(caps)
1288 1289 capsblob = bundle2.encodecaps(
1289 1290 bundle2.getrepocaps(self, role=b'client')
1290 1291 )
1291 1292 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1292 1293 return caps
1293 1294
1294 1295 def _writerequirements(self):
1295 1296 scmutil.writerequires(self.vfs, self.requirements)
1296 1297
1297 1298 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
1298 1299 # self -> auditor -> self._checknested -> self
1299 1300
1300 1301 @property
1301 1302 def auditor(self):
1302 1303 # This is only used by context.workingctx.match in order to
1303 1304 # detect files in subrepos.
1304 1305 return pathutil.pathauditor(self.root, callback=self._checknested)
1305 1306
1306 1307 @property
1307 1308 def nofsauditor(self):
1308 1309 # This is only used by context.basectx.match in order to detect
1309 1310 # files in subrepos.
1310 1311 return pathutil.pathauditor(
1311 1312 self.root, callback=self._checknested, realfs=False, cached=True
1312 1313 )
1313 1314
1314 1315 def _checknested(self, path):
1315 1316 """Determine if path is a legal nested repository."""
1316 1317 if not path.startswith(self.root):
1317 1318 return False
1318 1319 subpath = path[len(self.root) + 1 :]
1319 1320 normsubpath = util.pconvert(subpath)
1320 1321
1321 1322 # XXX: Checking against the current working copy is wrong in
1322 1323 # the sense that it can reject things like
1323 1324 #
1324 1325 # $ hg cat -r 10 sub/x.txt
1325 1326 #
1326 1327 # if sub/ is no longer a subrepository in the working copy
1327 1328 # parent revision.
1328 1329 #
1329 1330 # However, it can of course also allow things that would have
1330 1331 # been rejected before, such as the above cat command if sub/
1331 1332 # is a subrepository now, but was a normal directory before.
1332 1333 # The old path auditor would have rejected by mistake since it
1333 1334 # panics when it sees sub/.hg/.
1334 1335 #
1335 1336 # All in all, checking against the working copy seems sensible
1336 1337 # since we want to prevent access to nested repositories on
1337 1338 # the filesystem *now*.
1338 1339 ctx = self[None]
1339 1340 parts = util.splitpath(subpath)
1340 1341 while parts:
1341 1342 prefix = b'/'.join(parts)
1342 1343 if prefix in ctx.substate:
1343 1344 if prefix == normsubpath:
1344 1345 return True
1345 1346 else:
1346 1347 sub = ctx.sub(prefix)
1347 1348 return sub.checknested(subpath[len(prefix) + 1 :])
1348 1349 else:
1349 1350 parts.pop()
1350 1351 return False
1351 1352
1352 1353 def peer(self):
1353 1354 return localpeer(self) # not cached to avoid reference cycle
1354 1355
1355 1356 def unfiltered(self):
1356 1357 """Return unfiltered version of the repository
1357 1358
1358 1359 Intended to be overwritten by filtered repo."""
1359 1360 return self
1360 1361
1361 1362 def filtered(self, name, visibilityexceptions=None):
1362 1363 """Return a filtered version of a repository
1363 1364
1364 1365 The `name` parameter is the identifier of the requested view. This
1365 1366 will return a repoview object set "exactly" to the specified view.
1366 1367
1367 1368 This function does not apply recursive filtering to a repository. For
1368 1369 example calling `repo.filtered("served")` will return a repoview using
1369 1370 the "served" view, regardless of the initial view used by `repo`.
1370 1371
1371 1372 In other words, there is always only one level of `repoview` "filtering".
1372 1373 """
1373 1374 if self._extrafilterid is not None and b'%' not in name:
1374 1375 name = name + b'%' + self._extrafilterid
1375 1376
1376 1377 cls = repoview.newtype(self.unfiltered().__class__)
1377 1378 return cls(self, name, visibilityexceptions)
1378 1379
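# Usage sketch: filtering always restarts from the unfiltered repo, so
# chained calls do not nest:
#
#   view = repo.filtered(b'visible')
#   served = view.filtered(b'served')  # exactly the 'served' view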
1379 1380 @mixedrepostorecache(
1380 1381 (b'bookmarks', b'plain'),
1381 1382 (b'bookmarks.current', b'plain'),
1382 1383 (b'bookmarks', b''),
1383 1384 (b'00changelog.i', b''),
1384 1385 )
1385 1386 def _bookmarks(self):
1386 1387 # Since the multiple files involved in the transaction cannot be
1387 1388 # written atomically (with current repository format), there is a race
1388 1389 # condition here.
1389 1390 #
1390 1391 # 1) changelog content A is read
1391 1392 # 2) outside transaction update changelog to content B
1392 1393 # 3) outside transaction update bookmark file referring to content B
1393 1394 # 4) bookmarks file content is read and filtered against changelog-A
1394 1395 #
1395 1396 # When this happens, bookmarks against nodes missing from A are dropped.
1396 1397 #
1397 1398 # Having this happen during a read is not great, but it becomes worse
1398 1399 # when it happens during a write, because the bookmarks to the "unknown"
1399 1400 # nodes will be dropped for good. However, writes happen within locks.
1400 1401 # This locking makes it possible to have a race-free consistent read.
1401 1402 # For this purpose, data read from disk before locking is
1402 1403 # "invalidated" right after the locks are taken. These invalidations are
1403 1404 # "light": the `filecache` mechanism keeps the data in memory and will
1404 1405 # reuse it if the underlying files did not change. Not parsing the
1405 1406 # same data multiple times helps performance.
1406 1407 #
1407 1408 # Unfortunately, in the case described above, the files tracked by the
1408 1409 # bookmarks file cache might not have changed, but the in-memory
1409 1410 # content is still "wrong" because we used an older changelog content
1410 1411 # to process the on-disk data. So after locking, the changelog would be
1411 1412 # refreshed but `_bookmarks` would be preserved.
1412 1413 # Adding `00changelog.i` to the list of tracked files is not
1413 1414 # enough, because at the time we build the content for `_bookmarks` in
1414 1415 # (4), the changelog file has already diverged from the content used
1415 1416 # for loading `changelog` in (1)
1416 1417 #
1417 1418 # To prevent the issue, we force the changelog to be explicitly
1418 1419 # reloaded while computing `_bookmarks`. The data race can still happen
1419 1420 # without the lock (with a narrower window), but it would no longer go
1420 1421 # undetected during the lock time refresh.
1421 1422 #
1422 1423 # The new schedule is as follows:
1423 1424 #
1424 1425 # 1) filecache logic detects that `_bookmarks` needs to be computed
1425 1426 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1426 1427 # 3) We force the `changelog` filecache to be tested
1427 1428 # 4) cachestat for `changelog` are captured (for changelog)
1428 1429 # 5) `_bookmarks` is computed and cached
1429 1430 #
1430 1431 # The step in (3) ensures we have a changelog at least as recent as the
1431 1432 # cache stat computed in (1). As a result, at locking time:
1432 1433 # * if the changelog did not change since (1) -> we can reuse the data
1433 1434 # * otherwise -> the bookmarks get refreshed.
1434 1435 self._refreshchangelog()
1435 1436 return bookmarks.bmstore(self)
1436 1437
1437 1438 def _refreshchangelog(self):
1438 1439 """make sure the in memory changelog match the on-disk one"""
1439 1440 if 'changelog' in vars(self) and self.currenttransaction() is None:
1440 1441 del self.changelog
1441 1442
1442 1443 @property
1443 1444 def _activebookmark(self):
1444 1445 return self._bookmarks.active
1445 1446
1446 1447 # _phasesets depend on changelog. What we need is to call
1447 1448 # _phasecache.invalidate() if '00changelog.i' was changed, but that
1448 1449 # can't be easily expressed with the filecache mechanism.
1449 1450 @storecache(b'phaseroots', b'00changelog.i')
1450 1451 def _phasecache(self):
1451 1452 return phases.phasecache(self, self._phasedefaults)
1452 1453
1453 1454 @storecache(b'obsstore')
1454 1455 def obsstore(self):
1455 1456 return obsolete.makestore(self.ui, self)
1456 1457
1457 1458 @storecache(b'00changelog.i')
1458 1459 def changelog(self):
1459 1460 # load dirstate before changelog to avoid a race; see issue6303
1460 1461 self.dirstate.prefetch_parents()
1461 1462 return self.store.changelog(txnutil.mayhavepending(self.root))
1462 1463
1463 1464 @storecache(b'00manifest.i')
1464 1465 def manifestlog(self):
1465 1466 return self.store.manifestlog(self, self._storenarrowmatch)
1466 1467
1467 1468 @repofilecache(b'dirstate')
1468 1469 def dirstate(self):
1469 1470 return self._makedirstate()
1470 1471
1471 1472 def _makedirstate(self):
1472 1473 """Extension point for wrapping the dirstate per-repo."""
1473 1474 sparsematchfn = lambda: sparse.matcher(self)
1474 1475
1475 1476 return dirstate.dirstate(
1476 1477 self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
1477 1478 )
1478 1479
1479 1480 def _dirstatevalidate(self, node):
1480 1481 try:
1481 1482 self.changelog.rev(node)
1482 1483 return node
1483 1484 except error.LookupError:
1484 1485 if not self._dirstatevalidatewarned:
1485 1486 self._dirstatevalidatewarned = True
1486 1487 self.ui.warn(
1487 1488 _(b"warning: ignoring unknown working parent %s!\n")
1488 1489 % short(node)
1489 1490 )
1490 1491 return nullid
1491 1492
1492 1493 @storecache(narrowspec.FILENAME)
1493 1494 def narrowpats(self):
1494 1495 """matcher patterns for this repository's narrowspec
1495 1496
1496 1497 A tuple of (includes, excludes).
1497 1498 """
1498 1499 return narrowspec.load(self)
1499 1500
1500 1501 @storecache(narrowspec.FILENAME)
1501 1502 def _storenarrowmatch(self):
1502 1503 if repository.NARROW_REQUIREMENT not in self.requirements:
1503 1504 return matchmod.always()
1504 1505 include, exclude = self.narrowpats
1505 1506 return narrowspec.match(self.root, include=include, exclude=exclude)
1506 1507
1507 1508 @storecache(narrowspec.FILENAME)
1508 1509 def _narrowmatch(self):
1509 1510 if repository.NARROW_REQUIREMENT not in self.requirements:
1510 1511 return matchmod.always()
1511 1512 narrowspec.checkworkingcopynarrowspec(self)
1512 1513 include, exclude = self.narrowpats
1513 1514 return narrowspec.match(self.root, include=include, exclude=exclude)
1514 1515
1515 1516 def narrowmatch(self, match=None, includeexact=False):
1516 1517 """matcher corresponding the the repo's narrowspec
1517 1518
1518 1519 If `match` is given, then that will be intersected with the narrow
1519 1520 matcher.
1520 1521
1521 1522 If `includeexact` is True, then any exact matches from `match` will
1522 1523 be included even if they're outside the narrowspec.
1523 1524 """
1524 1525 if match:
1525 1526 if includeexact and not self._narrowmatch.always():
1526 1527 # do not exclude explicitly-specified paths, so that warnings
1527 1528 # can be issued for them later on
1528 1529 em = matchmod.exact(match.files())
1529 1530 nm = matchmod.unionmatcher([self._narrowmatch, em])
1530 1531 return matchmod.intersectmatchers(match, nm)
1531 1532 return matchmod.intersectmatchers(match, self._narrowmatch)
1532 1533 return self._narrowmatch
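
# A minimal usage sketch (hypothetical path, assuming a narrow clone):
#
#   em = matchmod.exact([b'docs/readme.txt'])
#   m = repo.narrowmatch(em, includeexact=True)
#   m(b'docs/readme.txt') # True even if the path is outside the narrowspec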
1533 1534
1534 1535 def setnarrowpats(self, newincludes, newexcludes):
1535 1536 narrowspec.save(self, newincludes, newexcludes)
1536 1537 self.invalidate(clearfilecache=True)
1537 1538
1538 1539 @unfilteredpropertycache
1539 1540 def _quick_access_changeid_null(self):
1540 1541 return {
1541 1542 b'null': (nullrev, nullid),
1542 1543 nullrev: (nullrev, nullid),
1543 1544 nullid: (nullrev, nullid),
1544 1545 }
1545 1546
1546 1547 @unfilteredpropertycache
1547 1548 def _quick_access_changeid_wc(self):
1548 1549 # also fast path access to the working copy parents;
1549 1550 # however, only do it for filters that ensure the wc is visible.
1550 1551 quick = {}
1551 1552 cl = self.unfiltered().changelog
1552 1553 for node in self.dirstate.parents():
1553 1554 if node == nullid:
1554 1555 continue
1555 1556 rev = cl.index.get_rev(node)
1556 1557 if rev is None:
1557 1558 # unknown working copy parent case:
1558 1559 #
1559 1560 # skip the fast path and let higher code deal with it
1560 1561 continue
1561 1562 pair = (rev, node)
1562 1563 quick[rev] = pair
1563 1564 quick[node] = pair
1564 1565 # also add the parents of the parents
1565 1566 for r in cl.parentrevs(rev):
1566 1567 if r == nullrev:
1567 1568 continue
1568 1569 n = cl.node(r)
1569 1570 pair = (r, n)
1570 1571 quick[r] = pair
1571 1572 quick[n] = pair
1572 1573 p1node = self.dirstate.p1()
1573 1574 if p1node != nullid:
1574 1575 quick[b'.'] = quick[p1node]
1575 1576 return quick
1576 1577
1577 1578 @unfilteredmethod
1578 1579 def _quick_access_changeid_invalidate(self):
1579 1580 if '_quick_access_changeid_wc' in vars(self):
1580 1581 del self.__dict__['_quick_access_changeid_wc']
1581 1582
1582 1583 @property
1583 1584 def _quick_access_changeid(self):
1584 1585 """a helper dictionary for __getitem__ calls
1585 1586 
1586 1587 This contains a list of symbols we can recognize right away without
1587 1588 further processing.
1588 1589 """
1589 1590 mapping = self._quick_access_changeid_null
1590 1591 if self.filtername in repoview.filter_has_wc:
1591 1592 mapping = mapping.copy()
1592 1593 mapping.update(self._quick_access_changeid_wc)
1593 1594 return mapping
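
# The resulting mapping looks like (values shown for illustration):
#
#   {b'null': (-1, nullid), -1: (-1, nullid), nullid: (-1, nullid),
#    b'.': (rev, node), rev: (rev, node), node: (rev, node), ...}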
1594 1595
1595 1596 def __getitem__(self, changeid):
1596 1597 # dealing with special cases
1597 1598 if changeid is None:
1598 1599 return context.workingctx(self)
1599 1600 if isinstance(changeid, context.basectx):
1600 1601 return changeid
1601 1602
1602 1603 # dealing with multiple revisions
1603 1604 if isinstance(changeid, slice):
1604 1605 # wdirrev isn't contiguous so the slice shouldn't include it
1605 1606 return [
1606 1607 self[i]
1607 1608 for i in pycompat.xrange(*changeid.indices(len(self)))
1608 1609 if i not in self.changelog.filteredrevs
1609 1610 ]
1610 1611
1611 1612 # dealing with some special values
1612 1613 quick_access = self._quick_access_changeid.get(changeid)
1613 1614 if quick_access is not None:
1614 1615 rev, node = quick_access
1615 1616 return context.changectx(self, rev, node, maybe_filtered=False)
1616 1617 if changeid == b'tip':
1617 1618 node = self.changelog.tip()
1618 1619 rev = self.changelog.rev(node)
1619 1620 return context.changectx(self, rev, node)
1620 1621
1621 1622 # dealing with arbitrary values
1622 1623 try:
1623 1624 if isinstance(changeid, int):
1624 1625 node = self.changelog.node(changeid)
1625 1626 rev = changeid
1626 1627 elif changeid == b'.':
1627 1628 # this is a hack to delay/avoid loading obsmarkers
1628 1629 # when we know that '.' won't be hidden
1629 1630 node = self.dirstate.p1()
1630 1631 rev = self.unfiltered().changelog.rev(node)
1631 1632 elif len(changeid) == 20:
1632 1633 try:
1633 1634 node = changeid
1634 1635 rev = self.changelog.rev(changeid)
1635 1636 except error.FilteredLookupError:
1636 1637 changeid = hex(changeid) # for the error message
1637 1638 raise
1638 1639 except LookupError:
1639 1640 # check if it might have come from damaged dirstate
1640 1641 #
1641 1642 # XXX we could avoid the unfiltered if we had a recognizable
1642 1643 # exception for filtered changeset access
1643 1644 if (
1644 1645 self.local()
1645 1646 and changeid in self.unfiltered().dirstate.parents()
1646 1647 ):
1647 1648 msg = _(b"working directory has unknown parent '%s'!")
1648 1649 raise error.Abort(msg % short(changeid))
1649 1650 changeid = hex(changeid) # for the error message
1650 1651 raise
1651 1652
1652 1653 elif len(changeid) == 40:
1653 1654 node = bin(changeid)
1654 1655 rev = self.changelog.rev(node)
1655 1656 else:
1656 1657 raise error.ProgrammingError(
1657 1658 b"unsupported changeid '%s' of type %s"
1658 1659 % (changeid, pycompat.bytestr(type(changeid)))
1659 1660 )
1660 1661
1661 1662 return context.changectx(self, rev, node)
1662 1663
1663 1664 except (error.FilteredIndexError, error.FilteredLookupError):
1664 1665 raise error.FilteredRepoLookupError(
1665 1666 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1666 1667 )
1667 1668 except (IndexError, LookupError):
1668 1669 raise error.RepoLookupError(
1669 1670 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1670 1671 )
1671 1672 except error.WdirUnsupported:
1672 1673 return context.workingctx(self)
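
# Lookups the code above is expected to handle (sketch; the hex hash is
# made up for illustration):
#
#   repo[None] # workingctx for the working directory
#   repo[5] # changectx from an integer revision
#   repo[b'.'] # first parent of the working directory
#   repo[b'tip'] # tip of the (possibly filtered) changelog
#   repo[b'6403105b97ac3a153588e4148eba0a11b38175a1'] # 40-char hex node
#   repo[0:3] # list of changectx, skipping filtered revisions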
1673 1674
1674 1675 def __contains__(self, changeid):
1675 1676 """True if the given changeid exists
1676 1677
1677 1678 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1678 1679 is specified.
1679 1680 """
1680 1681 try:
1681 1682 self[changeid]
1682 1683 return True
1683 1684 except error.RepoLookupError:
1684 1685 return False
1685 1686
1686 1687 def __nonzero__(self):
1687 1688 return True
1688 1689
1689 1690 __bool__ = __nonzero__
1690 1691
1691 1692 def __len__(self):
1692 1693 # no need to pay the cost of repoview.changelog
1693 1694 unfi = self.unfiltered()
1694 1695 return len(unfi.changelog)
1695 1696
1696 1697 def __iter__(self):
1697 1698 return iter(self.changelog)
1698 1699
1699 1700 def revs(self, expr, *args):
1700 1701 '''Find revisions matching a revset.
1701 1702
1702 1703 The revset is specified as a string ``expr`` that may contain
1703 1704 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1704 1705
1705 1706 Revset aliases from the configuration are not expanded. To expand
1706 1707 user aliases, consider calling ``scmutil.revrange()`` or
1707 1708 ``repo.anyrevs([expr], user=True)``.
1708 1709
1709 1710 Returns a smartset.abstractsmartset, which is a list-like interface
1710 1711 that contains integer revisions.
1711 1712 '''
1712 1713 tree = revsetlang.spectree(expr, *args)
1713 1714 return revset.makematcher(tree)(self)
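
# Example (sketch): ``revsetlang.formatspec`` handles the %-escaping,
# e.g. %d for an int, %s for bytes, %ld for a list of ints:
#
#   for r in repo.revs(b'ancestors(%d) and not public()', 42):
#       ... # r is an integer revision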
1714 1715
1715 1716 def set(self, expr, *args):
1716 1717 '''Find revisions matching a revset and emit changectx instances.
1717 1718
1718 1719 This is a convenience wrapper around ``revs()`` that iterates the
1719 1720 result and is a generator of changectx instances.
1720 1721
1721 1722 Revset aliases from the configuration are not expanded. To expand
1722 1723 user aliases, consider calling ``scmutil.revrange()``.
1723 1724 '''
1724 1725 for r in self.revs(expr, *args):
1725 1726 yield self[r]
1726 1727
1727 1728 def anyrevs(self, specs, user=False, localalias=None):
1728 1729 '''Find revisions matching one of the given revsets.
1729 1730
1730 1731 Revset aliases from the configuration are not expanded by default. To
1731 1732 expand user aliases, specify ``user=True``. To provide some local
1732 1733 definitions overriding user aliases, set ``localalias`` to
1733 1734 ``{name: definitionstring}``.
1734 1735 '''
1735 1736 if specs == [b'null']:
1736 1737 return revset.baseset([nullrev])
1737 1738 if specs == [b'.']:
1738 1739 quick_data = self._quick_access_changeid.get(b'.')
1739 1740 if quick_data is not None:
1740 1741 return revset.baseset([quick_data[0]])
1741 1742 if user:
1742 1743 m = revset.matchany(
1743 1744 self.ui,
1744 1745 specs,
1745 1746 lookup=revset.lookupfn(self),
1746 1747 localalias=localalias,
1747 1748 )
1748 1749 else:
1749 1750 m = revset.matchany(None, specs, localalias=localalias)
1750 1751 return m(self)
1751 1752
1752 1753 def url(self):
1753 1754 return b'file:' + self.root
1754 1755
1755 1756 def hook(self, name, throw=False, **args):
1756 1757 """Call a hook, passing this repo instance.
1757 1758
1758 1759 This is a convenience method to aid invoking hooks. Extensions likely
1759 1760 won't call this unless they have registered a custom hook or are
1760 1761 replacing code that is expected to call a hook.
1761 1762 """
1762 1763 return hook.hook(self.ui, self, name, throw, **args)
1763 1764
1764 1765 @filteredpropertycache
1765 1766 def _tagscache(self):
1766 1767 '''Returns a tagscache object that contains various tags-related
1767 1768 caches.'''
1768 1769
1769 1770 # This simplifies its cache management by having one decorated
1770 1771 # function (this one) and the rest simply fetch things from it.
1771 1772 class tagscache(object):
1772 1773 def __init__(self):
1773 1774 # These two define the set of tags for this repository. tags
1774 1775 # maps tag name to node; tagtypes maps tag name to 'global' or
1775 1776 # 'local'. (Global tags are defined by .hgtags across all
1776 1777 # heads, and local tags are defined in .hg/localtags.)
1777 1778 # They constitute the in-memory cache of tags.
1778 1779 self.tags = self.tagtypes = None
1779 1780
1780 1781 self.nodetagscache = self.tagslist = None
1781 1782
1782 1783 cache = tagscache()
1783 1784 cache.tags, cache.tagtypes = self._findtags()
1784 1785
1785 1786 return cache
1786 1787
1787 1788 def tags(self):
1788 1789 '''return a mapping of tag to node'''
1789 1790 t = {}
1790 1791 if self.changelog.filteredrevs:
1791 1792 tags, tt = self._findtags()
1792 1793 else:
1793 1794 tags = self._tagscache.tags
1794 1795 rev = self.changelog.rev
1795 1796 for k, v in pycompat.iteritems(tags):
1796 1797 try:
1797 1798 # ignore tags to unknown nodes
1798 1799 rev(v)
1799 1800 t[k] = v
1800 1801 except (error.LookupError, ValueError):
1801 1802 pass
1802 1803 return t
1803 1804
1804 1805 def _findtags(self):
1805 1806 '''Do the hard work of finding tags. Return a pair of dicts
1806 1807 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1807 1808 maps tag name to a string like \'global\' or \'local\'.
1808 1809 Subclasses or extensions are free to add their own tags, but
1809 1810 should be aware that the returned dicts will be retained for the
1810 1811 duration of the localrepo object.'''
1811 1812
1812 1813 # XXX what tagtype should subclasses/extensions use? Currently
1813 1814 # mq and bookmarks add tags, but do not set the tagtype at all.
1814 1815 # Should each extension invent its own tag type? Should there
1815 1816 # be one tagtype for all such "virtual" tags? Or is the status
1816 1817 # quo fine?
1817 1818
1818 1819 # map tag name to (node, hist)
1819 1820 alltags = tagsmod.findglobaltags(self.ui, self)
1820 1821 # map tag name to tag type
1821 1822 tagtypes = {tag: b'global' for tag in alltags}
1822 1823
1823 1824 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1824 1825
1825 1826 # Build the return dicts. Have to re-encode tag names because
1826 1827 # the tags module always uses UTF-8 (in order not to lose info
1827 1828 # writing to the cache), but the rest of Mercurial wants them in
1828 1829 # local encoding.
1829 1830 tags = {}
1830 1831 for (name, (node, hist)) in pycompat.iteritems(alltags):
1831 1832 if node != nullid:
1832 1833 tags[encoding.tolocal(name)] = node
1833 1834 tags[b'tip'] = self.changelog.tip()
1834 1835 tagtypes = {
1835 1836 encoding.tolocal(name): value
1836 1837 for (name, value) in pycompat.iteritems(tagtypes)
1837 1838 }
1838 1839 return (tags, tagtypes)
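
# The returned pair might look like (names and nodes made up for
# illustration):
#
#   tags = {b'tip': tipnode, b'v1.0': somenode}
#   tagtypes = {b'v1.0': b'global', b'wip': b'local'}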
1839 1840
1840 1841 def tagtype(self, tagname):
1841 1842 '''
1842 1843 return the type of the given tag. result can be:
1843 1844
1844 1845 'local' : a local tag
1845 1846 'global' : a global tag
1846 1847 None : tag does not exist
1847 1848 '''
1848 1849
1849 1850 return self._tagscache.tagtypes.get(tagname)
1850 1851
1851 1852 def tagslist(self):
1852 1853 '''return a list of tags ordered by revision'''
1853 1854 if not self._tagscache.tagslist:
1854 1855 l = []
1855 1856 for t, n in pycompat.iteritems(self.tags()):
1856 1857 l.append((self.changelog.rev(n), t, n))
1857 1858 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1858 1859
1859 1860 return self._tagscache.tagslist
1860 1861
1861 1862 def nodetags(self, node):
1862 1863 '''return the tags associated with a node'''
1863 1864 if not self._tagscache.nodetagscache:
1864 1865 nodetagscache = {}
1865 1866 for t, n in pycompat.iteritems(self._tagscache.tags):
1866 1867 nodetagscache.setdefault(n, []).append(t)
1867 1868 for tags in pycompat.itervalues(nodetagscache):
1868 1869 tags.sort()
1869 1870 self._tagscache.nodetagscache = nodetagscache
1870 1871 return self._tagscache.nodetagscache.get(node, [])
1871 1872
1872 1873 def nodebookmarks(self, node):
1873 1874 """return the list of bookmarks pointing to the specified node"""
1874 1875 return self._bookmarks.names(node)
1875 1876
1876 1877 def branchmap(self):
1877 1878 '''returns a dictionary {branch: [branchheads]} with branchheads
1878 1879 ordered by increasing revision number'''
1879 1880 return self._branchcaches[self]
1880 1881
1881 1882 @unfilteredmethod
1882 1883 def revbranchcache(self):
1883 1884 if not self._revbranchcache:
1884 1885 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1885 1886 return self._revbranchcache
1886 1887
1887 1888 def branchtip(self, branch, ignoremissing=False):
1888 1889 '''return the tip node for a given branch
1889 1890
1890 1891 If ignoremissing is True, then this method will not raise an error.
1891 1892 This is helpful for callers that only expect None for a missing branch
1892 1893 (e.g. namespace).
1893 1894
1894 1895 '''
1895 1896 try:
1896 1897 return self.branchmap().branchtip(branch)
1897 1898 except KeyError:
1898 1899 if not ignoremissing:
1899 1900 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
1900 1901 else:
1901 1902 pass
1902 1903
1903 1904 def lookup(self, key):
1904 1905 node = scmutil.revsymbol(self, key).node()
1905 1906 if node is None:
1906 1907 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
1907 1908 return node
1908 1909
1909 1910 def lookupbranch(self, key):
1910 1911 if self.branchmap().hasbranch(key):
1911 1912 return key
1912 1913
1913 1914 return scmutil.revsymbol(self, key).branch()
1914 1915
1915 1916 def known(self, nodes):
1916 1917 cl = self.changelog
1917 1918 get_rev = cl.index.get_rev
1918 1919 filtered = cl.filteredrevs
1919 1920 result = []
1920 1921 for n in nodes:
1921 1922 r = get_rev(n)
1922 1923 resp = not (r is None or r in filtered)
1923 1924 result.append(resp)
1924 1925 return result
1925 1926
1926 1927 def local(self):
1927 1928 return self
1928 1929
1929 1930 def publishing(self):
1930 1931 # it's safe (and desirable) to trust the publish flag unconditionally
1931 1932 # so that we don't finalize changes shared between users via ssh or nfs
1932 1933 return self.ui.configbool(b'phases', b'publish', untrusted=True)
1933 1934
1934 1935 def cancopy(self):
1935 1936 # so statichttprepo's override of local() works
1936 1937 if not self.local():
1937 1938 return False
1938 1939 if not self.publishing():
1939 1940 return True
1940 1941 # if publishing we can't copy if there is filtered content
1941 1942 return not self.filtered(b'visible').changelog.filteredrevs
1942 1943
1943 1944 def shared(self):
1944 1945 '''the type of shared repository (None if not shared)'''
1945 1946 if self.sharedpath != self.path:
1946 1947 return b'store'
1947 1948 return None
1948 1949
1949 1950 def wjoin(self, f, *insidef):
1950 1951 return self.vfs.reljoin(self.root, f, *insidef)
1951 1952
1952 1953 def setparents(self, p1, p2=nullid):
1953 1954 self[None].setparents(p1, p2)
1954 1955 self._quick_access_changeid_invalidate()
1955 1956
1956 1957 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1957 1958 """changeid must be a changeset revision, if specified.
1958 1959 fileid can be a file revision or node."""
1959 1960 return context.filectx(
1960 1961 self, path, changeid, fileid, changectx=changectx
1961 1962 )
1962 1963
1963 1964 def getcwd(self):
1964 1965 return self.dirstate.getcwd()
1965 1966
1966 1967 def pathto(self, f, cwd=None):
1967 1968 return self.dirstate.pathto(f, cwd)
1968 1969
1969 1970 def _loadfilter(self, filter):
1970 1971 if filter not in self._filterpats:
1971 1972 l = []
1972 1973 for pat, cmd in self.ui.configitems(filter):
1973 1974 if cmd == b'!':
1974 1975 continue
1975 1976 mf = matchmod.match(self.root, b'', [pat])
1976 1977 fn = None
1977 1978 params = cmd
1978 1979 for name, filterfn in pycompat.iteritems(self._datafilters):
1979 1980 if cmd.startswith(name):
1980 1981 fn = filterfn
1981 1982 params = cmd[len(name) :].lstrip()
1982 1983 break
1983 1984 if not fn:
1984 1985 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1985 1986 fn.__name__ = 'commandfilter'
1986 1987 # Wrap old filters not supporting keyword arguments
1987 1988 if not pycompat.getargspec(fn)[2]:
1988 1989 oldfn = fn
1989 1990 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
1990 1991 fn.__name__ = 'compat-' + oldfn.__name__
1991 1992 l.append((mf, fn, params))
1992 1993 self._filterpats[filter] = l
1993 1994 return self._filterpats[filter]
1994 1995
1995 1996 def _filter(self, filterpats, filename, data):
1996 1997 for mf, fn, cmd in filterpats:
1997 1998 if mf(filename):
1998 1999 self.ui.debug(
1999 2000 b"filtering %s through %s\n"
2000 2001 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2001 2002 )
2002 2003 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2003 2004 break
2004 2005
2005 2006 return data
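
# The patterns come from the [encode] and [decode] hgrc sections; a
# classic example from the hgrc documentation (a sketch, not a
# recommendation):
#
#   [encode]
#   # uncompress gzip files on checkin to improve delta compression
#   *.gz = pipe: gunzip
#
#   [decode]
#   # recompress gzip files when writing them to the working directory
#   *.gz = gzip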
2006 2007
2007 2008 @unfilteredpropertycache
2008 2009 def _encodefilterpats(self):
2009 2010 return self._loadfilter(b'encode')
2010 2011
2011 2012 @unfilteredpropertycache
2012 2013 def _decodefilterpats(self):
2013 2014 return self._loadfilter(b'decode')
2014 2015
2015 2016 def adddatafilter(self, name, filter):
2016 2017 self._datafilters[name] = filter
2017 2018
2018 2019 def wread(self, filename):
2019 2020 if self.wvfs.islink(filename):
2020 2021 data = self.wvfs.readlink(filename)
2021 2022 else:
2022 2023 data = self.wvfs.read(filename)
2023 2024 return self._filter(self._encodefilterpats, filename, data)
2024 2025
2025 2026 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2026 2027 """write ``data`` into ``filename`` in the working directory
2027 2028
2028 2029 This returns the length of the written (maybe decoded) data.
2029 2030 """
2030 2031 data = self._filter(self._decodefilterpats, filename, data)
2031 2032 if b'l' in flags:
2032 2033 self.wvfs.symlink(data, filename)
2033 2034 else:
2034 2035 self.wvfs.write(
2035 2036 filename, data, backgroundclose=backgroundclose, **kwargs
2036 2037 )
2037 2038 if b'x' in flags:
2038 2039 self.wvfs.setflags(filename, False, True)
2039 2040 else:
2040 2041 self.wvfs.setflags(filename, False, False)
2041 2042 return len(data)
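
# ``flags`` mirrors the manifest flags for the file (sketch):
#
#   repo.wwrite(b'script.sh', data, b'x') # regular file, executable bit
#   repo.wwrite(b'link', b'target', b'l') # symlink pointing at "target"
#   repo.wwrite(b'plain.txt', data, b'') # regular, non-executable file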
2042 2043
2043 2044 def wwritedata(self, filename, data):
2044 2045 return self._filter(self._decodefilterpats, filename, data)
2045 2046
2046 2047 def currenttransaction(self):
2047 2048 """return the current transaction or None if none exists"""
2048 2049 if self._transref:
2049 2050 tr = self._transref()
2050 2051 else:
2051 2052 tr = None
2052 2053
2053 2054 if tr and tr.running():
2054 2055 return tr
2055 2056 return None
2056 2057
2057 2058 def transaction(self, desc, report=None):
2058 2059 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2059 2060 b'devel', b'check-locks'
2060 2061 ):
2061 2062 if self._currentlock(self._lockref) is None:
2062 2063 raise error.ProgrammingError(b'transaction requires locking')
2063 2064 tr = self.currenttransaction()
2064 2065 if tr is not None:
2065 2066 return tr.nest(name=desc)
2066 2067
2067 2068 # abort here if the journal already exists
2068 2069 if self.svfs.exists(b"journal"):
2069 2070 raise error.RepoError(
2070 2071 _(b"abandoned transaction found"),
2071 2072 hint=_(b"run 'hg recover' to clean up transaction"),
2072 2073 )
2073 2074
2074 2075 idbase = b"%.40f#%f" % (random.random(), time.time())
2075 2076 ha = hex(hashutil.sha1(idbase).digest())
2076 2077 txnid = b'TXN:' + ha
2077 2078 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2078 2079
2079 2080 self._writejournal(desc)
2080 2081 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2081 2082 if report:
2082 2083 rp = report
2083 2084 else:
2084 2085 rp = self.ui.warn
2085 2086 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2086 2087 # we must avoid a cyclic reference between repo and transaction.
2087 2088 reporef = weakref.ref(self)
2088 2089 # Code to track tag movement
2089 2090 #
2090 2091 # Since tags are all handled as file content, it is actually quite hard
2091 2092 # to track these movements from a code perspective. So we fall back to
2092 2093 # tracking at the repository level. One could envision tracking changes
2093 2094 # to the '.hgtags' file through changegroup application, but that fails
2094 2095 # to cope with cases where a transaction exposes new heads without a
2095 2096 # changegroup being involved (e.g. phase movement).
2096 2097 #
2097 2098 # For now, we gate the feature behind a flag since it likely comes
2098 2099 # with performance impacts. The current code runs more often than needed
2099 2100 # and does not use caches as much as it could. The current focus is on
2100 2101 # the behavior of the feature, so we disable it by default. The flag
2101 2102 # will be removed when we are happy with the performance impact.
2102 2103 #
2103 2104 # Once this feature is no longer experimental move the following
2104 2105 # documentation to the appropriate help section:
2105 2106 #
2106 2107 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2107 2108 # tags (new or changed or deleted tags). In addition the details of
2108 2109 # these changes are made available in a file at:
2109 2110 # ``REPOROOT/.hg/changes/tags.changes``.
2110 2111 # Make sure you check for HG_TAG_MOVED before reading that file as it
2111 2112 # might exist from a previous transaction even if no tags were touched
2112 2113 # in this one. Changes are recorded in a line-based format::
2113 2114 #
2114 2115 # <action> <hex-node> <tag-name>\n
2115 2116 #
2116 2117 # Actions are defined as follows:
2117 2118 # "-R": tag is removed,
2118 2119 # "+A": tag is added,
2119 2120 # "-M": tag is moved (old value),
2120 2121 # "+M": tag is moved (new value),
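#
# For instance, a transaction moving tag "v1.0" and adding tag "v1.1"
# could produce a file such as (hashes made up for illustration):
#
# -M 6403105b97ac3a153588e4148eba0a11b38175a1 v1.0
# +M a41d2e1eb1fe1dd1c2b52f44e4e266b70f9c1334 v1.0
# +A 9bd223b34b1b579e5c4a4e6ac3a56b5d2e4b588f v1.1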
2121 2122 tracktags = lambda x: None
2122 2123 # experimental config: experimental.hook-track-tags
2123 2124 shouldtracktags = self.ui.configbool(
2124 2125 b'experimental', b'hook-track-tags'
2125 2126 )
2126 2127 if desc != b'strip' and shouldtracktags:
2127 2128 oldheads = self.changelog.headrevs()
2128 2129
2129 2130 def tracktags(tr2):
2130 2131 repo = reporef()
2131 2132 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2132 2133 newheads = repo.changelog.headrevs()
2133 2134 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2134 2135 # note: we compare lists here.
2135 2136 # As we do it only once, building sets would not be cheaper.
2136 2137 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2137 2138 if changes:
2138 2139 tr2.hookargs[b'tag_moved'] = b'1'
2139 2140 with repo.vfs(
2140 2141 b'changes/tags.changes', b'w', atomictemp=True
2141 2142 ) as changesfile:
2142 2143 # note: we do not register the file with the transaction
2143 2144 # because we need it to still exist when the transaction
2144 2145 # is closed (for txnclose hooks)
2145 2146 tagsmod.writediff(changesfile, changes)
2146 2147
2147 2148 def validate(tr2):
2148 2149 """will run pre-closing hooks"""
2149 2150 # XXX the transaction API is a bit lacking here so we take a hacky
2150 2151 # path for now
2151 2152 #
2152 2153 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2153 2154 # dict is copied before these run. In addition we need the data
2154 2155 # available to in-memory hooks too.
2155 2156 #
2156 2157 # Moreover, we also need to make sure this runs before txnclose
2157 2158 # hooks and there is no "pending" mechanism that would execute
2158 2159 # logic only if hooks are about to run.
2159 2160 #
2160 2161 # Fixing this limitation of the transaction is also needed to track
2161 2162 # other families of changes (bookmarks, phases, obsolescence).
2162 2163 #
2163 2164 # This will have to be fixed before we remove the experimental
2164 2165 # gating.
2165 2166 tracktags(tr2)
2166 2167 repo = reporef()
2167 2168
2168 2169 singleheadopt = (b'experimental', b'single-head-per-branch')
2169 2170 singlehead = repo.ui.configbool(*singleheadopt)
2170 2171 if singlehead:
2171 2172 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2172 2173 accountclosed = singleheadsub.get(
2173 2174 b"account-closed-heads", False
2174 2175 )
2175 2176 scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
2176 2177 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2177 2178 for name, (old, new) in sorted(
2178 2179 tr.changes[b'bookmarks'].items()
2179 2180 ):
2180 2181 args = tr.hookargs.copy()
2181 2182 args.update(bookmarks.preparehookargs(name, old, new))
2182 2183 repo.hook(
2183 2184 b'pretxnclose-bookmark',
2184 2185 throw=True,
2185 2186 **pycompat.strkwargs(args)
2186 2187 )
2187 2188 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2188 2189 cl = repo.unfiltered().changelog
2189 2190 for revs, (old, new) in tr.changes[b'phases']:
2190 2191 for rev in revs:
2191 2192 args = tr.hookargs.copy()
2192 2193 node = hex(cl.node(rev))
2193 2194 args.update(phases.preparehookargs(node, old, new))
2194 2195 repo.hook(
2195 2196 b'pretxnclose-phase',
2196 2197 throw=True,
2197 2198 **pycompat.strkwargs(args)
2198 2199 )
2199 2200
2200 2201 repo.hook(
2201 2202 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2202 2203 )
2203 2204
2204 2205 def releasefn(tr, success):
2205 2206 repo = reporef()
2206 2207 if repo is None:
2207 2208 # If the repo has been GC'd (and this release function is being
2208 2209 # called from transaction.__del__), there's not much we can do,
2209 2210 # so just leave the unfinished transaction there and let the
2210 2211 # user run `hg recover`.
2211 2212 return
2212 2213 if success:
2213 2214 # this should be explicitly invoked here, because
2214 2215 # in-memory changes aren't written out when closing
2215 2216 # the transaction, if tr.addfilegenerator (via
2216 2217 # dirstate.write or so) isn't invoked while the
2217 2218 # transaction is running
2218 2219 repo.dirstate.write(None)
2219 2220 else:
2220 2221 # discard all changes (including ones already written
2221 2222 # out) in this transaction
2222 2223 narrowspec.restorebackup(self, b'journal.narrowspec')
2223 2224 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2224 2225 repo.dirstate.restorebackup(None, b'journal.dirstate')
2225 2226
2226 2227 repo.invalidate(clearfilecache=True)
2227 2228
2228 2229 tr = transaction.transaction(
2229 2230 rp,
2230 2231 self.svfs,
2231 2232 vfsmap,
2232 2233 b"journal",
2233 2234 b"undo",
2234 2235 aftertrans(renames),
2235 2236 self.store.createmode,
2236 2237 validator=validate,
2237 2238 releasefn=releasefn,
2238 2239 checkambigfiles=_cachedfiles,
2239 2240 name=desc,
2240 2241 )
2241 2242 tr.changes[b'origrepolen'] = len(self)
2242 2243 tr.changes[b'obsmarkers'] = set()
2243 2244 tr.changes[b'phases'] = []
2244 2245 tr.changes[b'bookmarks'] = {}
2245 2246
2246 2247 tr.hookargs[b'txnid'] = txnid
2247 2248 tr.hookargs[b'txnname'] = desc
2248 2249 tr.hookargs[b'changes'] = tr.changes
2249 2250 # note: writing the fncache only during finalize means that the file is
2250 2251 # outdated when running hooks. As fncache is used for streaming clones,
2251 2252 # this is not expected to break anything that happens during the hooks.
2252 2253 tr.addfinalize(b'flush-fncache', self.store.write)
2253 2254
2254 2255 def txnclosehook(tr2):
2255 2256 """To be run if the transaction is successful; schedules a hook run
2256 2257 """
2257 2258 # Don't reference tr2 in hook() so we don't hold a reference.
2258 2259 # This reduces memory consumption when there are multiple
2259 2260 # transactions per lock. This can likely go away if issue5045
2260 2261 # fixes the function accumulation.
2261 2262 hookargs = tr2.hookargs
2262 2263
2263 2264 def hookfunc(unused_success):
2264 2265 repo = reporef()
2265 2266 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2266 2267 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2267 2268 for name, (old, new) in bmchanges:
2268 2269 args = tr.hookargs.copy()
2269 2270 args.update(bookmarks.preparehookargs(name, old, new))
2270 2271 repo.hook(
2271 2272 b'txnclose-bookmark',
2272 2273 throw=False,
2273 2274 **pycompat.strkwargs(args)
2274 2275 )
2275 2276
2276 2277 if hook.hashook(repo.ui, b'txnclose-phase'):
2277 2278 cl = repo.unfiltered().changelog
2278 2279 phasemv = sorted(
2279 2280 tr.changes[b'phases'], key=lambda r: r[0][0]
2280 2281 )
2281 2282 for revs, (old, new) in phasemv:
2282 2283 for rev in revs:
2283 2284 args = tr.hookargs.copy()
2284 2285 node = hex(cl.node(rev))
2285 2286 args.update(phases.preparehookargs(node, old, new))
2286 2287 repo.hook(
2287 2288 b'txnclose-phase',
2288 2289 throw=False,
2289 2290 **pycompat.strkwargs(args)
2290 2291 )
2291 2292
2292 2293 repo.hook(
2293 2294 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2294 2295 )
2295 2296
2296 2297 reporef()._afterlock(hookfunc)
2297 2298
2298 2299 tr.addfinalize(b'txnclose-hook', txnclosehook)
2299 2300 # Include a leading "-" to make it happen before the transaction summary
2300 2301 # reports registered via scmutil.registersummarycallback() whose names
2301 2302 # are 00-txnreport etc. That way, the caches will be warm when the
2302 2303 # callbacks run.
2303 2304 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2304 2305
2305 2306 def txnaborthook(tr2):
2306 2307 """To be run if the transaction is aborted
2307 2308 """
2308 2309 reporef().hook(
2309 2310 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2310 2311 )
2311 2312
2312 2313 tr.addabort(b'txnabort-hook', txnaborthook)
2313 2314 # avoid eager cache invalidation. in-memory data should be identical
2314 2315 # to stored data if the transaction has no error.
2315 2316 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2316 2317 self._transref = weakref.ref(tr)
2317 2318 scmutil.registersummarycallback(self, tr, desc)
2318 2319 return tr
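
# A typical (hypothetical) caller takes the locks first, then opens the
# transaction, matching the order documented on lock() and wlock():
#
#   with repo.wlock(), repo.lock(), repo.transaction(b'my-change') as tr:
#       ... # all store writes are rolled back if an error escapes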
2319 2320
2320 2321 def _journalfiles(self):
2321 2322 return (
2322 2323 (self.svfs, b'journal'),
2323 2324 (self.svfs, b'journal.narrowspec'),
2324 2325 (self.vfs, b'journal.narrowspec.dirstate'),
2325 2326 (self.vfs, b'journal.dirstate'),
2326 2327 (self.vfs, b'journal.branch'),
2327 2328 (self.vfs, b'journal.desc'),
2328 2329 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2329 2330 (self.svfs, b'journal.phaseroots'),
2330 2331 )
2331 2332
2332 2333 def undofiles(self):
2333 2334 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2334 2335
2335 2336 @unfilteredmethod
2336 2337 def _writejournal(self, desc):
2337 2338 self.dirstate.savebackup(None, b'journal.dirstate')
2338 2339 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2339 2340 narrowspec.savebackup(self, b'journal.narrowspec')
2340 2341 self.vfs.write(
2341 2342 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2342 2343 )
2343 2344 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2344 2345 bookmarksvfs = bookmarks.bookmarksvfs(self)
2345 2346 bookmarksvfs.write(
2346 2347 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2347 2348 )
2348 2349 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2349 2350
2350 2351 def recover(self):
2351 2352 with self.lock():
2352 2353 if self.svfs.exists(b"journal"):
2353 2354 self.ui.status(_(b"rolling back interrupted transaction\n"))
2354 2355 vfsmap = {
2355 2356 b'': self.svfs,
2356 2357 b'plain': self.vfs,
2357 2358 }
2358 2359 transaction.rollback(
2359 2360 self.svfs,
2360 2361 vfsmap,
2361 2362 b"journal",
2362 2363 self.ui.warn,
2363 2364 checkambigfiles=_cachedfiles,
2364 2365 )
2365 2366 self.invalidate()
2366 2367 return True
2367 2368 else:
2368 2369 self.ui.warn(_(b"no interrupted transaction available\n"))
2369 2370 return False
2370 2371
2371 2372 def rollback(self, dryrun=False, force=False):
2372 2373 wlock = lock = dsguard = None
2373 2374 try:
2374 2375 wlock = self.wlock()
2375 2376 lock = self.lock()
2376 2377 if self.svfs.exists(b"undo"):
2377 2378 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2378 2379
2379 2380 return self._rollback(dryrun, force, dsguard)
2380 2381 else:
2381 2382 self.ui.warn(_(b"no rollback information available\n"))
2382 2383 return 1
2383 2384 finally:
2384 2385 release(dsguard, lock, wlock)
2385 2386
2386 2387 @unfilteredmethod # Until we get smarter cache management
2387 2388 def _rollback(self, dryrun, force, dsguard):
2388 2389 ui = self.ui
2389 2390 try:
2390 2391 args = self.vfs.read(b'undo.desc').splitlines()
2391 2392 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2392 2393 if len(args) >= 3:
2393 2394 detail = args[2]
2394 2395 oldtip = oldlen - 1
2395 2396
2396 2397 if detail and ui.verbose:
2397 2398 msg = _(
2398 2399 b'repository tip rolled back to revision %d'
2399 2400 b' (undo %s: %s)\n'
2400 2401 ) % (oldtip, desc, detail)
2401 2402 else:
2402 2403 msg = _(
2403 2404 b'repository tip rolled back to revision %d (undo %s)\n'
2404 2405 ) % (oldtip, desc)
2405 2406 except IOError:
2406 2407 msg = _(b'rolling back unknown transaction\n')
2407 2408 desc = None
2408 2409
2409 2410 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2410 2411 raise error.Abort(
2411 2412 _(
2412 2413 b'rollback of last commit while not checked out '
2413 2414 b'may lose data'
2414 2415 ),
2415 2416 hint=_(b'use -f to force'),
2416 2417 )
2417 2418
2418 2419 ui.status(msg)
2419 2420 if dryrun:
2420 2421 return 0
2421 2422
2422 2423 parents = self.dirstate.parents()
2423 2424 self.destroying()
2424 2425 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2425 2426 transaction.rollback(
2426 2427 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2427 2428 )
2428 2429 bookmarksvfs = bookmarks.bookmarksvfs(self)
2429 2430 if bookmarksvfs.exists(b'undo.bookmarks'):
2430 2431 bookmarksvfs.rename(
2431 2432 b'undo.bookmarks', b'bookmarks', checkambig=True
2432 2433 )
2433 2434 if self.svfs.exists(b'undo.phaseroots'):
2434 2435 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2435 2436 self.invalidate()
2436 2437
2437 2438 has_node = self.changelog.index.has_node
2438 2439 parentgone = any(not has_node(p) for p in parents)
2439 2440 if parentgone:
2440 2441 # prevent dirstateguard from overwriting the already restored one
2441 2442 dsguard.close()
2442 2443
2443 2444 narrowspec.restorebackup(self, b'undo.narrowspec')
2444 2445 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2445 2446 self.dirstate.restorebackup(None, b'undo.dirstate')
2446 2447 try:
2447 2448 branch = self.vfs.read(b'undo.branch')
2448 2449 self.dirstate.setbranch(encoding.tolocal(branch))
2449 2450 except IOError:
2450 2451 ui.warn(
2451 2452 _(
2452 2453 b'named branch could not be reset: '
2453 2454 b'current branch is still \'%s\'\n'
2454 2455 )
2455 2456 % self.dirstate.branch()
2456 2457 )
2457 2458
2458 2459 parents = tuple([p.rev() for p in self[None].parents()])
2459 2460 if len(parents) > 1:
2460 2461 ui.status(
2461 2462 _(
2462 2463 b'working directory now based on '
2463 2464 b'revisions %d and %d\n'
2464 2465 )
2465 2466 % parents
2466 2467 )
2467 2468 else:
2468 2469 ui.status(
2469 2470 _(b'working directory now based on revision %d\n') % parents
2470 2471 )
2471 2472 mergestatemod.mergestate.clean(self, self[b'.'].node())
2472 2473
2473 2474 # TODO: if we know which new heads may result from this rollback, pass
2474 2475 # them to destroy(), which will prevent the branchhead cache from being
2475 2476 # invalidated.
2476 2477 self.destroyed()
2477 2478 return 0
2478 2479
2479 2480 def _buildcacheupdater(self, newtransaction):
2480 2481 """called during transaction to build the callback updating cache
2481 2482
2482 2483 Lives on the repository to help extensions that might want to augment
2483 2484 this logic. For this purpose, the created transaction is passed to the
2484 2485 method.
2485 2486 """
2486 2487 # we must avoid a cyclic reference between repo and transaction.
2487 2488 reporef = weakref.ref(self)
2488 2489
2489 2490 def updater(tr):
2490 2491 repo = reporef()
2491 2492 repo.updatecaches(tr)
2492 2493
2493 2494 return updater
2494 2495
2495 2496 @unfilteredmethod
2496 2497 def updatecaches(self, tr=None, full=False):
2497 2498 """warm appropriate caches
2498 2499
2499 2500 If this function is called after a transaction closed, the transaction
2500 2501 will be available in the 'tr' argument. This can be used to selectively
2501 2502 update caches relevant to the changes in that transaction.
2502 2503
2503 2504 If 'full' is set, make sure all caches the function knows about have
2504 2505 up-to-date data, even the ones usually loaded more lazily.
2505 2506 """
2506 2507 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2507 2508 # During strip, many caches are invalid but
2508 2509 # later call to `destroyed` will refresh them.
2509 2510 return
2510 2511
2511 2512 if tr is None or tr.changes[b'origrepolen'] < len(self):
2512 2513 # accessing the 'served' branchmap should refresh all the others,
2513 2514 self.ui.debug(b'updating the branch cache\n')
2514 2515 self.filtered(b'served').branchmap()
2515 2516 self.filtered(b'served.hidden').branchmap()
2516 2517
2517 2518 if full:
2518 2519 unfi = self.unfiltered()
2519 2520
2520 2521 self.changelog.update_caches(transaction=tr)
2521 2522 self.manifestlog.update_caches(transaction=tr)
2522 2523
2523 2524 rbc = unfi.revbranchcache()
2524 2525 for r in unfi.changelog:
2525 2526 rbc.branchinfo(r)
2526 2527 rbc.write()
2527 2528
2528 2529 # ensure the working copy parents are in the manifestfulltextcache
2529 2530 for ctx in self[b'.'].parents():
2530 2531 ctx.manifest() # accessing the manifest is enough
2531 2532
2532 2533 # accessing the fnode cache warms the cache
2533 2534 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2534 2535 # accessing tags warms the cache
2535 2536 self.tags()
2536 2537 self.filtered(b'served').tags()
2537 2538
2538 2539 # The `full` arg is documented as updating even the lazily-loaded
2539 2540 # caches immediately, so we're forcing a write to cause these caches
2540 2541 # to be warmed up even if they haven't explicitly been requested
2541 2542 # yet (if they've never been used by hg, they won't ever have been
2542 2543 # written, even if they're a subset of another kind of cache that
2543 2544 # *has* been used).
2544 2545 for filt in repoview.filtertable.keys():
2545 2546 filtered = self.filtered(filt)
2546 2547 filtered.branchmap().write(filtered)
2547 2548
2548 2549 def invalidatecaches(self):
2549 2550
2550 2551 if '_tagscache' in vars(self):
2551 2552 # can't use delattr on proxy
2552 2553 del self.__dict__['_tagscache']
2553 2554
2554 2555 self._branchcaches.clear()
2555 2556 self.invalidatevolatilesets()
2556 2557 self._sparsesignaturecache.clear()
2557 2558
2558 2559 def invalidatevolatilesets(self):
2559 2560 self.filteredrevcache.clear()
2560 2561 obsolete.clearobscaches(self)
2561 2562 self._quick_access_changeid_invalidate()
2562 2563
2563 2564 def invalidatedirstate(self):
2564 2565 '''Invalidates the dirstate, causing the next call to dirstate
2565 2566 to check if it was modified since the last time it was read,
2566 2567 rereading it if it has.
2567 2568
2568 2569 This is different from dirstate.invalidate() in that it doesn't always
2569 2570 reread the dirstate. Use dirstate.invalidate() if you want to
2570 2571 explicitly read the dirstate again (i.e. restoring it to a previous
2571 2572 known good state).'''
2572 2573 if hasunfilteredcache(self, 'dirstate'):
2573 2574 for k in self.dirstate._filecache:
2574 2575 try:
2575 2576 delattr(self.dirstate, k)
2576 2577 except AttributeError:
2577 2578 pass
2578 2579 delattr(self.unfiltered(), 'dirstate')
2579 2580
2580 2581 def invalidate(self, clearfilecache=False):
2581 2582 '''Invalidates both store and non-store parts other than dirstate
2582 2583
2583 2584 If a transaction is running, invalidation of store is omitted,
2584 2585 because discarding in-memory changes might cause inconsistency
2585 2586 (e.g. an incomplete fncache causes unintentional failure, but
2586 2587 a redundant one doesn't).
2587 2588 '''
2588 2589 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2589 2590 for k in list(self._filecache.keys()):
2590 2591 # dirstate is invalidated separately in invalidatedirstate()
2591 2592 if k == b'dirstate':
2592 2593 continue
2593 2594 if (
2594 2595 k == b'changelog'
2595 2596 and self.currenttransaction()
2596 2597 and self.changelog._delayed
2597 2598 ):
2598 2599 # The changelog object may store unwritten revisions. We don't
2599 2600 # want to lose them.
2600 2601 # TODO: Solve the problem instead of working around it.
2601 2602 continue
2602 2603
2603 2604 if clearfilecache:
2604 2605 del self._filecache[k]
2605 2606 try:
2606 2607 delattr(unfiltered, k)
2607 2608 except AttributeError:
2608 2609 pass
2609 2610 self.invalidatecaches()
2610 2611 if not self.currenttransaction():
2611 2612 # TODO: Changing contents of store outside transaction
2612 2613 # causes inconsistency. We should make in-memory store
2613 2614 # changes detectable, and abort if changed.
2614 2615 self.store.invalidatecaches()
2615 2616
2616 2617 def invalidateall(self):
2617 2618 '''Fully invalidates both store and non-store parts, causing the
2618 2619 subsequent operation to reread any outside changes.'''
2619 2620 # extension should hook this to invalidate its caches
2620 2621 self.invalidate()
2621 2622 self.invalidatedirstate()
2622 2623
2623 2624 @unfilteredmethod
2624 2625 def _refreshfilecachestats(self, tr):
2625 2626 """Reload stats of cached files so that they are flagged as valid"""
2626 2627 for k, ce in self._filecache.items():
2627 2628 k = pycompat.sysstr(k)
2628 2629 if k == 'dirstate' or k not in self.__dict__:
2629 2630 continue
2630 2631 ce.refresh()
2631 2632
2632 2633 def _lock(
2633 2634 self,
2634 2635 vfs,
2635 2636 lockname,
2636 2637 wait,
2637 2638 releasefn,
2638 2639 acquirefn,
2639 2640 desc,
2640 2641 inheritchecker=None,
2641 2642 parentenvvar=None,
2642 2643 ):
2643 2644 parentlock = None
2644 2645 # the contents of parentenvvar are used by the underlying lock to
2645 2646 # determine whether it can be inherited
2646 2647 if parentenvvar is not None:
2647 2648 parentlock = encoding.environ.get(parentenvvar)
2648 2649
2649 2650 timeout = 0
2650 2651 warntimeout = 0
2651 2652 if wait:
2652 2653 timeout = self.ui.configint(b"ui", b"timeout")
2653 2654 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2654 2655 # internal config: ui.signal-safe-lock
2655 2656 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2656 2657
2657 2658 l = lockmod.trylock(
2658 2659 self.ui,
2659 2660 vfs,
2660 2661 lockname,
2661 2662 timeout,
2662 2663 warntimeout,
2663 2664 releasefn=releasefn,
2664 2665 acquirefn=acquirefn,
2665 2666 desc=desc,
2666 2667 inheritchecker=inheritchecker,
2667 2668 parentlock=parentlock,
2668 2669 signalsafe=signalsafe,
2669 2670 )
2670 2671 return l
2671 2672
2672 2673 def _afterlock(self, callback):
2673 2674 """add a callback to be run when the repository is fully unlocked
2674 2675
2675 2676 The callback will be executed when the outermost lock is released
2676 2677 (with wlock being higher level than 'lock')."""
2677 2678 for ref in (self._wlockref, self._lockref):
2678 2679 l = ref and ref()
2679 2680 if l and l.held:
2680 2681 l.postrelease.append(callback)
2681 2682 break
2682 2683 else: # no lock has been found.
2683 2684 callback(True)
2684 2685
2685 2686 def lock(self, wait=True):
2686 2687 '''Lock the repository store (.hg/store) and return a weak reference
2687 2688 to the lock. Use this before modifying the store (e.g. committing or
2688 2689 stripping). If you are opening a transaction, get a lock as well.
2689 2690 
2690 2691 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2691 2692 'wlock' first to avoid a deadlock hazard.'''
2692 2693 l = self._currentlock(self._lockref)
2693 2694 if l is not None:
2694 2695 l.lock()
2695 2696 return l
2696 2697
2697 2698 l = self._lock(
2698 2699 vfs=self.svfs,
2699 2700 lockname=b"lock",
2700 2701 wait=wait,
2701 2702 releasefn=None,
2702 2703 acquirefn=self.invalidate,
2703 2704 desc=_(b'repository %s') % self.origroot,
2704 2705 )
2705 2706 self._lockref = weakref.ref(l)
2706 2707 return l
2707 2708
2708 2709 def _wlockchecktransaction(self):
2709 2710 if self.currenttransaction() is not None:
2710 2711 raise error.LockInheritanceContractViolation(
2711 2712 b'wlock cannot be inherited in the middle of a transaction'
2712 2713 )
2713 2714
2714 2715 def wlock(self, wait=True):
2715 2716 '''Lock the non-store parts of the repository (everything under
2716 2717 .hg except .hg/store) and return a weak reference to the lock.
2717 2718
2718 2719 Use this before modifying files in .hg.
2719 2720
2720 2721 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2721 2722 'wlock' first to avoid a deadlock hazard.'''
2722 2723 l = self._wlockref and self._wlockref()
2723 2724 if l is not None and l.held:
2724 2725 l.lock()
2725 2726 return l
2726 2727
2727 2728 # We do not need to check for non-waiting lock acquisition. Such
2728 2729 # acquisitions would not cause a deadlock as they would just fail.
2729 2730 if wait and (
2730 2731 self.ui.configbool(b'devel', b'all-warnings')
2731 2732 or self.ui.configbool(b'devel', b'check-locks')
2732 2733 ):
2733 2734 if self._currentlock(self._lockref) is not None:
2734 2735 self.ui.develwarn(b'"wlock" acquired after "lock"')
2735 2736
2736 2737 def unlock():
2737 2738 if self.dirstate.pendingparentchange():
2738 2739 self.dirstate.invalidate()
2739 2740 else:
2740 2741 self.dirstate.write(None)
2741 2742
2742 2743 self._filecache[b'dirstate'].refresh()
2743 2744
2744 2745 l = self._lock(
2745 2746 self.vfs,
2746 2747 b"wlock",
2747 2748 wait,
2748 2749 unlock,
2749 2750 self.invalidatedirstate,
2750 2751 _(b'working directory of %s') % self.origroot,
2751 2752 inheritchecker=self._wlockchecktransaction,
2752 2753 parentenvvar=b'HG_WLOCK_LOCKER',
2753 2754 )
2754 2755 self._wlockref = weakref.ref(l)
2755 2756 return l
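
# Correct acquisition order when both locks are needed (sketch, per the
# docstrings above):
#
#   with repo.wlock(): # always take 'wlock' first...
#       with repo.lock(): # ...and only then 'lock'
#           ...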
2756 2757
2757 2758 def _currentlock(self, lockref):
2758 2759 """Returns the lock if it's held, or None if it's not."""
2759 2760 if lockref is None:
2760 2761 return None
2761 2762 l = lockref()
2762 2763 if l is None or not l.held:
2763 2764 return None
2764 2765 return l
2765 2766
2766 2767 def currentwlock(self):
2767 2768 """Returns the wlock if it's held, or None if it's not."""
2768 2769 return self._currentlock(self._wlockref)
2769 2770
2770 2771 def _filecommit(
2771 2772 self,
2772 2773 fctx,
2773 2774 manifest1,
2774 2775 manifest2,
2775 2776 linkrev,
2776 2777 tr,
2777 2778 changelist,
2778 2779 includecopymeta,
2779 2780 ):
2780 2781 """
2781 2782 commit an individual file as part of a larger transaction
2782 2783 """
2783 2784
2784 2785 fname = fctx.path()
2785 2786 fparent1 = manifest1.get(fname, nullid)
2786 2787 fparent2 = manifest2.get(fname, nullid)
2787 2788 if isinstance(fctx, context.filectx):
2788 2789 node = fctx.filenode()
2789 2790 if node in [fparent1, fparent2]:
2790 2791 self.ui.debug(b'reusing %s filelog entry\n' % fname)
2791 2792 if (
2792 2793 fparent1 != nullid
2793 2794 and manifest1.flags(fname) != fctx.flags()
2794 2795 ) or (
2795 2796 fparent2 != nullid
2796 2797 and manifest2.flags(fname) != fctx.flags()
2797 2798 ):
2798 2799 changelist.append(fname)
2799 2800 return node
2800 2801
2801 2802 flog = self.file(fname)
2802 2803 meta = {}
2803 2804 cfname = fctx.copysource()
2804 2805 if cfname and cfname != fname:
2805 2806 # Mark the new revision of this file as a copy of another
2806 2807 # file. This copy data will effectively act as a parent
2807 2808 # of this new revision. If this is a merge, the first
2808 2809 # parent will be the nullid (meaning "look up the copy data")
2809 2810 # and the second one will be the other parent. For example:
2810 2811 #
2811 2812 # 0 --- 1 --- 3 rev1 changes file foo
2812 2813 # \ / rev2 renames foo to bar and changes it
2813 2814 # \- 2 -/ rev3 should have bar with all changes and
2814 2815 # should record that bar descends from
2815 2816 # bar in rev2 and foo in rev1
2816 2817 #
2817 2818 # this allows this merge to succeed:
2818 2819 #
2819 2820 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
2820 2821 # \ / merging rev3 and rev4 should use bar@rev2
2821 2822 # \- 2 --- 4 as the merge base
2822 2823 #
2823 2824
2824 2825 cnode = manifest1.get(cfname)
2825 2826 newfparent = fparent2
2826 2827
2827 2828 if manifest2: # branch merge
2828 2829 if fparent2 == nullid or cnode is None: # copied on remote side
2829 2830 if cfname in manifest2:
2830 2831 cnode = manifest2[cfname]
2831 2832 newfparent = fparent1
2832 2833
2833 2834 # Here, we used to search backwards through history to try to find
2834 2835 # where the file copy came from if the source of a copy was not in
2835 2836 # the parent directory. However, this doesn't actually make sense to
2836 2837 # do (what does a copy from something not in your working copy even
2837 2838 # mean?) and it causes bugs (e.g. issue4476). Instead, we will warn
2838 2839 # the user that copy information was dropped, so if they didn't
2839 2840 # expect this outcome it can be fixed, but this is the correct
2840 2841 # behavior in this circumstance.
2841 2842
2842 2843 if cnode:
2843 2844 self.ui.debug(
2844 2845 b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))
2845 2846 )
2846 2847 if includecopymeta:
2847 2848 meta[b"copy"] = cfname
2848 2849 meta[b"copyrev"] = hex(cnode)
2849 2850 fparent1, fparent2 = nullid, newfparent
2850 2851 else:
2851 2852 self.ui.warn(
2852 2853 _(
2853 2854 b"warning: can't find ancestor for '%s' "
2854 2855 b"copied from '%s'!\n"
2855 2856 )
2856 2857 % (fname, cfname)
2857 2858 )
2858 2859
2859 2860 elif fparent1 == nullid:
2860 2861 fparent1, fparent2 = fparent2, nullid
2861 2862 elif fparent2 != nullid:
2862 2863 # is one parent an ancestor of the other?
2863 2864 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2864 2865 if fparent1 in fparentancestors:
2865 2866 fparent1, fparent2 = fparent2, nullid
2866 2867 elif fparent2 in fparentancestors:
2867 2868 fparent2 = nullid
2868 2869 elif not fparentancestors:
2869 2870 # TODO: this whole if-else might be simplified much more
2870 2871 ms = mergestatemod.mergestate.read(self)
2871 2872 if (
2872 2873 fname in ms
2873 2874 and ms[fname] == mergestatemod.MERGE_RECORD_MERGED_OTHER
2874 2875 ):
2875 2876 fparent1, fparent2 = fparent2, nullid
2876 2877
2877 2878 # is the file changed?
2878 2879 text = fctx.data()
2879 2880 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
2880 2881 changelist.append(fname)
2881 2882 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2882 2883 # are just the flags changed during merge?
2883 2884 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2884 2885 changelist.append(fname)
2885 2886
2886 2887 return fparent1
2887 2888
2888 2889 def checkcommitpatterns(self, wctx, match, status, fail):
2889 2890 """check for commit arguments that aren't committable"""
2890 2891 if match.isexact() or match.prefix():
2891 2892 matched = set(status.modified + status.added + status.removed)
2892 2893
2893 2894 for f in match.files():
2894 2895 f = self.dirstate.normalize(f)
2895 2896 if f == b'.' or f in matched or f in wctx.substate:
2896 2897 continue
2897 2898 if f in status.deleted:
2898 2899 fail(f, _(b'file not found!'))
2899 2900 # Is it a directory that exists or used to exist?
2900 2901 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2901 2902 d = f + b'/'
2902 2903 for mf in matched:
2903 2904 if mf.startswith(d):
2904 2905 break
2905 2906 else:
2906 2907 fail(f, _(b"no match under directory!"))
2907 2908 elif f not in self.dirstate:
2908 2909 fail(f, _(b"file not tracked!"))
2909 2910
2910 2911 @unfilteredmethod
2911 2912 def commit(
2912 2913 self,
2913 2914 text=b"",
2914 2915 user=None,
2915 2916 date=None,
2916 2917 match=None,
2917 2918 force=False,
2918 2919 editor=None,
2919 2920 extra=None,
2920 2921 ):
2921 2922 """Add a new revision to the current repository.
2922 2923 
2923 2924 Revision information is gathered from the working directory;
2924 2925 match can be used to filter the committed files. If editor is
2925 2926 supplied, it is called to get a commit message.
2926 2927 """
2927 2928 if extra is None:
2928 2929 extra = {}
2929 2930
2930 2931 def fail(f, msg):
2931 2932 raise error.Abort(b'%s: %s' % (f, msg))
2932 2933
2933 2934 if not match:
2934 2935 match = matchmod.always()
2935 2936
2936 2937 if not force:
2937 2938 match.bad = fail
2938 2939
2939 2940 # lock() for recent changelog (see issue4368)
2940 2941 with self.wlock(), self.lock():
2941 2942 wctx = self[None]
2942 2943 merge = len(wctx.parents()) > 1
2943 2944
2944 2945 if not force and merge and not match.always():
2945 2946 raise error.Abort(
2946 2947 _(
2947 2948 b'cannot partially commit a merge '
2948 2949 b'(do not specify files or patterns)'
2949 2950 )
2950 2951 )
2951 2952
2952 2953 status = self.status(match=match, clean=force)
2953 2954 if force:
2954 2955 status.modified.extend(
2955 2956 status.clean
2956 2957 ) # mq may commit clean files
2957 2958
2958 2959 # check subrepos
2959 2960 subs, commitsubs, newstate = subrepoutil.precommit(
2960 2961 self.ui, wctx, status, match, force=force
2961 2962 )
2962 2963
2963 2964 # make sure all explicit patterns are matched
2964 2965 if not force:
2965 2966 self.checkcommitpatterns(wctx, match, status, fail)
2966 2967
2967 2968 cctx = context.workingcommitctx(
2968 2969 self, status, text, user, date, extra
2969 2970 )
2970 2971
2971 2972 ms = mergestatemod.mergestate.read(self)
2972 2973 mergeutil.checkunresolved(ms)
2973 2974
2974 2975 # internal config: ui.allowemptycommit
2975 2976 allowemptycommit = (
2976 2977 wctx.branch() != wctx.p1().branch()
2977 2978 or extra.get(b'close')
2978 2979 or merge
2979 2980 or cctx.files()
2980 2981 or self.ui.configbool(b'ui', b'allowemptycommit')
2981 2982 )
2982 2983 if not allowemptycommit:
2983 2984 self.ui.debug(b'nothing to commit, clearing merge state\n')
2984 2985 ms.reset()
2985 2986 return None
2986 2987
2987 2988 if merge and cctx.deleted():
2988 2989 raise error.Abort(_(b"cannot commit merge with missing files"))
2989 2990
2990 2991 if editor:
2991 2992 cctx._text = editor(self, cctx, subs)
2992 2993 edited = text != cctx._text
2993 2994
2994 2995 # Save commit message in case this transaction gets rolled back
2995 2996 # (e.g. by a pretxncommit hook). Leave the content alone on
2996 2997 # the assumption that the user will use the same editor again.
2997 2998 msgfn = self.savecommitmessage(cctx._text)
2998 2999
2999 3000 # commit subs and write new state
3000 3001 if subs:
3001 3002 uipathfn = scmutil.getuipathfn(self)
3002 3003 for s in sorted(commitsubs):
3003 3004 sub = wctx.sub(s)
3004 3005 self.ui.status(
3005 3006 _(b'committing subrepository %s\n')
3006 3007 % uipathfn(subrepoutil.subrelpath(sub))
3007 3008 )
3008 3009 sr = sub.commit(cctx._text, user, date)
3009 3010 newstate[s] = (newstate[s][0], sr)
3010 3011 subrepoutil.writestate(self, newstate)
3011 3012
3012 3013 p1, p2 = self.dirstate.parents()
3013 3014 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
3014 3015 try:
3015 3016 self.hook(
3016 3017 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3017 3018 )
3018 3019 with self.transaction(b'commit'):
3019 3020 ret = self.commitctx(cctx, True)
3020 3021 # update bookmarks, dirstate and mergestate
3021 3022 bookmarks.update(self, [p1, p2], ret)
3022 3023 cctx.markcommitted(ret)
3023 3024 ms.reset()
3024 3025 except: # re-raises
3025 3026 if edited:
3026 3027 self.ui.write(
3027 3028 _(b'note: commit message saved in %s\n') % msgfn
3028 3029 )
3029 3030 self.ui.write(
3030 3031 _(
3031 3032 b"note: use 'hg commit --logfile "
3032 3033 b".hg/last-message.txt --edit' to reuse it\n"
3033 3034 )
3034 3035 )
3035 3036 raise
3036 3037
3037 3038 def commithook(unused_success):
3038 3039 # hack for commands that use a temporary commit (eg: histedit)
3039 3040 # the temporary commit may already be stripped when the hook runs
3040 3041 if self.changelog.hasnode(ret):
3041 3042 self.hook(
3042 3043 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3043 3044 )
3044 3045
3045 3046 self._afterlock(commithook)
3046 3047 return ret
3047 3048
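
A minimal usage sketch for the method above, assuming it runs inside an extension command that received the usual (ui, repo) pair; the file path and commit message are hypothetical:

    # hedged sketch: commit only the changes under a hypothetical path
    from mercurial import match as matchmod

    def commit_one_file(ui, repo):
        m = matchmod.match(repo.root, repo.getcwd(), [b'path:src/foo.py'])
        node = repo.commit(
            text=b'fix: adjust foo',
            user=b'alice <alice@example.com>',
            match=m,
        )
        if node is None:
            ui.status(b'nothing to commit\n')
        return node
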
3048 3049 @unfilteredmethod
3049 3050 def commitctx(self, ctx, error=False, origctx=None):
3050 3051 """Add a new revision to current repository.
3051 3052 Revision information is passed via the context argument.
3052 3053
3053 3054 ctx.files() should list all files involved in this commit, i.e.
3054 3055 modified/added/removed files. On merge, it may be wider than the
3055 3056 files list actually committed, since any file nodes derived directly
3056 3057 from p1 or p2 are excluded from the committed ctx.files().
3057 3058
3058 3059 origctx is for convert to work around the problem that bug
3059 3060 fixes to the files list in changesets change hashes. For
3060 3061 convert to be the identity, it can pass an origctx and this
3061 3062 function will use the same files list when it makes sense to
3062 3063 do so.
3063 3064 """
3064 3065
3065 3066 p1, p2 = ctx.p1(), ctx.p2()
3066 3067 user = ctx.user()
3067 3068
3068 3069 if self.filecopiesmode == b'changeset-sidedata':
3069 3070 writechangesetcopy = True
3070 3071 writefilecopymeta = True
3071 3072 writecopiesto = None
3072 3073 else:
3073 3074 writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
3074 3075 writefilecopymeta = writecopiesto != b'changeset-only'
3075 3076 writechangesetcopy = writecopiesto in (
3076 3077 b'changeset-only',
3077 3078 b'compatibility',
3078 3079 )
3079 3080 p1copies, p2copies = None, None
3080 3081 if writechangesetcopy:
3081 3082 p1copies = ctx.p1copies()
3082 3083 p2copies = ctx.p2copies()
3083 3084 filesadded, filesremoved = None, None
3084 3085 with self.lock(), self.transaction(b"commit") as tr:
3085 3086 trp = weakref.proxy(tr)
3086 3087
3087 3088 if ctx.manifestnode():
3088 3089 # reuse an existing manifest revision
3089 3090 self.ui.debug(b'reusing known manifest\n')
3090 3091 mn = ctx.manifestnode()
3091 3092 files = ctx.files()
3092 3093 if writechangesetcopy:
3093 3094 filesadded = ctx.filesadded()
3094 3095 filesremoved = ctx.filesremoved()
3095 3096 elif ctx.files():
3096 3097 m1ctx = p1.manifestctx()
3097 3098 m2ctx = p2.manifestctx()
3098 3099 mctx = m1ctx.copy()
3099 3100
3100 3101 m = mctx.read()
3101 3102 m1 = m1ctx.read()
3102 3103 m2 = m2ctx.read()
3103 3104
3104 3105 # check in files
3105 3106 added = []
3106 3107 changed = []
3107 3108 removed = list(ctx.removed())
3108 3109 linkrev = len(self)
3109 3110 self.ui.note(_(b"committing files:\n"))
3110 3111 uipathfn = scmutil.getuipathfn(self)
3111 3112 for f in sorted(ctx.modified() + ctx.added()):
3112 3113 self.ui.note(uipathfn(f) + b"\n")
3113 3114 try:
3114 3115 fctx = ctx[f]
3115 3116 if fctx is None:
3116 3117 removed.append(f)
3117 3118 else:
3118 3119 added.append(f)
3119 3120 m[f] = self._filecommit(
3120 3121 fctx,
3121 3122 m1,
3122 3123 m2,
3123 3124 linkrev,
3124 3125 trp,
3125 3126 changed,
3126 3127 writefilecopymeta,
3127 3128 )
3128 3129 m.setflag(f, fctx.flags())
3129 3130 except OSError:
3130 3131 self.ui.warn(
3131 3132 _(b"trouble committing %s!\n") % uipathfn(f)
3132 3133 )
3133 3134 raise
3134 3135 except IOError as inst:
3135 3136 errcode = getattr(inst, 'errno', errno.ENOENT)
3136 3137 if error or errcode and errcode != errno.ENOENT:
3137 3138 self.ui.warn(
3138 3139 _(b"trouble committing %s!\n") % uipathfn(f)
3139 3140 )
3140 3141 raise
3141 3142
3142 3143 # update manifest
3143 3144 removed = [f for f in removed if f in m1 or f in m2]
3144 3145 drop = sorted([f for f in removed if f in m])
3145 3146 for f in drop:
3146 3147 del m[f]
3147 3148 if p2.rev() != nullrev:
3148
3149 @util.cachefunc
3150 def mas():
3151 p1n = p1.node()
3152 p2n = p2.node()
3153 cahs = self.changelog.commonancestorsheads(p1n, p2n)
3154 if not cahs:
3155 cahs = [nullrev]
3156 return [self[r].manifest() for r in cahs]
3157
3158 def deletionfromparent(f):
3159 # When a file is removed relative to p1 in a merge, this
3160 # function determines whether the absence is due to a
3161 # deletion from a parent, or whether the merge commit
3162 # itself deletes the file. We decide this by doing a
3163 # simplified three way merge of the manifest entry for
3164 # the file. There are two ways we decide the merge
3165 # itself didn't delete a file:
3166 # - neither parent (nor the merge) contain the file
3167 # - exactly one parent contains the file, and that
3168 # parent has the same filelog entry as the merge
3169 # ancestor (or all of them if there are two). In other
3170 # words, that parent left the file unchanged while the
3171 # other one deleted it.
3172 # One way to think about this is that deleting a file is
3173 # similar to emptying it, so the list of changed files
3174 # should be similar either way. The computation
3175 # described above is not done directly in _filecommit
3176 # when creating the list of changed files, however
3177 # it does something very similar by comparing filelog
3178 # nodes.
3179 if f in m1:
3180 return f not in m2 and all(
3181 f in ma and ma.find(f) == m1.find(f)
3182 for ma in mas()
3183 )
3184 elif f in m2:
3185 return all(
3186 f in ma and ma.find(f) == m2.find(f)
3187 for ma in mas()
3188 )
3189 else:
3190 return True
3191
3192 removed = [f for f in removed if not deletionfromparent(f)]
3149 rf = metadata.get_removal_filter(ctx, (p1, p2, m1, m2))
3150 removed = [f for f in removed if not rf(f)]
3193 3151
3194 3152 files = changed + removed
3195 3153 md = None
3196 3154 if not files:
3197 3155 # if no "files" actually changed in terms of the changelog,
3198 3156 # try hard to detect unmodified manifest entry so that the
3199 3157 # exact same commit can be reproduced later on convert.
3200 3158 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
3201 3159 if not files and md:
3202 3160 self.ui.debug(
3203 3161 b'not reusing manifest (no file change in '
3204 3162 b'changelog, but manifest differs)\n'
3205 3163 )
3206 3164 if files or md:
3207 3165 self.ui.note(_(b"committing manifest\n"))
3208 3166 # we're using narrowmatch here since it's already applied at
3209 3167 # other stages (such as dirstate.walk), so we're already
3210 3168 # ignoring things outside of narrowspec in most cases. The
3211 3169 # one case where we might have files outside the narrowspec
3212 3170 # at this point is merges, and we already error out in the
3213 3171 # case where the merge has files outside of the narrowspec,
3214 3172 # so this is safe.
3215 3173 mn = mctx.write(
3216 3174 trp,
3217 3175 linkrev,
3218 3176 p1.manifestnode(),
3219 3177 p2.manifestnode(),
3220 3178 added,
3221 3179 drop,
3222 3180 match=self.narrowmatch(),
3223 3181 )
3224 3182
3225 3183 if writechangesetcopy:
3226 3184 filesadded = [
3227 3185 f for f in changed if not (f in m1 or f in m2)
3228 3186 ]
3229 3187 filesremoved = removed
3230 3188 else:
3231 3189 self.ui.debug(
3232 3190 b'reusing manifest from p1 (listed files '
3233 3191 b'actually unchanged)\n'
3234 3192 )
3235 3193 mn = p1.manifestnode()
3236 3194 else:
3237 3195 self.ui.debug(b'reusing manifest from p1 (no file change)\n')
3238 3196 mn = p1.manifestnode()
3239 3197 files = []
3240 3198
3241 3199 if writecopiesto == b'changeset-only':
3242 3200 # If writing only to changeset extras, use None to indicate that
3243 3201 # no entry should be written. If writing to both, write an empty
3244 3202 # entry to prevent the reader from falling back to reading
3245 3203 # filelogs.
3246 3204 p1copies = p1copies or None
3247 3205 p2copies = p2copies or None
3248 3206 filesadded = filesadded or None
3249 3207 filesremoved = filesremoved or None
3250 3208
3251 3209 if origctx and origctx.manifestnode() == mn:
3252 3210 files = origctx.files()
3253 3211
3254 3212 # update changelog
3255 3213 self.ui.note(_(b"committing changelog\n"))
3256 3214 self.changelog.delayupdate(tr)
3257 3215 n = self.changelog.add(
3258 3216 mn,
3259 3217 files,
3260 3218 ctx.description(),
3261 3219 trp,
3262 3220 p1.node(),
3263 3221 p2.node(),
3264 3222 user,
3265 3223 ctx.date(),
3266 3224 ctx.extra().copy(),
3267 3225 p1copies,
3268 3226 p2copies,
3269 3227 filesadded,
3270 3228 filesremoved,
3271 3229 )
3272 3230 xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
3273 3231 self.hook(
3274 3232 b'pretxncommit',
3275 3233 throw=True,
3276 3234 node=hex(n),
3277 3235 parent1=xp1,
3278 3236 parent2=xp2,
3279 3237 )
3280 3238 # set the new commit in the proper phase
3281 3239 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
3282 3240 if targetphase:
3283 3241 # retracting the boundary does not alter parent changesets.
3284 3242 # if a parent has a higher phase, the resulting phase will
3285 3243 # be compliant anyway
3286 3244 #
3287 3245 # if minimal phase was 0 we don't need to retract anything
3288 3246 phases.registernew(self, tr, targetphase, [n])
3289 3247 return n
3290 3248
3291 3249 @unfilteredmethod
3292 3250 def destroying(self):
3293 3251 '''Inform the repository that nodes are about to be destroyed.
3294 3252 Intended for use by strip and rollback, so there's a common
3295 3253 place for anything that has to be done before destroying history.
3296 3254
3297 3255 This is mostly useful for saving state that is in memory and waiting
3298 3256 to be flushed when the current lock is released. Because a call to
3299 3257 destroyed is imminent, the repo will be invalidated causing those
3300 3258 changes to stay in memory (waiting for the next unlock), or vanish
3301 3259 completely.
3302 3260 '''
3303 3261 # When using the same lock to commit and strip, the phasecache is left
3304 3262 # dirty after committing. Then when we strip, the repo is invalidated,
3305 3263 # causing those changes to disappear.
3306 3264 if '_phasecache' in vars(self):
3307 3265 self._phasecache.write()
3308 3266
3309 3267 @unfilteredmethod
3310 3268 def destroyed(self):
3311 3269 '''Inform the repository that nodes have been destroyed.
3312 3270 Intended for use by strip and rollback, so there's a common
3313 3271 place for anything that has to be done after destroying history.
3314 3272 '''
3315 3273 # When one tries to:
3316 3274 # 1) destroy nodes thus calling this method (e.g. strip)
3317 3275 # 2) use phasecache somewhere (e.g. commit)
3318 3276 #
3319 3277 # then 2) will fail because the phasecache contains nodes that were
3320 3278 # removed. We can either remove phasecache from the filecache,
3321 3279 # causing it to reload next time it is accessed, or simply filter
3322 3280 # the removed nodes now and write the updated cache.
3323 3281 self._phasecache.filterunknown(self)
3324 3282 self._phasecache.write()
3325 3283
3326 3284 # refresh all repository caches
3327 3285 self.updatecaches()
3328 3286
3329 3287 # Ensure the persistent tag cache is updated. Doing it now
3330 3288 # means that the tag cache only has to worry about destroyed
3331 3289 # heads immediately after a strip/rollback. That in turn
3332 3290 # guarantees that "cachetip == currenttip" (comparing both rev
3333 3291 # and node) always means no nodes have been added or destroyed.
3334 3292
3335 3293 # XXX this is suboptimal when qrefresh'ing: we strip the current
3336 3294 # head, refresh the tag cache, then immediately add a new head.
3337 3295 # But I think doing it this way is necessary for the "instant
3338 3296 # tag cache retrieval" case to work.
3339 3297 self.invalidate()
3340 3298
3341 3299 def status(
3342 3300 self,
3343 3301 node1=b'.',
3344 3302 node2=None,
3345 3303 match=None,
3346 3304 ignored=False,
3347 3305 clean=False,
3348 3306 unknown=False,
3349 3307 listsubrepos=False,
3350 3308 ):
3351 3309 '''a convenience method that calls node1.status(node2)'''
3352 3310 return self[node1].status(
3353 3311 node2, match, ignored, clean, unknown, listsubrepos
3354 3312 )
3355 3313
3356 3314 def addpostdsstatus(self, ps):
3357 3315 """Add a callback to run within the wlock, at the point at which status
3358 3316 fixups happen.
3359 3317
3360 3318 On status completion, callback(wctx, status) will be called with the
3361 3319 wlock held, unless the dirstate has changed from underneath or the wlock
3362 3320 couldn't be grabbed.
3363 3321
3364 3322 Callbacks should not capture and use a cached copy of the dirstate --
3365 3323 it might change in the meanwhile. Instead, they should access the
3366 3324 dirstate via wctx.repo().dirstate.
3367 3325
3368 3326 This list is emptied out after each status run -- extensions should
3369 3327 make sure they add to this list each time dirstate.status is called.
3370 3328 Extensions should also make sure they don't call this for statuses
3371 3329 that don't involve the dirstate.
3372 3330 """
3373 3331
3374 3332 # The list is located here for uniqueness reasons -- it is actually
3375 3333 # managed by the workingctx, but that isn't unique per-repo.
3376 3334 self._postdsstatus.append(ps)
3377 3335
3378 3336 def postdsstatus(self):
3379 3337 """Used by workingctx to get the list of post-dirstate-status hooks."""
3380 3338 return self._postdsstatus
3381 3339
3382 3340 def clearpostdsstatus(self):
3383 3341 """Used by workingctx to clear post-dirstate-status hooks."""
3384 3342 del self._postdsstatus[:]
3385 3343
3386 3344 def heads(self, start=None):
3387 3345 if start is None:
3388 3346 cl = self.changelog
3389 3347 headrevs = reversed(cl.headrevs())
3390 3348 return [cl.node(rev) for rev in headrevs]
3391 3349
3392 3350 heads = self.changelog.heads(start)
3393 3351 # sort the output in rev descending order
3394 3352 return sorted(heads, key=self.changelog.rev, reverse=True)
3395 3353
3396 3354 def branchheads(self, branch=None, start=None, closed=False):
3397 3355 '''return a (possibly filtered) list of heads for the given branch
3398 3356
3399 3357 Heads are returned in topological order, from newest to oldest.
3400 3358 If branch is None, use the dirstate branch.
3401 3359 If start is not None, return only heads reachable from start.
3402 3360 If closed is True, return heads that are marked as closed as well.
3403 3361 '''
3404 3362 if branch is None:
3405 3363 branch = self[None].branch()
3406 3364 branches = self.branchmap()
3407 3365 if not branches.hasbranch(branch):
3408 3366 return []
3409 3367 # the cache returns heads ordered lowest to highest
3410 3368 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3411 3369 if start is not None:
3412 3370 # filter out the heads that cannot be reached from startrev
3413 3371 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3414 3372 bheads = [h for h in bheads if h in fbheads]
3415 3373 return bheads
3416 3374
3417 3375 def branches(self, nodes):
3418 3376 if not nodes:
3419 3377 nodes = [self.changelog.tip()]
3420 3378 b = []
3421 3379 for n in nodes:
3422 3380 t = n
3423 3381 while True:
3424 3382 p = self.changelog.parents(n)
3425 3383 if p[1] != nullid or p[0] == nullid:
3426 3384 b.append((t, n, p[0], p[1]))
3427 3385 break
3428 3386 n = p[0]
3429 3387 return b
3430 3388
3431 3389 def between(self, pairs):
3432 3390 r = []
3433 3391
3434 3392 for top, bottom in pairs:
3435 3393 n, l, i = top, [], 0
3436 3394 f = 1
3437 3395
3438 3396 while n != bottom and n != nullid:
3439 3397 p = self.changelog.parents(n)[0]
3440 3398 if i == f:
3441 3399 l.append(n)
3442 3400 f = f * 2
3443 3401 n = p
3444 3402 i += 1
3445 3403
3446 3404 r.append(l)
3447 3405
3448 3406 return r
3449 3407
3450 3408 def checkpush(self, pushop):
3451 3409 """Extensions can override this function if additional checks have
3452 3410 to be performed before pushing, or call it if they override push
3453 3411 command.
3454 3412 """
3455 3413
3456 3414 @unfilteredpropertycache
3457 3415 def prepushoutgoinghooks(self):
3458 3416 """Return util.hooks consisting of a pushop with repo, remote, outgoing
3459 3417 methods, which are called before pushing changesets.
3460 3418 """
3461 3419 return util.hooks()
3462 3420
3463 3421 def pushkey(self, namespace, key, old, new):
3464 3422 try:
3465 3423 tr = self.currenttransaction()
3466 3424 hookargs = {}
3467 3425 if tr is not None:
3468 3426 hookargs.update(tr.hookargs)
3469 3427 hookargs = pycompat.strkwargs(hookargs)
3470 3428 hookargs['namespace'] = namespace
3471 3429 hookargs['key'] = key
3472 3430 hookargs['old'] = old
3473 3431 hookargs['new'] = new
3474 3432 self.hook(b'prepushkey', throw=True, **hookargs)
3475 3433 except error.HookAbort as exc:
3476 3434 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3477 3435 if exc.hint:
3478 3436 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3479 3437 return False
3480 3438 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3481 3439 ret = pushkey.push(self, namespace, key, old, new)
3482 3440
3483 3441 def runhook(unused_success):
3484 3442 self.hook(
3485 3443 b'pushkey',
3486 3444 namespace=namespace,
3487 3445 key=key,
3488 3446 old=old,
3489 3447 new=new,
3490 3448 ret=ret,
3491 3449 )
3492 3450
3493 3451 self._afterlock(runhook)
3494 3452 return ret
3495 3453
3496 3454 def listkeys(self, namespace):
3497 3455 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3498 3456 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3499 3457 values = pushkey.list(self, namespace)
3500 3458 self.hook(b'listkeys', namespace=namespace, values=values)
3501 3459 return values
3502 3460
3503 3461 def debugwireargs(self, one, two, three=None, four=None, five=None):
3504 3462 '''used to test argument passing over the wire'''
3505 3463 return b"%s %s %s %s %s" % (
3506 3464 one,
3507 3465 two,
3508 3466 pycompat.bytestr(three),
3509 3467 pycompat.bytestr(four),
3510 3468 pycompat.bytestr(five),
3511 3469 )
3512 3470
3513 3471 def savecommitmessage(self, text):
3514 3472 fp = self.vfs(b'last-message.txt', b'wb')
3515 3473 try:
3516 3474 fp.write(text)
3517 3475 finally:
3518 3476 fp.close()
3519 3477 return self.pathto(fp.name[len(self.root) + 1 :])
3520 3478
3521 3479
3522 3480 # used to avoid circular references so destructors work
3523 3481 def aftertrans(files):
3524 3482 renamefiles = [tuple(t) for t in files]
3525 3483
3526 3484 def a():
3527 3485 for vfs, src, dest in renamefiles:
3528 3486 # if src and dest refer to a same file, vfs.rename is a no-op,
3529 3487 # leaving both src and dest on disk. delete dest to make sure
3530 3488 # the rename couldn't be such a no-op.
3531 3489 vfs.tryunlink(dest)
3532 3490 try:
3533 3491 vfs.rename(src, dest)
3534 3492 except OSError: # journal file does not yet exist
3535 3493 pass
3536 3494
3537 3495 return a
3538 3496
3539 3497
3540 3498 def undoname(fn):
3541 3499 base, name = os.path.split(fn)
3542 3500 assert name.startswith(b'journal')
3543 3501 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3544 3502
3545 3503
3546 3504 def instance(ui, path, create, intents=None, createopts=None):
3547 3505 localpath = util.urllocalpath(path)
3548 3506 if create:
3549 3507 createrepository(ui, localpath, createopts=createopts)
3550 3508
3551 3509 return makelocalrepository(ui, localpath, intents=intents)
3552 3510
3553 3511
3554 3512 def islocal(path):
3555 3513 return True
3556 3514
3557 3515
3558 3516 def defaultcreateopts(ui, createopts=None):
3559 3517 """Populate the default creation options for a repository.
3560 3518
3561 3519 A dictionary of explicitly requested creation options can be passed
3562 3520 in. Missing keys will be populated.
3563 3521 """
3564 3522 createopts = dict(createopts or {})
3565 3523
3566 3524 if b'backend' not in createopts:
3567 3525 # experimental config: storage.new-repo-backend
3568 3526 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3569 3527
3570 3528 return createopts
3571 3529
3572 3530
3573 3531 def newreporequirements(ui, createopts):
3574 3532 """Determine the set of requirements for a new local repository.
3575 3533
3576 3534 Extensions can wrap this function to specify custom requirements for
3577 3535 new repositories.
3578 3536 """
3579 3537 # If the repo is being created from a shared repository, we copy
3580 3538 # its requirements.
3581 3539 if b'sharedrepo' in createopts:
3582 3540 requirements = set(createopts[b'sharedrepo'].requirements)
3583 3541 if createopts.get(b'sharedrelative'):
3584 3542 requirements.add(b'relshared')
3585 3543 else:
3586 3544 requirements.add(b'shared')
3587 3545
3588 3546 return requirements
3589 3547
3590 3548 if b'backend' not in createopts:
3591 3549 raise error.ProgrammingError(
3592 3550 b'backend key not present in createopts; '
3593 3551 b'was defaultcreateopts() called?'
3594 3552 )
3595 3553
3596 3554 if createopts[b'backend'] != b'revlogv1':
3597 3555 raise error.Abort(
3598 3556 _(
3599 3557 b'unable to determine repository requirements for '
3600 3558 b'storage backend: %s'
3601 3559 )
3602 3560 % createopts[b'backend']
3603 3561 )
3604 3562
3605 3563 requirements = {b'revlogv1'}
3606 3564 if ui.configbool(b'format', b'usestore'):
3607 3565 requirements.add(b'store')
3608 3566 if ui.configbool(b'format', b'usefncache'):
3609 3567 requirements.add(b'fncache')
3610 3568 if ui.configbool(b'format', b'dotencode'):
3611 3569 requirements.add(b'dotencode')
3612 3570
3613 3571 compengines = ui.configlist(b'format', b'revlog-compression')
3614 3572 for compengine in compengines:
3615 3573 if compengine in util.compengines:
3616 3574 break
3617 3575 else:
3618 3576 raise error.Abort(
3619 3577 _(
3620 3578 b'compression engines %s defined by '
3621 3579 b'format.revlog-compression not available'
3622 3580 )
3623 3581 % b', '.join(b'"%s"' % e for e in compengines),
3624 3582 hint=_(
3625 3583 b'run "hg debuginstall" to list available '
3626 3584 b'compression engines'
3627 3585 ),
3628 3586 )
3629 3587
3630 3588 # zlib is the historical default and doesn't need an explicit requirement.
3631 3589 if compengine == b'zstd':
3632 3590 requirements.add(b'revlog-compression-zstd')
3633 3591 elif compengine != b'zlib':
3634 3592 requirements.add(b'exp-compression-%s' % compengine)
3635 3593
3636 3594 if scmutil.gdinitconfig(ui):
3637 3595 requirements.add(b'generaldelta')
3638 3596 if ui.configbool(b'format', b'sparse-revlog'):
3639 3597 requirements.add(SPARSEREVLOG_REQUIREMENT)
3640 3598
3641 3599 # experimental config: format.exp-use-side-data
3642 3600 if ui.configbool(b'format', b'exp-use-side-data'):
3643 3601 requirements.add(SIDEDATA_REQUIREMENT)
3644 3602 # experimental config: format.exp-use-copies-side-data-changeset
3645 3603 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3646 3604 requirements.add(SIDEDATA_REQUIREMENT)
3647 3605 requirements.add(COPIESSDC_REQUIREMENT)
3648 3606 if ui.configbool(b'experimental', b'treemanifest'):
3649 3607 requirements.add(b'treemanifest')
3650 3608
3651 3609 revlogv2 = ui.config(b'experimental', b'revlogv2')
3652 3610 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3653 3611 requirements.remove(b'revlogv1')
3654 3612 # generaldelta is implied by revlogv2.
3655 3613 requirements.discard(b'generaldelta')
3656 3614 requirements.add(REVLOGV2_REQUIREMENT)
3657 3615 # experimental config: format.internal-phase
3658 3616 if ui.configbool(b'format', b'internal-phase'):
3659 3617 requirements.add(b'internal-phase')
3660 3618
3661 3619 if createopts.get(b'narrowfiles'):
3662 3620 requirements.add(repository.NARROW_REQUIREMENT)
3663 3621
3664 3622 if createopts.get(b'lfs'):
3665 3623 requirements.add(b'lfs')
3666 3624
3667 3625 if ui.configbool(b'format', b'bookmarks-in-store'):
3668 3626 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3669 3627
3670 3628 if ui.configbool(b'format', b'use-persistent-nodemap'):
3671 3629 requirements.add(NODEMAP_REQUIREMENT)
3672 3630
3673 3631 return requirements
3674 3632
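
As the docstring notes, extensions can wrap this function. A minimal sketch, assuming a hypothetical extension, config knob, and requirement name:

    # hedged sketch: an extension adding its own requirement to new repos
    from mercurial import extensions, localrepo

    def _newreporequirements(orig, ui, createopts):
        requirements = orig(ui, createopts)
        if ui.configbool(b'myext', b'fancy-store'):  # hypothetical config
            requirements.add(b'exp-myext-fancy-store')
        return requirements

    def extsetup(ui):
        extensions.wrapfunction(
            localrepo, 'newreporequirements', _newreporequirements
        )
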
3675 3633
3676 3634 def filterknowncreateopts(ui, createopts):
3677 3635 """Filters a dict of repo creation options against options that are known.
3678 3636
3679 3637 Receives a dict of repo creation options and returns a dict of those
3680 3638 options that we don't know how to handle.
3681 3639
3682 3640 This function is called as part of repository creation. If the
3683 3641 returned dict contains any items, repository creation will not
3684 3642 be allowed, as it means there was a request to create a repository
3685 3643 with options not recognized by loaded code.
3686 3644
3687 3645 Extensions can wrap this function to filter out creation options
3688 3646 they know how to handle.
3689 3647 """
3690 3648 known = {
3691 3649 b'backend',
3692 3650 b'lfs',
3693 3651 b'narrowfiles',
3694 3652 b'sharedrepo',
3695 3653 b'sharedrelative',
3696 3654 b'shareditems',
3697 3655 b'shallowfilestore',
3698 3656 }
3699 3657
3700 3658 return {k: v for k, v in createopts.items() if k not in known}
3701 3659
3702 3660
3703 3661 def createrepository(ui, path, createopts=None):
3704 3662 """Create a new repository in a vfs.
3705 3663
3706 3664 ``path`` path to the new repo's working directory.
3707 3665 ``createopts`` options for the new repository.
3708 3666
3709 3667 The following keys for ``createopts`` are recognized:
3710 3668
3711 3669 backend
3712 3670 The storage backend to use.
3713 3671 lfs
3714 3672 Repository will be created with ``lfs`` requirement. The lfs extension
3715 3673 will automatically be loaded when the repository is accessed.
3716 3674 narrowfiles
3717 3675 Set up repository to support narrow file storage.
3718 3676 sharedrepo
3719 3677 Repository object from which storage should be shared.
3720 3678 sharedrelative
3721 3679 Boolean indicating if the path to the shared repo should be
3722 3680 stored as relative. By default, the pointer to the "parent" repo
3723 3681 is stored as an absolute path.
3724 3682 shareditems
3725 3683 Set of items to share to the new repository (in addition to storage).
3726 3684 shallowfilestore
3727 3685 Indicates that storage for files should be shallow (not all ancestor
3728 3686 revisions are known).
3729 3687 """
3730 3688 createopts = defaultcreateopts(ui, createopts=createopts)
3731 3689
3732 3690 unknownopts = filterknowncreateopts(ui, createopts)
3733 3691
3734 3692 if not isinstance(unknownopts, dict):
3735 3693 raise error.ProgrammingError(
3736 3694 b'filterknowncreateopts() did not return a dict'
3737 3695 )
3738 3696
3739 3697 if unknownopts:
3740 3698 raise error.Abort(
3741 3699 _(
3742 3700 b'unable to create repository because of unknown '
3743 3701 b'creation option: %s'
3744 3702 )
3745 3703 % b', '.join(sorted(unknownopts)),
3746 3704 hint=_(b'is a required extension not loaded?'),
3747 3705 )
3748 3706
3749 3707 requirements = newreporequirements(ui, createopts=createopts)
3750 3708
3751 3709 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3752 3710
3753 3711 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3754 3712 if hgvfs.exists():
3755 3713 raise error.RepoError(_(b'repository %s already exists') % path)
3756 3714
3757 3715 if b'sharedrepo' in createopts:
3758 3716 sharedpath = createopts[b'sharedrepo'].sharedpath
3759 3717
3760 3718 if createopts.get(b'sharedrelative'):
3761 3719 try:
3762 3720 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3763 3721 except (IOError, ValueError) as e:
3764 3722 # ValueError is raised on Windows if the drive letters differ
3765 3723 # on each path.
3766 3724 raise error.Abort(
3767 3725 _(b'cannot calculate relative path'),
3768 3726 hint=stringutil.forcebytestr(e),
3769 3727 )
3770 3728
3771 3729 if not wdirvfs.exists():
3772 3730 wdirvfs.makedirs()
3773 3731
3774 3732 hgvfs.makedir(notindexed=True)
3775 3733 if b'sharedrepo' not in createopts:
3776 3734 hgvfs.mkdir(b'cache')
3777 3735 hgvfs.mkdir(b'wcache')
3778 3736
3779 3737 if b'store' in requirements and b'sharedrepo' not in createopts:
3780 3738 hgvfs.mkdir(b'store')
3781 3739
3782 3740 # We create an invalid changelog outside the store so very old
3783 3741 # Mercurial versions (which didn't know about the requirements
3784 3742 # file) encounter an error on reading the changelog. This
3785 3743 # effectively locks out old clients and prevents them from
3786 3744 # mucking with a repo in an unknown format.
3787 3745 #
3788 3746 # The revlog header has version 2, which won't be recognized by
3789 3747 # such old clients.
3790 3748 hgvfs.append(
3791 3749 b'00changelog.i',
3792 3750 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3793 3751 b'layout',
3794 3752 )
3795 3753
3796 3754 scmutil.writerequires(hgvfs, requirements)
3797 3755
3798 3756 # Write out file telling readers where to find the shared store.
3799 3757 if b'sharedrepo' in createopts:
3800 3758 hgvfs.write(b'sharedpath', sharedpath)
3801 3759
3802 3760 if createopts.get(b'shareditems'):
3803 3761 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3804 3762 hgvfs.write(b'shared', shared)
3805 3763
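
A minimal sketch of driving createrepository() directly, with a hypothetical target path:

    # hedged sketch: create a repo carrying the 'lfs' requirement, then open it
    from mercurial import localrepo, ui as uimod

    ui = uimod.ui.load()
    localrepo.createrepository(ui, b'/tmp/newrepo', createopts={b'lfs': True})
    repo = localrepo.instance(ui, b'/tmp/newrepo', create=False)
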
3806 3764
3807 3765 def poisonrepository(repo):
3808 3766 """Poison a repository instance so it can no longer be used."""
3809 3767 # Perform any cleanup on the instance.
3810 3768 repo.close()
3811 3769
3812 3770 # Our strategy is to replace the type of the object with one that
3813 3771 # has all attribute lookups result in error.
3814 3772 #
3815 3773 # But we have to allow the close() method because some constructors
3816 3774 # of repos call close() on repo references.
3817 3775 class poisonedrepository(object):
3818 3776 def __getattribute__(self, item):
3819 3777 if item == 'close':
3820 3778 return object.__getattribute__(self, item)
3821 3779
3822 3780 raise error.ProgrammingError(
3823 3781 b'repo instances should not be used after unshare'
3824 3782 )
3825 3783
3826 3784 def close(self):
3827 3785 pass
3828 3786
3829 3787 # We may have a repoview, which intercepts __setattr__. So be sure
3830 3788 # we operate at the lowest level possible.
3831 3789 object.__setattr__(repo, '__class__', poisonedrepository)
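
A short sketch of the resulting behaviour, assuming an existing repo object:

    # hedged sketch: after poisoning, only close() keeps working
    localrepo.poisonrepository(repo)
    repo.close()    # still allowed
    repo.status()   # raises error.ProgrammingError
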
@@ -1,268 +1,324 b''
1 1 # metadata.py -- code related to various metadata computation and access.
2 2 #
3 3 # Copyright 2019 Google, Inc <martinvonz@google.com>
4 4 # Copyright 2020 Pierre-Yves David <pierre-yves.david@octobus.net>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8 from __future__ import absolute_import, print_function
9 9
10 10 import multiprocessing
11 11
12 12 from . import (
13 13 error,
14 node,
14 15 pycompat,
15 16 util,
16 17 )
17 18
18 19 from .revlogutils import (
19 20 flagutil as sidedataflag,
20 21 sidedata as sidedatamod,
21 22 )
22 23
23 24
24 25 def computechangesetfilesadded(ctx):
25 26 """return the list of files added in a changeset
26 27 """
27 28 added = []
28 29 for f in ctx.files():
29 30 if not any(f in p for p in ctx.parents()):
30 31 added.append(f)
31 32 return added
32 33
33 34
35 def get_removal_filter(ctx, x=None):
36 """return a function to detect files "wrongly" detected as `removed`
37
38 When a file is removed relative to p1 in a merge, this
39 function determines whether the absence is due to a
40 deletion from a parent, or whether the merge commit
41 itself deletes the file. We decide this by doing a
42 simplified three way merge of the manifest entry for
43 the file. There are two ways we decide the merge
44 itself didn't delete a file:
45 - neither parent (nor the merge) contain the file
46 - exactly one parent contains the file, and that
47 parent has the same filelog entry as the merge
48 ancestor (or all of them if there are two). In other
49 words, that parent left the file unchanged while the
50 other one deleted it.
51 One way to think about this is that deleting a file is
52 similar to emptying it, so the list of changed files
53 should be similar either way. The computation
54 described above is not done directly in _filecommit
55 when creating the list of changed files, however
56 it does something very similar by comparing filelog
57 nodes.
58 """
59
60 if x is not None:
61 p1, p2, m1, m2 = x
62 else:
63 p1 = ctx.p1()
64 p2 = ctx.p2()
65 m1 = p1.manifest()
66 m2 = p2.manifest()
67
68 @util.cachefunc
69 def mas():
70 p1n = p1.node()
71 p2n = p2.node()
72 cahs = ctx.repo().changelog.commonancestorsheads(p1n, p2n)
73 if not cahs:
74 cahs = [node.nullrev]
75 return [ctx.repo()[r].manifest() for r in cahs]
76
77 def deletionfromparent(f):
78 if f in m1:
79 return f not in m2 and all(
80 f in ma and ma.find(f) == m1.find(f) for ma in mas()
81 )
82 elif f in m2:
83 return all(f in ma and ma.find(f) == m2.find(f) for ma in mas())
84 else:
85 return True
86
87 return deletionfromparent
88
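
A worked example of the filter, with hypothetical manifests; it mirrors the new call site in localrepo.commitctx shown above:

    # hedged sketch: a file f untouched in p1 (same filelog node as the
    # merge ancestor) and deleted in p2 -> the deletion comes from a
    # parent, so rf(f) is True and the merge does not list f as removed.
    # A file modified in p1 and deleted in p2 would instead keep
    # rf(f) == False and stay in the removed list.
    rf = get_removal_filter(ctx)                  # ctx: a merge changectx
    removed = [f for f in candidates if not rf(f)]
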
89
34 90 def computechangesetfilesremoved(ctx):
35 91 """return the list of files removed in a changeset
36 92 """
37 93 removed = []
38 94 for f in ctx.files():
39 95 if f not in ctx:
40 96 removed.append(f)
41 97 return removed
42 98
43 99
44 100 def computechangesetcopies(ctx):
45 101 """return the copies data for a changeset
46 102
47 103 The copies data are returned as a pair of dictionaries (p1copies, p2copies).
48 104
49 105 Each dictionary is of the form: `{newname: oldname}`
50 106 """
51 107 p1copies = {}
52 108 p2copies = {}
53 109 p1 = ctx.p1()
54 110 p2 = ctx.p2()
55 111 narrowmatch = ctx._repo.narrowmatch()
56 112 for dst in ctx.files():
57 113 if not narrowmatch(dst) or dst not in ctx:
58 114 continue
59 115 copied = ctx[dst].renamed()
60 116 if not copied:
61 117 continue
62 118 src, srcnode = copied
63 119 if src in p1 and p1[src].filenode() == srcnode:
64 120 p1copies[dst] = src
65 121 elif src in p2 and p2[src].filenode() == srcnode:
66 122 p2copies[dst] = src
67 123 return p1copies, p2copies
68 124
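
A hypothetical example of the returned shape:

    # hedged sketch: 'new.py' copied from 'old.py' relative to p1,
    # 'b.txt' renamed from 'a.txt' relative to p2
    p1copies, p2copies = computechangesetcopies(ctx)
    # p1copies == {b'new.py': b'old.py'}
    # p2copies == {b'b.txt': b'a.txt'}
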
69 125
70 126 def encodecopies(files, copies):
71 127 items = []
72 128 for i, dst in enumerate(files):
73 129 if dst in copies:
74 130 items.append(b'%d\0%s' % (i, copies[dst]))
75 131 if len(items) != len(copies):
76 132 raise error.ProgrammingError(
77 133 b'some copy targets missing from file list'
78 134 )
79 135 return b"\n".join(items)
80 136
81 137
82 138 def decodecopies(files, data):
83 139 try:
84 140 copies = {}
85 141 if not data:
86 142 return copies
87 143 for l in data.split(b'\n'):
88 144 strindex, src = l.split(b'\0')
89 145 i = int(strindex)
90 146 dst = files[i]
91 147 copies[dst] = src
92 148 return copies
93 149 except (ValueError, IndexError):
94 150 # Perhaps someone had chosen the same key name (e.g. "p1copies") and
95 151 # used different syntax for the value.
96 152 return None
97 153
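
A round-trip sketch of this encoding: each entry is '<index>\0<source>', newline-joined, where the index points into the sorted files list:

    files = [b'a.txt', b'b.txt', b'new.py']
    copies = {b'new.py': b'old.py'}
    data = encodecopies(files, copies)
    # data == b'2\x00old.py' (new.py sits at index 2)
    assert decodecopies(files, data) == copies
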
98 154
99 155 def encodefileindices(files, subset):
100 156 subset = set(subset)
101 157 indices = []
102 158 for i, f in enumerate(files):
103 159 if f in subset:
104 160 indices.append(b'%d' % i)
105 161 return b'\n'.join(indices)
106 162
107 163
108 164 def decodefileindices(files, data):
109 165 try:
110 166 subset = []
111 167 if not data:
112 168 return subset
113 169 for strindex in data.split(b'\n'):
114 170 i = int(strindex)
115 171 if i < 0 or i >= len(files):
116 172 return None
117 173 subset.append(files[i])
118 174 return subset
119 175 except (ValueError, IndexError):
120 176 # Perhaps someone had chosen the same key name (e.g. "added") and
121 177 # used different syntax for the value.
122 178 return None
123 179
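
The added/removed sets use the same trick with bare indices, one per line:

    files = [b'a.txt', b'b.txt', b'new.py']
    added = [b'new.py']
    data = encodefileindices(files, added)
    # data == b'2'
    assert decodefileindices(files, data) == added
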
124 180
125 181 def _getsidedata(srcrepo, rev):
126 182 ctx = srcrepo[rev]
127 183 filescopies = computechangesetcopies(ctx)
128 184 filesadded = computechangesetfilesadded(ctx)
129 185 filesremoved = computechangesetfilesremoved(ctx)
130 186 sidedata = {}
131 187 if any([filescopies, filesadded, filesremoved]):
132 188 sortedfiles = sorted(ctx.files())
133 189 p1copies, p2copies = filescopies
134 190 p1copies = encodecopies(sortedfiles, p1copies)
135 191 p2copies = encodecopies(sortedfiles, p2copies)
136 192 filesadded = encodefileindices(sortedfiles, filesadded)
137 193 filesremoved = encodefileindices(sortedfiles, filesremoved)
138 194 if p1copies:
139 195 sidedata[sidedatamod.SD_P1COPIES] = p1copies
140 196 if p2copies:
141 197 sidedata[sidedatamod.SD_P2COPIES] = p2copies
142 198 if filesadded:
143 199 sidedata[sidedatamod.SD_FILESADDED] = filesadded
144 200 if filesremoved:
145 201 sidedata[sidedatamod.SD_FILESREMOVED] = filesremoved
146 202 return sidedata
147 203
148 204
149 205 def getsidedataadder(srcrepo, destrepo):
150 206 use_w = srcrepo.ui.configbool(b'experimental', b'worker.repository-upgrade')
151 207 if pycompat.iswindows or not use_w:
152 208 return _get_simple_sidedata_adder(srcrepo, destrepo)
153 209 else:
154 210 return _get_worker_sidedata_adder(srcrepo, destrepo)
155 211
156 212
157 213 def _sidedata_worker(srcrepo, revs_queue, sidedata_queue, tokens):
158 214 """The function used by worker precomputing sidedata
159 215
160 216 It read an input queue containing revision numbers
161 217 It write in an output queue containing (rev, <sidedata-map>)
162 218
163 219 The `None` input value is used as a stop signal.
164 220
165 221 The `tokens` semaphore is user to avoid having too many unprocessed
166 222 entries. The workers needs to acquire one token before fetching a task.
167 223 They will be released by the consumer of the produced data.
168 224 """
169 225 tokens.acquire()
170 226 rev = revs_queue.get()
171 227 while rev is not None:
172 228 data = _getsidedata(srcrepo, rev)
173 229 sidedata_queue.put((rev, data))
174 230 tokens.acquire()
175 231 rev = revs_queue.get()
176 232 # processing of `None` is completed, release the token.
177 233 tokens.release()
178 234
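
A self-contained sketch of the same back-pressure protocol, independent of Mercurial: workers take one token per fetched task and the consumer hands tokens back as it drains results, so at most the semaphore's initial value of results sits unprocessed:

    import multiprocessing

    def worker(inq, outq, tokens):
        tokens.acquire()
        item = inq.get()
        while item is not None:      # None is the stop signal
            outq.put((item, item * item))
            tokens.acquire()
            item = inq.get()
        tokens.release()             # token taken for the stop signal

    if __name__ == '__main__':
        tokens = multiprocessing.BoundedSemaphore(4)
        inq, outq = multiprocessing.Queue(), multiprocessing.Queue()
        w = multiprocessing.Process(target=worker, args=(inq, outq, tokens))
        w.start()
        for i in range(10):
            inq.put(i)
        inq.put(None)
        for _ in range(10):
            item, result = outq.get()
            tokens.release()         # hand the token back to the worker
        w.join()
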
179 235
180 236 BUFF_PER_WORKER = 50
181 237
182 238
183 239 def _get_worker_sidedata_adder(srcrepo, destrepo):
184 240 """The parallel version of the sidedata computation
185 241
186 242 This code spawns a pool of workers that precompute a buffer of sidedata
187 243 before we actually need them"""
188 244 # avoid circular import copies -> scmutil -> worker -> copies
189 245 from . import worker
190 246
191 247 nbworkers = worker._numworkers(srcrepo.ui)
192 248
193 249 tokens = multiprocessing.BoundedSemaphore(nbworkers * BUFF_PER_WORKER)
194 250 revsq = multiprocessing.Queue()
195 251 sidedataq = multiprocessing.Queue()
196 252
197 253 assert srcrepo.filtername is None
198 254 # queue all tasks beforehand; revision numbers are small and it makes
199 255 # synchronisation simpler
200 256 #
201 257 # Since the computation for each node can be quite expensive, the overhead
202 258 # of using a single queue is not relevant. In practice, most computations
203 259 # are fast but some are very expensive and dominate all the other smaller
204 260 # costs.
205 261 for r in srcrepo.changelog.revs():
206 262 revsq.put(r)
207 263 # queue the "no more tasks" markers
208 264 for i in range(nbworkers):
209 265 revsq.put(None)
210 266
211 267 allworkers = []
212 268 for i in range(nbworkers):
213 269 args = (srcrepo, revsq, sidedataq, tokens)
214 270 w = multiprocessing.Process(target=_sidedata_worker, args=args)
215 271 allworkers.append(w)
216 272 w.start()
217 273
218 274 # dictionary to store results for revisions higher than the one we are
219 275 # looking for. For example, if we need the sidedata map for 42 and 43 is
220 276 # received, we shelve 43 for later use.
221 277 staging = {}
222 278
223 279 def sidedata_companion(revlog, rev):
224 280 sidedata = {}
225 281 if util.safehasattr(revlog, 'filteredrevs'): # this is a changelog
226 282 # Was the data previously shelved?
227 283 sidedata = staging.pop(rev, None)
228 284 if sidedata is None:
229 285 # look at the queued results until we find the one we are looking
230 286 # for (shelving the other ones)
231 287 r, sidedata = sidedataq.get()
232 288 while r != rev:
233 289 staging[r] = sidedata
234 290 r, sidedata = sidedataq.get()
235 291 tokens.release()
236 292 return False, (), sidedata
237 293
238 294 return sidedata_companion
239 295
240 296
241 297 def _get_simple_sidedata_adder(srcrepo, destrepo):
242 298 """The simple version of the sidedata computation
243 299
244 300 It just compute it in the same thread on request"""
245 301
246 302 def sidedatacompanion(revlog, rev):
247 303 sidedata = {}
248 304 if util.safehasattr(revlog, 'filteredrevs'): # this is a changelog
249 305 sidedata = _getsidedata(srcrepo, rev)
250 306 return False, (), sidedata
251 307
252 308 return sidedatacompanion
253 309
254 310
255 311 def getsidedataremover(srcrepo, destrepo):
256 312 def sidedatacompanion(revlog, rev):
257 313 f = ()
258 314 if util.safehasattr(revlog, 'filteredrevs'): # this is a changelog
259 315 if revlog.flags(rev) & sidedataflag.REVIDX_SIDEDATA:
260 316 f = (
261 317 sidedatamod.SD_P1COPIES,
262 318 sidedatamod.SD_P2COPIES,
263 319 sidedatamod.SD_FILESADDED,
264 320 sidedatamod.SD_FILESREMOVED,
265 321 )
266 322 return False, f, {}
267 323
268 324 return sidedatacompanion