localrepo: mark nullrev as never filtered...
marmoute
r44200:b38cd2e6 default
@@ -1,3729 +1,3729 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 )
26 26 from .pycompat import (
27 27 delattr,
28 28 getattr,
29 29 )
30 30 from . import (
31 31 bookmarks,
32 32 branchmap,
33 33 bundle2,
34 34 changegroup,
35 35 color,
36 36 context,
37 37 dirstate,
38 38 dirstateguard,
39 39 discovery,
40 40 encoding,
41 41 error,
42 42 exchange,
43 43 extensions,
44 44 filelog,
45 45 hook,
46 46 lock as lockmod,
47 47 match as matchmod,
48 48 merge as mergemod,
49 49 mergeutil,
50 50 namespaces,
51 51 narrowspec,
52 52 obsolete,
53 53 pathutil,
54 54 phases,
55 55 pushkey,
56 56 pycompat,
57 57 repoview,
58 58 revset,
59 59 revsetlang,
60 60 scmutil,
61 61 sparse,
62 62 store as storemod,
63 63 subrepoutil,
64 64 tags as tagsmod,
65 65 transaction,
66 66 txnutil,
67 67 util,
68 68 vfs as vfsmod,
69 69 )
70 70
71 71 from .interfaces import (
72 72 repository,
73 73 util as interfaceutil,
74 74 )
75 75
76 76 from .utils import (
77 77 procutil,
78 78 stringutil,
79 79 )
80 80
81 81 from .revlogutils import constants as revlogconst
82 82
83 83 release = lockmod.release
84 84 urlerr = util.urlerr
85 85 urlreq = util.urlreq
86 86
87 87 # set of (path, vfs-location) tuples. vfs-location is:
88 88 # - 'plain' for vfs relative paths
89 89 # - '' for svfs relative paths
90 90 _cachedfiles = set()
91 91
92 92
93 93 class _basefilecache(scmutil.filecache):
94 94 """All filecache usage on repo are done for logic that should be unfiltered
95 95 """
96 96
97 97 def __get__(self, repo, type=None):
98 98 if repo is None:
99 99 return self
100 100 # proxy to unfiltered __dict__ since filtered repo has no entry
101 101 unfi = repo.unfiltered()
102 102 try:
103 103 return unfi.__dict__[self.sname]
104 104 except KeyError:
105 105 pass
106 106 return super(_basefilecache, self).__get__(unfi, type)
107 107
108 108 def set(self, repo, value):
109 109 return super(_basefilecache, self).set(repo.unfiltered(), value)
110 110
111 111
112 112 class repofilecache(_basefilecache):
113 113 """filecache for files in .hg but outside of .hg/store"""
114 114
115 115 def __init__(self, *paths):
116 116 super(repofilecache, self).__init__(*paths)
117 117 for path in paths:
118 118 _cachedfiles.add((path, b'plain'))
119 119
120 120 def join(self, obj, fname):
121 121 return obj.vfs.join(fname)
122 122
123 123
124 124 class storecache(_basefilecache):
125 125 """filecache for files in the store"""
126 126
127 127 def __init__(self, *paths):
128 128 super(storecache, self).__init__(*paths)
129 129 for path in paths:
130 130 _cachedfiles.add((path, b''))
131 131
132 132 def join(self, obj, fname):
133 133 return obj.sjoin(fname)
134 134
135 135
136 136 class mixedrepostorecache(_basefilecache):
137 137 """filecache for a mix files in .hg/store and outside"""
138 138
139 139 def __init__(self, *pathsandlocations):
140 140 # scmutil.filecache only uses the path for passing back into our
141 141 # join(), so we can safely pass a list of paths and locations
142 142 super(mixedrepostorecache, self).__init__(*pathsandlocations)
143 143 _cachedfiles.update(pathsandlocations)
144 144
145 145 def join(self, obj, fnameandlocation):
146 146 fname, location = fnameandlocation
147 147 if location == b'plain':
148 148 return obj.vfs.join(fname)
149 149 else:
150 150 if location != b'':
151 151 raise error.ProgrammingError(
152 152 b'unexpected location: %s' % location
153 153 )
154 154 return obj.sjoin(fname)
155 155
156 156
157 157 def isfilecached(repo, name):
158 158 """check if a repo has already cached "name" filecache-ed property
159 159
160 160 This returns (cachedobj-or-None, iscached) tuple.
161 161 """
162 162 cacheentry = repo.unfiltered()._filecache.get(name, None)
163 163 if not cacheentry:
164 164 return None, False
165 165 return cacheentry.obj, True
166 166
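# A minimal usage sketch for the cache helpers above (illustrative
# only; assumes a loaded localrepository `repo`, whose `_bookmarks`
# property is declared with `mixedrepostorecache` further below):
def _examplefilecacheusage(repo):
    # Peek at the cache slot without populating it.
    obj, cached = isfilecached(repo, b'_bookmarks')
    if cached:
        return obj  # reuse the previously parsed value
    return repo._bookmarks  # first access reads and caches the file(s)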
167 167
168 168 class unfilteredpropertycache(util.propertycache):
169 169 """propertycache that apply to unfiltered repo only"""
170 170
171 171 def __get__(self, repo, type=None):
172 172 unfi = repo.unfiltered()
173 173 if unfi is repo:
174 174 return super(unfilteredpropertycache, self).__get__(unfi)
175 175 return getattr(unfi, self.name)
176 176
177 177
178 178 class filteredpropertycache(util.propertycache):
179 179 """propertycache that must take filtering in account"""
180 180
181 181 def cachevalue(self, obj, value):
182 182 object.__setattr__(obj, self.name, value)
183 183
184 184
185 185 def hasunfilteredcache(repo, name):
186 186 """check if a repo has an unfilteredpropertycache value for <name>"""
187 187 return name in vars(repo.unfiltered())
188 188
189 189
190 190 def unfilteredmethod(orig):
191 191 """decorate method that always need to be run on unfiltered version"""
192 192
193 193 def wrapper(repo, *args, **kwargs):
194 194 return orig(repo.unfiltered(), *args, **kwargs)
195 195
196 196 return wrapper
197 197
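# A self-contained sketch of the decorator above (toy class for
# illustration only; a real repoview's unfiltered() returns the base repo):
def _exampleunfilteredmethod():
    class examplerepo(object):
        def unfiltered(self):
            return self

        @unfilteredmethod
        def fragileoperation(self):
            # By the time the body runs, ``self`` is the unfiltered repo.
            return self

    repo = examplerepo()
    return repo.fragileoperation() is repo  # True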
198 198
199 199 moderncaps = {
200 200 b'lookup',
201 201 b'branchmap',
202 202 b'pushkey',
203 203 b'known',
204 204 b'getbundle',
205 205 b'unbundle',
206 206 }
207 207 legacycaps = moderncaps.union({b'changegroupsubset'})
208 208
209 209
210 210 @interfaceutil.implementer(repository.ipeercommandexecutor)
211 211 class localcommandexecutor(object):
212 212 def __init__(self, peer):
213 213 self._peer = peer
214 214 self._sent = False
215 215 self._closed = False
216 216
217 217 def __enter__(self):
218 218 return self
219 219
220 220 def __exit__(self, exctype, excvalue, exctb):
221 221 self.close()
222 222
223 223 def callcommand(self, command, args):
224 224 if self._sent:
225 225 raise error.ProgrammingError(
226 226 b'callcommand() cannot be used after sendcommands()'
227 227 )
228 228
229 229 if self._closed:
230 230 raise error.ProgrammingError(
231 231 b'callcommand() cannot be used after close()'
232 232 )
233 233
234 234 # We don't need to support anything fancy. Just call the named
235 235 # method on the peer and return a resolved future.
236 236 fn = getattr(self._peer, pycompat.sysstr(command))
237 237
238 238 f = pycompat.futures.Future()
239 239
240 240 try:
241 241 result = fn(**pycompat.strkwargs(args))
242 242 except Exception:
243 243 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
244 244 else:
245 245 f.set_result(result)
246 246
247 247 return f
248 248
249 249 def sendcommands(self):
250 250 self._sent = True
251 251
252 252 def close(self):
253 253 self._closed = True
254 254
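# A minimal usage sketch for the executor above (illustrative; works
# with any `peer` object exposing the named command as a method):
def _exampleexecutorusage(peer):
    with localcommandexecutor(peer) as e:
        # The local executor resolves futures eagerly, but callers still
        # follow the generic callcommand()/sendcommands()/result() dance.
        f = e.callcommand(b'heads', {})
        e.sendcommands()
    return f.result()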
255 255
256 256 @interfaceutil.implementer(repository.ipeercommands)
257 257 class localpeer(repository.peer):
258 258 '''peer for a local repo; reflects only the most recent API'''
259 259
260 260 def __init__(self, repo, caps=None):
261 261 super(localpeer, self).__init__()
262 262
263 263 if caps is None:
264 264 caps = moderncaps.copy()
265 265 self._repo = repo.filtered(b'served')
266 266 self.ui = repo.ui
267 267 self._caps = repo._restrictcapabilities(caps)
268 268
269 269 # Begin of _basepeer interface.
270 270
271 271 def url(self):
272 272 return self._repo.url()
273 273
274 274 def local(self):
275 275 return self._repo
276 276
277 277 def peer(self):
278 278 return self
279 279
280 280 def canpush(self):
281 281 return True
282 282
283 283 def close(self):
284 284 self._repo.close()
285 285
286 286 # End of _basepeer interface.
287 287
288 288 # Begin of _basewirecommands interface.
289 289
290 290 def branchmap(self):
291 291 return self._repo.branchmap()
292 292
293 293 def capabilities(self):
294 294 return self._caps
295 295
296 296 def clonebundles(self):
297 297 return self._repo.tryread(b'clonebundles.manifest')
298 298
299 299 def debugwireargs(self, one, two, three=None, four=None, five=None):
300 300 """Used to test argument passing over the wire"""
301 301 return b"%s %s %s %s %s" % (
302 302 one,
303 303 two,
304 304 pycompat.bytestr(three),
305 305 pycompat.bytestr(four),
306 306 pycompat.bytestr(five),
307 307 )
308 308
309 309 def getbundle(
310 310 self, source, heads=None, common=None, bundlecaps=None, **kwargs
311 311 ):
312 312 chunks = exchange.getbundlechunks(
313 313 self._repo,
314 314 source,
315 315 heads=heads,
316 316 common=common,
317 317 bundlecaps=bundlecaps,
318 318 **kwargs
319 319 )[1]
320 320 cb = util.chunkbuffer(chunks)
321 321
322 322 if exchange.bundle2requested(bundlecaps):
323 323 # When requesting a bundle2, getbundle returns a stream to make the
324 324 # wire level function happier. We need to build a proper object
325 325 # from it in local peer.
326 326 return bundle2.getunbundler(self.ui, cb)
327 327 else:
328 328 return changegroup.getunbundler(b'01', cb, None)
329 329
330 330 def heads(self):
331 331 return self._repo.heads()
332 332
333 333 def known(self, nodes):
334 334 return self._repo.known(nodes)
335 335
336 336 def listkeys(self, namespace):
337 337 return self._repo.listkeys(namespace)
338 338
339 339 def lookup(self, key):
340 340 return self._repo.lookup(key)
341 341
342 342 def pushkey(self, namespace, key, old, new):
343 343 return self._repo.pushkey(namespace, key, old, new)
344 344
345 345 def stream_out(self):
346 346 raise error.Abort(_(b'cannot perform stream clone against local peer'))
347 347
348 348 def unbundle(self, bundle, heads, url):
349 349 """apply a bundle on a repo
350 350
351 351 This function handles the repo locking itself."""
352 352 try:
353 353 try:
354 354 bundle = exchange.readbundle(self.ui, bundle, None)
355 355 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
356 356 if util.safehasattr(ret, b'getchunks'):
357 357 # This is a bundle20 object, turn it into an unbundler.
358 358 # This little dance should be dropped eventually when the
359 359 # API is finally improved.
360 360 stream = util.chunkbuffer(ret.getchunks())
361 361 ret = bundle2.getunbundler(self.ui, stream)
362 362 return ret
363 363 except Exception as exc:
364 364 # If the exception contains output salvaged from a bundle2
365 365 # reply, we need to make sure it is printed before continuing
366 366 # to fail. So we build a bundle2 with such output and consume
367 367 # it directly.
368 368 #
369 369 # This is not very elegant but allows a "simple" solution for
370 370 # issue4594
371 371 output = getattr(exc, '_bundle2salvagedoutput', ())
372 372 if output:
373 373 bundler = bundle2.bundle20(self._repo.ui)
374 374 for out in output:
375 375 bundler.addpart(out)
376 376 stream = util.chunkbuffer(bundler.getchunks())
377 377 b = bundle2.getunbundler(self.ui, stream)
378 378 bundle2.processbundle(self._repo, b)
379 379 raise
380 380 except error.PushRaced as exc:
381 381 raise error.ResponseError(
382 382 _(b'push failed:'), stringutil.forcebytestr(exc)
383 383 )
384 384
385 385 # End of _basewirecommands interface.
386 386
387 387 # Begin of peer interface.
388 388
389 389 def commandexecutor(self):
390 390 return localcommandexecutor(self)
391 391
392 392 # End of peer interface.
393 393
394 394
395 395 @interfaceutil.implementer(repository.ipeerlegacycommands)
396 396 class locallegacypeer(localpeer):
397 397 '''peer extension which implements legacy methods too; used for tests with
398 398 restricted capabilities'''
399 399
400 400 def __init__(self, repo):
401 401 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
402 402
403 403 # Begin of baselegacywirecommands interface.
404 404
405 405 def between(self, pairs):
406 406 return self._repo.between(pairs)
407 407
408 408 def branches(self, nodes):
409 409 return self._repo.branches(nodes)
410 410
411 411 def changegroup(self, nodes, source):
412 412 outgoing = discovery.outgoing(
413 413 self._repo, missingroots=nodes, missingheads=self._repo.heads()
414 414 )
415 415 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
416 416
417 417 def changegroupsubset(self, bases, heads, source):
418 418 outgoing = discovery.outgoing(
419 419 self._repo, missingroots=bases, missingheads=heads
420 420 )
421 421 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
422 422
423 423 # End of baselegacywirecommands interface.
424 424
425 425
426 426 # Increment the sub-version when the revlog v2 format changes to lock out old
427 427 # clients.
428 428 REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
429 429
430 430 # A repository with the sparserevlog feature will have delta chains that
431 431 # can spread over a larger span. Sparse reading cuts these large spans into
432 432 # pieces, so that each piece isn't too big.
433 433 # Without the sparserevlog capability, reading from the repository could use
434 434 # huge amounts of memory, because the whole span would be read at once,
435 435 # including all the intermediate revisions that aren't pertinent for the chain.
436 436 # This is why once a repository has enabled sparse-read, it becomes required.
437 437 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
438 438
439 439 # A repository with the sidedataflag requirement will allow storing extra
440 440 # information for revisions without altering their original hashes.
441 441 SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
442 442
443 443 # A repository with the copies-sidedata-changeset requirement will store
444 444 # copies related information in changeset's sidedata.
445 445 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
446 446
447 447 # Functions receiving (ui, features) that extensions can register to impact
448 448 # the ability to load repositories with custom requirements. Only
449 449 # functions defined in loaded extensions are called.
450 450 #
451 451 # The function receives a set of requirement strings that the repository
452 452 # is capable of opening. Functions will typically add elements to the
453 453 # set to reflect that the extension knows how to handle those requirements.
454 454 featuresetupfuncs = set()
455 455
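# A sketch of how a loaded extension registers such a function
# (hypothetical requirement name, shown as commented example code):
#
#     def featuresetup(ui, supported):
#         supported.add(b'exp-example-requirement')
#
#     featuresetupfuncs.add(featuresetup)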
456 456
457 457 def makelocalrepository(baseui, path, intents=None):
458 458 """Create a local repository object.
459 459
460 460 Given arguments needed to construct a local repository, this function
461 461 performs various early repository loading functionality (such as
462 462 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
463 463 the repository can be opened, derives a type suitable for representing
464 464 that repository, and returns an instance of it.
465 465
466 466 The returned object conforms to the ``repository.completelocalrepository``
467 467 interface.
468 468
469 469 The repository type is derived by calling a series of factory functions
470 470 for each aspect/interface of the final repository. These are defined by
471 471 ``REPO_INTERFACES``.
472 472
473 473 Each factory function is called to produce a type implementing a specific
474 474 interface. The cumulative list of returned types will be combined into a
475 475 new type and that type will be instantiated to represent the local
476 476 repository.
477 477
478 478 The factory functions each receive various state that may be consulted
479 479 as part of deriving a type.
480 480
481 481 Extensions should wrap these factory functions to customize repository type
482 482 creation. Note that an extension's wrapped function may be called even if
483 483 that extension is not loaded for the repo being constructed. Extensions
484 484 should check if their ``__name__`` appears in the
485 485 ``extensionmodulenames`` set passed to the factory function and no-op if
486 486 not.
487 487 """
488 488 ui = baseui.copy()
489 489 # Prevent copying repo configuration.
490 490 ui.copy = baseui.copy
491 491
492 492 # Working directory VFS rooted at repository root.
493 493 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
494 494
495 495 # Main VFS for .hg/ directory.
496 496 hgpath = wdirvfs.join(b'.hg')
497 497 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
498 498
499 499 # The .hg/ path should exist and should be a directory. All other
500 500 # cases are errors.
501 501 if not hgvfs.isdir():
502 502 try:
503 503 hgvfs.stat()
504 504 except OSError as e:
505 505 if e.errno != errno.ENOENT:
506 506 raise
507 507
508 508 raise error.RepoError(_(b'repository %s not found') % path)
509 509
510 510 # .hg/requires file contains a newline-delimited list of
511 511 # features/capabilities the opener (us) must have in order to use
512 512 # the repository. This file was introduced in Mercurial 0.9.2,
513 513 # which means very old repositories may not have one. We assume
514 514 # a missing file translates to no requirements.
515 515 try:
516 516 requirements = set(hgvfs.read(b'requires').splitlines())
517 517 except IOError as e:
518 518 if e.errno != errno.ENOENT:
519 519 raise
520 520 requirements = set()
521 521
522 522 # The .hg/hgrc file may load extensions or contain config options
523 523 # that influence repository construction. Attempt to load it and
524 524 # process any new extensions that it may have pulled in.
525 525 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
526 526 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
527 527 extensions.loadall(ui)
528 528 extensions.populateui(ui)
529 529
530 530 # Set of module names of extensions loaded for this repository.
531 531 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
532 532
533 533 supportedrequirements = gathersupportedrequirements(ui)
534 534
535 535 # We first validate the requirements are known.
536 536 ensurerequirementsrecognized(requirements, supportedrequirements)
537 537
538 538 # Then we validate that the known set is reasonable to use together.
539 539 ensurerequirementscompatible(ui, requirements)
540 540
541 541 # TODO there are unhandled edge cases related to opening repositories with
542 542 # shared storage. If storage is shared, we should also test for requirements
543 543 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
544 544 # that repo, as that repo may load extensions needed to open it. This is a
545 545 # bit complicated because we don't want the other hgrc to overwrite settings
546 546 # in this hgrc.
547 547 #
548 548 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
549 549 # file when sharing repos. But if a requirement is added after the share is
550 550 # performed, thereby introducing a new requirement for the opener, we may
551 551 # not see that and could encounter a run-time error interacting with
552 552 # that shared store since it has an unknown-to-us requirement.
553 553
554 554 # At this point, we know we should be capable of opening the repository.
555 555 # Now get on with doing that.
556 556
557 557 features = set()
558 558
559 559 # The "store" part of the repository holds versioned data. How it is
560 560 # accessed is determined by various requirements. The ``shared`` or
561 561 # ``relshared`` requirements indicate the store lives in the path contained
562 562 # in the ``.hg/sharedpath`` file. This is an absolute path for
563 563 # ``shared`` and relative to ``.hg/`` for ``relshared``.
564 564 if b'shared' in requirements or b'relshared' in requirements:
565 565 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
566 566 if b'relshared' in requirements:
567 567 sharedpath = hgvfs.join(sharedpath)
568 568
569 569 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
570 570
571 571 if not sharedvfs.exists():
572 572 raise error.RepoError(
573 573 _(b'.hg/sharedpath points to nonexistent directory %s')
574 574 % sharedvfs.base
575 575 )
576 576
577 577 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
578 578
579 579 storebasepath = sharedvfs.base
580 580 cachepath = sharedvfs.join(b'cache')
581 581 else:
582 582 storebasepath = hgvfs.base
583 583 cachepath = hgvfs.join(b'cache')
584 584 wcachepath = hgvfs.join(b'wcache')
585 585
586 586 # The store has changed over time and the exact layout is dictated by
587 587 # requirements. The store interface abstracts differences across all
588 588 # of them.
589 589 store = makestore(
590 590 requirements,
591 591 storebasepath,
592 592 lambda base: vfsmod.vfs(base, cacheaudited=True),
593 593 )
594 594 hgvfs.createmode = store.createmode
595 595
596 596 storevfs = store.vfs
597 597 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
598 598
599 599 # The cache vfs is used to manage cache files.
600 600 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
601 601 cachevfs.createmode = store.createmode
602 602 # The cache vfs is used to manage cache files related to the working copy
603 603 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
604 604 wcachevfs.createmode = store.createmode
605 605
606 606 # Now resolve the type for the repository object. We do this by repeatedly
607 607 # calling a factory function to produce types for specific aspects of the
608 608 # repo's operation. The aggregate returned types are used as base classes
609 609 # for a dynamically-derived type, which will represent our new repository.
610 610
611 611 bases = []
612 612 extrastate = {}
613 613
614 614 for iface, fn in REPO_INTERFACES:
615 615 # We pass all potentially useful state to give extensions tons of
616 616 # flexibility.
617 617 typ = fn()(
618 618 ui=ui,
619 619 intents=intents,
620 620 requirements=requirements,
621 621 features=features,
622 622 wdirvfs=wdirvfs,
623 623 hgvfs=hgvfs,
624 624 store=store,
625 625 storevfs=storevfs,
626 626 storeoptions=storevfs.options,
627 627 cachevfs=cachevfs,
628 628 wcachevfs=wcachevfs,
629 629 extensionmodulenames=extensionmodulenames,
630 630 extrastate=extrastate,
631 631 baseclasses=bases,
632 632 )
633 633
634 634 if not isinstance(typ, type):
635 635 raise error.ProgrammingError(
636 636 b'unable to construct type for %s' % iface
637 637 )
638 638
639 639 bases.append(typ)
640 640
641 641 # type() allows you to use characters in type names that wouldn't be
642 642 # recognized as Python symbols in source code. We abuse that to add
643 643 # rich information about our constructed repo.
644 644 name = pycompat.sysstr(
645 645 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
646 646 )
647 647
648 648 cls = type(name, tuple(bases), {})
649 649
650 650 return cls(
651 651 baseui=baseui,
652 652 ui=ui,
653 653 origroot=path,
654 654 wdirvfs=wdirvfs,
655 655 hgvfs=hgvfs,
656 656 requirements=requirements,
657 657 supportedrequirements=supportedrequirements,
658 658 sharedpath=storebasepath,
659 659 store=store,
660 660 cachevfs=cachevfs,
661 661 wcachevfs=wcachevfs,
662 662 features=features,
663 663 intents=intents,
664 664 )
665 665
666 666
667 667 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
668 668 """Load hgrc files/content into a ui instance.
669 669
670 670 This is called during repository opening to load any additional
671 671 config files or settings relevant to the current repository.
672 672
673 673 Returns a bool indicating whether any additional configs were loaded.
674 674
675 675 Extensions should monkeypatch this function to modify how per-repo
676 676 configs are loaded. For example, an extension may wish to pull in
677 677 configs from alternate files or sources.
678 678 """
679 679 try:
680 680 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
681 681 return True
682 682 except IOError:
683 683 return False
684 684
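# A sketch of the monkeypatching described in the docstring above
# (hypothetical extension code; `hgrc-extra` is an invented file name):
#
#     from mercurial import extensions, localrepo
#
#     def _loadhgrcwrapper(orig, ui, wdirvfs, hgvfs, requirements):
#         loaded = orig(ui, wdirvfs, hgvfs, requirements)
#         try:
#             ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#             return True
#         except IOError:
#             return loaded
#
#     extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrcwrapper)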
685 685
686 686 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
687 687 """Perform additional actions after .hg/hgrc is loaded.
688 688
689 689 This function is called during repository loading immediately after
690 690 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
691 691
692 692 The function can be used to validate configs, automatically add
693 693 options (including extensions) based on requirements, etc.
694 694 """
695 695
696 696 # Map of requirements to list of extensions to load automatically when
697 697 # requirement is present.
698 698 autoextensions = {
699 699 b'largefiles': [b'largefiles'],
700 700 b'lfs': [b'lfs'],
701 701 }
702 702
703 703 for requirement, names in sorted(autoextensions.items()):
704 704 if requirement not in requirements:
705 705 continue
706 706
707 707 for name in names:
708 708 if not ui.hasconfig(b'extensions', name):
709 709 ui.setconfig(b'extensions', name, b'', source=b'autoload')
710 710
711 711
712 712 def gathersupportedrequirements(ui):
713 713 """Determine the complete set of recognized requirements."""
714 714 # Start with all requirements supported by this file.
715 715 supported = set(localrepository._basesupported)
716 716
717 717 # Execute ``featuresetupfuncs`` entries if they belong to an extension
718 718 # relevant to this ui instance.
719 719 modules = {m.__name__ for n, m in extensions.extensions(ui)}
720 720
721 721 for fn in featuresetupfuncs:
722 722 if fn.__module__ in modules:
723 723 fn(ui, supported)
724 724
725 725 # Add derived requirements from registered compression engines.
726 726 for name in util.compengines:
727 727 engine = util.compengines[name]
728 728 if engine.available() and engine.revlogheader():
729 729 supported.add(b'exp-compression-%s' % name)
730 730 if engine.name() == b'zstd':
731 731 supported.add(b'revlog-compression-zstd')
732 732
733 733 return supported
734 734
735 735
736 736 def ensurerequirementsrecognized(requirements, supported):
737 737 """Validate that a set of local requirements is recognized.
738 738
739 739 Receives a set of requirements. Raises an ``error.RepoError`` if there
740 740 exists any requirement in that set that currently loaded code doesn't
741 741 recognize.
742 742
743 743 Returns a set of supported requirements.
744 744 """
745 745 missing = set()
746 746
747 747 for requirement in requirements:
748 748 if requirement in supported:
749 749 continue
750 750
751 751 if not requirement or not requirement[0:1].isalnum():
752 752 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
753 753
754 754 missing.add(requirement)
755 755
756 756 if missing:
757 757 raise error.RequirementError(
758 758 _(b'repository requires features unknown to this Mercurial: %s')
759 759 % b' '.join(sorted(missing)),
760 760 hint=_(
761 761 b'see https://mercurial-scm.org/wiki/MissingRequirement '
762 762 b'for more information'
763 763 ),
764 764 )
765 765
766 766
767 767 def ensurerequirementscompatible(ui, requirements):
768 768 """Validates that a set of recognized requirements is mutually compatible.
769 769
770 770 Some requirements may not be compatible with others or require
771 771 config options that aren't enabled. This function is called during
772 772 repository opening to ensure that the set of requirements needed
773 773 to open a repository is sane and compatible with config options.
774 774
775 775 Extensions can monkeypatch this function to perform additional
776 776 checking.
777 777
778 778 ``error.RepoError`` should be raised on failure.
779 779 """
780 780 if b'exp-sparse' in requirements and not sparse.enabled:
781 781 raise error.RepoError(
782 782 _(
783 783 b'repository is using sparse feature but '
784 784 b'sparse is not enabled; enable the '
785 785 b'"sparse" extensions to access'
786 786 )
787 787 )
788 788
789 789
790 790 def makestore(requirements, path, vfstype):
791 791 """Construct a storage object for a repository."""
792 792 if b'store' in requirements:
793 793 if b'fncache' in requirements:
794 794 return storemod.fncachestore(
795 795 path, vfstype, b'dotencode' in requirements
796 796 )
797 797
798 798 return storemod.encodedstore(path, vfstype)
799 799
800 800 return storemod.basicstore(path, vfstype)
801 801
802 802
803 803 def resolvestorevfsoptions(ui, requirements, features):
804 804 """Resolve the options to pass to the store vfs opener.
805 805
806 806 The returned dict is used to influence behavior of the storage layer.
807 807 """
808 808 options = {}
809 809
810 810 if b'treemanifest' in requirements:
811 811 options[b'treemanifest'] = True
812 812
813 813 # experimental config: format.manifestcachesize
814 814 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
815 815 if manifestcachesize is not None:
816 816 options[b'manifestcachesize'] = manifestcachesize
817 817
818 818 # In the absence of another requirement superseding a revlog-related
819 819 # requirement, we have to assume the repo is using revlog version 0.
820 820 # This revlog format is super old and we don't bother trying to parse
821 821 # opener options for it because those options wouldn't do anything
822 822 # meaningful on such old repos.
823 823 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
824 824 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
825 825 else: # explicitly mark repo as using revlogv0
826 826 options[b'revlogv0'] = True
827 827
828 828 if COPIESSDC_REQUIREMENT in requirements:
829 829 options[b'copies-storage'] = b'changeset-sidedata'
830 830 else:
831 831 writecopiesto = ui.config(b'experimental', b'copies.write-to')
832 832 copiesextramode = (b'changeset-only', b'compatibility')
833 833 if writecopiesto in copiesextramode:
834 834 options[b'copies-storage'] = b'extra'
835 835
836 836 return options
837 837
838 838
839 839 def resolverevlogstorevfsoptions(ui, requirements, features):
840 840 """Resolve opener options specific to revlogs."""
841 841
842 842 options = {}
843 843 options[b'flagprocessors'] = {}
844 844
845 845 if b'revlogv1' in requirements:
846 846 options[b'revlogv1'] = True
847 847 if REVLOGV2_REQUIREMENT in requirements:
848 848 options[b'revlogv2'] = True
849 849
850 850 if b'generaldelta' in requirements:
851 851 options[b'generaldelta'] = True
852 852
853 853 # experimental config: format.chunkcachesize
854 854 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
855 855 if chunkcachesize is not None:
856 856 options[b'chunkcachesize'] = chunkcachesize
857 857
858 858 deltabothparents = ui.configbool(
859 859 b'storage', b'revlog.optimize-delta-parent-choice'
860 860 )
861 861 options[b'deltabothparents'] = deltabothparents
862 862
863 863 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
864 864 lazydeltabase = False
865 865 if lazydelta:
866 866 lazydeltabase = ui.configbool(
867 867 b'storage', b'revlog.reuse-external-delta-parent'
868 868 )
869 869 if lazydeltabase is None:
870 870 lazydeltabase = not scmutil.gddeltaconfig(ui)
871 871 options[b'lazydelta'] = lazydelta
872 872 options[b'lazydeltabase'] = lazydeltabase
873 873
874 874 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
875 875 if 0 <= chainspan:
876 876 options[b'maxdeltachainspan'] = chainspan
877 877
878 878 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
879 879 if mmapindexthreshold is not None:
880 880 options[b'mmapindexthreshold'] = mmapindexthreshold
881 881
882 882 withsparseread = ui.configbool(b'experimental', b'sparse-read')
883 883 srdensitythres = float(
884 884 ui.config(b'experimental', b'sparse-read.density-threshold')
885 885 )
886 886 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
887 887 options[b'with-sparse-read'] = withsparseread
888 888 options[b'sparse-read-density-threshold'] = srdensitythres
889 889 options[b'sparse-read-min-gap-size'] = srmingapsize
890 890
891 891 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
892 892 options[b'sparse-revlog'] = sparserevlog
893 893 if sparserevlog:
894 894 options[b'generaldelta'] = True
895 895
896 896 sidedata = SIDEDATA_REQUIREMENT in requirements
897 897 options[b'side-data'] = sidedata
898 898
899 899 maxchainlen = None
900 900 if sparserevlog:
901 901 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
902 902 # experimental config: format.maxchainlen
903 903 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
904 904 if maxchainlen is not None:
905 905 options[b'maxchainlen'] = maxchainlen
906 906
907 907 for r in requirements:
908 908 # we allow multiple compression engine requirements to co-exist because
909 909 # strictly speaking, revlog seems to support mixed compression styles.
910 910 #
911 911 # The compression used for new entries will be "the last one"
912 912 prefix = r.startswith
913 913 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
914 914 options[b'compengine'] = r.split(b'-', 2)[2]
915 915
916 916 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
917 917 if options[b'zlib.level'] is not None:
918 918 if not (0 <= options[b'zlib.level'] <= 9):
919 919 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
920 920 raise error.Abort(msg % options[b'zlib.level'])
921 921 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
922 922 if options[b'zstd.level'] is not None:
923 923 if not (0 <= options[b'zstd.level'] <= 22):
924 924 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
925 925 raise error.Abort(msg % options[b'zstd.level'])
926 926
927 927 if repository.NARROW_REQUIREMENT in requirements:
928 928 options[b'enableellipsis'] = True
929 929
930 930 return options
931 931
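# Example of the requirement-to-engine mapping performed above
# (illustrative values):
#
#     b'revlog-compression-zstd'.split(b'-', 2)[2] -> b'zstd'
#     b'exp-compression-zlib'.split(b'-', 2)[2]    -> b'zlib'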
932 932
933 933 def makemain(**kwargs):
934 934 """Produce a type conforming to ``ilocalrepositorymain``."""
935 935 return localrepository
936 936
937 937
938 938 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
939 939 class revlogfilestorage(object):
940 940 """File storage when using revlogs."""
941 941
942 942 def file(self, path):
943 943 if path[0] == b'/':
944 944 path = path[1:]
945 945
946 946 return filelog.filelog(self.svfs, path)
947 947
948 948
949 949 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
950 950 class revlognarrowfilestorage(object):
951 951 """File storage when using revlogs and narrow files."""
952 952
953 953 def file(self, path):
954 954 if path[0] == b'/':
955 955 path = path[1:]
956 956
957 957 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
958 958
959 959
960 960 def makefilestorage(requirements, features, **kwargs):
961 961 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
962 962 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
963 963 features.add(repository.REPO_FEATURE_STREAM_CLONE)
964 964
965 965 if repository.NARROW_REQUIREMENT in requirements:
966 966 return revlognarrowfilestorage
967 967 else:
968 968 return revlogfilestorage
969 969
970 970
971 971 # List of repository interfaces and factory functions for them. Each
972 972 # will be called in order during ``makelocalrepository()`` to iteratively
973 973 # derive the final type for a local repository instance. We capture the
974 974 # function as a lambda so we don't hold a reference and the module-level
975 975 # functions can be wrapped.
976 976 REPO_INTERFACES = [
977 977 (repository.ilocalrepositorymain, lambda: makemain),
978 978 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
979 979 ]
980 980
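# A self-contained sketch of the type composition driven by the list
# above (toy aspect classes; ``makelocalrepository()`` does the same
# with the factory-produced types):
def _exampletypecomposition():
    class mainaspect(object):
        def root(self):
            return b'/'

    class filestorageaspect(object):
        def file(self, path):
            return path

    bases = (mainaspect, filestorageaspect)
    cls = type('derivedrepo:example', bases, {})
    repo = cls()
    return repo.root(), repo.file(b'a')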
981 981
982 982 @interfaceutil.implementer(repository.ilocalrepositorymain)
983 983 class localrepository(object):
984 984 """Main class for representing local repositories.
985 985
986 986 All local repositories are instances of this class.
987 987
988 988 Constructed on its own, instances of this class are not usable as
989 989 repository objects. To obtain a usable repository object, call
990 990 ``hg.repository()``, ``localrepo.instance()``, or
991 991 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
992 992 ``instance()`` adds support for creating new repositories.
993 993 ``hg.repository()`` adds more extension integration, including calling
994 994 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
995 995 used.
996 996 """
997 997
998 998 # obsolete experimental requirements:
999 999 # - manifestv2: An experimental new manifest format that allowed
1000 1000 # for stem compression of long paths. Experiment ended up not
1001 1001 # being successful (repository sizes went up due to worse delta
1002 1002 # chains), and the code was deleted in 4.6.
1003 1003 supportedformats = {
1004 1004 b'revlogv1',
1005 1005 b'generaldelta',
1006 1006 b'treemanifest',
1007 1007 COPIESSDC_REQUIREMENT,
1008 1008 REVLOGV2_REQUIREMENT,
1009 1009 SIDEDATA_REQUIREMENT,
1010 1010 SPARSEREVLOG_REQUIREMENT,
1011 1011 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1012 1012 }
1013 1013 _basesupported = supportedformats | {
1014 1014 b'store',
1015 1015 b'fncache',
1016 1016 b'shared',
1017 1017 b'relshared',
1018 1018 b'dotencode',
1019 1019 b'exp-sparse',
1020 1020 b'internal-phase',
1021 1021 }
1022 1022
1023 1023 # list of prefixes for files which can be written without 'wlock'
1024 1024 # Extensions should extend this list when needed
1025 1025 _wlockfreeprefix = {
1026 1026 # We might consider requiring 'wlock' for the next
1027 1027 # two, but pretty much all the existing code assume
1028 1028 # wlock is not needed so we keep them excluded for
1029 1029 # now.
1030 1030 b'hgrc',
1031 1031 b'requires',
1032 1032 # XXX cache is a complicated business; someone
1033 1033 # should investigate this in depth at some point
1034 1034 b'cache/',
1035 1035 # XXX shouldn't be dirstate covered by the wlock?
1036 1036 b'dirstate',
1037 1037 # XXX bisect was still a bit too messy at the time
1038 1038 # this changeset was introduced. Someone should fix
1039 1039 # the remaining bit and drop this line
1040 1040 b'bisect.state',
1041 1041 }
1042 1042
1043 1043 def __init__(
1044 1044 self,
1045 1045 baseui,
1046 1046 ui,
1047 1047 origroot,
1048 1048 wdirvfs,
1049 1049 hgvfs,
1050 1050 requirements,
1051 1051 supportedrequirements,
1052 1052 sharedpath,
1053 1053 store,
1054 1054 cachevfs,
1055 1055 wcachevfs,
1056 1056 features,
1057 1057 intents=None,
1058 1058 ):
1059 1059 """Create a new local repository instance.
1060 1060
1061 1061 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1062 1062 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1063 1063 object.
1064 1064
1065 1065 Arguments:
1066 1066
1067 1067 baseui
1068 1068 ``ui.ui`` instance that ``ui`` argument was based off of.
1069 1069
1070 1070 ui
1071 1071 ``ui.ui`` instance for use by the repository.
1072 1072
1073 1073 origroot
1074 1074 ``bytes`` path to working directory root of this repository.
1075 1075
1076 1076 wdirvfs
1077 1077 ``vfs.vfs`` rooted at the working directory.
1078 1078
1079 1079 hgvfs
1080 1080 ``vfs.vfs`` rooted at .hg/
1081 1081
1082 1082 requirements
1083 1083 ``set`` of bytestrings representing repository opening requirements.
1084 1084
1085 1085 supportedrequirements
1086 1086 ``set`` of bytestrings representing repository requirements that we
1087 1087 know how to open. May be a superset of ``requirements``.
1088 1088
1089 1089 sharedpath
1090 1090 ``bytes`` defining the path to the storage base directory. Points to a
1091 1091 ``.hg/`` directory somewhere.
1092 1092
1093 1093 store
1094 1094 ``store.basicstore`` (or derived) instance providing access to
1095 1095 versioned storage.
1096 1096
1097 1097 cachevfs
1098 1098 ``vfs.vfs`` used for cache files.
1099 1099
1100 1100 wcachevfs
1101 1101 ``vfs.vfs`` used for cache files related to the working copy.
1102 1102
1103 1103 features
1104 1104 ``set`` of bytestrings defining features/capabilities of this
1105 1105 instance.
1106 1106
1107 1107 intents
1108 1108 ``set`` of system strings indicating what this repo will be used
1109 1109 for.
1110 1110 """
1111 1111 self.baseui = baseui
1112 1112 self.ui = ui
1113 1113 self.origroot = origroot
1114 1114 # vfs rooted at working directory.
1115 1115 self.wvfs = wdirvfs
1116 1116 self.root = wdirvfs.base
1117 1117 # vfs rooted at .hg/. Used to access most non-store paths.
1118 1118 self.vfs = hgvfs
1119 1119 self.path = hgvfs.base
1120 1120 self.requirements = requirements
1121 1121 self.supported = supportedrequirements
1122 1122 self.sharedpath = sharedpath
1123 1123 self.store = store
1124 1124 self.cachevfs = cachevfs
1125 1125 self.wcachevfs = wcachevfs
1126 1126 self.features = features
1127 1127
1128 1128 self.filtername = None
1129 1129
1130 1130 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1131 1131 b'devel', b'check-locks'
1132 1132 ):
1133 1133 self.vfs.audit = self._getvfsward(self.vfs.audit)
1134 1134 # A list of callbacks to shape the phase if no data were found.
1135 1135 # Callbacks are in the form: func(repo, roots) --> processed root.
1136 1136 # This list is to be filled by extensions during repo setup
1137 1137 self._phasedefaults = []
1138 1138
1139 1139 color.setup(self.ui)
1140 1140
1141 1141 self.spath = self.store.path
1142 1142 self.svfs = self.store.vfs
1143 1143 self.sjoin = self.store.join
1144 1144 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1145 1145 b'devel', b'check-locks'
1146 1146 ):
1147 1147 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1148 1148 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1149 1149 else: # standard vfs
1150 1150 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1151 1151
1152 1152 self._dirstatevalidatewarned = False
1153 1153
1154 1154 self._branchcaches = branchmap.BranchMapCache()
1155 1155 self._revbranchcache = None
1156 1156 self._filterpats = {}
1157 1157 self._datafilters = {}
1158 1158 self._transref = self._lockref = self._wlockref = None
1159 1159
1160 1160 # A cache for various files under .hg/ that tracks file changes,
1161 1161 # (used by the filecache decorator)
1162 1162 #
1163 1163 # Maps a property name to its util.filecacheentry
1164 1164 self._filecache = {}
1165 1165
1166 1166 # hold sets of revision to be filtered
1167 1167 # should be cleared when something might have changed the filter value:
1168 1168 # - new changesets,
1169 1169 # - phase change,
1170 1170 # - new obsolescence marker,
1171 1171 # - working directory parent change,
1172 1172 # - bookmark changes
1173 1173 self.filteredrevcache = {}
1174 1174
1175 1175 # post-dirstate-status hooks
1176 1176 self._postdsstatus = []
1177 1177
1178 1178 # generic mapping between names and nodes
1179 1179 self.names = namespaces.namespaces()
1180 1180
1181 1181 # Key to signature value.
1182 1182 self._sparsesignaturecache = {}
1183 1183 # Signature to cached matcher instance.
1184 1184 self._sparsematchercache = {}
1185 1185
1186 1186 self._extrafilterid = repoview.extrafilter(ui)
1187 1187
1188 1188 self.filecopiesmode = None
1189 1189 if COPIESSDC_REQUIREMENT in self.requirements:
1190 1190 self.filecopiesmode = b'changeset-sidedata'
1191 1191
1192 1192 def _getvfsward(self, origfunc):
1193 1193 """build a ward for self.vfs"""
1194 1194 rref = weakref.ref(self)
1195 1195
1196 1196 def checkvfs(path, mode=None):
1197 1197 ret = origfunc(path, mode=mode)
1198 1198 repo = rref()
1199 1199 if (
1200 1200 repo is None
1201 1201 or not util.safehasattr(repo, b'_wlockref')
1202 1202 or not util.safehasattr(repo, b'_lockref')
1203 1203 ):
1204 1204 return
1205 1205 if mode in (None, b'r', b'rb'):
1206 1206 return
1207 1207 if path.startswith(repo.path):
1208 1208 # truncate name relative to the repository (.hg)
1209 1209 path = path[len(repo.path) + 1 :]
1210 1210 if path.startswith(b'cache/'):
1211 1211 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1212 1212 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1213 1213 if path.startswith(b'journal.') or path.startswith(b'undo.'):
1214 1214 # journal is covered by 'lock'
1215 1215 if repo._currentlock(repo._lockref) is None:
1216 1216 repo.ui.develwarn(
1217 1217 b'write with no lock: "%s"' % path,
1218 1218 stacklevel=3,
1219 1219 config=b'check-locks',
1220 1220 )
1221 1221 elif repo._currentlock(repo._wlockref) is None:
1222 1222 # rest of vfs files are covered by 'wlock'
1223 1223 #
1224 1224 # exclude special files
1225 1225 for prefix in self._wlockfreeprefix:
1226 1226 if path.startswith(prefix):
1227 1227 return
1228 1228 repo.ui.develwarn(
1229 1229 b'write with no wlock: "%s"' % path,
1230 1230 stacklevel=3,
1231 1231 config=b'check-locks',
1232 1232 )
1233 1233 return ret
1234 1234
1235 1235 return checkvfs
1236 1236
1237 1237 def _getsvfsward(self, origfunc):
1238 1238 """build a ward for self.svfs"""
1239 1239 rref = weakref.ref(self)
1240 1240
1241 1241 def checksvfs(path, mode=None):
1242 1242 ret = origfunc(path, mode=mode)
1243 1243 repo = rref()
1244 1244 if repo is None or not util.safehasattr(repo, b'_lockref'):
1245 1245 return
1246 1246 if mode in (None, b'r', b'rb'):
1247 1247 return
1248 1248 if path.startswith(repo.sharedpath):
1249 1249 # truncate name relative to the repository (.hg)
1250 1250 path = path[len(repo.sharedpath) + 1 :]
1251 1251 if repo._currentlock(repo._lockref) is None:
1252 1252 repo.ui.develwarn(
1253 1253 b'write with no lock: "%s"' % path, stacklevel=4
1254 1254 )
1255 1255 return ret
1256 1256
1257 1257 return checksvfs
1258 1258
1259 1259 def close(self):
1260 1260 self._writecaches()
1261 1261
1262 1262 def _writecaches(self):
1263 1263 if self._revbranchcache:
1264 1264 self._revbranchcache.write()
1265 1265
1266 1266 def _restrictcapabilities(self, caps):
1267 1267 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1268 1268 caps = set(caps)
1269 1269 capsblob = bundle2.encodecaps(
1270 1270 bundle2.getrepocaps(self, role=b'client')
1271 1271 )
1272 1272 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1273 1273 return caps
1274 1274
1275 1275 def _writerequirements(self):
1276 1276 scmutil.writerequires(self.vfs, self.requirements)
1277 1277
1278 1278 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1279 1279 # self -> auditor -> self._checknested -> self
1280 1280
1281 1281 @property
1282 1282 def auditor(self):
1283 1283 # This is only used by context.workingctx.match in order to
1284 1284 # detect files in subrepos.
1285 1285 return pathutil.pathauditor(self.root, callback=self._checknested)
1286 1286
1287 1287 @property
1288 1288 def nofsauditor(self):
1289 1289 # This is only used by context.basectx.match in order to detect
1290 1290 # files in subrepos.
1291 1291 return pathutil.pathauditor(
1292 1292 self.root, callback=self._checknested, realfs=False, cached=True
1293 1293 )
1294 1294
1295 1295 def _checknested(self, path):
1296 1296 """Determine if path is a legal nested repository."""
1297 1297 if not path.startswith(self.root):
1298 1298 return False
1299 1299 subpath = path[len(self.root) + 1 :]
1300 1300 normsubpath = util.pconvert(subpath)
1301 1301
1302 1302 # XXX: Checking against the current working copy is wrong in
1303 1303 # the sense that it can reject things like
1304 1304 #
1305 1305 # $ hg cat -r 10 sub/x.txt
1306 1306 #
1307 1307 # if sub/ is no longer a subrepository in the working copy
1308 1308 # parent revision.
1309 1309 #
1310 1310 # However, it can of course also allow things that would have
1311 1311 # been rejected before, such as the above cat command if sub/
1312 1312 # is a subrepository now, but was a normal directory before.
1313 1313 # The old path auditor would have rejected by mistake since it
1314 1314 # panics when it sees sub/.hg/.
1315 1315 #
1316 1316 # All in all, checking against the working copy seems sensible
1317 1317 # since we want to prevent access to nested repositories on
1318 1318 # the filesystem *now*.
1319 1319 ctx = self[None]
1320 1320 parts = util.splitpath(subpath)
1321 1321 while parts:
1322 1322 prefix = b'/'.join(parts)
1323 1323 if prefix in ctx.substate:
1324 1324 if prefix == normsubpath:
1325 1325 return True
1326 1326 else:
1327 1327 sub = ctx.sub(prefix)
1328 1328 return sub.checknested(subpath[len(prefix) + 1 :])
1329 1329 else:
1330 1330 parts.pop()
1331 1331 return False
1332 1332
1333 1333 def peer(self):
1334 1334 return localpeer(self) # not cached to avoid reference cycle
1335 1335
1336 1336 def unfiltered(self):
1337 1337 """Return unfiltered version of the repository
1338 1338
1339 1339 Intended to be overwritten by filtered repo."""
1340 1340 return self
1341 1341
1342 1342 def filtered(self, name, visibilityexceptions=None):
1343 1343 """Return a filtered version of a repository
1344 1344
1345 1345 The `name` parameter is the identifier of the requested view. This
1346 1346 will return a repoview object set "exactly" to the specified view.
1347 1347
1348 1348 This function does not apply recursive filtering to a repository. For
1349 1349 example calling `repo.filtered("served")` will return a repoview using
1350 1350 the "served" view, regardless of the initial view used by `repo`.
1351 1351
1352 1352 In other words, there is always only one level of `repoview` "filtering".
1353 1353 """
1354 1354 if self._extrafilterid is not None and b'%' not in name:
1355 1355 name = name + b'%' + self._extrafilterid
1356 1356
1357 1357 cls = repoview.newtype(self.unfiltered().__class__)
1358 1358 return cls(self, name, visibilityexceptions)
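
# Usage sketch for the view methods above (illustrative; `repo` is any
# localrepository instance):
#
#     served = repo.filtered(b'served')    # hides hidden + secret csets
#     visible = repo.filtered(b'visible')  # hides only hidden csets
#     assert visible.unfiltered() is repo.unfiltered()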
1359 1359
1360 1360 @mixedrepostorecache(
1361 1361 (b'bookmarks', b'plain'),
1362 1362 (b'bookmarks.current', b'plain'),
1363 1363 (b'bookmarks', b''),
1364 1364 (b'00changelog.i', b''),
1365 1365 )
1366 1366 def _bookmarks(self):
1367 1367 # Since the multiple files involved in the transaction cannot be
1368 1368 # written atomically (with current repository format), there is a race
1369 1369 # condition here.
1370 1370 #
1371 1371 # 1) changelog content A is read
1372 1372 # 2) outside transaction update changelog to content B
1373 1373 # 3) outside transaction update bookmark file referring to content B
1374 1374 # 4) bookmarks file content is read and filtered against changelog-A
1375 1375 #
1376 1376 # When this happens, bookmarks against nodes missing from A are dropped.
1377 1377 #
1378 1378 # Having this happen during read is not great, but it becomes worse
1379 1379 # when it happens during write, because the bookmarks to the "unknown"
1380 1380 # nodes will be dropped for good. However, writes happen within locks.
1381 1381 # This locking makes it possible to have a race-free consistent read.
1382 1382 # For this purpose, data read from disk before locking is
1383 1383 # "invalidated" right after the locks are taken. These invalidations are
1384 1384 # "light": the `filecache` mechanism keeps the data in memory and will
1385 1385 # reuse it if the underlying files did not change. Not parsing the
1386 1386 # same data multiple times helps performance.
1387 1387 #
1388 1388 # Unfortunately in the case described above, the files tracked by the
1389 1389 # bookmarks file cache might not have changed, but the in-memory
1390 1390 # content is still "wrong" because we used an older changelog content
1391 1391 # to process the on-disk data. So after locking, the changelog would be
1392 1392 # refreshed but `_bookmarks` would be preserved.
1393 1393 # Adding `00changelog.i` to the list of tracked files is not
1394 1394 # enough, because at the time we build the content for `_bookmarks` in
1395 1395 # (4), the changelog file has already diverged from the content used
1396 1396 # for loading `changelog` in (1)
1397 1397 #
1398 1398 # To prevent the issue, we force the changelog to be explicitly
1399 1399 # reloaded while computing `_bookmarks`. The data race can still happen
1400 1400 # without the lock (with a narrower window), but it would no longer go
1401 1401 # undetected during the lock time refresh.
1402 1402 #
1403 1403 # The new schedule is as follows:
1404 1404 #
1405 1405 # 1) filecache logic detect that `_bookmarks` needs to be computed
1406 1406 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1407 1407 # 3) We force `changelog` filecache to be tested
1408 1408 # 4) cachestat for `changelog` are captured (for changelog)
1409 1409 # 5) `_bookmarks` is computed and cached
1410 1410 #
1411 1411 # The step in (3) ensures we have a changelog at least as recent as the
1412 1412 # cache stat computed in (1). As a result at locking time:
1413 1413 # * if the changelog did not change since (1) -> we can reuse the data
1414 1414 # * otherwise -> the bookmarks get refreshed.
1415 1415 self._refreshchangelog()
1416 1416 return bookmarks.bmstore(self)
1417 1417
1418 1418 def _refreshchangelog(self):
1419 1419 """make sure the in memory changelog match the on-disk one"""
1420 1420 if 'changelog' in vars(self) and self.currenttransaction() is None:
1421 1421 del self.changelog
1422 1422
1423 1423 @property
1424 1424 def _activebookmark(self):
1425 1425 return self._bookmarks.active
1426 1426
1427 1427 # _phasesets depend on changelog. what we need is to call
1428 1428 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1429 1429 # can't be easily expressed in filecache mechanism.
1430 1430 @storecache(b'phaseroots', b'00changelog.i')
1431 1431 def _phasecache(self):
1432 1432 return phases.phasecache(self, self._phasedefaults)
1433 1433
1434 1434 @storecache(b'obsstore')
1435 1435 def obsstore(self):
1436 1436 return obsolete.makestore(self.ui, self)
1437 1437
1438 1438 @storecache(b'00changelog.i')
1439 1439 def changelog(self):
1440 1440 return self.store.changelog(txnutil.mayhavepending(self.root))
1441 1441
1442 1442 @storecache(b'00manifest.i')
1443 1443 def manifestlog(self):
1444 1444 return self.store.manifestlog(self, self._storenarrowmatch)
1445 1445
1446 1446 @repofilecache(b'dirstate')
1447 1447 def dirstate(self):
1448 1448 return self._makedirstate()
1449 1449
1450 1450 def _makedirstate(self):
1451 1451 """Extension point for wrapping the dirstate per-repo."""
1452 1452 sparsematchfn = lambda: sparse.matcher(self)
1453 1453
1454 1454 return dirstate.dirstate(
1455 1455 self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
1456 1456 )
1457 1457
1458 1458 def _dirstatevalidate(self, node):
1459 1459 try:
1460 1460 self.changelog.rev(node)
1461 1461 return node
1462 1462 except error.LookupError:
1463 1463 if not self._dirstatevalidatewarned:
1464 1464 self._dirstatevalidatewarned = True
1465 1465 self.ui.warn(
1466 1466 _(b"warning: ignoring unknown working parent %s!\n")
1467 1467 % short(node)
1468 1468 )
1469 1469 return nullid
1470 1470
1471 1471 @storecache(narrowspec.FILENAME)
1472 1472 def narrowpats(self):
1473 1473 """matcher patterns for this repository's narrowspec
1474 1474
1475 1475 A tuple of (includes, excludes).
1476 1476 """
1477 1477 return narrowspec.load(self)
1478 1478
1479 1479 @storecache(narrowspec.FILENAME)
1480 1480 def _storenarrowmatch(self):
1481 1481 if repository.NARROW_REQUIREMENT not in self.requirements:
1482 1482 return matchmod.always()
1483 1483 include, exclude = self.narrowpats
1484 1484 return narrowspec.match(self.root, include=include, exclude=exclude)
1485 1485
1486 1486 @storecache(narrowspec.FILENAME)
1487 1487 def _narrowmatch(self):
1488 1488 if repository.NARROW_REQUIREMENT not in self.requirements:
1489 1489 return matchmod.always()
1490 1490 narrowspec.checkworkingcopynarrowspec(self)
1491 1491 include, exclude = self.narrowpats
1492 1492 return narrowspec.match(self.root, include=include, exclude=exclude)
1493 1493
1494 1494 def narrowmatch(self, match=None, includeexact=False):
1495 1495 """matcher corresponding the the repo's narrowspec
1496 1496
1497 1497 If `match` is given, then that will be intersected with the narrow
1498 1498 matcher.
1499 1499
1500 1500 If `includeexact` is True, then any exact matches from `match` will
1501 1501 be included even if they're outside the narrowspec.
1502 1502 """
1503 1503 if match:
1504 1504 if includeexact and not self._narrowmatch.always():
1505 1505 # do not exclude explicitly-specified paths so that they can
1506 1506 # be warned later on
1507 1507 em = matchmod.exact(match.files())
1508 1508 nm = matchmod.unionmatcher([self._narrowmatch, em])
1509 1509 return matchmod.intersectmatchers(match, nm)
1510 1510 return matchmod.intersectmatchers(match, self._narrowmatch)
1511 1511 return self._narrowmatch
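
# Intersection behavior of narrowmatch() above (illustrative;
# `usermatch` stands for any matcher built from user patterns):
#
#     m = repo.narrowmatch()               # the narrowspec alone
#     m = repo.narrowmatch(usermatch)      # narrowspec intersected with it
#     m = repo.narrowmatch(usermatch, includeexact=True)
#     # exact paths from `usermatch` survive even outside the narrowspec,
#     # so they can be warned about later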
1512 1512
1513 1513 def setnarrowpats(self, newincludes, newexcludes):
1514 1514 narrowspec.save(self, newincludes, newexcludes)
1515 1515 self.invalidate(clearfilecache=True)
1516 1516
1517 1517 def __getitem__(self, changeid):
1518 1518 # dealing with special cases
1519 1519 if changeid is None:
1520 1520 return context.workingctx(self)
1521 1521 if isinstance(changeid, context.basectx):
1522 1522 return changeid
1523 1523
1524 1524 # dealing with multiple revisions
1525 1525 if isinstance(changeid, slice):
1526 1526 # wdirrev isn't contiguous so the slice shouldn't include it
1527 1527 return [
1528 1528 self[i]
1529 1529 for i in pycompat.xrange(*changeid.indices(len(self)))
1530 1530 if i not in self.changelog.filteredrevs
1531 1531 ]
1532 1532
1533 1533 # dealing with some special values
1534 1534 if changeid == b'null' or changeid == nullrev:
1535 return context.changectx(self, nullrev, nullid)
1535 return context.changectx(self, nullrev, nullid, maybe_filtered=False)
1536 1536 if changeid == b'tip':
1537 1537 node = self.changelog.tip()
1538 1538 rev = self.changelog.rev(node)
1539 1539 return context.changectx(self, rev, node)
1540 1540
1541 1541 # dealing with arbitrary values
1542 1542 try:
1543 1543 if isinstance(changeid, int):
1544 1544 node = self.changelog.node(changeid)
1545 1545 rev = changeid
1546 1546 elif changeid == b'.':
1547 1547 # this is a hack to delay/avoid loading obsmarkers
1548 1548 # when we know that '.' won't be hidden
1549 1549 node = self.dirstate.p1()
1550 1550 rev = self.unfiltered().changelog.rev(node)
1551 1551 elif len(changeid) == 20:
1552 1552 try:
1553 1553 node = changeid
1554 1554 rev = self.changelog.rev(changeid)
1555 1555 except error.FilteredLookupError:
1556 1556 changeid = hex(changeid) # for the error message
1557 1557 raise
1558 1558 except LookupError:
1559 1559 # check if it might have come from damaged dirstate
1560 1560 #
1561 1561 # XXX we could avoid the unfiltered if we had a recognizable
1562 1562 # exception for filtered changeset access
1563 1563 if (
1564 1564 self.local()
1565 1565 and changeid in self.unfiltered().dirstate.parents()
1566 1566 ):
1567 1567 msg = _(b"working directory has unknown parent '%s'!")
1568 1568 raise error.Abort(msg % short(changeid))
1569 1569 changeid = hex(changeid) # for the error message
1570 1570 raise
1571 1571
1572 1572 elif len(changeid) == 40:
1573 1573 node = bin(changeid)
1574 1574 rev = self.changelog.rev(node)
1575 1575 else:
1576 1576 raise error.ProgrammingError(
1577 1577 b"unsupported changeid '%s' of type %s"
1578 1578 % (changeid, pycompat.bytestr(type(changeid)))
1579 1579 )
1580 1580
1581 1581 return context.changectx(self, rev, node)
1582 1582
1583 1583 except (error.FilteredIndexError, error.FilteredLookupError):
1584 1584 raise error.FilteredRepoLookupError(
1585 1585 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1586 1586 )
1587 1587 except (IndexError, LookupError):
1588 1588 raise error.RepoLookupError(
1589 1589 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1590 1590 )
1591 1591 except error.WdirUnsupported:
1592 1592 return context.workingctx(self)
1593 1593
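    # The lookup above accepts several shapes, e.g.:
    #
    #   repo[None]     # working directory context
    #   repo[b'.']     # first parent of the working directory
    #   repo[b'tip']   # tip changeset
    #   repo[0]        # local revision number
    #   repo[node]     # 20-byte binary node or 40-byte hex string
    #   repo[0:5]      # list of contexts, skipping filtered revisions
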
1594 1594 def __contains__(self, changeid):
1595 1595 """True if the given changeid exists
1596 1596
1597 1597         error.AmbiguousPrefixLookupError is raised if an ambiguous
1598 1598         node is specified.
1599 1599 """
1600 1600 try:
1601 1601 self[changeid]
1602 1602 return True
1603 1603 except error.RepoLookupError:
1604 1604 return False
1605 1605
1606 1606 def __nonzero__(self):
1607 1607 return True
1608 1608
1609 1609 __bool__ = __nonzero__
1610 1610
1611 1611 def __len__(self):
1612 1612 # no need to pay the cost of repoview.changelog
1613 1613 unfi = self.unfiltered()
1614 1614 return len(unfi.changelog)
1615 1615
1616 1616 def __iter__(self):
1617 1617 return iter(self.changelog)
1618 1618
1619 1619 def revs(self, expr, *args):
1620 1620 '''Find revisions matching a revset.
1621 1621
1622 1622 The revset is specified as a string ``expr`` that may contain
1623 1623 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1624 1624
1625 1625 Revset aliases from the configuration are not expanded. To expand
1626 1626 user aliases, consider calling ``scmutil.revrange()`` or
1627 1627 ``repo.anyrevs([expr], user=True)``.
1628 1628
1629 1629 Returns a revset.abstractsmartset, which is a list-like interface
1630 1630 that contains integer revisions.
1631 1631 '''
1632 1632 tree = revsetlang.spectree(expr, *args)
1633 1633 return revset.makematcher(tree)(self)
1634 1634
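    # A short sketch of the %-formatting accepted by revs(); see
    # revsetlang.formatspec for the full list of types:
    #
    #   repo.revs(b'author(%s) and branch(%s)', user, branch)
    #   repo.revs(b'heads(%ld)', [0, 1, 2])  # %ld formats a list of revs
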
1635 1635 def set(self, expr, *args):
1636 1636 '''Find revisions matching a revset and emit changectx instances.
1637 1637
1638 1638 This is a convenience wrapper around ``revs()`` that iterates the
1639 1639 result and is a generator of changectx instances.
1640 1640
1641 1641 Revset aliases from the configuration are not expanded. To expand
1642 1642 user aliases, consider calling ``scmutil.revrange()``.
1643 1643 '''
1644 1644 for r in self.revs(expr, *args):
1645 1645 yield self[r]
1646 1646
1647 1647 def anyrevs(self, specs, user=False, localalias=None):
1648 1648 '''Find revisions matching one of the given revsets.
1649 1649
1650 1650 Revset aliases from the configuration are not expanded by default. To
1651 1651 expand user aliases, specify ``user=True``. To provide some local
1652 1652 definitions overriding user aliases, set ``localalias`` to
1653 1653 ``{name: definitionstring}``.
1654 1654 '''
1655 1655 if specs == [b'null']:
1656 1656 return revset.baseset([nullrev])
1657 1657 if user:
1658 1658 m = revset.matchany(
1659 1659 self.ui,
1660 1660 specs,
1661 1661 lookup=revset.lookupfn(self),
1662 1662 localalias=localalias,
1663 1663 )
1664 1664 else:
1665 1665 m = revset.matchany(None, specs, localalias=localalias)
1666 1666 return m(self)
1667 1667
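    # Sketch of anyrevs() with a local alias overriding user aliases
    # (the alias name and definition are illustrative):
    #
    #   aliases = {b'candidates': b'draft() and not obsolete()'}
    #   revs = repo.anyrevs(
    #       [b'candidates', b'bookmark()'], user=True, localalias=aliases
    #   )
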
1668 1668 def url(self):
1669 1669 return b'file:' + self.root
1670 1670
1671 1671 def hook(self, name, throw=False, **args):
1672 1672 """Call a hook, passing this repo instance.
1673 1673
1674 1674 This a convenience method to aid invoking hooks. Extensions likely
1675 1675 won't call this unless they have registered a custom hook or are
1676 1676 replacing code that is expected to call a hook.
1677 1677 """
1678 1678 return hook.hook(self.ui, self, name, throw, **args)
1679 1679
1680 1680 @filteredpropertycache
1681 1681 def _tagscache(self):
1682 1682         '''Returns a tagscache object that contains various tags-related
1683 1683         caches.'''
1684 1684
1685 1685 # This simplifies its cache management by having one decorated
1686 1686 # function (this one) and the rest simply fetch things from it.
1687 1687 class tagscache(object):
1688 1688 def __init__(self):
1689 1689 # These two define the set of tags for this repository. tags
1690 1690 # maps tag name to node; tagtypes maps tag name to 'global' or
1691 1691 # 'local'. (Global tags are defined by .hgtags across all
1692 1692 # heads, and local tags are defined in .hg/localtags.)
1693 1693 # They constitute the in-memory cache of tags.
1694 1694 self.tags = self.tagtypes = None
1695 1695
1696 1696 self.nodetagscache = self.tagslist = None
1697 1697
1698 1698 cache = tagscache()
1699 1699 cache.tags, cache.tagtypes = self._findtags()
1700 1700
1701 1701 return cache
1702 1702
1703 1703 def tags(self):
1704 1704 '''return a mapping of tag to node'''
1705 1705 t = {}
1706 1706 if self.changelog.filteredrevs:
1707 1707 tags, tt = self._findtags()
1708 1708 else:
1709 1709 tags = self._tagscache.tags
1710 1710 rev = self.changelog.rev
1711 1711 for k, v in pycompat.iteritems(tags):
1712 1712 try:
1713 1713 # ignore tags to unknown nodes
1714 1714 rev(v)
1715 1715 t[k] = v
1716 1716 except (error.LookupError, ValueError):
1717 1717 pass
1718 1718 return t
1719 1719
1720 1720 def _findtags(self):
1721 1721 '''Do the hard work of finding tags. Return a pair of dicts
1722 1722 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1723 1723 maps tag name to a string like \'global\' or \'local\'.
1724 1724 Subclasses or extensions are free to add their own tags, but
1725 1725 should be aware that the returned dicts will be retained for the
1726 1726 duration of the localrepo object.'''
1727 1727
1728 1728 # XXX what tagtype should subclasses/extensions use? Currently
1729 1729 # mq and bookmarks add tags, but do not set the tagtype at all.
1730 1730 # Should each extension invent its own tag type? Should there
1731 1731 # be one tagtype for all such "virtual" tags? Or is the status
1732 1732 # quo fine?
1733 1733
1734 1734 # map tag name to (node, hist)
1735 1735 alltags = tagsmod.findglobaltags(self.ui, self)
1736 1736 # map tag name to tag type
1737 1737 tagtypes = dict((tag, b'global') for tag in alltags)
1738 1738
1739 1739 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1740 1740
1741 1741 # Build the return dicts. Have to re-encode tag names because
1742 1742 # the tags module always uses UTF-8 (in order not to lose info
1743 1743 # writing to the cache), but the rest of Mercurial wants them in
1744 1744 # local encoding.
1745 1745 tags = {}
1746 1746 for (name, (node, hist)) in pycompat.iteritems(alltags):
1747 1747 if node != nullid:
1748 1748 tags[encoding.tolocal(name)] = node
1749 1749 tags[b'tip'] = self.changelog.tip()
1750 1750 tagtypes = dict(
1751 1751 [
1752 1752 (encoding.tolocal(name), value)
1753 1753 for (name, value) in pycompat.iteritems(tagtypes)
1754 1754 ]
1755 1755 )
1756 1756 return (tags, tagtypes)
1757 1757
1758 1758 def tagtype(self, tagname):
1759 1759 '''
1760 1760 return the type of the given tag. result can be:
1761 1761
1762 1762 'local' : a local tag
1763 1763 'global' : a global tag
1764 1764 None : tag does not exist
1765 1765 '''
1766 1766
1767 1767 return self._tagscache.tagtypes.get(tagname)
1768 1768
1769 1769 def tagslist(self):
1770 1770 '''return a list of tags ordered by revision'''
1771 1771 if not self._tagscache.tagslist:
1772 1772 l = []
1773 1773 for t, n in pycompat.iteritems(self.tags()):
1774 1774 l.append((self.changelog.rev(n), t, n))
1775 1775 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1776 1776
1777 1777 return self._tagscache.tagslist
1778 1778
1779 1779 def nodetags(self, node):
1780 1780 '''return the tags associated with a node'''
1781 1781 if not self._tagscache.nodetagscache:
1782 1782 nodetagscache = {}
1783 1783 for t, n in pycompat.iteritems(self._tagscache.tags):
1784 1784 nodetagscache.setdefault(n, []).append(t)
1785 1785 for tags in pycompat.itervalues(nodetagscache):
1786 1786 tags.sort()
1787 1787 self._tagscache.nodetagscache = nodetagscache
1788 1788 return self._tagscache.nodetagscache.get(node, [])
1789 1789
1790 1790 def nodebookmarks(self, node):
1791 1791 """return the list of bookmarks pointing to the specified node"""
1792 1792 return self._bookmarks.names(node)
1793 1793
1794 1794 def branchmap(self):
1795 1795 '''returns a dictionary {branch: [branchheads]} with branchheads
1796 1796 ordered by increasing revision number'''
1797 1797 return self._branchcaches[self]
1798 1798
1799 1799 @unfilteredmethod
1800 1800 def revbranchcache(self):
1801 1801 if not self._revbranchcache:
1802 1802 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1803 1803 return self._revbranchcache
1804 1804
1805 1805 def branchtip(self, branch, ignoremissing=False):
1806 1806 '''return the tip node for a given branch
1807 1807
1808 1808 If ignoremissing is True, then this method will not raise an error.
1809 1809 This is helpful for callers that only expect None for a missing branch
1810 1810 (e.g. namespace).
1811 1811
1812 1812 '''
1813 1813 try:
1814 1814 return self.branchmap().branchtip(branch)
1815 1815 except KeyError:
1816 1816 if not ignoremissing:
1817 1817 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
1818 1818 else:
1819 1819 pass
1820 1820
1821 1821 def lookup(self, key):
1822 1822 node = scmutil.revsymbol(self, key).node()
1823 1823 if node is None:
1824 1824 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
1825 1825 return node
1826 1826
1827 1827 def lookupbranch(self, key):
1828 1828 if self.branchmap().hasbranch(key):
1829 1829 return key
1830 1830
1831 1831 return scmutil.revsymbol(self, key).branch()
1832 1832
1833 1833 def known(self, nodes):
1834 1834 cl = self.changelog
1835 1835 get_rev = cl.index.get_rev
1836 1836 filtered = cl.filteredrevs
1837 1837 result = []
1838 1838 for n in nodes:
1839 1839 r = get_rev(n)
1840 1840 resp = not (r is None or r in filtered)
1841 1841 result.append(resp)
1842 1842 return result
1843 1843
1844 1844 def local(self):
1845 1845 return self
1846 1846
1847 1847 def publishing(self):
1848 1848 # it's safe (and desirable) to trust the publish flag unconditionally
1849 1849 # so that we don't finalize changes shared between users via ssh or nfs
1850 1850 return self.ui.configbool(b'phases', b'publish', untrusted=True)
1851 1851
1852 1852 def cancopy(self):
1853 1853 # so statichttprepo's override of local() works
1854 1854 if not self.local():
1855 1855 return False
1856 1856 if not self.publishing():
1857 1857 return True
1858 1858 # if publishing we can't copy if there is filtered content
1859 1859 return not self.filtered(b'visible').changelog.filteredrevs
1860 1860
1861 1861 def shared(self):
1862 1862 '''the type of shared repository (None if not shared)'''
1863 1863 if self.sharedpath != self.path:
1864 1864 return b'store'
1865 1865 return None
1866 1866
1867 1867 def wjoin(self, f, *insidef):
1868 1868 return self.vfs.reljoin(self.root, f, *insidef)
1869 1869
1870 1870 def setparents(self, p1, p2=nullid):
1871 1871 with self.dirstate.parentchange():
1872 1872 copies = self.dirstate.setparents(p1, p2)
1873 1873 pctx = self[p1]
1874 1874 if copies:
1875 1875                 # Adjust copy records: the dirstate cannot do it, as it
1876 1876                 # requires access to the parents' manifests. Preserve them
1877 1877                 # only for entries added to the first parent.
1878 1878 for f in copies:
1879 1879 if f not in pctx and copies[f] in pctx:
1880 1880 self.dirstate.copy(copies[f], f)
1881 1881 if p2 == nullid:
1882 1882 for f, s in sorted(self.dirstate.copies().items()):
1883 1883 if f not in pctx and s not in pctx:
1884 1884 self.dirstate.copy(None, f)
1885 1885
1886 1886 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1887 1887 """changeid must be a changeset revision, if specified.
1888 1888 fileid can be a file revision or node."""
1889 1889 return context.filectx(
1890 1890 self, path, changeid, fileid, changectx=changectx
1891 1891 )
1892 1892
1893 1893 def getcwd(self):
1894 1894 return self.dirstate.getcwd()
1895 1895
1896 1896 def pathto(self, f, cwd=None):
1897 1897 return self.dirstate.pathto(f, cwd)
1898 1898
1899 1899 def _loadfilter(self, filter):
1900 1900 if filter not in self._filterpats:
1901 1901 l = []
1902 1902 for pat, cmd in self.ui.configitems(filter):
1903 1903 if cmd == b'!':
1904 1904 continue
1905 1905 mf = matchmod.match(self.root, b'', [pat])
1906 1906 fn = None
1907 1907 params = cmd
1908 1908 for name, filterfn in pycompat.iteritems(self._datafilters):
1909 1909 if cmd.startswith(name):
1910 1910 fn = filterfn
1911 1911 params = cmd[len(name) :].lstrip()
1912 1912 break
1913 1913 if not fn:
1914 1914 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1915 1915 fn.__name__ = 'commandfilter'
1916 1916 # Wrap old filters not supporting keyword arguments
1917 1917 if not pycompat.getargspec(fn)[2]:
1918 1918 oldfn = fn
1919 1919 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
1920 1920 fn.__name__ = 'compat-' + oldfn.__name__
1921 1921 l.append((mf, fn, params))
1922 1922 self._filterpats[filter] = l
1923 1923 return self._filterpats[filter]
1924 1924
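    # The [encode] and [decode] hgrc sections feed this machinery; a
    # hypothetical configuration could look like:
    #
    #   [encode]
    #   **.txt = dos2unix
    #   [decode]
    #   **.txt = unix2dos
    #
    # A pattern mapped to '!' disables filtering for matching files, as
    # handled in _loadfilter() above.
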
1925 1925 def _filter(self, filterpats, filename, data):
1926 1926 for mf, fn, cmd in filterpats:
1927 1927 if mf(filename):
1928 1928 self.ui.debug(
1929 1929 b"filtering %s through %s\n"
1930 1930 % (filename, cmd or pycompat.sysbytes(fn.__name__))
1931 1931 )
1932 1932 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1933 1933 break
1934 1934
1935 1935 return data
1936 1936
1937 1937 @unfilteredpropertycache
1938 1938 def _encodefilterpats(self):
1939 1939 return self._loadfilter(b'encode')
1940 1940
1941 1941 @unfilteredpropertycache
1942 1942 def _decodefilterpats(self):
1943 1943 return self._loadfilter(b'decode')
1944 1944
1945 1945 def adddatafilter(self, name, filter):
1946 1946 self._datafilters[name] = filter
1947 1947
1948 1948 def wread(self, filename):
1949 1949 if self.wvfs.islink(filename):
1950 1950 data = self.wvfs.readlink(filename)
1951 1951 else:
1952 1952 data = self.wvfs.read(filename)
1953 1953 return self._filter(self._encodefilterpats, filename, data)
1954 1954
1955 1955 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1956 1956 """write ``data`` into ``filename`` in the working directory
1957 1957
1958 1958         This returns the length of the written (maybe decoded) data.
1959 1959 """
1960 1960 data = self._filter(self._decodefilterpats, filename, data)
1961 1961 if b'l' in flags:
1962 1962 self.wvfs.symlink(data, filename)
1963 1963 else:
1964 1964 self.wvfs.write(
1965 1965 filename, data, backgroundclose=backgroundclose, **kwargs
1966 1966 )
1967 1967 if b'x' in flags:
1968 1968 self.wvfs.setflags(filename, False, True)
1969 1969 else:
1970 1970 self.wvfs.setflags(filename, False, False)
1971 1971 return len(data)
1972 1972
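    # Flag handling sketch (file names are illustrative): b'l' writes a
    # symlink, b'x' marks the file executable, and an empty flag string
    # writes a plain file:
    #
    #   repo.wwrite(b'script.sh', data, b'x')
    #   repo.wwrite(b'link-name', b'target/path', b'l')
    #   repo.wwrite(b'plain.txt', data, b'')
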
1973 1973 def wwritedata(self, filename, data):
1974 1974 return self._filter(self._decodefilterpats, filename, data)
1975 1975
1976 1976 def currenttransaction(self):
1977 1977         """return the current transaction or None if none exists"""
1978 1978 if self._transref:
1979 1979 tr = self._transref()
1980 1980 else:
1981 1981 tr = None
1982 1982
1983 1983 if tr and tr.running():
1984 1984 return tr
1985 1985 return None
1986 1986
1987 1987 def transaction(self, desc, report=None):
1988 1988 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1989 1989 b'devel', b'check-locks'
1990 1990 ):
1991 1991 if self._currentlock(self._lockref) is None:
1992 1992 raise error.ProgrammingError(b'transaction requires locking')
1993 1993 tr = self.currenttransaction()
1994 1994 if tr is not None:
1995 1995 return tr.nest(name=desc)
1996 1996
1997 1997 # abort here if the journal already exists
1998 1998 if self.svfs.exists(b"journal"):
1999 1999 raise error.RepoError(
2000 2000 _(b"abandoned transaction found"),
2001 2001 hint=_(b"run 'hg recover' to clean up transaction"),
2002 2002 )
2003 2003
2004 2004 idbase = b"%.40f#%f" % (random.random(), time.time())
2005 2005 ha = hex(hashlib.sha1(idbase).digest())
2006 2006 txnid = b'TXN:' + ha
2007 2007 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2008 2008
2009 2009 self._writejournal(desc)
2010 2010 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2011 2011 if report:
2012 2012 rp = report
2013 2013 else:
2014 2014 rp = self.ui.warn
2015 2015 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2016 2016 # we must avoid cyclic reference between repo and transaction.
2017 2017 reporef = weakref.ref(self)
2018 2018 # Code to track tag movement
2019 2019 #
2020 2020         # Since tags are all handled as file content, it is actually quite hard
2021 2021         # to track these movements from a code perspective. So we fall back to
2022 2022         # tracking at the repository level. One could envision tracking changes
2023 2023         # to the '.hgtags' file through changegroup application, but that fails
2024 2024         # to cope with cases where a transaction exposes new heads without a
2025 2025         # changegroup being involved (eg: phase movement).
2026 2026         #
2027 2027         # For now, we gate the feature behind a flag since this likely comes
2028 2028         # with performance impacts. The current code runs more often than needed
2029 2029         # and does not use caches as much as it could. The current focus is on
2030 2030         # the behavior of the feature, so we disable it by default. The flag
2031 2031         # will be removed when we are happy with the performance impact.
2032 2032 #
2033 2033 # Once this feature is no longer experimental move the following
2034 2034 # documentation to the appropriate help section:
2035 2035 #
2036 2036 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2037 2037 # tags (new or changed or deleted tags). In addition the details of
2038 2038 # these changes are made available in a file at:
2039 2039 # ``REPOROOT/.hg/changes/tags.changes``.
2040 2040 # Make sure you check for HG_TAG_MOVED before reading that file as it
2041 2041         # might exist from a previous transaction even if no tags were touched
2042 2042         # in this one. Changes are recorded in a line-based format::
2043 2043 #
2044 2044 # <action> <hex-node> <tag-name>\n
2045 2045 #
2046 2046         # Actions are defined as follows:
2047 2047 # "-R": tag is removed,
2048 2048 # "+A": tag is added,
2049 2049 # "-M": tag is moved (old value),
2050 2050 # "+M": tag is moved (new value),
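        # A hook consuming that file could parse it along these lines
        # (sketch only; the field layout is as documented above):
        #
        #   with repo.vfs(b'changes/tags.changes') as fh:
        #       for line in fh:
        #           action, node, name = line.rstrip(b'\n').split(b' ', 2)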
2051 2051 tracktags = lambda x: None
2052 2052 # experimental config: experimental.hook-track-tags
2053 2053 shouldtracktags = self.ui.configbool(
2054 2054 b'experimental', b'hook-track-tags'
2055 2055 )
2056 2056 if desc != b'strip' and shouldtracktags:
2057 2057 oldheads = self.changelog.headrevs()
2058 2058
2059 2059 def tracktags(tr2):
2060 2060 repo = reporef()
2061 2061 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2062 2062 newheads = repo.changelog.headrevs()
2063 2063 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2064 2064             # note: we compare lists here.
2065 2065             # As we do it only once, building a set would not be cheaper.
2066 2066 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2067 2067 if changes:
2068 2068 tr2.hookargs[b'tag_moved'] = b'1'
2069 2069 with repo.vfs(
2070 2070 b'changes/tags.changes', b'w', atomictemp=True
2071 2071 ) as changesfile:
2072 2072                     # note: we do not register the file with the transaction
2073 2073                     # because we need it to still exist when the transaction
2074 2074                     # is closed (for txnclose hooks).
2075 2075 tagsmod.writediff(changesfile, changes)
2076 2076
2077 2077 def validate(tr2):
2078 2078 """will run pre-closing hooks"""
2079 2079 # XXX the transaction API is a bit lacking here so we take a hacky
2080 2080 # path for now
2081 2081 #
2082 2082             # We cannot add this as a "pending" hook since the 'tr.hookargs'
2083 2083             # dict is copied before these run. In addition we need the data
2084 2084             # available to in-memory hooks too.
2085 2085 #
2086 2086 # Moreover, we also need to make sure this runs before txnclose
2087 2087 # hooks and there is no "pending" mechanism that would execute
2088 2088 # logic only if hooks are about to run.
2089 2089 #
2090 2090 # Fixing this limitation of the transaction is also needed to track
2091 2091 # other families of changes (bookmarks, phases, obsolescence).
2092 2092 #
2093 2093 # This will have to be fixed before we remove the experimental
2094 2094 # gating.
2095 2095 tracktags(tr2)
2096 2096 repo = reporef()
2097 2097
2098 2098 singleheadopt = (b'experimental', b'single-head-per-branch')
2099 2099 singlehead = repo.ui.configbool(*singleheadopt)
2100 2100 if singlehead:
2101 2101 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2102 2102 accountclosed = singleheadsub.get(
2103 2103 b"account-closed-heads", False
2104 2104 )
2105 2105 scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
2106 2106 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2107 2107 for name, (old, new) in sorted(
2108 2108 tr.changes[b'bookmarks'].items()
2109 2109 ):
2110 2110 args = tr.hookargs.copy()
2111 2111 args.update(bookmarks.preparehookargs(name, old, new))
2112 2112 repo.hook(
2113 2113 b'pretxnclose-bookmark',
2114 2114 throw=True,
2115 2115 **pycompat.strkwargs(args)
2116 2116 )
2117 2117 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2118 2118 cl = repo.unfiltered().changelog
2119 2119 for rev, (old, new) in tr.changes[b'phases'].items():
2120 2120 args = tr.hookargs.copy()
2121 2121 node = hex(cl.node(rev))
2122 2122 args.update(phases.preparehookargs(node, old, new))
2123 2123 repo.hook(
2124 2124 b'pretxnclose-phase',
2125 2125 throw=True,
2126 2126 **pycompat.strkwargs(args)
2127 2127 )
2128 2128
2129 2129 repo.hook(
2130 2130 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2131 2131 )
2132 2132
2133 2133 def releasefn(tr, success):
2134 2134 repo = reporef()
2135 2135 if repo is None:
2136 2136 # If the repo has been GC'd (and this release function is being
2137 2137 # called from transaction.__del__), there's not much we can do,
2138 2138 # so just leave the unfinished transaction there and let the
2139 2139 # user run `hg recover`.
2140 2140 return
2141 2141 if success:
2142 2142                 # this should be explicitly invoked here, because
2143 2143                 # in-memory changes aren't written out when closing the
2144 2144                 # transaction if tr.addfilegenerator (via
2145 2145                 # dirstate.write or so) wasn't invoked while the
2146 2146                 # transaction was running
2147 2147 repo.dirstate.write(None)
2148 2148 else:
2149 2149 # discard all changes (including ones already written
2150 2150 # out) in this transaction
2151 2151 narrowspec.restorebackup(self, b'journal.narrowspec')
2152 2152 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2153 2153 repo.dirstate.restorebackup(None, b'journal.dirstate')
2154 2154
2155 2155 repo.invalidate(clearfilecache=True)
2156 2156
2157 2157 tr = transaction.transaction(
2158 2158 rp,
2159 2159 self.svfs,
2160 2160 vfsmap,
2161 2161 b"journal",
2162 2162 b"undo",
2163 2163 aftertrans(renames),
2164 2164 self.store.createmode,
2165 2165 validator=validate,
2166 2166 releasefn=releasefn,
2167 2167 checkambigfiles=_cachedfiles,
2168 2168 name=desc,
2169 2169 )
2170 2170 tr.changes[b'origrepolen'] = len(self)
2171 2171 tr.changes[b'obsmarkers'] = set()
2172 2172 tr.changes[b'phases'] = {}
2173 2173 tr.changes[b'bookmarks'] = {}
2174 2174
2175 2175 tr.hookargs[b'txnid'] = txnid
2176 2176 tr.hookargs[b'txnname'] = desc
2177 2177         # note: writing the fncache only during finalize means that the file is
2178 2178         # outdated when running hooks. As fncache is used for streaming clone,
2179 2179         # this is not expected to break anything that happens during the hooks.
2180 2180 tr.addfinalize(b'flush-fncache', self.store.write)
2181 2181
2182 2182 def txnclosehook(tr2):
2183 2183 """To be run if transaction is successful, will schedule a hook run
2184 2184 """
2185 2185 # Don't reference tr2 in hook() so we don't hold a reference.
2186 2186 # This reduces memory consumption when there are multiple
2187 2187 # transactions per lock. This can likely go away if issue5045
2188 2188 # fixes the function accumulation.
2189 2189 hookargs = tr2.hookargs
2190 2190
2191 2191 def hookfunc():
2192 2192 repo = reporef()
2193 2193 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2194 2194 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2195 2195 for name, (old, new) in bmchanges:
2196 2196 args = tr.hookargs.copy()
2197 2197 args.update(bookmarks.preparehookargs(name, old, new))
2198 2198 repo.hook(
2199 2199 b'txnclose-bookmark',
2200 2200 throw=False,
2201 2201 **pycompat.strkwargs(args)
2202 2202 )
2203 2203
2204 2204 if hook.hashook(repo.ui, b'txnclose-phase'):
2205 2205 cl = repo.unfiltered().changelog
2206 2206 phasemv = sorted(tr.changes[b'phases'].items())
2207 2207 for rev, (old, new) in phasemv:
2208 2208 args = tr.hookargs.copy()
2209 2209 node = hex(cl.node(rev))
2210 2210 args.update(phases.preparehookargs(node, old, new))
2211 2211 repo.hook(
2212 2212 b'txnclose-phase',
2213 2213 throw=False,
2214 2214 **pycompat.strkwargs(args)
2215 2215 )
2216 2216
2217 2217 repo.hook(
2218 2218 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2219 2219 )
2220 2220
2221 2221 reporef()._afterlock(hookfunc)
2222 2222
2223 2223 tr.addfinalize(b'txnclose-hook', txnclosehook)
2224 2224 # Include a leading "-" to make it happen before the transaction summary
2225 2225 # reports registered via scmutil.registersummarycallback() whose names
2226 2226 # are 00-txnreport etc. That way, the caches will be warm when the
2227 2227 # callbacks run.
2228 2228 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2229 2229
2230 2230 def txnaborthook(tr2):
2231 2231 """To be run if transaction is aborted
2232 2232 """
2233 2233 reporef().hook(
2234 2234 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2235 2235 )
2236 2236
2237 2237 tr.addabort(b'txnabort-hook', txnaborthook)
2238 2238 # avoid eager cache invalidation. in-memory data should be identical
2239 2239 # to stored data if transaction has no error.
2240 2240 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2241 2241 self._transref = weakref.ref(tr)
2242 2242 scmutil.registersummarycallback(self, tr, desc)
2243 2243 return tr
2244 2244
2245 2245 def _journalfiles(self):
2246 2246 return (
2247 2247 (self.svfs, b'journal'),
2248 2248 (self.svfs, b'journal.narrowspec'),
2249 2249 (self.vfs, b'journal.narrowspec.dirstate'),
2250 2250 (self.vfs, b'journal.dirstate'),
2251 2251 (self.vfs, b'journal.branch'),
2252 2252 (self.vfs, b'journal.desc'),
2253 2253 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2254 2254 (self.svfs, b'journal.phaseroots'),
2255 2255 )
2256 2256
2257 2257 def undofiles(self):
2258 2258 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2259 2259
2260 2260 @unfilteredmethod
2261 2261 def _writejournal(self, desc):
2262 2262 self.dirstate.savebackup(None, b'journal.dirstate')
2263 2263 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2264 2264 narrowspec.savebackup(self, b'journal.narrowspec')
2265 2265 self.vfs.write(
2266 2266 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2267 2267 )
2268 2268 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2269 2269 bookmarksvfs = bookmarks.bookmarksvfs(self)
2270 2270 bookmarksvfs.write(
2271 2271 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2272 2272 )
2273 2273 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2274 2274
2275 2275 def recover(self):
2276 2276 with self.lock():
2277 2277 if self.svfs.exists(b"journal"):
2278 2278 self.ui.status(_(b"rolling back interrupted transaction\n"))
2279 2279 vfsmap = {
2280 2280 b'': self.svfs,
2281 2281 b'plain': self.vfs,
2282 2282 }
2283 2283 transaction.rollback(
2284 2284 self.svfs,
2285 2285 vfsmap,
2286 2286 b"journal",
2287 2287 self.ui.warn,
2288 2288 checkambigfiles=_cachedfiles,
2289 2289 )
2290 2290 self.invalidate()
2291 2291 return True
2292 2292 else:
2293 2293 self.ui.warn(_(b"no interrupted transaction available\n"))
2294 2294 return False
2295 2295
2296 2296 def rollback(self, dryrun=False, force=False):
2297 2297 wlock = lock = dsguard = None
2298 2298 try:
2299 2299 wlock = self.wlock()
2300 2300 lock = self.lock()
2301 2301 if self.svfs.exists(b"undo"):
2302 2302 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2303 2303
2304 2304 return self._rollback(dryrun, force, dsguard)
2305 2305 else:
2306 2306 self.ui.warn(_(b"no rollback information available\n"))
2307 2307 return 1
2308 2308 finally:
2309 2309 release(dsguard, lock, wlock)
2310 2310
2311 2311 @unfilteredmethod # Until we get smarter cache management
2312 2312 def _rollback(self, dryrun, force, dsguard):
2313 2313 ui = self.ui
2314 2314 try:
2315 2315 args = self.vfs.read(b'undo.desc').splitlines()
2316 2316 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2317 2317 if len(args) >= 3:
2318 2318 detail = args[2]
2319 2319 oldtip = oldlen - 1
2320 2320
2321 2321 if detail and ui.verbose:
2322 2322 msg = _(
2323 2323 b'repository tip rolled back to revision %d'
2324 2324 b' (undo %s: %s)\n'
2325 2325 ) % (oldtip, desc, detail)
2326 2326 else:
2327 2327 msg = _(
2328 2328 b'repository tip rolled back to revision %d (undo %s)\n'
2329 2329 ) % (oldtip, desc)
2330 2330 except IOError:
2331 2331 msg = _(b'rolling back unknown transaction\n')
2332 2332 desc = None
2333 2333
2334 2334 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2335 2335 raise error.Abort(
2336 2336 _(
2337 2337 b'rollback of last commit while not checked out '
2338 2338 b'may lose data'
2339 2339 ),
2340 2340 hint=_(b'use -f to force'),
2341 2341 )
2342 2342
2343 2343 ui.status(msg)
2344 2344 if dryrun:
2345 2345 return 0
2346 2346
2347 2347 parents = self.dirstate.parents()
2348 2348 self.destroying()
2349 2349 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2350 2350 transaction.rollback(
2351 2351 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2352 2352 )
2353 2353 bookmarksvfs = bookmarks.bookmarksvfs(self)
2354 2354 if bookmarksvfs.exists(b'undo.bookmarks'):
2355 2355 bookmarksvfs.rename(
2356 2356 b'undo.bookmarks', b'bookmarks', checkambig=True
2357 2357 )
2358 2358 if self.svfs.exists(b'undo.phaseroots'):
2359 2359 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2360 2360 self.invalidate()
2361 2361
2362 2362 has_node = self.changelog.index.has_node
2363 2363 parentgone = any(not has_node(p) for p in parents)
2364 2364 if parentgone:
2365 2365 # prevent dirstateguard from overwriting already restored one
2366 2366 dsguard.close()
2367 2367
2368 2368 narrowspec.restorebackup(self, b'undo.narrowspec')
2369 2369 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2370 2370 self.dirstate.restorebackup(None, b'undo.dirstate')
2371 2371 try:
2372 2372 branch = self.vfs.read(b'undo.branch')
2373 2373 self.dirstate.setbranch(encoding.tolocal(branch))
2374 2374 except IOError:
2375 2375 ui.warn(
2376 2376 _(
2377 2377 b'named branch could not be reset: '
2378 2378 b'current branch is still \'%s\'\n'
2379 2379 )
2380 2380 % self.dirstate.branch()
2381 2381 )
2382 2382
2383 2383 parents = tuple([p.rev() for p in self[None].parents()])
2384 2384 if len(parents) > 1:
2385 2385 ui.status(
2386 2386 _(
2387 2387 b'working directory now based on '
2388 2388 b'revisions %d and %d\n'
2389 2389 )
2390 2390 % parents
2391 2391 )
2392 2392 else:
2393 2393 ui.status(
2394 2394 _(b'working directory now based on revision %d\n') % parents
2395 2395 )
2396 2396 mergemod.mergestate.clean(self, self[b'.'].node())
2397 2397
2398 2398 # TODO: if we know which new heads may result from this rollback, pass
2399 2399 # them to destroy(), which will prevent the branchhead cache from being
2400 2400 # invalidated.
2401 2401 self.destroyed()
2402 2402 return 0
2403 2403
2404 2404 def _buildcacheupdater(self, newtransaction):
2405 2405 """called during transaction to build the callback updating cache
2406 2406
2407 2407         Lives on the repository to help extensions that might want to augment
2408 2408 this logic. For this purpose, the created transaction is passed to the
2409 2409 method.
2410 2410 """
2411 2411 # we must avoid cyclic reference between repo and transaction.
2412 2412 reporef = weakref.ref(self)
2413 2413
2414 2414 def updater(tr):
2415 2415 repo = reporef()
2416 2416 repo.updatecaches(tr)
2417 2417
2418 2418 return updater
2419 2419
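    # An extension wanting to warm additional caches could wrap this
    # method (sketch; all names below are illustrative):
    #
    #   def buildcacheupdater(orig, repo, newtransaction):
    #       updater = orig(repo, newtransaction)
    #
    #       def wrapped(tr):
    #           updater(tr)
    #           # warm extension-specific caches here
    #
    #       return wrapped
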
2420 2420 @unfilteredmethod
2421 2421 def updatecaches(self, tr=None, full=False):
2422 2422 """warm appropriate caches
2423 2423
2424 2424         If this function is called after a transaction closed, the transaction
2425 2425         will be available in the 'tr' argument. This can be used to selectively
2426 2426         update caches relevant to the changes in that transaction.
2427 2427
2428 2428         If 'full' is set, make sure all caches the function knows about have
2429 2429         up-to-date data, even the ones usually loaded more lazily.
2430 2430 """
2431 2431 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2432 2432 # During strip, many caches are invalid but
2433 2433 # later call to `destroyed` will refresh them.
2434 2434 return
2435 2435
2436 2436 if tr is None or tr.changes[b'origrepolen'] < len(self):
2437 2437             # accessing the 'served' branchmap should refresh all the others
2438 2438 self.ui.debug(b'updating the branch cache\n')
2439 2439 self.filtered(b'served').branchmap()
2440 2440 self.filtered(b'served.hidden').branchmap()
2441 2441
2442 2442 if full:
2443 2443 unfi = self.unfiltered()
2444 2444 rbc = unfi.revbranchcache()
2445 2445 for r in unfi.changelog:
2446 2446 rbc.branchinfo(r)
2447 2447 rbc.write()
2448 2448
2449 2449 # ensure the working copy parents are in the manifestfulltextcache
2450 2450 for ctx in self[b'.'].parents():
2451 2451 ctx.manifest() # accessing the manifest is enough
2452 2452
2453 2453 # accessing fnode cache warms the cache
2454 2454 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2455 2455             # accessing tags warms the cache
2456 2456 self.tags()
2457 2457 self.filtered(b'served').tags()
2458 2458
2459 2459 # The `full` arg is documented as updating even the lazily-loaded
2460 2460 # caches immediately, so we're forcing a write to cause these caches
2461 2461 # to be warmed up even if they haven't explicitly been requested
2462 2462 # yet (if they've never been used by hg, they won't ever have been
2463 2463 # written, even if they're a subset of another kind of cache that
2464 2464 # *has* been used).
2465 2465 for filt in repoview.filtertable.keys():
2466 2466 filtered = self.filtered(filt)
2467 2467 filtered.branchmap().write(filtered)
2468 2468
2469 2469 def invalidatecaches(self):
2470 2470
2471 2471 if '_tagscache' in vars(self):
2472 2472 # can't use delattr on proxy
2473 2473 del self.__dict__['_tagscache']
2474 2474
2475 2475 self._branchcaches.clear()
2476 2476 self.invalidatevolatilesets()
2477 2477 self._sparsesignaturecache.clear()
2478 2478
2479 2479 def invalidatevolatilesets(self):
2480 2480 self.filteredrevcache.clear()
2481 2481 obsolete.clearobscaches(self)
2482 2482
2483 2483 def invalidatedirstate(self):
2484 2484 '''Invalidates the dirstate, causing the next call to dirstate
2485 2485 to check if it was modified since the last time it was read,
2486 2486         rereading it if it has been.
2487 2487
2488 2488         This is different from dirstate.invalidate() in that it doesn't always
2489 2489         reread the dirstate. Use dirstate.invalidate() if you want to
2490 2490 explicitly read the dirstate again (i.e. restoring it to a previous
2491 2491 known good state).'''
2492 2492 if hasunfilteredcache(self, 'dirstate'):
2493 2493 for k in self.dirstate._filecache:
2494 2494 try:
2495 2495 delattr(self.dirstate, k)
2496 2496 except AttributeError:
2497 2497 pass
2498 2498 delattr(self.unfiltered(), 'dirstate')
2499 2499
2500 2500 def invalidate(self, clearfilecache=False):
2501 2501 '''Invalidates both store and non-store parts other than dirstate
2502 2502
2503 2503         If a transaction is running, invalidation of the store is omitted,
2504 2504         because discarding in-memory changes might cause inconsistency
2505 2505         (e.g. an incomplete fncache causes unintentional failure, but
2506 2506         a redundant one doesn't).
2507 2507 '''
2508 2508 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2509 2509 for k in list(self._filecache.keys()):
2510 2510 # dirstate is invalidated separately in invalidatedirstate()
2511 2511 if k == b'dirstate':
2512 2512 continue
2513 2513 if (
2514 2514 k == b'changelog'
2515 2515 and self.currenttransaction()
2516 2516 and self.changelog._delayed
2517 2517 ):
2518 2518 # The changelog object may store unwritten revisions. We don't
2519 2519 # want to lose them.
2520 2520 # TODO: Solve the problem instead of working around it.
2521 2521 continue
2522 2522
2523 2523 if clearfilecache:
2524 2524 del self._filecache[k]
2525 2525 try:
2526 2526 delattr(unfiltered, k)
2527 2527 except AttributeError:
2528 2528 pass
2529 2529 self.invalidatecaches()
2530 2530 if not self.currenttransaction():
2531 2531 # TODO: Changing contents of store outside transaction
2532 2532 # causes inconsistency. We should make in-memory store
2533 2533 # changes detectable, and abort if changed.
2534 2534 self.store.invalidatecaches()
2535 2535
2536 2536 def invalidateall(self):
2537 2537 '''Fully invalidates both store and non-store parts, causing the
2538 2538 subsequent operation to reread any outside changes.'''
2539 2539 # extension should hook this to invalidate its caches
2540 2540 self.invalidate()
2541 2541 self.invalidatedirstate()
2542 2542
2543 2543 @unfilteredmethod
2544 2544 def _refreshfilecachestats(self, tr):
2545 2545 """Reload stats of cached files so that they are flagged as valid"""
2546 2546 for k, ce in self._filecache.items():
2547 2547 k = pycompat.sysstr(k)
2548 2548 if k == 'dirstate' or k not in self.__dict__:
2549 2549 continue
2550 2550 ce.refresh()
2551 2551
2552 2552 def _lock(
2553 2553 self,
2554 2554 vfs,
2555 2555 lockname,
2556 2556 wait,
2557 2557 releasefn,
2558 2558 acquirefn,
2559 2559 desc,
2560 2560 inheritchecker=None,
2561 2561 parentenvvar=None,
2562 2562 ):
2563 2563 parentlock = None
2564 2564 # the contents of parentenvvar are used by the underlying lock to
2565 2565 # determine whether it can be inherited
2566 2566 if parentenvvar is not None:
2567 2567 parentlock = encoding.environ.get(parentenvvar)
2568 2568
2569 2569 timeout = 0
2570 2570 warntimeout = 0
2571 2571 if wait:
2572 2572 timeout = self.ui.configint(b"ui", b"timeout")
2573 2573 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2574 2574 # internal config: ui.signal-safe-lock
2575 2575 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2576 2576
2577 2577 l = lockmod.trylock(
2578 2578 self.ui,
2579 2579 vfs,
2580 2580 lockname,
2581 2581 timeout,
2582 2582 warntimeout,
2583 2583 releasefn=releasefn,
2584 2584 acquirefn=acquirefn,
2585 2585 desc=desc,
2586 2586 inheritchecker=inheritchecker,
2587 2587 parentlock=parentlock,
2588 2588 signalsafe=signalsafe,
2589 2589 )
2590 2590 return l
2591 2591
2592 2592 def _afterlock(self, callback):
2593 2593 """add a callback to be run when the repository is fully unlocked
2594 2594
2595 2595 The callback will be executed when the outermost lock is released
2596 2596 (with wlock being higher level than 'lock')."""
2597 2597 for ref in (self._wlockref, self._lockref):
2598 2598 l = ref and ref()
2599 2599 if l and l.held:
2600 2600 l.postrelease.append(callback)
2601 2601 break
2602 2602         else:  # no lock has been found.
2603 2603 callback()
2604 2604
2605 2605 def lock(self, wait=True):
2606 2606 '''Lock the repository store (.hg/store) and return a weak reference
2607 2607 to the lock. Use this before modifying the store (e.g. committing or
2608 2608         stripping). If you are opening a transaction, get a lock as well.
2609 2609
2610 2610         If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2611 2611 'wlock' first to avoid a dead-lock hazard.'''
2612 2612 l = self._currentlock(self._lockref)
2613 2613 if l is not None:
2614 2614 l.lock()
2615 2615 return l
2616 2616
2617 2617 l = self._lock(
2618 2618 vfs=self.svfs,
2619 2619 lockname=b"lock",
2620 2620 wait=wait,
2621 2621 releasefn=None,
2622 2622 acquirefn=self.invalidate,
2623 2623 desc=_(b'repository %s') % self.origroot,
2624 2624 )
2625 2625 self._lockref = weakref.ref(l)
2626 2626 return l
2627 2627
2628 2628 def _wlockchecktransaction(self):
2629 2629 if self.currenttransaction() is not None:
2630 2630 raise error.LockInheritanceContractViolation(
2631 2631 b'wlock cannot be inherited in the middle of a transaction'
2632 2632 )
2633 2633
2634 2634 def wlock(self, wait=True):
2635 2635 '''Lock the non-store parts of the repository (everything under
2636 2636 .hg except .hg/store) and return a weak reference to the lock.
2637 2637
2638 2638 Use this before modifying files in .hg.
2639 2639
2640 2640         If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2641 2641 'wlock' first to avoid a dead-lock hazard.'''
2642 2642 l = self._wlockref and self._wlockref()
2643 2643 if l is not None and l.held:
2644 2644 l.lock()
2645 2645 return l
2646 2646
2647 2647         # We do not need to check for non-waiting lock acquisition. Such an
2648 2648         # acquisition would not cause a dead-lock, as it would just fail.
2649 2649 if wait and (
2650 2650 self.ui.configbool(b'devel', b'all-warnings')
2651 2651 or self.ui.configbool(b'devel', b'check-locks')
2652 2652 ):
2653 2653 if self._currentlock(self._lockref) is not None:
2654 2654 self.ui.develwarn(b'"wlock" acquired after "lock"')
2655 2655
2656 2656 def unlock():
2657 2657 if self.dirstate.pendingparentchange():
2658 2658 self.dirstate.invalidate()
2659 2659 else:
2660 2660 self.dirstate.write(None)
2661 2661
2662 2662 self._filecache[b'dirstate'].refresh()
2663 2663
2664 2664 l = self._lock(
2665 2665 self.vfs,
2666 2666 b"wlock",
2667 2667 wait,
2668 2668 unlock,
2669 2669 self.invalidatedirstate,
2670 2670 _(b'working directory of %s') % self.origroot,
2671 2671 inheritchecker=self._wlockchecktransaction,
2672 2672 parentenvvar=b'HG_WLOCK_LOCKER',
2673 2673 )
2674 2674 self._wlockref = weakref.ref(l)
2675 2675 return l
2676 2676
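    # When both locks are needed, the correct acquisition order is the
    # one used by commit() below:
    #
    #   with repo.wlock(), repo.lock():
    #       ...  # mutate both the working copy and the store
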
2677 2677 def _currentlock(self, lockref):
2678 2678 """Returns the lock if it's held, or None if it's not."""
2679 2679 if lockref is None:
2680 2680 return None
2681 2681 l = lockref()
2682 2682 if l is None or not l.held:
2683 2683 return None
2684 2684 return l
2685 2685
2686 2686 def currentwlock(self):
2687 2687 """Returns the wlock if it's held, or None if it's not."""
2688 2688 return self._currentlock(self._wlockref)
2689 2689
2690 2690 def _filecommit(
2691 2691 self,
2692 2692 fctx,
2693 2693 manifest1,
2694 2694 manifest2,
2695 2695 linkrev,
2696 2696 tr,
2697 2697 changelist,
2698 2698 includecopymeta,
2699 2699 ):
2700 2700 """
2701 2701 commit an individual file as part of a larger transaction
2702 2702 """
2703 2703
2704 2704 fname = fctx.path()
2705 2705 fparent1 = manifest1.get(fname, nullid)
2706 2706 fparent2 = manifest2.get(fname, nullid)
2707 2707 if isinstance(fctx, context.filectx):
2708 2708 node = fctx.filenode()
2709 2709 if node in [fparent1, fparent2]:
2710 2710 self.ui.debug(b'reusing %s filelog entry\n' % fname)
2711 2711 if (
2712 2712 fparent1 != nullid
2713 2713 and manifest1.flags(fname) != fctx.flags()
2714 2714 ) or (
2715 2715 fparent2 != nullid
2716 2716 and manifest2.flags(fname) != fctx.flags()
2717 2717 ):
2718 2718 changelist.append(fname)
2719 2719 return node
2720 2720
2721 2721 flog = self.file(fname)
2722 2722 meta = {}
2723 2723 cfname = fctx.copysource()
2724 2724 if cfname and cfname != fname:
2725 2725 # Mark the new revision of this file as a copy of another
2726 2726 # file. This copy data will effectively act as a parent
2727 2727 # of this new revision. If this is a merge, the first
2728 2728 # parent will be the nullid (meaning "look up the copy data")
2729 2729 # and the second one will be the other parent. For example:
2730 2730 #
2731 2731 # 0 --- 1 --- 3 rev1 changes file foo
2732 2732 # \ / rev2 renames foo to bar and changes it
2733 2733 # \- 2 -/ rev3 should have bar with all changes and
2734 2734 # should record that bar descends from
2735 2735 # bar in rev2 and foo in rev1
2736 2736 #
2737 2737 # this allows this merge to succeed:
2738 2738 #
2739 2739 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
2740 2740 # \ / merging rev3 and rev4 should use bar@rev2
2741 2741 # \- 2 --- 4 as the merge base
2742 2742 #
2743 2743
2744 2744 cnode = manifest1.get(cfname)
2745 2745 newfparent = fparent2
2746 2746
2747 2747 if manifest2: # branch merge
2748 2748 if fparent2 == nullid or cnode is None: # copied on remote side
2749 2749 if cfname in manifest2:
2750 2750 cnode = manifest2[cfname]
2751 2751 newfparent = fparent1
2752 2752
2753 2753 # Here, we used to search backwards through history to try to find
2754 2754 # where the file copy came from if the source of a copy was not in
2755 2755 # the parent directory. However, this doesn't actually make sense to
2756 2756 # do (what does a copy from something not in your working copy even
2757 2757 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
2758 2758 # the user that copy information was dropped, so if they didn't
2759 2759 # expect this outcome it can be fixed, but this is the correct
2760 2760 # behavior in this circumstance.
2761 2761
2762 2762 if cnode:
2763 2763 self.ui.debug(
2764 2764 b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))
2765 2765 )
2766 2766 if includecopymeta:
2767 2767 meta[b"copy"] = cfname
2768 2768 meta[b"copyrev"] = hex(cnode)
2769 2769 fparent1, fparent2 = nullid, newfparent
2770 2770 else:
2771 2771 self.ui.warn(
2772 2772 _(
2773 2773 b"warning: can't find ancestor for '%s' "
2774 2774 b"copied from '%s'!\n"
2775 2775 )
2776 2776 % (fname, cfname)
2777 2777 )
2778 2778
2779 2779 elif fparent1 == nullid:
2780 2780 fparent1, fparent2 = fparent2, nullid
2781 2781 elif fparent2 != nullid:
2782 2782 # is one parent an ancestor of the other?
2783 2783 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2784 2784 if fparent1 in fparentancestors:
2785 2785 fparent1, fparent2 = fparent2, nullid
2786 2786 elif fparent2 in fparentancestors:
2787 2787 fparent2 = nullid
2788 2788
2789 2789 # is the file changed?
2790 2790 text = fctx.data()
2791 2791 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
2792 2792 changelist.append(fname)
2793 2793 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2794 2794 # are just the flags changed during merge?
2795 2795 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2796 2796 changelist.append(fname)
2797 2797
2798 2798 return fparent1
2799 2799
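    # Illustrative example of the copy metadata recorded above: after
    # `hg cp foo bar && hg commit`, the new filelog revision of `bar`
    # carries meta {b'copy': b'foo', b'copyrev': <hex filenode>} and a
    # null first parent, meaning "look up the copy data".
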
2800 2800 def checkcommitpatterns(self, wctx, match, status, fail):
2801 2801 """check for commit arguments that aren't committable"""
2802 2802 if match.isexact() or match.prefix():
2803 2803 matched = set(status.modified + status.added + status.removed)
2804 2804
2805 2805 for f in match.files():
2806 2806 f = self.dirstate.normalize(f)
2807 2807 if f == b'.' or f in matched or f in wctx.substate:
2808 2808 continue
2809 2809 if f in status.deleted:
2810 2810 fail(f, _(b'file not found!'))
2811 2811 # Is it a directory that exists or used to exist?
2812 2812 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2813 2813 d = f + b'/'
2814 2814 for mf in matched:
2815 2815 if mf.startswith(d):
2816 2816 break
2817 2817 else:
2818 2818 fail(f, _(b"no match under directory!"))
2819 2819 elif f not in self.dirstate:
2820 2820 fail(f, _(b"file not tracked!"))
2821 2821
2822 2822 @unfilteredmethod
2823 2823 def commit(
2824 2824 self,
2825 2825 text=b"",
2826 2826 user=None,
2827 2827 date=None,
2828 2828 match=None,
2829 2829 force=False,
2830 2830 editor=False,
2831 2831 extra=None,
2832 2832 ):
2833 2833 """Add a new revision to current repository.
2834 2834
2835 2835 Revision information is gathered from the working directory,
2836 2836 match can be used to filter the committed files. If editor is
2837 2837 supplied, it is called to get a commit message.
2838 2838 """
2839 2839 if extra is None:
2840 2840 extra = {}
2841 2841
2842 2842 def fail(f, msg):
2843 2843 raise error.Abort(b'%s: %s' % (f, msg))
2844 2844
2845 2845 if not match:
2846 2846 match = matchmod.always()
2847 2847
2848 2848 if not force:
2849 2849 match.bad = fail
2850 2850
2851 2851 # lock() for recent changelog (see issue4368)
2852 2852 with self.wlock(), self.lock():
2853 2853 wctx = self[None]
2854 2854 merge = len(wctx.parents()) > 1
2855 2855
2856 2856 if not force and merge and not match.always():
2857 2857 raise error.Abort(
2858 2858 _(
2859 2859 b'cannot partially commit a merge '
2860 2860 b'(do not specify files or patterns)'
2861 2861 )
2862 2862 )
2863 2863
2864 2864 status = self.status(match=match, clean=force)
2865 2865 if force:
2866 2866 status.modified.extend(
2867 2867 status.clean
2868 2868 ) # mq may commit clean files
2869 2869
2870 2870 # check subrepos
2871 2871 subs, commitsubs, newstate = subrepoutil.precommit(
2872 2872 self.ui, wctx, status, match, force=force
2873 2873 )
2874 2874
2875 2875 # make sure all explicit patterns are matched
2876 2876 if not force:
2877 2877 self.checkcommitpatterns(wctx, match, status, fail)
2878 2878
2879 2879 cctx = context.workingcommitctx(
2880 2880 self, status, text, user, date, extra
2881 2881 )
2882 2882
2883 2883 # internal config: ui.allowemptycommit
2884 2884 allowemptycommit = (
2885 2885 wctx.branch() != wctx.p1().branch()
2886 2886 or extra.get(b'close')
2887 2887 or merge
2888 2888 or cctx.files()
2889 2889 or self.ui.configbool(b'ui', b'allowemptycommit')
2890 2890 )
2891 2891 if not allowemptycommit:
2892 2892 return None
2893 2893
2894 2894 if merge and cctx.deleted():
2895 2895 raise error.Abort(_(b"cannot commit merge with missing files"))
2896 2896
2897 2897 ms = mergemod.mergestate.read(self)
2898 2898 mergeutil.checkunresolved(ms)
2899 2899
2900 2900 if editor:
2901 2901 cctx._text = editor(self, cctx, subs)
2902 2902 edited = text != cctx._text
2903 2903
2904 2904 # Save commit message in case this transaction gets rolled back
2905 2905 # (e.g. by a pretxncommit hook). Leave the content alone on
2906 2906 # the assumption that the user will use the same editor again.
2907 2907 msgfn = self.savecommitmessage(cctx._text)
2908 2908
2909 2909 # commit subs and write new state
2910 2910 if subs:
2911 2911 uipathfn = scmutil.getuipathfn(self)
2912 2912 for s in sorted(commitsubs):
2913 2913 sub = wctx.sub(s)
2914 2914 self.ui.status(
2915 2915 _(b'committing subrepository %s\n')
2916 2916 % uipathfn(subrepoutil.subrelpath(sub))
2917 2917 )
2918 2918 sr = sub.commit(cctx._text, user, date)
2919 2919 newstate[s] = (newstate[s][0], sr)
2920 2920 subrepoutil.writestate(self, newstate)
2921 2921
2922 2922 p1, p2 = self.dirstate.parents()
2923 2923 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
2924 2924 try:
2925 2925 self.hook(
2926 2926 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
2927 2927 )
2928 2928 with self.transaction(b'commit'):
2929 2929 ret = self.commitctx(cctx, True)
2930 2930 # update bookmarks, dirstate and mergestate
2931 2931 bookmarks.update(self, [p1, p2], ret)
2932 2932 cctx.markcommitted(ret)
2933 2933 ms.reset()
2934 2934 except: # re-raises
2935 2935 if edited:
2936 2936 self.ui.write(
2937 2937 _(b'note: commit message saved in %s\n') % msgfn
2938 2938 )
2939 2939 raise
2940 2940
2941 2941 def commithook():
2942 2942             # hack for commands that use a temporary commit (eg: histedit):
2943 2943             # the temporary commit may have been stripped before the hook runs
2944 2944 if self.changelog.hasnode(ret):
2945 2945 self.hook(
2946 2946 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
2947 2947 )
2948 2948
2949 2949 self._afterlock(commithook)
2950 2950 return ret
2951 2951
2952 2952 @unfilteredmethod
2953 2953 def commitctx(self, ctx, error=False, origctx=None):
2954 2954 """Add a new revision to current repository.
2955 2955 Revision information is passed via the context argument.
2956 2956
2957 2957 ctx.files() should list all files involved in this commit, i.e.
2958 2958 modified/added/removed files. On merge, it may be wider than the
2959 2959 ctx.files() to be committed, since any file nodes derived directly
2960 2960 from p1 or p2 are excluded from the committed ctx.files().
2961 2961
2962 2962 origctx is for convert to work around the problem that bug
2963 2963 fixes to the files list in changesets change hashes. For
2964 2964 convert to be the identity, it can pass an origctx and this
2965 2965 function will use the same files list when it makes sense to
2966 2966 do so.
2967 2967 """
2968 2968
2969 2969 p1, p2 = ctx.p1(), ctx.p2()
2970 2970 user = ctx.user()
2971 2971
2972 2972 if self.filecopiesmode == b'changeset-sidedata':
2973 2973 writechangesetcopy = True
2974 2974 writefilecopymeta = True
2975 2975 writecopiesto = None
2976 2976 else:
2977 2977 writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
2978 2978 writefilecopymeta = writecopiesto != b'changeset-only'
2979 2979 writechangesetcopy = writecopiesto in (
2980 2980 b'changeset-only',
2981 2981 b'compatibility',
2982 2982 )
2983 2983 p1copies, p2copies = None, None
2984 2984 if writechangesetcopy:
2985 2985 p1copies = ctx.p1copies()
2986 2986 p2copies = ctx.p2copies()
2987 2987 filesadded, filesremoved = None, None
2988 2988 with self.lock(), self.transaction(b"commit") as tr:
2989 2989 trp = weakref.proxy(tr)
2990 2990
2991 2991 if ctx.manifestnode():
2992 2992 # reuse an existing manifest revision
2993 2993 self.ui.debug(b'reusing known manifest\n')
2994 2994 mn = ctx.manifestnode()
2995 2995 files = ctx.files()
2996 2996 if writechangesetcopy:
2997 2997 filesadded = ctx.filesadded()
2998 2998 filesremoved = ctx.filesremoved()
2999 2999 elif ctx.files():
3000 3000 m1ctx = p1.manifestctx()
3001 3001 m2ctx = p2.manifestctx()
3002 3002 mctx = m1ctx.copy()
3003 3003
3004 3004 m = mctx.read()
3005 3005 m1 = m1ctx.read()
3006 3006 m2 = m2ctx.read()
3007 3007
3008 3008 # check in files
3009 3009 added = []
3010 3010 changed = []
3011 3011 removed = list(ctx.removed())
3012 3012 linkrev = len(self)
3013 3013 self.ui.note(_(b"committing files:\n"))
3014 3014 uipathfn = scmutil.getuipathfn(self)
3015 3015 for f in sorted(ctx.modified() + ctx.added()):
3016 3016 self.ui.note(uipathfn(f) + b"\n")
3017 3017 try:
3018 3018 fctx = ctx[f]
3019 3019 if fctx is None:
3020 3020 removed.append(f)
3021 3021 else:
3022 3022 added.append(f)
3023 3023 m[f] = self._filecommit(
3024 3024 fctx,
3025 3025 m1,
3026 3026 m2,
3027 3027 linkrev,
3028 3028 trp,
3029 3029 changed,
3030 3030 writefilecopymeta,
3031 3031 )
3032 3032 m.setflag(f, fctx.flags())
3033 3033 except OSError:
3034 3034 self.ui.warn(
3035 3035 _(b"trouble committing %s!\n") % uipathfn(f)
3036 3036 )
3037 3037 raise
3038 3038 except IOError as inst:
3039 3039 errcode = getattr(inst, 'errno', errno.ENOENT)
3040 3040 if error or errcode and errcode != errno.ENOENT:
3041 3041 self.ui.warn(
3042 3042 _(b"trouble committing %s!\n") % uipathfn(f)
3043 3043 )
3044 3044 raise
3045 3045
3046 3046 # update manifest
3047 3047 removed = [f for f in removed if f in m1 or f in m2]
3048 3048 drop = sorted([f for f in removed if f in m])
3049 3049 for f in drop:
3050 3050 del m[f]
3051 3051 if p2.rev() != nullrev:
3052 3052
3053 3053 @util.cachefunc
3054 3054 def mas():
3055 3055 p1n = p1.node()
3056 3056 p2n = p2.node()
3057 3057 cahs = self.changelog.commonancestorsheads(p1n, p2n)
3058 3058 if not cahs:
3059 3059 cahs = [nullrev]
3060 3060 return [self[r].manifest() for r in cahs]
3061 3061
3062 3062 def deletionfromparent(f):
3063 3063 # When a file is removed relative to p1 in a merge, this
3064 3064 # function determines whether the absence is due to a
3065 3065 # deletion from a parent, or whether the merge commit
3066 3066 # itself deletes the file. We decide this by doing a
3067 3067 # simplified three way merge of the manifest entry for
3068 3068 # the file. There are two ways we decide the merge
3069 3069 # itself didn't delete a file:
3070 3070 # - neither parent (nor the merge) contain the file
3071 3071 # - exactly one parent contains the file, and that
3072 3072 # parent has the same filelog entry as the merge
3073 3073 # ancestor (or all of them if there are two). In other
3074 3074 # words, that parent left the file unchanged while the
3075 3075 # other one deleted it.
3076 3076 # One way to think about this is that deleting a file is
3077 3077 # similar to emptying it, so the list of changed files
3078 3078 # should be similar either way. The computation
3079 3079 # described above is not done directly in _filecommit
3080 3080 # when creating the list of changed files, however
3081 3081 # it does something very similar by comparing filelog
3082 3082 # nodes.
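# An illustrative scenario (not part of the original comment): if
# "f" is present in m1 with the same filelog node as in every merge
# ancestor, and absent from m2, then the deletion happened on the
# p2 side rather than in this merge, so "f" is filtered out of
# "removed" below.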
3083 3083 if f in m1:
3084 3084 return f not in m2 and all(
3085 3085 f in ma and ma.find(f) == m1.find(f)
3086 3086 for ma in mas()
3087 3087 )
3088 3088 elif f in m2:
3089 3089 return all(
3090 3090 f in ma and ma.find(f) == m2.find(f)
3091 3091 for ma in mas()
3092 3092 )
3093 3093 else:
3094 3094 return True
3095 3095
3096 3096 removed = [f for f in removed if not deletionfromparent(f)]
3097 3097
3098 3098 files = changed + removed
3099 3099 md = None
3100 3100 if not files:
3101 3101 # if no "files" actually changed in terms of the changelog,
3102 3102 # try hard to detect an unmodified manifest entry so that the
3103 3103 # exact same commit can be reproduced later by convert.
3104 3104 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
3105 3105 if not files and md:
3106 3106 self.ui.debug(
3107 3107 b'not reusing manifest (no file change in '
3108 3108 b'changelog, but manifest differs)\n'
3109 3109 )
3110 3110 if files or md:
3111 3111 self.ui.note(_(b"committing manifest\n"))
3112 3112 # we're using narrowmatch here since it's already applied at
3113 3113 # other stages (such as dirstate.walk), so we're already
3114 3114 # ignoring things outside of narrowspec in most cases. The
3115 3115 # one case where we might have files outside the narrowspec
3116 3116 # at this point is merges, and we already error out in the
3117 3117 # case where the merge has files outside of the narrowspec,
3118 3118 # so this is safe.
3119 3119 mn = mctx.write(
3120 3120 trp,
3121 3121 linkrev,
3122 3122 p1.manifestnode(),
3123 3123 p2.manifestnode(),
3124 3124 added,
3125 3125 drop,
3126 3126 match=self.narrowmatch(),
3127 3127 )
3128 3128
3129 3129 if writechangesetcopy:
3130 3130 filesadded = [
3131 3131 f for f in changed if not (f in m1 or f in m2)
3132 3132 ]
3133 3133 filesremoved = removed
3134 3134 else:
3135 3135 self.ui.debug(
3136 3136 b'reusing manifest from p1 (listed files '
3137 3137 b'actually unchanged)\n'
3138 3138 )
3139 3139 mn = p1.manifestnode()
3140 3140 else:
3141 3141 self.ui.debug(b'reusing manifest from p1 (no file change)\n')
3142 3142 mn = p1.manifestnode()
3143 3143 files = []
3144 3144
3145 3145 if writecopiesto == b'changeset-only':
3146 3146 # If writing only to changeset extras, use None to indicate that
3147 3147 # no entry should be written. If writing to both, write an empty
3148 3148 # entry to prevent the reader from falling back to reading
3149 3149 # filelogs.
3150 3150 p1copies = p1copies or None
3151 3151 p2copies = p2copies or None
3152 3152 filesadded = filesadded or None
3153 3153 filesremoved = filesremoved or None
3154 3154
3155 3155 if origctx and origctx.manifestnode() == mn:
3156 3156 files = origctx.files()
3157 3157
3158 3158 # update changelog
3159 3159 self.ui.note(_(b"committing changelog\n"))
3160 3160 self.changelog.delayupdate(tr)
3161 3161 n = self.changelog.add(
3162 3162 mn,
3163 3163 files,
3164 3164 ctx.description(),
3165 3165 trp,
3166 3166 p1.node(),
3167 3167 p2.node(),
3168 3168 user,
3169 3169 ctx.date(),
3170 3170 ctx.extra().copy(),
3171 3171 p1copies,
3172 3172 p2copies,
3173 3173 filesadded,
3174 3174 filesremoved,
3175 3175 )
3176 3176 xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
3177 3177 self.hook(
3178 3178 b'pretxncommit',
3179 3179 throw=True,
3180 3180 node=hex(n),
3181 3181 parent1=xp1,
3182 3182 parent2=xp2,
3183 3183 )
3184 3184 # set the new commit in its proper phase
3185 3185 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
3186 3186 if targetphase:
3187 3187 # retracting the boundary does not alter parent changesets.
3188 3188 # if a parent has a higher phase, the resulting phase will
3189 3189 # be compliant anyway
3190 3190 #
3191 3191 # if minimal phase was 0 we don't need to retract anything
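# (illustrative example: committing with a draft target phase on
# top of a secret parent leaves the new commit secret, since a
# changeset's phase is never lower than that of its parents)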
3192 3192 phases.registernew(self, tr, targetphase, [n])
3193 3193 return n
3194 3194
3195 3195 @unfilteredmethod
3196 3196 def destroying(self):
3197 3197 '''Inform the repository that nodes are about to be destroyed.
3198 3198 Intended for use by strip and rollback, so there's a common
3199 3199 place for anything that has to be done before destroying history.
3200 3200
3201 3201 This is mostly useful for saving state that is in memory and waiting
3202 3202 to be flushed when the current lock is released. Because a call to
3203 3203 destroyed is imminent, the repo will be invalidated causing those
3204 3204 changes to stay in memory (waiting for the next unlock), or vanish
3205 3205 completely.
3206 3206 '''
3207 3207 # When using the same lock to commit and strip, the phasecache is left
3208 3208 # dirty after committing. Then when we strip, the repo is invalidated,
3209 3209 # causing those changes to disappear.
3210 3210 if '_phasecache' in vars(self):
3211 3211 self._phasecache.write()
3212 3212
3213 3213 @unfilteredmethod
3214 3214 def destroyed(self):
3215 3215 '''Inform the repository that nodes have been destroyed.
3216 3216 Intended for use by strip and rollback, so there's a common
3217 3217 place for anything that has to be done after destroying history.
3218 3218 '''
3219 3219 # When one tries to:
3220 3220 # 1) destroy nodes thus calling this method (e.g. strip)
3221 3221 # 2) use phasecache somewhere (e.g. commit)
3222 3222 #
3223 3223 # then 2) will fail because the phasecache contains nodes that were
3224 3224 # removed. We can either remove phasecache from the filecache,
3225 3225 # causing it to reload next time it is accessed, or simply filter
3226 3226 # the removed nodes now and write the updated cache.
3227 3227 self._phasecache.filterunknown(self)
3228 3228 self._phasecache.write()
3229 3229
3230 3230 # refresh all repository caches
3231 3231 self.updatecaches()
3232 3232
3233 3233 # Ensure the persistent tag cache is updated. Doing it now
3234 3234 # means that the tag cache only has to worry about destroyed
3235 3235 # heads immediately after a strip/rollback. That in turn
3236 3236 # guarantees that "cachetip == currenttip" (comparing both rev
3237 3237 # and node) always means no nodes have been added or destroyed.
3238 3238
3239 3239 # XXX this is suboptimal when qrefresh'ing: we strip the current
3240 3240 # head, refresh the tag cache, then immediately add a new head.
3241 3241 # But I think doing it this way is necessary for the "instant
3242 3242 # tag cache retrieval" case to work.
3243 3243 self.invalidate()
3244 3244
3245 3245 def status(
3246 3246 self,
3247 3247 node1=b'.',
3248 3248 node2=None,
3249 3249 match=None,
3250 3250 ignored=False,
3251 3251 clean=False,
3252 3252 unknown=False,
3253 3253 listsubrepos=False,
3254 3254 ):
3255 3255 '''a convenience method that calls node1.status(node2)'''
3256 3256 return self[node1].status(
3257 3257 node2, match, ignored, clean, unknown, listsubrepos
3258 3258 )
3259 3259
3260 3260 def addpostdsstatus(self, ps):
3261 3261 """Add a callback to run within the wlock, at the point at which status
3262 3262 fixups happen.
3263 3263
3264 3264 On status completion, callback(wctx, status) will be called with the
3265 3265 wlock held, unless the dirstate has changed from underneath or the wlock
3266 3266 couldn't be grabbed.
3267 3267
3268 3268 Callbacks should not capture and use a cached copy of the dirstate --
3269 3269 it might change in the meantime. Instead, they should access the
3270 3270 dirstate via wctx.repo().dirstate.
3271 3271
3272 3272 This list is emptied out after each status run -- extensions should
3273 3273 make sure they add to this list each time dirstate.status is called.
3274 3274 Extensions should also make sure they don't call this for statuses
3275 3275 that don't involve the dirstate.
3276 3276 """
3277 3277
3278 3278 # The list is located here for uniqueness reasons -- it is actually
3279 3279 # managed by the workingctx, but that isn't unique per-repo.
3280 3280 self._postdsstatus.append(ps)
3281 3281
3282 3282 def postdsstatus(self):
3283 3283 """Used by workingctx to get the list of post-dirstate-status hooks."""
3284 3284 return self._postdsstatus
3285 3285
3286 3286 def clearpostdsstatus(self):
3287 3287 """Used by workingctx to clear post-dirstate-status hooks."""
3288 3288 del self._postdsstatus[:]
3289 3289
3290 3290 def heads(self, start=None):
3291 3291 if start is None:
3292 3292 cl = self.changelog
3293 3293 headrevs = reversed(cl.headrevs())
3294 3294 return [cl.node(rev) for rev in headrevs]
3295 3295
3296 3296 heads = self.changelog.heads(start)
3297 3297 # sort the output in rev descending order
3298 3298 return sorted(heads, key=self.changelog.rev, reverse=True)
3299 3299
3300 3300 def branchheads(self, branch=None, start=None, closed=False):
3301 3301 '''return a (possibly filtered) list of heads for the given branch
3302 3302
3303 3303 Heads are returned in topological order, from newest to oldest.
3304 3304 If branch is None, use the dirstate branch.
3305 3305 If start is not None, return only heads reachable from start.
3306 3306 If closed is True, return heads that are marked as closed as well.
3307 3307 '''
3308 3308 if branch is None:
3309 3309 branch = self[None].branch()
3310 3310 branches = self.branchmap()
3311 3311 if not branches.hasbranch(branch):
3312 3312 return []
3313 3313 # the cache returns heads ordered lowest to highest
3314 3314 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3315 3315 if start is not None:
3316 3316 # filter out the heads that cannot be reached from startrev
3317 3317 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3318 3318 bheads = [h for h in bheads if h in fbheads]
3319 3319 return bheads
3320 3320
3321 3321 def branches(self, nodes):
3322 3322 if not nodes:
3323 3323 nodes = [self.changelog.tip()]
3324 3324 b = []
3325 3325 for n in nodes:
3326 3326 t = n
3327 3327 while True:
3328 3328 p = self.changelog.parents(n)
3329 3329 if p[1] != nullid or p[0] == nullid:
3330 3330 b.append((t, n, p[0], p[1]))
3331 3331 break
3332 3332 n = p[0]
3333 3333 return b
3334 3334
3335 3335 def between(self, pairs):
3336 3336 r = []
3337 3337
3338 3338 for top, bottom in pairs:
3339 3339 n, l, i = top, [], 0
3340 3340 f = 1
3341 3341
3342 3342 while n != bottom and n != nullid:
3343 3343 p = self.changelog.parents(n)[0]
3344 3344 if i == f:
3345 3345 l.append(n)
3346 3346 f = f * 2
3347 3347 n = p
3348 3348 i += 1
3349 3349
3350 3350 r.append(l)
3351 3351
3352 3352 return r
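# A rough sketch of the sampling between() performs (illustrative,
# alignment approximate): walking first parents from top toward
# bottom, it records the nodes at distances 1, 2, 4, 8, ... (appended
# when i == f, with f doubling each time), so for a linear history
# one pair yields roughly:
#
#   distance from top:  0  1  2  3  4  5  6  7  8
#   recorded:              *  *     *           *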
3353 3353
3354 3354 def checkpush(self, pushop):
3355 3355 """Extensions can override this function if additional checks have
3356 3356 to be performed before pushing, or call it if they override the push
3357 3357 command.
3358 3358 """
3359 3359
3360 3360 @unfilteredpropertycache
3361 3361 def prepushoutgoinghooks(self):
3362 3362 """Return util.hooks consists of a pushop with repo, remote, outgoing
3363 3363 methods, which are called before pushing changesets.
3364 3364 """
3365 3365 return util.hooks()
3366 3366
3367 3367 def pushkey(self, namespace, key, old, new):
3368 3368 try:
3369 3369 tr = self.currenttransaction()
3370 3370 hookargs = {}
3371 3371 if tr is not None:
3372 3372 hookargs.update(tr.hookargs)
3373 3373 hookargs = pycompat.strkwargs(hookargs)
3374 3374 hookargs['namespace'] = namespace
3375 3375 hookargs['key'] = key
3376 3376 hookargs['old'] = old
3377 3377 hookargs['new'] = new
3378 3378 self.hook(b'prepushkey', throw=True, **hookargs)
3379 3379 except error.HookAbort as exc:
3380 3380 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3381 3381 if exc.hint:
3382 3382 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3383 3383 return False
3384 3384 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3385 3385 ret = pushkey.push(self, namespace, key, old, new)
3386 3386
3387 3387 def runhook():
3388 3388 self.hook(
3389 3389 b'pushkey',
3390 3390 namespace=namespace,
3391 3391 key=key,
3392 3392 old=old,
3393 3393 new=new,
3394 3394 ret=ret,
3395 3395 )
3396 3396
3397 3397 self._afterlock(runhook)
3398 3398 return ret
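# Illustrative hgrc wiring for the hooks fired above (the shell
# commands are examples, not part of this module); hook keyword
# arguments are exposed to shell hooks as HG_* environment variables:
#
#     [hooks]
#     # forbid pushing bookmark changes
#     prepushkey = test "$HG_NAMESPACE" != "bookmarks"
#     pushkey = echo "pushed key $HG_KEY in $HG_NAMESPACE"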
3399 3399
3400 3400 def listkeys(self, namespace):
3401 3401 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3402 3402 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3403 3403 values = pushkey.list(self, namespace)
3404 3404 self.hook(b'listkeys', namespace=namespace, values=values)
3405 3405 return values
3406 3406
3407 3407 def debugwireargs(self, one, two, three=None, four=None, five=None):
3408 3408 '''used to test argument passing over the wire'''
3409 3409 return b"%s %s %s %s %s" % (
3410 3410 one,
3411 3411 two,
3412 3412 pycompat.bytestr(three),
3413 3413 pycompat.bytestr(four),
3414 3414 pycompat.bytestr(five),
3415 3415 )
3416 3416
3417 3417 def savecommitmessage(self, text):
3418 3418 fp = self.vfs(b'last-message.txt', b'wb')
3419 3419 try:
3420 3420 fp.write(text)
3421 3421 finally:
3422 3422 fp.close()
3423 3423 return self.pathto(fp.name[len(self.root) + 1 :])
3424 3424
3425 3425
3426 3426 # used to avoid circular references so destructors work
3427 3427 def aftertrans(files):
3428 3428 renamefiles = [tuple(t) for t in files]
3429 3429
3430 3430 def a():
3431 3431 for vfs, src, dest in renamefiles:
3432 3432 # if src and dest refer to the same file, vfs.rename is a no-op,
3433 3433 # leaving both src and dest on disk. delete dest to make sure
3434 3434 # the rename couldn't be such a no-op.
3435 3435 vfs.tryunlink(dest)
3436 3436 try:
3437 3437 vfs.rename(src, dest)
3438 3438 except OSError: # journal file does not yet exist
3439 3439 pass
3440 3440
3441 3441 return a
3442 3442
3443 3443
3444 3444 def undoname(fn):
3445 3445 base, name = os.path.split(fn)
3446 3446 assert name.startswith(b'journal')
3447 3447 return os.path.join(base, name.replace(b'journal', b'undo', 1))
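# Illustrative examples (hypothetical paths):
#   undoname(b'.hg/store/journal')     -> b'.hg/store/undo'
#   undoname(b'.hg/journal.dirstate')  -> b'.hg/undo.dirstate'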
3448 3448
3449 3449
3450 3450 def instance(ui, path, create, intents=None, createopts=None):
3451 3451 localpath = util.urllocalpath(path)
3452 3452 if create:
3453 3453 createrepository(ui, localpath, createopts=createopts)
3454 3454
3455 3455 return makelocalrepository(ui, localpath, intents=intents)
3456 3456
3457 3457
3458 3458 def islocal(path):
3459 3459 return True
3460 3460
3461 3461
3462 3462 def defaultcreateopts(ui, createopts=None):
3463 3463 """Populate the default creation options for a repository.
3464 3464
3465 3465 A dictionary of explicitly requested creation options can be passed
3466 3466 in. Missing keys will be populated.
3467 3467 """
3468 3468 createopts = dict(createopts or {})
3469 3469
3470 3470 if b'backend' not in createopts:
3471 3471 # experimental config: storage.new-repo-backend
3472 3472 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3473 3473
3474 3474 return createopts
3475 3475
3476 3476
3477 3477 def newreporequirements(ui, createopts):
3478 3478 """Determine the set of requirements for a new local repository.
3479 3479
3480 3480 Extensions can wrap this function to specify custom requirements for
3481 3481 new repositories.
3482 3482 """
3483 3483 # If the repo is being created from a shared repository, we copy
3484 3484 # its requirements.
3485 3485 if b'sharedrepo' in createopts:
3486 3486 requirements = set(createopts[b'sharedrepo'].requirements)
3487 3487 if createopts.get(b'sharedrelative'):
3488 3488 requirements.add(b'relshared')
3489 3489 else:
3490 3490 requirements.add(b'shared')
3491 3491
3492 3492 return requirements
3493 3493
3494 3494 if b'backend' not in createopts:
3495 3495 raise error.ProgrammingError(
3496 3496 b'backend key not present in createopts; '
3497 3497 b'was defaultcreateopts() called?'
3498 3498 )
3499 3499
3500 3500 if createopts[b'backend'] != b'revlogv1':
3501 3501 raise error.Abort(
3502 3502 _(
3503 3503 b'unable to determine repository requirements for '
3504 3504 b'storage backend: %s'
3505 3505 )
3506 3506 % createopts[b'backend']
3507 3507 )
3508 3508
3509 3509 requirements = {b'revlogv1'}
3510 3510 if ui.configbool(b'format', b'usestore'):
3511 3511 requirements.add(b'store')
3512 3512 if ui.configbool(b'format', b'usefncache'):
3513 3513 requirements.add(b'fncache')
3514 3514 if ui.configbool(b'format', b'dotencode'):
3515 3515 requirements.add(b'dotencode')
3516 3516
3517 3517 compengine = ui.config(b'format', b'revlog-compression')
3518 3518 if compengine not in util.compengines:
3519 3519 raise error.Abort(
3520 3520 _(
3521 3521 b'compression engine %s defined by '
3522 3522 b'format.revlog-compression not available'
3523 3523 )
3524 3524 % compengine,
3525 3525 hint=_(
3526 3526 b'run "hg debuginstall" to list available '
3527 3527 b'compression engines'
3528 3528 ),
3529 3529 )
3530 3530
3531 3531 # zlib is the historical default and doesn't need an explicit requirement.
3532 3532 elif compengine == b'zstd':
3533 3533 requirements.add(b'revlog-compression-zstd')
3534 3534 elif compengine != b'zlib':
3535 3535 requirements.add(b'exp-compression-%s' % compengine)
3536 3536
3537 3537 if scmutil.gdinitconfig(ui):
3538 3538 requirements.add(b'generaldelta')
3539 3539 if ui.configbool(b'format', b'sparse-revlog'):
3540 3540 requirements.add(SPARSEREVLOG_REQUIREMENT)
3541 3541
3542 3542 # experimental config: format.exp-use-side-data
3543 3543 if ui.configbool(b'format', b'exp-use-side-data'):
3544 3544 requirements.add(SIDEDATA_REQUIREMENT)
3545 3545 # experimental config: format.exp-use-copies-side-data-changeset
3546 3546 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3547 3547 requirements.add(SIDEDATA_REQUIREMENT)
3548 3548 requirements.add(COPIESSDC_REQUIREMENT)
3549 3549 if ui.configbool(b'experimental', b'treemanifest'):
3550 3550 requirements.add(b'treemanifest')
3551 3551
3552 3552 revlogv2 = ui.config(b'experimental', b'revlogv2')
3553 3553 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3554 3554 requirements.remove(b'revlogv1')
3555 3555 # generaldelta is implied by revlogv2.
3556 3556 requirements.discard(b'generaldelta')
3557 3557 requirements.add(REVLOGV2_REQUIREMENT)
3558 3558 # experimental config: format.internal-phase
3559 3559 if ui.configbool(b'format', b'internal-phase'):
3560 3560 requirements.add(b'internal-phase')
3561 3561
3562 3562 if createopts.get(b'narrowfiles'):
3563 3563 requirements.add(repository.NARROW_REQUIREMENT)
3564 3564
3565 3565 if createopts.get(b'lfs'):
3566 3566 requirements.add(b'lfs')
3567 3567
3568 3568 if ui.configbool(b'format', b'bookmarks-in-store'):
3569 3569 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3570 3570
3571 3571 return requirements
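# For example (illustrative, assuming stock defaults of this era), a
# plain `hg init` typically yields a requirements set like:
#
#   {b'revlogv1', b'store', b'fncache', b'dotencode',
#    b'generaldelta', b'sparserevlog'}
#
# and setting format.revlog-compression to zstd would additionally
# add b'revlog-compression-zstd'.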
3572 3572
3573 3573
3574 3574 def filterknowncreateopts(ui, createopts):
3575 3575 """Filters a dict of repo creation options against options that are known.
3576 3576
3577 3577 Receives a dict of repo creation options and returns a dict of those
3578 3578 options that we don't know how to handle.
3579 3579
3580 3580 This function is called as part of repository creation. If the
3581 3581 returned dict contains any items, repository creation will not
3582 3582 be allowed, as it means there was a request to create a repository
3583 3583 with options not recognized by loaded code.
3584 3584
3585 3585 Extensions can wrap this function to filter out creation options
3586 3586 they know how to handle.
3587 3587 """
3588 3588 known = {
3589 3589 b'backend',
3590 3590 b'lfs',
3591 3591 b'narrowfiles',
3592 3592 b'sharedrepo',
3593 3593 b'sharedrelative',
3594 3594 b'shareditems',
3595 3595 b'shallowfilestore',
3596 3596 }
3597 3597
3598 3598 return {k: v for k, v in createopts.items() if k not in known}
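# A minimal sketch of how an extension might wrap this function to
# accept a custom creation option (the option name and module layout
# are hypothetical):
#
#     from mercurial import extensions, localrepo
#
#     def _filtercreateopts(orig, ui, createopts):
#         unknown = orig(ui, createopts)
#         # this extension knows how to handle this option
#         unknown.pop(b'exampleext-opt', None)
#         return unknown
#
#     def uisetup(ui):
#         extensions.wrapfunction(
#             localrepo, 'filterknowncreateopts', _filtercreateopts
#         )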
3599 3599
3600 3600
3601 3601 def createrepository(ui, path, createopts=None):
3602 3602 """Create a new repository in a vfs.
3603 3603
3604 3604 ``path`` path to the new repo's working directory.
3605 3605 ``createopts`` options for the new repository.
3606 3606
3607 3607 The following keys for ``createopts`` are recognized:
3608 3608
3609 3609 backend
3610 3610 The storage backend to use.
3611 3611 lfs
3612 3612 Repository will be created with ``lfs`` requirement. The lfs extension
3613 3613 will automatically be loaded when the repository is accessed.
3614 3614 narrowfiles
3615 3615 Set up repository to support narrow file storage.
3616 3616 sharedrepo
3617 3617 Repository object from which storage should be shared.
3618 3618 sharedrelative
3619 3619 Boolean indicating if the path to the shared repo should be
3620 3620 stored as relative. By default, the pointer to the "parent" repo
3621 3621 is stored as an absolute path.
3622 3622 shareditems
3623 3623 Set of items to share to the new repository (in addition to storage).
3624 3624 shallowfilestore
3625 3625 Indicates that storage for files should be shallow (not all ancestor
3626 3626 revisions are known).
3627 3627 """
3628 3628 createopts = defaultcreateopts(ui, createopts=createopts)
3629 3629
3630 3630 unknownopts = filterknowncreateopts(ui, createopts)
3631 3631
3632 3632 if not isinstance(unknownopts, dict):
3633 3633 raise error.ProgrammingError(
3634 3634 b'filterknowncreateopts() did not return a dict'
3635 3635 )
3636 3636
3637 3637 if unknownopts:
3638 3638 raise error.Abort(
3639 3639 _(
3640 3640 b'unable to create repository because of unknown '
3641 3641 b'creation option: %s'
3642 3642 )
3643 3643 % b', '.join(sorted(unknownopts)),
3644 3644 hint=_(b'is a required extension not loaded?'),
3645 3645 )
3646 3646
3647 3647 requirements = newreporequirements(ui, createopts=createopts)
3648 3648
3649 3649 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3650 3650
3651 3651 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3652 3652 if hgvfs.exists():
3653 3653 raise error.RepoError(_(b'repository %s already exists') % path)
3654 3654
3655 3655 if b'sharedrepo' in createopts:
3656 3656 sharedpath = createopts[b'sharedrepo'].sharedpath
3657 3657
3658 3658 if createopts.get(b'sharedrelative'):
3659 3659 try:
3660 3660 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3661 3661 except (IOError, ValueError) as e:
3662 3662 # ValueError is raised on Windows if the drive letters differ
3663 3663 # on each path.
3664 3664 raise error.Abort(
3665 3665 _(b'cannot calculate relative path'),
3666 3666 hint=stringutil.forcebytestr(e),
3667 3667 )
3668 3668
3669 3669 if not wdirvfs.exists():
3670 3670 wdirvfs.makedirs()
3671 3671
3672 3672 hgvfs.makedir(notindexed=True)
3673 3673 if b'sharedrepo' not in createopts:
3674 3674 hgvfs.mkdir(b'cache')
3675 3675 hgvfs.mkdir(b'wcache')
3676 3676
3677 3677 if b'store' in requirements and b'sharedrepo' not in createopts:
3678 3678 hgvfs.mkdir(b'store')
3679 3679
3680 3680 # We create an invalid changelog outside the store so very old
3681 3681 # Mercurial versions (which didn't know about the requirements
3682 3682 # file) encounter an error on reading the changelog. This
3683 3683 # effectively locks out old clients and prevents them from
3684 3684 # mucking with a repo in an unknown format.
3685 3685 #
3686 3686 # The revlog header has version 2, which won't be recognized by
3687 3687 # such old clients.
3688 3688 hgvfs.append(
3689 3689 b'00changelog.i',
3690 3690 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3691 3691 b'layout',
3692 3692 )
3693 3693
3694 3694 scmutil.writerequires(hgvfs, requirements)
3695 3695
3696 3696 # Write out file telling readers where to find the shared store.
3697 3697 if b'sharedrepo' in createopts:
3698 3698 hgvfs.write(b'sharedpath', sharedpath)
3699 3699
3700 3700 if createopts.get(b'shareditems'):
3701 3701 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3702 3702 hgvfs.write(b'shared', shared)
3703 3703
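# A minimal usage sketch (the path and options are illustrative):
#
#     from mercurial import localrepo, ui as uimod
#
#     ui = uimod.ui.load()
#     localrepo.createrepository(ui, b'/tmp/newrepo',
#                                createopts={b'lfs': True})
#     repo = localrepo.makelocalrepository(ui, b'/tmp/newrepo')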
3704 3704
3705 3705 def poisonrepository(repo):
3706 3706 """Poison a repository instance so it can no longer be used."""
3707 3707 # Perform any cleanup on the instance.
3708 3708 repo.close()
3709 3709
3710 3710 # Our strategy is to replace the type of the object with one that
3711 3711 # has all attribute lookups result in error.
3712 3712 #
3713 3713 # But we have to allow the close() method because some constructors
3714 3714 # of repos call close() on repo references.
3715 3715 class poisonedrepository(object):
3716 3716 def __getattribute__(self, item):
3717 3717 if item == 'close':
3718 3718 return object.__getattribute__(self, item)
3719 3719
3720 3720 raise error.ProgrammingError(
3721 3721 b'repo instances should not be used after unshare'
3722 3722 )
3723 3723
3724 3724 def close(self):
3725 3725 pass
3726 3726
3727 3727 # We may have a repoview, which intercepts __setattr__. So be sure
3728 3728 # we operate at the lowest level possible.
3729 3729 object.__setattr__(repo, '__class__', poisonedrepository)
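# Sketch of the observable effect (illustrative):
#
#     poisonrepository(repo)
#     repo.close()      # still permitted
#     repo.changelog    # raises error.ProgrammingError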
@@ -1,59 +1,58 b''
1 1 ===================================
2 2 Test repository filtering avoidance
3 3 ===================================
4 4
5 5 This test file is a bit special as it does not check a feature, but performance-related internal code paths.
6 6
7 7 Right now, filtering a repository comes with a cost that might be significant.
8 8 Until this gets better, there are various operations that try hard not to trigger
9 9 a filtering computation. This test file makes sure we don't reintroduce code that triggers the filtering for these operations:
10 10
11 11 Setup
12 12 -----
13 13 $ hg init test-repo
14 14 $ cd test-repo
15 15 $ echo "some line" > z
16 16 $ echo a > a
17 17 $ hg commit -Am a
18 18 adding a
19 19 adding z
20 20 $ echo "in a" >> z
21 21 $ echo b > b
22 22 $ hg commit -Am b
23 23 adding b
24 24 $ echo "file" >> z
25 25 $ echo c > c
26 26 $ hg commit -Am c
27 27 adding c
28 28 $ hg rm a
29 29 $ echo c1 > c
30 30 $ hg add c
31 31 c already tracked!
32 32 $ echo d > d
33 33 $ hg add d
34 34 $ rm b
35 35
36 36 $ cat << EOF >> $HGRCPATH
37 37 > [devel]
38 38 > debug.repo-filters = yes
39 39 > [ui]
40 40 > debug = yes
41 41 > EOF
42 42
43 43
44 44 tests
45 45 -----
46 46
47 47 Getting the node of `null`
48 48
49 49 $ hg init test-repo
50 50 $ cd test-repo
51 51 $ hg log -r null -T "{node}\n"
52 52 0000000000000000000000000000000000000000
53 53
54 54 Getting basic changeset information about `null`
55 55
56 56 $ hg log -r null -T "{node}\n{date}\n"
57 debug.filters: computing revision filter for "visible"
58 57 0000000000000000000000000000000000000000
59 58 0.00