repo: remove the last few "pass" statements in localrepo.__getitem__...
Martin von Zweigbergk
r40099:f84d7ed3 default
@@ -1,3026 +1,3020
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 )
26 26 from . import (
27 27 bookmarks,
28 28 branchmap,
29 29 bundle2,
30 30 changegroup,
31 31 changelog,
32 32 color,
33 33 context,
34 34 dirstate,
35 35 dirstateguard,
36 36 discovery,
37 37 encoding,
38 38 error,
39 39 exchange,
40 40 extensions,
41 41 filelog,
42 42 hook,
43 43 lock as lockmod,
44 44 manifest,
45 45 match as matchmod,
46 46 merge as mergemod,
47 47 mergeutil,
48 48 namespaces,
49 49 narrowspec,
50 50 obsolete,
51 51 pathutil,
52 52 phases,
53 53 pushkey,
54 54 pycompat,
55 55 repository,
56 56 repoview,
57 57 revset,
58 58 revsetlang,
59 59 scmutil,
60 60 sparse,
61 61 store as storemod,
62 62 subrepoutil,
63 63 tags as tagsmod,
64 64 transaction,
65 65 txnutil,
66 66 util,
67 67 vfs as vfsmod,
68 68 )
69 69 from .utils import (
70 70 interfaceutil,
71 71 procutil,
72 72 stringutil,
73 73 )
74 74
75 75 from .revlogutils import (
76 76 constants as revlogconst,
77 77 )
78 78
79 79 release = lockmod.release
80 80 urlerr = util.urlerr
81 81 urlreq = util.urlreq
82 82
83 83 # set of (path, vfs-location) tuples. vfs-location is:
84 84 # - 'plain' for vfs relative paths
85 85 # - '' for svfs relative paths
86 86 _cachedfiles = set()
87 87
88 88 class _basefilecache(scmutil.filecache):
89 89 """All filecache usage on a repo is done for logic that should be unfiltered
90 90 """
91 91 def __get__(self, repo, type=None):
92 92 if repo is None:
93 93 return self
94 94 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
95 95 def __set__(self, repo, value):
96 96 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
97 97 def __delete__(self, repo):
98 98 return super(_basefilecache, self).__delete__(repo.unfiltered())
99 99
100 100 class repofilecache(_basefilecache):
101 101 """filecache for files in .hg but outside of .hg/store"""
102 102 def __init__(self, *paths):
103 103 super(repofilecache, self).__init__(*paths)
104 104 for path in paths:
105 105 _cachedfiles.add((path, 'plain'))
106 106
107 107 def join(self, obj, fname):
108 108 return obj.vfs.join(fname)
109 109
110 110 class storecache(_basefilecache):
111 111 """filecache for files in the store"""
112 112 def __init__(self, *paths):
113 113 super(storecache, self).__init__(*paths)
114 114 for path in paths:
115 115 _cachedfiles.add((path, ''))
116 116
117 117 def join(self, obj, fname):
118 118 return obj.sjoin(fname)
119 119
120 120 def isfilecached(repo, name):
121 121 """check if a repo has already cached the "name" filecache-ed property
122 122
123 123 This returns (cachedobj-or-None, iscached) tuple.
124 124 """
125 125 cacheentry = repo.unfiltered()._filecache.get(name, None)
126 126 if not cacheentry:
127 127 return None, False
128 128 return cacheentry.obj, True
129 129
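# Illustrative sketch (hypothetical usage, not part of this changeset):
# probing the filecache without forcing a load. 'dirstate' is one of the
# repofilecache-ed properties defined later in this module; ``repo`` is
# assumed to be an existing localrepository instance.
#
#     obj, cached = isfilecached(repo, 'dirstate')
#     if cached:
#         obj.invalidate()  # operate only on the already-loaded object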
130 130 class unfilteredpropertycache(util.propertycache):
131 131 """propertycache that applies to the unfiltered repo only"""
132 132
133 133 def __get__(self, repo, type=None):
134 134 unfi = repo.unfiltered()
135 135 if unfi is repo:
136 136 return super(unfilteredpropertycache, self).__get__(unfi)
137 137 return getattr(unfi, self.name)
138 138
139 139 class filteredpropertycache(util.propertycache):
140 140 """propertycache that must take filtering into account"""
141 141
142 142 def cachevalue(self, obj, value):
143 143 object.__setattr__(obj, self.name, value)
144 144
145 145
146 146 def hasunfilteredcache(repo, name):
147 147 """check if a repo has an unfilteredpropertycache value for <name>"""
148 148 return name in vars(repo.unfiltered())
149 149
150 150 def unfilteredmethod(orig):
151 151 """decorate a method that always needs to be run on the unfiltered version"""
152 152 def wrapper(repo, *args, **kwargs):
153 153 return orig(repo.unfiltered(), *args, **kwargs)
154 154 return wrapper
155 155
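# Illustrative sketch: applying the decorator above. ``revbranchcache``
# later in this class uses it; any method that must see all revisions
# regardless of repoview filtering follows the same pattern:
#
#     @unfilteredmethod
#     def somemethod(self):    # hypothetical method name
#         ...                  # here self is repo.unfiltered()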
156 156 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
157 157 'unbundle'}
158 158 legacycaps = moderncaps.union({'changegroupsubset'})
159 159
160 160 @interfaceutil.implementer(repository.ipeercommandexecutor)
161 161 class localcommandexecutor(object):
162 162 def __init__(self, peer):
163 163 self._peer = peer
164 164 self._sent = False
165 165 self._closed = False
166 166
167 167 def __enter__(self):
168 168 return self
169 169
170 170 def __exit__(self, exctype, excvalue, exctb):
171 171 self.close()
172 172
173 173 def callcommand(self, command, args):
174 174 if self._sent:
175 175 raise error.ProgrammingError('callcommand() cannot be used after '
176 176 'sendcommands()')
177 177
178 178 if self._closed:
179 179 raise error.ProgrammingError('callcommand() cannot be used after '
180 180 'close()')
181 181
182 182 # We don't need to support anything fancy. Just call the named
183 183 # method on the peer and return a resolved future.
184 184 fn = getattr(self._peer, pycompat.sysstr(command))
185 185
186 186 f = pycompat.futures.Future()
187 187
188 188 try:
189 189 result = fn(**pycompat.strkwargs(args))
190 190 except Exception:
191 191 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
192 192 else:
193 193 f.set_result(result)
194 194
195 195 return f
196 196
197 197 def sendcommands(self):
198 198 self._sent = True
199 199
200 200 def close(self):
201 201 self._closed = True
202 202
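# Illustrative sketch (hypothetical usage): driving the executor defined
# above through the peer API. ``commandexecutor()`` on localpeer below
# returns one of these; callcommand() hands back an already-resolved
# future for local peers.
#
#     with peer.commandexecutor() as e:
#         f = e.callcommand(b'lookup', {b'key': b'tip'})
#     node = f.result()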
203 203 @interfaceutil.implementer(repository.ipeercommands)
204 204 class localpeer(repository.peer):
205 205 '''peer for a local repo; reflects only the most recent API'''
206 206
207 207 def __init__(self, repo, caps=None):
208 208 super(localpeer, self).__init__()
209 209
210 210 if caps is None:
211 211 caps = moderncaps.copy()
212 212 self._repo = repo.filtered('served')
213 213 self.ui = repo.ui
214 214 self._caps = repo._restrictcapabilities(caps)
215 215
216 216 # Begin of _basepeer interface.
217 217
218 218 def url(self):
219 219 return self._repo.url()
220 220
221 221 def local(self):
222 222 return self._repo
223 223
224 224 def peer(self):
225 225 return self
226 226
227 227 def canpush(self):
228 228 return True
229 229
230 230 def close(self):
231 231 self._repo.close()
232 232
233 233 # End of _basepeer interface.
234 234
235 235 # Begin of _basewirecommands interface.
236 236
237 237 def branchmap(self):
238 238 return self._repo.branchmap()
239 239
240 240 def capabilities(self):
241 241 return self._caps
242 242
243 243 def clonebundles(self):
244 244 return self._repo.tryread('clonebundles.manifest')
245 245
246 246 def debugwireargs(self, one, two, three=None, four=None, five=None):
247 247 """Used to test argument passing over the wire"""
248 248 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
249 249 pycompat.bytestr(four),
250 250 pycompat.bytestr(five))
251 251
252 252 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
253 253 **kwargs):
254 254 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
255 255 common=common, bundlecaps=bundlecaps,
256 256 **kwargs)[1]
257 257 cb = util.chunkbuffer(chunks)
258 258
259 259 if exchange.bundle2requested(bundlecaps):
260 260 # When requesting a bundle2, getbundle returns a stream to make the
261 261 # wire level function happier. We need to build a proper object
262 262 # from it in local peer.
263 263 return bundle2.getunbundler(self.ui, cb)
264 264 else:
265 265 return changegroup.getunbundler('01', cb, None)
266 266
267 267 def heads(self):
268 268 return self._repo.heads()
269 269
270 270 def known(self, nodes):
271 271 return self._repo.known(nodes)
272 272
273 273 def listkeys(self, namespace):
274 274 return self._repo.listkeys(namespace)
275 275
276 276 def lookup(self, key):
277 277 return self._repo.lookup(key)
278 278
279 279 def pushkey(self, namespace, key, old, new):
280 280 return self._repo.pushkey(namespace, key, old, new)
281 281
282 282 def stream_out(self):
283 283 raise error.Abort(_('cannot perform stream clone against local '
284 284 'peer'))
285 285
286 286 def unbundle(self, bundle, heads, url):
287 287 """apply a bundle on a repo
288 288
289 289 This function handles the repo locking itself."""
290 290 try:
291 291 try:
292 292 bundle = exchange.readbundle(self.ui, bundle, None)
293 293 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
294 294 if util.safehasattr(ret, 'getchunks'):
295 295 # This is a bundle20 object, turn it into an unbundler.
296 296 # This little dance should be dropped eventually when the
297 297 # API is finally improved.
298 298 stream = util.chunkbuffer(ret.getchunks())
299 299 ret = bundle2.getunbundler(self.ui, stream)
300 300 return ret
301 301 except Exception as exc:
302 302 # If the exception contains output salvaged from a bundle2
303 303 # reply, we need to make sure it is printed before continuing
304 304 # to fail. So we build a bundle2 with such output and consume
305 305 # it directly.
306 306 #
307 307 # This is not very elegant but allows a "simple" solution for
308 308 # issue4594
309 309 output = getattr(exc, '_bundle2salvagedoutput', ())
310 310 if output:
311 311 bundler = bundle2.bundle20(self._repo.ui)
312 312 for out in output:
313 313 bundler.addpart(out)
314 314 stream = util.chunkbuffer(bundler.getchunks())
315 315 b = bundle2.getunbundler(self.ui, stream)
316 316 bundle2.processbundle(self._repo, b)
317 317 raise
318 318 except error.PushRaced as exc:
319 319 raise error.ResponseError(_('push failed:'),
320 320 stringutil.forcebytestr(exc))
321 321
322 322 # End of _basewirecommands interface.
323 323
324 324 # Begin of peer interface.
325 325
326 326 def commandexecutor(self):
327 327 return localcommandexecutor(self)
328 328
329 329 # End of peer interface.
330 330
331 331 @interfaceutil.implementer(repository.ipeerlegacycommands)
332 332 class locallegacypeer(localpeer):
333 333 '''peer extension which implements legacy methods too; used for tests with
334 334 restricted capabilities'''
335 335
336 336 def __init__(self, repo):
337 337 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
338 338
339 339 # Begin of baselegacywirecommands interface.
340 340
341 341 def between(self, pairs):
342 342 return self._repo.between(pairs)
343 343
344 344 def branches(self, nodes):
345 345 return self._repo.branches(nodes)
346 346
347 347 def changegroup(self, nodes, source):
348 348 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
349 349 missingheads=self._repo.heads())
350 350 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
351 351
352 352 def changegroupsubset(self, bases, heads, source):
353 353 outgoing = discovery.outgoing(self._repo, missingroots=bases,
354 354 missingheads=heads)
355 355 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
356 356
357 357 # End of baselegacywirecommands interface.
358 358
359 359 # Increment the sub-version when the revlog v2 format changes to lock out old
360 360 # clients.
361 361 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
362 362
363 363 # A repository with the sparserevlog feature will have delta chains that
364 364 # can spread over a larger span. Sparse reading cuts these large spans into
365 365 # pieces, so that each piece isn't too big.
366 366 # Without the sparserevlog capability, reading from the repository could use
367 367 # huge amounts of memory, because the whole span would be read at once,
368 368 # including all the intermediate revisions that aren't pertinent for the chain.
369 369 # This is why once a repository has enabled sparse-read, it becomes required.
370 370 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
371 371
372 372 # Functions receiving (ui, features) that extensions can register to impact
373 373 # the ability to load repositories with custom requirements. Only
374 374 # functions defined in loaded extensions are called.
375 375 #
376 376 # The function receives a set of requirement strings that the repository
377 377 # is capable of opening. Functions will typically add elements to the
378 378 # set to reflect that the extension knows how to handle those requirements.
379 379 featuresetupfuncs = set()
380 380
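# Illustrative sketch (hypothetical extension code): registering a
# featuresetup function so repositories carrying a custom requirement can
# be opened while the extension is loaded:
#
#     from mercurial import localrepo
#
#     def featuresetup(ui, supported):
#         supported.add(b'exp-myfeature')    # hypothetical requirement
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)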
381 381 def makelocalrepository(baseui, path, intents=None):
382 382 """Create a local repository object.
383 383
384 384 Given arguments needed to construct a local repository, this function
385 385 performs various early repository loading functionality (such as
386 386 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
387 387 the repository can be opened, derives a type suitable for representing
388 388 that repository, and returns an instance of it.
389 389
390 390 The returned object conforms to the ``repository.completelocalrepository``
391 391 interface.
392 392
393 393 The repository type is derived by calling a series of factory functions
394 394 for each aspect/interface of the final repository. These are defined by
395 395 ``REPO_INTERFACES``.
396 396
397 397 Each factory function is called to produce a type implementing a specific
398 398 interface. The cumulative list of returned types will be combined into a
399 399 new type and that type will be instantiated to represent the local
400 400 repository.
401 401
402 402 The factory functions each receive various state that may be consulted
403 403 as part of deriving a type.
404 404
405 405 Extensions should wrap these factory functions to customize repository type
406 406 creation. Note that an extension's wrapped function may be called even if
407 407 that extension is not loaded for the repo being constructed. Extensions
408 408 should check if their ``__name__`` appears in the
409 409 ``extensionmodulenames`` set passed to the factory function and no-op if
410 410 not.
411 411 """
412 412 ui = baseui.copy()
413 413 # Prevent copying repo configuration.
414 414 ui.copy = baseui.copy
415 415
416 416 # Working directory VFS rooted at repository root.
417 417 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
418 418
419 419 # Main VFS for .hg/ directory.
420 420 hgpath = wdirvfs.join(b'.hg')
421 421 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
422 422
423 423 # The .hg/ path should exist and should be a directory. All other
424 424 # cases are errors.
425 425 if not hgvfs.isdir():
426 426 try:
427 427 hgvfs.stat()
428 428 except OSError as e:
429 429 if e.errno != errno.ENOENT:
430 430 raise
431 431
432 432 raise error.RepoError(_(b'repository %s not found') % path)
433 433
434 434 # .hg/requires file contains a newline-delimited list of
435 435 # features/capabilities the opener (us) must have in order to use
436 436 # the repository. This file was introduced in Mercurial 0.9.2,
437 437 # which means very old repositories may not have one. We assume
438 438 # a missing file translates to no requirements.
439 439 try:
440 440 requirements = set(hgvfs.read(b'requires').splitlines())
441 441 except IOError as e:
442 442 if e.errno != errno.ENOENT:
443 443 raise
444 444 requirements = set()
445 445
446 446 # The .hg/hgrc file may load extensions or contain config options
447 447 # that influence repository construction. Attempt to load it and
448 448 # process any new extensions that it may have pulled in.
449 449 try:
450 450 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
451 451 # Run this before extensions.loadall() so extensions can be
452 452 # automatically enabled.
453 453 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
454 454 except IOError:
455 455 pass
456 456 else:
457 457 extensions.loadall(ui)
458 458
459 459 # Set of module names of extensions loaded for this repository.
460 460 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
461 461
462 462 supportedrequirements = gathersupportedrequirements(ui)
463 463
464 464 # We first validate the requirements are known.
465 465 ensurerequirementsrecognized(requirements, supportedrequirements)
466 466
467 467 # Then we validate that the known set is reasonable to use together.
468 468 ensurerequirementscompatible(ui, requirements)
469 469
470 470 # TODO there are unhandled edge cases related to opening repositories with
471 471 # shared storage. If storage is shared, we should also test for requirements
472 472 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
473 473 # that repo, as that repo may load extensions needed to open it. This is a
474 474 # bit complicated because we don't want the other hgrc to overwrite settings
475 475 # in this hgrc.
476 476 #
477 477 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
478 478 # file when sharing repos. But if a requirement is added after the share is
479 479 # performed, thereby introducing a new requirement for the opener, we
480 480 # will not see that and could encounter a run-time error interacting with
481 481 # that shared store since it has an unknown-to-us requirement.
482 482
483 483 # At this point, we know we should be capable of opening the repository.
484 484 # Now get on with doing that.
485 485
486 486 features = set()
487 487
488 488 # The "store" part of the repository holds versioned data. How it is
489 489 # accessed is determined by various requirements. The ``shared`` or
490 490 # ``relshared`` requirements indicate the store lives in the path contained
491 491 # in the ``.hg/sharedpath`` file. This is an absolute path for
492 492 # ``shared`` and relative to ``.hg/`` for ``relshared``.
493 493 if b'shared' in requirements or b'relshared' in requirements:
494 494 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
495 495 if b'relshared' in requirements:
496 496 sharedpath = hgvfs.join(sharedpath)
497 497
498 498 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
499 499
500 500 if not sharedvfs.exists():
501 501 raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
502 502 b'directory %s') % sharedvfs.base)
503 503
504 504 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
505 505
506 506 storebasepath = sharedvfs.base
507 507 cachepath = sharedvfs.join(b'cache')
508 508 else:
509 509 storebasepath = hgvfs.base
510 510 cachepath = hgvfs.join(b'cache')
511 511
512 512 # The store has changed over time and the exact layout is dictated by
513 513 # requirements. The store interface abstracts differences across all
514 514 # of them.
515 515 store = makestore(requirements, storebasepath,
516 516 lambda base: vfsmod.vfs(base, cacheaudited=True))
517 517 hgvfs.createmode = store.createmode
518 518
519 519 storevfs = store.vfs
520 520 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
521 521
522 522 # The cache vfs is used to manage cache files.
523 523 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
524 524 cachevfs.createmode = store.createmode
525 525
526 526 # Now resolve the type for the repository object. We do this by repeatedly
527 527 # calling a factory function to produce types for specific aspects of the
528 528 # repo's operation. The aggregate returned types are used as base classes
529 529 # for a dynamically-derived type, which will represent our new repository.
530 530
531 531 bases = []
532 532 extrastate = {}
533 533
534 534 for iface, fn in REPO_INTERFACES:
535 535 # We pass all potentially useful state to give extensions tons of
536 536 # flexibility.
537 537 typ = fn()(ui=ui,
538 538 intents=intents,
539 539 requirements=requirements,
540 540 features=features,
541 541 wdirvfs=wdirvfs,
542 542 hgvfs=hgvfs,
543 543 store=store,
544 544 storevfs=storevfs,
545 545 storeoptions=storevfs.options,
546 546 cachevfs=cachevfs,
547 547 extensionmodulenames=extensionmodulenames,
548 548 extrastate=extrastate,
549 549 baseclasses=bases)
550 550
551 551 if not isinstance(typ, type):
552 552 raise error.ProgrammingError('unable to construct type for %s' %
553 553 iface)
554 554
555 555 bases.append(typ)
556 556
557 557 # type() allows you to use characters in type names that wouldn't be
558 558 # recognized as Python symbols in source code. We abuse that to add
559 559 # rich information about our constructed repo.
560 560 name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
561 561 wdirvfs.base,
562 562 b','.join(sorted(requirements))))
563 563
564 564 cls = type(name, tuple(bases), {})
565 565
566 566 return cls(
567 567 baseui=baseui,
568 568 ui=ui,
569 569 origroot=path,
570 570 wdirvfs=wdirvfs,
571 571 hgvfs=hgvfs,
572 572 requirements=requirements,
573 573 supportedrequirements=supportedrequirements,
574 574 sharedpath=storebasepath,
575 575 store=store,
576 576 cachevfs=cachevfs,
577 577 features=features,
578 578 intents=intents)
579 579
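# Illustrative sketch: reaching the function above. Most callers go
# through ``hg.repository()``, which adds extension integration on top
# (see the localrepository docstring below); the path is hypothetical.
#
#     from mercurial import hg, ui as uimod
#
#     repo = hg.repository(uimod.ui.load(), b'/path/to/repo')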
580 580 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
581 581 """Perform additional actions after .hg/hgrc is loaded.
582 582
583 583 This function is called during repository loading immediately after
584 584 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
585 585
586 586 The function can be used to validate configs, automatically add
587 587 options (including extensions) based on requirements, etc.
588 588 """
589 589
590 590 # Map of requirements to list of extensions to load automatically when
591 591 # requirement is present.
592 592 autoextensions = {
593 593 b'largefiles': [b'largefiles'],
594 594 b'lfs': [b'lfs'],
595 595 }
596 596
597 597 for requirement, names in sorted(autoextensions.items()):
598 598 if requirement not in requirements:
599 599 continue
600 600
601 601 for name in names:
602 602 if not ui.hasconfig(b'extensions', name):
603 603 ui.setconfig(b'extensions', name, b'', source='autoload')
604 604
605 605 def gathersupportedrequirements(ui):
606 606 """Determine the complete set of recognized requirements."""
607 607 # Start with all requirements supported by this file.
608 608 supported = set(localrepository._basesupported)
609 609
610 610 # Execute ``featuresetupfuncs`` entries if they belong to an extension
611 611 # relevant to this ui instance.
612 612 modules = {m.__name__ for n, m in extensions.extensions(ui)}
613 613
614 614 for fn in featuresetupfuncs:
615 615 if fn.__module__ in modules:
616 616 fn(ui, supported)
617 617
618 618 # Add derived requirements from registered compression engines.
619 619 for name in util.compengines:
620 620 engine = util.compengines[name]
621 621 if engine.revlogheader():
622 622 supported.add(b'exp-compression-%s' % name)
623 623
624 624 return supported
625 625
626 626 def ensurerequirementsrecognized(requirements, supported):
627 627 """Validate that a set of local requirements is recognized.
628 628
629 629 Receives a set of requirements. Raises an ``error.RepoError`` if there
630 630 exists any requirement in that set that currently loaded code doesn't
631 631 recognize.
632 632
633 633 Returns a set of supported requirements.
634 634 """
635 635 missing = set()
636 636
637 637 for requirement in requirements:
638 638 if requirement in supported:
639 639 continue
640 640
641 641 if not requirement or not requirement[0:1].isalnum():
642 642 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
643 643
644 644 missing.add(requirement)
645 645
646 646 if missing:
647 647 raise error.RequirementError(
648 648 _(b'repository requires features unknown to this Mercurial: %s') %
649 649 b' '.join(sorted(missing)),
650 650 hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
651 651 b'for more information'))
652 652
653 653 def ensurerequirementscompatible(ui, requirements):
654 654 """Validates that a set of recognized requirements is mutually compatible.
655 655
656 656 Some requirements may not be compatible with others or require
657 657 config options that aren't enabled. This function is called during
658 658 repository opening to ensure that the set of requirements needed
659 659 to open a repository is sane and compatible with config options.
660 660
661 661 Extensions can monkeypatch this function to perform additional
662 662 checking.
663 663
664 664 ``error.RepoError`` should be raised on failure.
665 665 """
666 666 if b'exp-sparse' in requirements and not sparse.enabled:
667 667 raise error.RepoError(_(b'repository is using sparse feature but '
668 668 b'sparse is not enabled; enable the '
669 669 b'"sparse" extension to access'))
670 670
671 671 def makestore(requirements, path, vfstype):
672 672 """Construct a storage object for a repository."""
673 673 if b'store' in requirements:
674 674 if b'fncache' in requirements:
675 675 return storemod.fncachestore(path, vfstype,
676 676 b'dotencode' in requirements)
677 677
678 678 return storemod.encodedstore(path, vfstype)
679 679
680 680 return storemod.basicstore(path, vfstype)
681 681
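# Illustrative sketch of the dispatch above, keyed on the requirements
# present:
#
#     {'store', 'fncache', 'dotencode'} -> fncachestore (dotencode=True)
#     {'store', 'fncache'}              -> fncachestore (dotencode=False)
#     {'store'}                         -> encodedstore
#     no 'store' (very old repos)       -> basicstore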
682 682 def resolvestorevfsoptions(ui, requirements, features):
683 683 """Resolve the options to pass to the store vfs opener.
684 684
685 685 The returned dict is used to influence behavior of the storage layer.
686 686 """
687 687 options = {}
688 688
689 689 if b'treemanifest' in requirements:
690 690 options[b'treemanifest'] = True
691 691
692 692 # experimental config: format.manifestcachesize
693 693 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
694 694 if manifestcachesize is not None:
695 695 options[b'manifestcachesize'] = manifestcachesize
696 696
697 697 # In the absence of another requirement superseding a revlog-related
698 698 # requirement, we have to assume the repo is using revlog version 0.
699 699 # This revlog format is super old and we don't bother trying to parse
700 700 # opener options for it because those options wouldn't do anything
701 701 # meaningful on such old repos.
702 702 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
703 703 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
704 704
705 705 return options
706 706
707 707 def resolverevlogstorevfsoptions(ui, requirements, features):
708 708 """Resolve opener options specific to revlogs."""
709 709
710 710 options = {}
711 711
712 712 if b'revlogv1' in requirements:
713 713 options[b'revlogv1'] = True
714 714 if REVLOGV2_REQUIREMENT in requirements:
715 715 options[b'revlogv2'] = True
716 716
717 717 if b'generaldelta' in requirements:
718 718 options[b'generaldelta'] = True
719 719
720 720 # experimental config: format.chunkcachesize
721 721 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
722 722 if chunkcachesize is not None:
723 723 options[b'chunkcachesize'] = chunkcachesize
724 724
725 725 deltabothparents = ui.configbool(b'storage',
726 726 b'revlog.optimize-delta-parent-choice')
727 727 options[b'deltabothparents'] = deltabothparents
728 728
729 729 options[b'lazydeltabase'] = not scmutil.gddeltaconfig(ui)
730 730
731 731 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
732 732 if 0 <= chainspan:
733 733 options[b'maxdeltachainspan'] = chainspan
734 734
735 735 mmapindexthreshold = ui.configbytes(b'experimental',
736 736 b'mmapindexthreshold')
737 737 if mmapindexthreshold is not None:
738 738 options[b'mmapindexthreshold'] = mmapindexthreshold
739 739
740 740 withsparseread = ui.configbool(b'experimental', b'sparse-read')
741 741 srdensitythres = float(ui.config(b'experimental',
742 742 b'sparse-read.density-threshold'))
743 743 srmingapsize = ui.configbytes(b'experimental',
744 744 b'sparse-read.min-gap-size')
745 745 options[b'with-sparse-read'] = withsparseread
746 746 options[b'sparse-read-density-threshold'] = srdensitythres
747 747 options[b'sparse-read-min-gap-size'] = srmingapsize
748 748
749 749 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
750 750 options[b'sparse-revlog'] = sparserevlog
751 751 if sparserevlog:
752 752 options[b'generaldelta'] = True
753 753
754 754 maxchainlen = None
755 755 if sparserevlog:
756 756 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
757 757 # experimental config: format.maxchainlen
758 758 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
759 759 if maxchainlen is not None:
760 760 options[b'maxchainlen'] = maxchainlen
761 761
762 762 for r in requirements:
763 763 if r.startswith(b'exp-compression-'):
764 764 options[b'compengine'] = r[len(b'exp-compression-'):]
765 765
766 766 if repository.NARROW_REQUIREMENT in requirements:
767 767 options[b'enableellipsis'] = True
768 768
769 769 return options
770 770
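# Illustrative sketch (hypothetical values): hgrc settings consumed by the
# resolution above.
#
#     [format]
#     chunkcachesize = 65536
#     maxchainlen = 1000
#     [storage]
#     revlog.optimize-delta-parent-choice = yes
#     [experimental]
#     sparse-read = yes
#     sparse-read.min-gap-size = 256KB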
771 771 def makemain(**kwargs):
772 772 """Produce a type conforming to ``ilocalrepositorymain``."""
773 773 return localrepository
774 774
775 775 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
776 776 class revlogfilestorage(object):
777 777 """File storage when using revlogs."""
778 778
779 779 def file(self, path):
780 780 if path[0] == b'/':
781 781 path = path[1:]
782 782
783 783 return filelog.filelog(self.svfs, path)
784 784
785 785 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
786 786 class revlognarrowfilestorage(object):
787 787 """File storage when using revlogs and narrow files."""
788 788
789 789 def file(self, path):
790 790 if path[0] == b'/':
791 791 path = path[1:]
792 792
793 793 return filelog.narrowfilelog(self.svfs, path, self.narrowmatch())
794 794
795 795 def makefilestorage(requirements, features, **kwargs):
796 796 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
797 797 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
798 798 features.add(repository.REPO_FEATURE_STREAM_CLONE)
799 799
800 800 if repository.NARROW_REQUIREMENT in requirements:
801 801 return revlognarrowfilestorage
802 802 else:
803 803 return revlogfilestorage
804 804
805 805 # List of repository interfaces and factory functions for them. Each
806 806 # will be called in order during ``makelocalrepository()`` to iteratively
807 807 # derive the final type for a local repository instance. We capture the
808 808 # function as a lambda so we don't hold a reference and the module-level
809 809 # functions can be wrapped.
810 810 REPO_INTERFACES = [
811 811 (repository.ilocalrepositorymain, lambda: makemain),
812 812 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
813 813 ]
814 814
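# Illustrative sketch (hypothetical extension code): wrapping one of the
# factory functions above to influence the derived repository type:
#
#     from mercurial import extensions, localrepo
#
#     def wrapfilestorage(orig, *args, **kwargs):
#         cls = orig(*args, **kwargs)
#         return cls    # or a subclass mixing in custom behavior
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'makefilestorage',
#                                 wrapfilestorage)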
815 815 @interfaceutil.implementer(repository.ilocalrepositorymain)
816 816 class localrepository(object):
817 817 """Main class for representing local repositories.
818 818
819 819 All local repositories are instances of this class.
820 820
821 821 Constructed on its own, instances of this class are not usable as
822 822 repository objects. To obtain a usable repository object, call
823 823 ``hg.repository()``, ``localrepo.instance()``, or
824 824 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
825 825 ``instance()`` adds support for creating new repositories.
826 826 ``hg.repository()`` adds more extension integration, including calling
827 827 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
828 828 used.
829 829 """
830 830
831 831 # obsolete experimental requirements:
832 832 # - manifestv2: An experimental new manifest format that allowed
833 833 # for stem compression of long paths. Experiment ended up not
834 834 # being successful (repository sizes went up due to worse delta
835 835 # chains), and the code was deleted in 4.6.
836 836 supportedformats = {
837 837 'revlogv1',
838 838 'generaldelta',
839 839 'treemanifest',
840 840 REVLOGV2_REQUIREMENT,
841 841 SPARSEREVLOG_REQUIREMENT,
842 842 }
843 843 _basesupported = supportedformats | {
844 844 'store',
845 845 'fncache',
846 846 'shared',
847 847 'relshared',
848 848 'dotencode',
849 849 'exp-sparse',
850 850 'internal-phase'
851 851 }
852 852
853 853 # list of prefixes for files which can be written without 'wlock'
854 854 # Extensions should extend this list when needed
855 855 _wlockfreeprefix = {
856 856 # We might consider requiring 'wlock' for the next
857 857 # two, but pretty much all the existing code assume
858 858 # wlock is not needed so we keep them excluded for
859 859 # now.
860 860 'hgrc',
861 861 'requires',
862 862 # XXX cache is a complicated business; someone
863 863 # should investigate this in depth at some point
864 864 'cache/',
865 865 # XXX shouldn't be dirstate covered by the wlock?
866 866 'dirstate',
867 867 # XXX bisect was still a bit too messy at the time
868 868 # this changeset was introduced. Someone should fix
869 869 # the remaining bit and drop this line
870 870 'bisect.state',
871 871 }
872 872
873 873 def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
874 874 supportedrequirements, sharedpath, store, cachevfs,
875 875 features, intents=None):
876 876 """Create a new local repository instance.
877 877
878 878 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
879 879 or ``localrepo.makelocalrepository()`` for obtaining a new repository
880 880 object.
881 881
882 882 Arguments:
883 883
884 884 baseui
885 885 ``ui.ui`` instance that ``ui`` argument was based off of.
886 886
887 887 ui
888 888 ``ui.ui`` instance for use by the repository.
889 889
890 890 origroot
891 891 ``bytes`` path to working directory root of this repository.
892 892
893 893 wdirvfs
894 894 ``vfs.vfs`` rooted at the working directory.
895 895
896 896 hgvfs
897 897 ``vfs.vfs`` rooted at .hg/
898 898
899 899 requirements
900 900 ``set`` of bytestrings representing repository opening requirements.
901 901
902 902 supportedrequirements
903 903 ``set`` of bytestrings representing repository requirements that we
904 904 know how to open. May be a superset of ``requirements``.
905 905
906 906 sharedpath
907 907 ``bytes`` Defining path to storage base directory. Points to a
908 908 ``.hg/`` directory somewhere.
909 909
910 910 store
911 911 ``store.basicstore`` (or derived) instance providing access to
912 912 versioned storage.
913 913
914 914 cachevfs
915 915 ``vfs.vfs`` used for cache files.
916 916
917 917 features
918 918 ``set`` of bytestrings defining features/capabilities of this
919 919 instance.
920 920
921 921 intents
922 922 ``set`` of system strings indicating what this repo will be used
923 923 for.
924 924 """
925 925 self.baseui = baseui
926 926 self.ui = ui
927 927 self.origroot = origroot
928 928 # vfs rooted at working directory.
929 929 self.wvfs = wdirvfs
930 930 self.root = wdirvfs.base
931 931 # vfs rooted at .hg/. Used to access most non-store paths.
932 932 self.vfs = hgvfs
933 933 self.path = hgvfs.base
934 934 self.requirements = requirements
935 935 self.supported = supportedrequirements
936 936 self.sharedpath = sharedpath
937 937 self.store = store
938 938 self.cachevfs = cachevfs
939 939 self.features = features
940 940
941 941 self.filtername = None
942 942
943 943 if (self.ui.configbool('devel', 'all-warnings') or
944 944 self.ui.configbool('devel', 'check-locks')):
945 945 self.vfs.audit = self._getvfsward(self.vfs.audit)
946 946 # A list of callbacks to shape the phase if no data were found.
947 947 # Callbacks are in the form: func(repo, roots) --> processed root.
948 948 # This list is to be filled by extensions during repo setup
949 949 self._phasedefaults = []
950 950
951 951 color.setup(self.ui)
952 952
953 953 self.spath = self.store.path
954 954 self.svfs = self.store.vfs
955 955 self.sjoin = self.store.join
956 956 if (self.ui.configbool('devel', 'all-warnings') or
957 957 self.ui.configbool('devel', 'check-locks')):
958 958 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
959 959 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
960 960 else: # standard vfs
961 961 self.svfs.audit = self._getsvfsward(self.svfs.audit)
962 962
963 963 self._dirstatevalidatewarned = False
964 964
965 965 self._branchcaches = {}
966 966 self._revbranchcache = None
967 967 self._filterpats = {}
968 968 self._datafilters = {}
969 969 self._transref = self._lockref = self._wlockref = None
970 970
971 971 # A cache for various files under .hg/ that tracks file changes
972 972 # (used by the filecache decorator)
973 973 #
974 974 # Maps a property name to its util.filecacheentry
975 975 self._filecache = {}
976 976
977 977 # hold sets of revisions to be filtered
978 978 # should be cleared when something might have changed the filter value:
979 979 # - new changesets,
980 980 # - phase change,
981 981 # - new obsolescence marker,
982 982 # - working directory parent change,
983 983 # - bookmark changes
984 984 self.filteredrevcache = {}
985 985
986 986 # post-dirstate-status hooks
987 987 self._postdsstatus = []
988 988
989 989 # generic mapping between names and nodes
990 990 self.names = namespaces.namespaces()
991 991
992 992 # Key to signature value.
993 993 self._sparsesignaturecache = {}
994 994 # Signature to cached matcher instance.
995 995 self._sparsematchercache = {}
996 996
997 997 def _getvfsward(self, origfunc):
998 998 """build a ward for self.vfs"""
999 999 rref = weakref.ref(self)
1000 1000 def checkvfs(path, mode=None):
1001 1001 ret = origfunc(path, mode=mode)
1002 1002 repo = rref()
1003 1003 if (repo is None
1004 1004 or not util.safehasattr(repo, '_wlockref')
1005 1005 or not util.safehasattr(repo, '_lockref')):
1006 1006 return
1007 1007 if mode in (None, 'r', 'rb'):
1008 1008 return
1009 1009 if path.startswith(repo.path):
1010 1010 # truncate name relative to the repository (.hg)
1011 1011 path = path[len(repo.path) + 1:]
1012 1012 if path.startswith('cache/'):
1013 1013 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
1014 1014 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
1015 1015 if path.startswith('journal.'):
1016 1016 # journal is covered by 'lock'
1017 1017 if repo._currentlock(repo._lockref) is None:
1018 1018 repo.ui.develwarn('write with no lock: "%s"' % path,
1019 1019 stacklevel=2, config='check-locks')
1020 1020 elif repo._currentlock(repo._wlockref) is None:
1021 1021 # rest of vfs files are covered by 'wlock'
1022 1022 #
1023 1023 # exclude special files
1024 1024 for prefix in self._wlockfreeprefix:
1025 1025 if path.startswith(prefix):
1026 1026 return
1027 1027 repo.ui.develwarn('write with no wlock: "%s"' % path,
1028 1028 stacklevel=2, config='check-locks')
1029 1029 return ret
1030 1030 return checkvfs
1031 1031
1032 1032 def _getsvfsward(self, origfunc):
1033 1033 """build a ward for self.svfs"""
1034 1034 rref = weakref.ref(self)
1035 1035 def checksvfs(path, mode=None):
1036 1036 ret = origfunc(path, mode=mode)
1037 1037 repo = rref()
1038 1038 if repo is None or not util.safehasattr(repo, '_lockref'):
1039 1039 return
1040 1040 if mode in (None, 'r', 'rb'):
1041 1041 return
1042 1042 if path.startswith(repo.sharedpath):
1043 1043 # truncate name relative to the repository (.hg)
1044 1044 path = path[len(repo.sharedpath) + 1:]
1045 1045 if repo._currentlock(repo._lockref) is None:
1046 1046 repo.ui.develwarn('write with no lock: "%s"' % path,
1047 1047 stacklevel=3)
1048 1048 return ret
1049 1049 return checksvfs
1050 1050
1051 1051 def close(self):
1052 1052 self._writecaches()
1053 1053
1054 1054 def _writecaches(self):
1055 1055 if self._revbranchcache:
1056 1056 self._revbranchcache.write()
1057 1057
1058 1058 def _restrictcapabilities(self, caps):
1059 1059 if self.ui.configbool('experimental', 'bundle2-advertise'):
1060 1060 caps = set(caps)
1061 1061 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
1062 1062 role='client'))
1063 1063 caps.add('bundle2=' + urlreq.quote(capsblob))
1064 1064 return caps
1065 1065
1066 1066 def _writerequirements(self):
1067 1067 scmutil.writerequires(self.vfs, self.requirements)
1068 1068
1069 1069 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
1070 1070 # self -> auditor -> self._checknested -> self
1071 1071
1072 1072 @property
1073 1073 def auditor(self):
1074 1074 # This is only used by context.workingctx.match in order to
1075 1075 # detect files in subrepos.
1076 1076 return pathutil.pathauditor(self.root, callback=self._checknested)
1077 1077
1078 1078 @property
1079 1079 def nofsauditor(self):
1080 1080 # This is only used by context.basectx.match in order to detect
1081 1081 # files in subrepos.
1082 1082 return pathutil.pathauditor(self.root, callback=self._checknested,
1083 1083 realfs=False, cached=True)
1084 1084
1085 1085 def _checknested(self, path):
1086 1086 """Determine if path is a legal nested repository."""
1087 1087 if not path.startswith(self.root):
1088 1088 return False
1089 1089 subpath = path[len(self.root) + 1:]
1090 1090 normsubpath = util.pconvert(subpath)
1091 1091
1092 1092 # XXX: Checking against the current working copy is wrong in
1093 1093 # the sense that it can reject things like
1094 1094 #
1095 1095 # $ hg cat -r 10 sub/x.txt
1096 1096 #
1097 1097 # if sub/ is no longer a subrepository in the working copy
1098 1098 # parent revision.
1099 1099 #
1100 1100 # However, it can of course also allow things that would have
1101 1101 # been rejected before, such as the above cat command if sub/
1102 1102 # is a subrepository now, but was a normal directory before.
1103 1103 # The old path auditor would have rejected by mistake since it
1104 1104 # panics when it sees sub/.hg/.
1105 1105 #
1106 1106 # All in all, checking against the working copy seems sensible
1107 1107 # since we want to prevent access to nested repositories on
1108 1108 # the filesystem *now*.
1109 1109 ctx = self[None]
1110 1110 parts = util.splitpath(subpath)
1111 1111 while parts:
1112 1112 prefix = '/'.join(parts)
1113 1113 if prefix in ctx.substate:
1114 1114 if prefix == normsubpath:
1115 1115 return True
1116 1116 else:
1117 1117 sub = ctx.sub(prefix)
1118 1118 return sub.checknested(subpath[len(prefix) + 1:])
1119 1119 else:
1120 1120 parts.pop()
1121 1121 return False
1122 1122
1123 1123 def peer(self):
1124 1124 return localpeer(self) # not cached to avoid reference cycle
1125 1125
1126 1126 def unfiltered(self):
1127 1127 """Return unfiltered version of the repository
1128 1128
1129 1129 Intended to be overwritten by filtered repo."""
1130 1130 return self
1131 1131
1132 1132 def filtered(self, name, visibilityexceptions=None):
1133 1133 """Return a filtered version of a repository"""
1134 1134 cls = repoview.newtype(self.unfiltered().__class__)
1135 1135 return cls(self, name, visibilityexceptions)
1136 1136
1137 1137 @repofilecache('bookmarks', 'bookmarks.current')
1138 1138 def _bookmarks(self):
1139 1139 return bookmarks.bmstore(self)
1140 1140
1141 1141 @property
1142 1142 def _activebookmark(self):
1143 1143 return self._bookmarks.active
1144 1144
1145 1145 # _phasesets depend on changelog. what we need is to call
1146 1146 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1147 1147 # can't be easily expressed in filecache mechanism.
1148 1148 @storecache('phaseroots', '00changelog.i')
1149 1149 def _phasecache(self):
1150 1150 return phases.phasecache(self, self._phasedefaults)
1151 1151
1152 1152 @storecache('obsstore')
1153 1153 def obsstore(self):
1154 1154 return obsolete.makestore(self.ui, self)
1155 1155
1156 1156 @storecache('00changelog.i')
1157 1157 def changelog(self):
1158 1158 return changelog.changelog(self.svfs,
1159 1159 trypending=txnutil.mayhavepending(self.root))
1160 1160
1161 1161 @storecache('00manifest.i')
1162 1162 def manifestlog(self):
1163 1163 rootstore = manifest.manifestrevlog(self.svfs)
1164 1164 return manifest.manifestlog(self.svfs, self, rootstore)
1165 1165
1166 1166 @repofilecache('dirstate')
1167 1167 def dirstate(self):
1168 1168 return self._makedirstate()
1169 1169
1170 1170 def _makedirstate(self):
1171 1171 """Extension point for wrapping the dirstate per-repo."""
1172 1172 sparsematchfn = lambda: sparse.matcher(self)
1173 1173
1174 1174 return dirstate.dirstate(self.vfs, self.ui, self.root,
1175 1175 self._dirstatevalidate, sparsematchfn)
1176 1176
1177 1177 def _dirstatevalidate(self, node):
1178 1178 try:
1179 1179 self.changelog.rev(node)
1180 1180 return node
1181 1181 except error.LookupError:
1182 1182 if not self._dirstatevalidatewarned:
1183 1183 self._dirstatevalidatewarned = True
1184 1184 self.ui.warn(_("warning: ignoring unknown"
1185 1185 " working parent %s!\n") % short(node))
1186 1186 return nullid
1187 1187
1188 1188 @storecache(narrowspec.FILENAME)
1189 1189 def narrowpats(self):
1190 1190 """matcher patterns for this repository's narrowspec
1191 1191
1192 1192 A tuple of (includes, excludes).
1193 1193 """
1194 1194 return narrowspec.load(self)
1195 1195
1196 1196 @storecache(narrowspec.FILENAME)
1197 1197 def _narrowmatch(self):
1198 1198 if repository.NARROW_REQUIREMENT not in self.requirements:
1199 1199 return matchmod.always(self.root, '')
1200 1200 include, exclude = self.narrowpats
1201 1201 return narrowspec.match(self.root, include=include, exclude=exclude)
1202 1202
1203 1203 # TODO(martinvonz): make this property-like instead?
1204 1204 def narrowmatch(self):
1205 1205 return self._narrowmatch
1206 1206
1207 1207 def setnarrowpats(self, newincludes, newexcludes):
1208 1208 narrowspec.save(self, newincludes, newexcludes)
1209 1209 self.invalidate(clearfilecache=True)
1210 1210
1211 1211 def __getitem__(self, changeid):
1212 1212 if changeid is None:
1213 1213 return context.workingctx(self)
1214 1214 if isinstance(changeid, context.basectx):
1215 1215 return changeid
1216 1216 if isinstance(changeid, slice):
1217 1217 # wdirrev isn't contiguous so the slice shouldn't include it
1218 1218 return [self[i]
1219 1219 for i in pycompat.xrange(*changeid.indices(len(self)))
1220 1220 if i not in self.changelog.filteredrevs]
1221 1221 try:
1222 1222 if isinstance(changeid, int):
1223 1223 node = self.changelog.node(changeid)
1224 1224 rev = changeid
1225 1225 return context.changectx(self, rev, node)
1226 1226 elif changeid == 'null':
1227 1227 node = nullid
1228 1228 rev = nullrev
1229 1229 return context.changectx(self, rev, node)
1230 1230 elif changeid == 'tip':
1231 1231 node = self.changelog.tip()
1232 1232 rev = self.changelog.rev(node)
1233 1233 return context.changectx(self, rev, node)
1234 1234 elif changeid == '.':
1235 1235 # this is a hack to delay/avoid loading obsmarkers
1236 1236 # when we know that '.' won't be hidden
1237 1237 node = self.dirstate.p1()
1238 1238 rev = self.unfiltered().changelog.rev(node)
1239 1239 return context.changectx(self, rev, node)
1240 1240 elif len(changeid) == 20:
1241 1241 try:
1242 1242 node = changeid
1243 1243 rev = self.changelog.rev(changeid)
1244 1244 return context.changectx(self, rev, node)
1245 1245 except error.FilteredLookupError:
1246 1246 changeid = hex(changeid) # for the error message
1247 1247 raise
1248 1248 except LookupError:
1249 1249 # check if it might have come from damaged dirstate
1250 1250 #
1251 1251 # XXX we could avoid the unfiltered if we had a recognizable
1252 1252 # exception for filtered changeset access
1253 1253 if (self.local()
1254 1254 and changeid in self.unfiltered().dirstate.parents()):
1255 1255 msg = _("working directory has unknown parent '%s'!")
1256 1256 raise error.Abort(msg % short(changeid))
1257 1257 changeid = hex(changeid) # for the error message
1258 raise
1258 1259
1259 1260 elif len(changeid) == 40:
1260 try:
1261 node = bin(changeid)
1262 rev = self.changelog.rev(node)
1263 return context.changectx(self, rev, node)
1264 except error.FilteredLookupError:
1265 raise
1266 except LookupError:
1267 pass
1261 node = bin(changeid)
1262 rev = self.changelog.rev(node)
1263 return context.changectx(self, rev, node)
1268 1264 else:
1269 1265 raise error.ProgrammingError(
1270 1266 "unsupported changeid '%s' of type %s" %
1271 1267 (changeid, type(changeid)))
1272 1268
1273 1269 except (error.FilteredIndexError, error.FilteredLookupError):
1274 1270 raise error.FilteredRepoLookupError(_("filtered revision '%s'")
1275 1271 % pycompat.bytestr(changeid))
1276 except IndexError:
1277 pass
1272 except (IndexError, LookupError):
1273 raise error.RepoLookupError(_("unknown revision '%s'") % changeid)
1278 1274 except error.WdirUnsupported:
1279 1275 return context.workingctx(self)
1280 raise error.RepoLookupError(
1281 _("unknown revision '%s'") % changeid)
1282 1276
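# Illustrative sketch: the lookup forms handled by __getitem__ above
# (``node`` is a 20-byte binary node, ``hexnode`` its 40-character hex
# form; both are assumed to exist in the repo):
#
#     repo[None]       # working directory context
#     repo['.']        # dirstate parent
#     repo['tip']      # repository tip
#     repo['null']     # the null revision
#     repo[42]         # by integer revision number
#     repo[node]       # by binary node
#     repo[hexnode]    # by hex node
#     repo[0:3]        # slice -> list of changectx, filtered revs skipped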
1283 1277 def __contains__(self, changeid):
1284 1278 """True if the given changeid exists
1285 1279
1286 1280 error.AmbiguousPrefixLookupError is raised if an ambiguous node is
1287 1281 specified.
1288 1282 """
1289 1283 try:
1290 1284 self[changeid]
1291 1285 return True
1292 1286 except error.RepoLookupError:
1293 1287 return False
1294 1288
1295 1289 def __nonzero__(self):
1296 1290 return True
1297 1291
1298 1292 __bool__ = __nonzero__
1299 1293
1300 1294 def __len__(self):
1301 1295 # no need to pay the cost of repoview.changelog
1302 1296 unfi = self.unfiltered()
1303 1297 return len(unfi.changelog)
1304 1298
1305 1299 def __iter__(self):
1306 1300 return iter(self.changelog)
1307 1301
1308 1302 def revs(self, expr, *args):
1309 1303 '''Find revisions matching a revset.
1310 1304
1311 1305 The revset is specified as a string ``expr`` that may contain
1312 1306 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1313 1307
1314 1308 Revset aliases from the configuration are not expanded. To expand
1315 1309 user aliases, consider calling ``scmutil.revrange()`` or
1316 1310 ``repo.anyrevs([expr], user=True)``.
1317 1311
1318 1312 Returns a revset.abstractsmartset, which is a list-like interface
1319 1313 that contains integer revisions.
1320 1314 '''
1321 1315 expr = revsetlang.formatspec(expr, *args)
1322 1316 m = revset.match(None, expr)
1323 1317 return m(self)
1324 1318
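# Illustrative sketch: revs() with %-formatting, per the docstring above
# (``rev`` is an integer revision, ``node`` a binary node):
#
#     repo.revs('heads(%d::)', rev)
#     repo.revs('ancestors(%n) and not public()', node)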
1325 1319 def set(self, expr, *args):
1326 1320 '''Find revisions matching a revset and emit changectx instances.
1327 1321
1328 1322 This is a convenience wrapper around ``revs()`` that iterates the
1329 1323 result and is a generator of changectx instances.
1330 1324
1331 1325 Revset aliases from the configuration are not expanded. To expand
1332 1326 user aliases, consider calling ``scmutil.revrange()``.
1333 1327 '''
1334 1328 for r in self.revs(expr, *args):
1335 1329 yield self[r]
1336 1330
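# Illustrative sketch: iterating changectx instances via set():
#
#     for ctx in repo.set('draft()'):
#         repo.ui.status(b'%s\n' % ctx.hex())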
1337 1331 def anyrevs(self, specs, user=False, localalias=None):
1338 1332 '''Find revisions matching one of the given revsets.
1339 1333
1340 1334 Revset aliases from the configuration are not expanded by default. To
1341 1335 expand user aliases, specify ``user=True``. To provide some local
1342 1336 definitions overriding user aliases, set ``localalias`` to
1343 1337 ``{name: definitionstring}``.
1344 1338 '''
1345 1339 if user:
1346 1340 m = revset.matchany(self.ui, specs,
1347 1341 lookup=revset.lookupfn(self),
1348 1342 localalias=localalias)
1349 1343 else:
1350 1344 m = revset.matchany(None, specs, localalias=localalias)
1351 1345 return m(self)
1352 1346
1353 1347 def url(self):
1354 1348 return 'file:' + self.root
1355 1349
1356 1350 def hook(self, name, throw=False, **args):
1357 1351 """Call a hook, passing this repo instance.
1358 1352
1359 1353 This a convenience method to aid invoking hooks. Extensions likely
1360 1354 won't call this unless they have registered a custom hook or are
1361 1355 replacing code that is expected to call a hook.
1362 1356 """
1363 1357 return hook.hook(self.ui, self, name, throw, **args)
1364 1358
1365 1359 @filteredpropertycache
1366 1360 def _tagscache(self):
1367 1361 '''Returns a tagscache object that contains various tags related
1368 1362 caches.'''
1369 1363
1370 1364 # This simplifies its cache management by having one decorated
1371 1365 # function (this one) and the rest simply fetch things from it.
1372 1366 class tagscache(object):
1373 1367 def __init__(self):
1374 1368 # These two define the set of tags for this repository. tags
1375 1369 # maps tag name to node; tagtypes maps tag name to 'global' or
1376 1370 # 'local'. (Global tags are defined by .hgtags across all
1377 1371 # heads, and local tags are defined in .hg/localtags.)
1378 1372 # They constitute the in-memory cache of tags.
1379 1373 self.tags = self.tagtypes = None
1380 1374
1381 1375 self.nodetagscache = self.tagslist = None
1382 1376
1383 1377 cache = tagscache()
1384 1378 cache.tags, cache.tagtypes = self._findtags()
1385 1379
1386 1380 return cache
1387 1381
1388 1382 def tags(self):
1389 1383 '''return a mapping of tag to node'''
1390 1384 t = {}
1391 1385 if self.changelog.filteredrevs:
1392 1386 tags, tt = self._findtags()
1393 1387 else:
1394 1388 tags = self._tagscache.tags
1395 1389 for k, v in tags.iteritems():
1396 1390 try:
1397 1391 # ignore tags to unknown nodes
1398 1392 self.changelog.rev(v)
1399 1393 t[k] = v
1400 1394 except (error.LookupError, ValueError):
1401 1395 pass
1402 1396 return t
1403 1397
1404 1398 def _findtags(self):
1405 1399 '''Do the hard work of finding tags. Return a pair of dicts
1406 1400 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1407 1401 maps tag name to a string like \'global\' or \'local\'.
1408 1402 Subclasses or extensions are free to add their own tags, but
1409 1403 should be aware that the returned dicts will be retained for the
1410 1404 duration of the localrepo object.'''
1411 1405
1412 1406 # XXX what tagtype should subclasses/extensions use? Currently
1413 1407 # mq and bookmarks add tags, but do not set the tagtype at all.
1414 1408 # Should each extension invent its own tag type? Should there
1415 1409 # be one tagtype for all such "virtual" tags? Or is the status
1416 1410 # quo fine?
1417 1411
1418 1412
1419 1413 # map tag name to (node, hist)
1420 1414 alltags = tagsmod.findglobaltags(self.ui, self)
1421 1415 # map tag name to tag type
1422 1416 tagtypes = dict((tag, 'global') for tag in alltags)
1423 1417
1424 1418 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1425 1419
1426 1420 # Build the return dicts. Have to re-encode tag names because
1427 1421 # the tags module always uses UTF-8 (in order not to lose info
1428 1422 # writing to the cache), but the rest of Mercurial wants them in
1429 1423 # local encoding.
1430 1424 tags = {}
1431 1425 for (name, (node, hist)) in alltags.iteritems():
1432 1426 if node != nullid:
1433 1427 tags[encoding.tolocal(name)] = node
1434 1428 tags['tip'] = self.changelog.tip()
1435 1429 tagtypes = dict([(encoding.tolocal(name), value)
1436 1430 for (name, value) in tagtypes.iteritems()])
1437 1431 return (tags, tagtypes)
1438 1432
1439 1433 def tagtype(self, tagname):
1440 1434 '''
1441 1435 return the type of the given tag. result can be:
1442 1436
1443 1437 'local' : a local tag
1444 1438 'global' : a global tag
1445 1439 None : tag does not exist
1446 1440 '''
1447 1441
1448 1442 return self._tagscache.tagtypes.get(tagname)
1449 1443
1450 1444 def tagslist(self):
1451 1445 '''return a list of tags ordered by revision'''
1452 1446 if not self._tagscache.tagslist:
1453 1447 l = []
1454 1448 for t, n in self.tags().iteritems():
1455 1449 l.append((self.changelog.rev(n), t, n))
1456 1450 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1457 1451
1458 1452 return self._tagscache.tagslist
1459 1453
1460 1454 def nodetags(self, node):
1461 1455 '''return the tags associated with a node'''
1462 1456 if not self._tagscache.nodetagscache:
1463 1457 nodetagscache = {}
1464 1458 for t, n in self._tagscache.tags.iteritems():
1465 1459 nodetagscache.setdefault(n, []).append(t)
1466 1460 for tags in nodetagscache.itervalues():
1467 1461 tags.sort()
1468 1462 self._tagscache.nodetagscache = nodetagscache
1469 1463 return self._tagscache.nodetagscache.get(node, [])
1470 1464
1471 1465 def nodebookmarks(self, node):
1472 1466 """return the list of bookmarks pointing to the specified node"""
1473 1467 return self._bookmarks.names(node)
1474 1468
1475 1469 def branchmap(self):
1476 1470 '''returns a dictionary {branch: [branchheads]} with branchheads
1477 1471 ordered by increasing revision number'''
1478 1472 branchmap.updatecache(self)
1479 1473 return self._branchcaches[self.filtername]
1480 1474
1481 1475 @unfilteredmethod
1482 1476 def revbranchcache(self):
1483 1477 if not self._revbranchcache:
1484 1478 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1485 1479 return self._revbranchcache
1486 1480
1487 1481 def branchtip(self, branch, ignoremissing=False):
1488 1482 '''return the tip node for a given branch
1489 1483
1490 1484 If ignoremissing is True, then this method will not raise an error.
1491 1485 This is helpful for callers that only expect None for a missing branch
1492 1486 (e.g. namespace).
1493 1487
1494 1488 '''
1495 1489 try:
1496 1490 return self.branchmap().branchtip(branch)
1497 1491 except KeyError:
1498 1492 if not ignoremissing:
1499 1493 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1500 1494 else:
1501 1495 pass
1502 1496
1503 1497 def lookup(self, key):
1504 1498 return scmutil.revsymbol(self, key).node()
1505 1499
1506 1500 def lookupbranch(self, key):
1507 1501 if key in self.branchmap():
1508 1502 return key
1509 1503
1510 1504 return scmutil.revsymbol(self, key).branch()
1511 1505
1512 1506 def known(self, nodes):
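# for each requested node, report whether it is known locally and
# not filtered out of this repo view; used by discovery over the wire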
1513 1507 cl = self.changelog
1514 1508 nm = cl.nodemap
1515 1509 filtered = cl.filteredrevs
1516 1510 result = []
1517 1511 for n in nodes:
1518 1512 r = nm.get(n)
1519 1513 resp = not (r is None or r in filtered)
1520 1514 result.append(resp)
1521 1515 return result
1522 1516
1523 1517 def local(self):
1524 1518 return self
1525 1519
1526 1520 def publishing(self):
1527 1521 # it's safe (and desirable) to trust the publish flag unconditionally
1528 1522 # so that we don't finalize changes shared between users via ssh or nfs
1529 1523 return self.ui.configbool('phases', 'publish', untrusted=True)
1530 1524
1531 1525 def cancopy(self):
1532 1526 # so statichttprepo's override of local() works
1533 1527 if not self.local():
1534 1528 return False
1535 1529 if not self.publishing():
1536 1530 return True
1537 1531 # if publishing we can't copy if there is filtered content
1538 1532 return not self.filtered('visible').changelog.filteredrevs
1539 1533
1540 1534 def shared(self):
1541 1535 '''the type of shared repository (None if not shared)'''
1542 1536 if self.sharedpath != self.path:
1543 1537 return 'store'
1544 1538 return None
1545 1539
1546 1540 def wjoin(self, f, *insidef):
1547 1541 return self.vfs.reljoin(self.root, f, *insidef)
1548 1542
1549 1543 def setparents(self, p1, p2=nullid):
1550 1544 with self.dirstate.parentchange():
1551 1545 copies = self.dirstate.setparents(p1, p2)
1552 1546 pctx = self[p1]
1553 1547 if copies:
1554 1548 # Adjust copy records; the dirstate cannot do it, as it
1555 1549 # requires access to the parents' manifests. Preserve them
1556 1550 # only for entries added to the first parent.
1557 1551 for f in copies:
1558 1552 if f not in pctx and copies[f] in pctx:
1559 1553 self.dirstate.copy(copies[f], f)
1560 1554 if p2 == nullid:
1561 1555 for f, s in sorted(self.dirstate.copies().items()):
1562 1556 if f not in pctx and s not in pctx:
1563 1557 self.dirstate.copy(None, f)
1564 1558
1565 1559 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1566 1560 """changeid can be a changeset revision, node, or tag.
1567 1561 fileid can be a file revision or node."""
1568 1562 return context.filectx(self, path, changeid, fileid,
1569 1563 changectx=changectx)
1570 1564
1571 1565 def getcwd(self):
1572 1566 return self.dirstate.getcwd()
1573 1567
1574 1568 def pathto(self, f, cwd=None):
1575 1569 return self.dirstate.pathto(f, cwd)
1576 1570
1577 1571 def _loadfilter(self, filter):
1578 1572 if filter not in self._filterpats:
1579 1573 l = []
1580 1574 for pat, cmd in self.ui.configitems(filter):
1581 1575 if cmd == '!':
1582 1576 continue
1583 1577 mf = matchmod.match(self.root, '', [pat])
1584 1578 fn = None
1585 1579 params = cmd
1586 1580 for name, filterfn in self._datafilters.iteritems():
1587 1581 if cmd.startswith(name):
1588 1582 fn = filterfn
1589 1583 params = cmd[len(name):].lstrip()
1590 1584 break
1591 1585 if not fn:
1592 1586 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1593 1587 # Wrap old filters not supporting keyword arguments
1594 1588 if not pycompat.getargspec(fn)[2]:
1595 1589 oldfn = fn
1596 1590 fn = lambda s, c, **kwargs: oldfn(s, c)
1597 1591 l.append((mf, fn, params))
1598 1592 self._filterpats[filter] = l
1599 1593 return self._filterpats[filter]
1600 1594
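# The [encode] and [decode] hgrc sections drive these filters, mapping
# file patterns to filter commands; the classic example from the config
# documentation (illustrative only)::
#
#   [encode]
#   # uncompress gzip files on checkin to improve delta compression
#   *.gz = pipe: gunzip
#
#   [decode]
#   # recompress gzip files when writing them to the working directory
#   *.gz = gzip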
1601 1595 def _filter(self, filterpats, filename, data):
1602 1596 for mf, fn, cmd in filterpats:
1603 1597 if mf(filename):
1604 1598 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1605 1599 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1606 1600 break
1607 1601
1608 1602 return data
1609 1603
1610 1604 @unfilteredpropertycache
1611 1605 def _encodefilterpats(self):
1612 1606 return self._loadfilter('encode')
1613 1607
1614 1608 @unfilteredpropertycache
1615 1609 def _decodefilterpats(self):
1616 1610 return self._loadfilter('decode')
1617 1611
1618 1612 def adddatafilter(self, name, filter):
1619 1613 self._datafilters[name] = filter
1620 1614
1621 1615 def wread(self, filename):
1622 1616 if self.wvfs.islink(filename):
1623 1617 data = self.wvfs.readlink(filename)
1624 1618 else:
1625 1619 data = self.wvfs.read(filename)
1626 1620 return self._filter(self._encodefilterpats, filename, data)
1627 1621
1628 1622 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1629 1623 """write ``data`` into ``filename`` in the working directory
1630 1624
1631 1625 This returns length of written (maybe decoded) data.
1632 1626 """
1633 1627 data = self._filter(self._decodefilterpats, filename, data)
1634 1628 if 'l' in flags:
1635 1629 self.wvfs.symlink(data, filename)
1636 1630 else:
1637 1631 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1638 1632 **kwargs)
1639 1633 if 'x' in flags:
1640 1634 self.wvfs.setflags(filename, False, True)
1641 1635 else:
1642 1636 self.wvfs.setflags(filename, False, False)
1643 1637 return len(data)
1644 1638
1645 1639 def wwritedata(self, filename, data):
1646 1640 return self._filter(self._decodefilterpats, filename, data)
1647 1641
1648 1642 def currenttransaction(self):
1649 1643 """return the current transaction or None if none exists"""
1650 1644 if self._transref:
1651 1645 tr = self._transref()
1652 1646 else:
1653 1647 tr = None
1654 1648
1655 1649 if tr and tr.running():
1656 1650 return tr
1657 1651 return None
1658 1652
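# Typical usage sketch (assuming the caller already owns the locks;
# both the locks and the transaction support the context-manager
# protocol)::
#
#   with repo.wlock(), repo.lock(), repo.transaction('my-change') as tr:
#       ...  # mutate the repo; close/abort is handled on exit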
1659 1653 def transaction(self, desc, report=None):
1660 1654 if (self.ui.configbool('devel', 'all-warnings')
1661 1655 or self.ui.configbool('devel', 'check-locks')):
1662 1656 if self._currentlock(self._lockref) is None:
1663 1657 raise error.ProgrammingError('transaction requires locking')
1664 1658 tr = self.currenttransaction()
1665 1659 if tr is not None:
1666 1660 return tr.nest(name=desc)
1667 1661
1668 1662 # abort here if the journal already exists
1669 1663 if self.svfs.exists("journal"):
1670 1664 raise error.RepoError(
1671 1665 _("abandoned transaction found"),
1672 1666 hint=_("run 'hg recover' to clean up transaction"))
1673 1667
1674 1668 idbase = "%.40f#%f" % (random.random(), time.time())
1675 1669 ha = hex(hashlib.sha1(idbase).digest())
1676 1670 txnid = 'TXN:' + ha
1677 1671 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1678 1672
1679 1673 self._writejournal(desc)
1680 1674 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1681 1675 if report:
1682 1676 rp = report
1683 1677 else:
1684 1678 rp = self.ui.warn
1685 1679 vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
1686 1680 # we must avoid a cyclic reference between repo and transaction.
1687 1681 reporef = weakref.ref(self)
1688 1682 # Code to track tag movement
1689 1683 #
1690 1684 # Since tags are all handled as file content, it is actually quite hard
1691 1685 # to track their movement from a code perspective. So we fall back to
1692 1686 # tracking at the repository level. One could envision tracking changes
1693 1687 # to the '.hgtags' file through changegroup apply, but that fails to
1694 1688 # cope with cases where a transaction exposes new heads without a
1695 1689 # changegroup being involved (eg: phase movement).
1696 1690 #
1697 1691 # For now, we gate the feature behind a flag since it likely comes
1698 1692 # with performance impacts. The current code runs more often than needed
1699 1693 # and does not use caches as much as it could. The current focus is on
1700 1694 # the behavior of the feature, so we disable it by default. The flag
1701 1695 # will be removed when we are happy with the performance impact.
1702 1696 #
1703 1697 # Once this feature is no longer experimental, move the following
1704 1698 # documentation to the appropriate help section:
1705 1699 #
1706 1700 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1707 1701 # tags (new or changed or deleted tags). In addition the details of
1708 1702 # these changes are made available in a file at:
1709 1703 # ``REPOROOT/.hg/changes/tags.changes``.
1710 1704 # Make sure you check for HG_TAG_MOVED before reading that file, as it
1711 1705 # might exist from a previous transaction even if no tags were touched
1712 1706 # in this one. Changes are recorded in a line-based format::
1713 1707 #
1714 1708 # <action> <hex-node> <tag-name>\n
1715 1709 #
1716 1710 # Actions are defined as follows:
1717 1711 # "-R": tag is removed,
1718 1712 # "+A": tag is added,
1719 1713 # "-M": tag is moved (old value),
1720 1714 # "+M": tag is moved (new value),
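#
# A txnclose hook could consume the file along these lines (shell
# sketch only, not a shipped hook)::
#
#   [hooks]
#   txnclose.tagmoved = test -z "$HG_TAG_MOVED" || cat "$(hg root)/.hg/changes/tags.changes"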
1721 1715 tracktags = lambda x: None
1722 1716 # experimental config: experimental.hook-track-tags
1723 1717 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1724 1718 if desc != 'strip' and shouldtracktags:
1725 1719 oldheads = self.changelog.headrevs()
1726 1720 def tracktags(tr2):
1727 1721 repo = reporef()
1728 1722 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1729 1723 newheads = repo.changelog.headrevs()
1730 1724 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1731 1725 # note: we compare lists here.
1732 1726 # As we do it only once, building sets would not be cheaper
1733 1727 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1734 1728 if changes:
1735 1729 tr2.hookargs['tag_moved'] = '1'
1736 1730 with repo.vfs('changes/tags.changes', 'w',
1737 1731 atomictemp=True) as changesfile:
1738 1732 # note: we do not register the file with the transaction
1739 1733 # because we need it to still exist when the transaction
1740 1734 # is closed (for txnclose hooks)
1741 1735 tagsmod.writediff(changesfile, changes)
1742 1736 def validate(tr2):
1743 1737 """will run pre-closing hooks"""
1744 1738 # XXX the transaction API is a bit lacking here so we take a hacky
1745 1739 # path for now
1746 1740 #
1747 1741 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1748 1742 # dict is copied before these run. In addition, we need the data
1749 1743 # available to in-memory hooks too.
1750 1744 #
1751 1745 # Moreover, we also need to make sure this runs before txnclose
1752 1746 # hooks and there is no "pending" mechanism that would execute
1753 1747 # logic only if hooks are about to run.
1754 1748 #
1755 1749 # Fixing this limitation of the transaction is also needed to track
1756 1750 # other families of changes (bookmarks, phases, obsolescence).
1757 1751 #
1758 1752 # This will have to be fixed before we remove the experimental
1759 1753 # gating.
1760 1754 tracktags(tr2)
1761 1755 repo = reporef()
1762 1756 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1763 1757 scmutil.enforcesinglehead(repo, tr2, desc)
1764 1758 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1765 1759 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1766 1760 args = tr.hookargs.copy()
1767 1761 args.update(bookmarks.preparehookargs(name, old, new))
1768 1762 repo.hook('pretxnclose-bookmark', throw=True,
1769 1763 txnname=desc,
1770 1764 **pycompat.strkwargs(args))
1771 1765 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1772 1766 cl = repo.unfiltered().changelog
1773 1767 for rev, (old, new) in tr.changes['phases'].items():
1774 1768 args = tr.hookargs.copy()
1775 1769 node = hex(cl.node(rev))
1776 1770 args.update(phases.preparehookargs(node, old, new))
1777 1771 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1778 1772 **pycompat.strkwargs(args))
1779 1773
1780 1774 repo.hook('pretxnclose', throw=True,
1781 1775 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1782 1776 def releasefn(tr, success):
1783 1777 repo = reporef()
1784 1778 if success:
1785 1779 # this should be explicitly invoked here, because
1786 1780 # in-memory changes aren't written out when the
1787 1781 # transaction closes if tr.addfilegenerator (via
1788 1782 # dirstate.write or so) wasn't invoked while the
1789 1783 # transaction was running
1790 1784 repo.dirstate.write(None)
1791 1785 else:
1792 1786 # discard all changes (including ones already written
1793 1787 # out) in this transaction
1794 1788 narrowspec.restorebackup(self, 'journal.narrowspec')
1795 1789 repo.dirstate.restorebackup(None, 'journal.dirstate')
1796 1790
1797 1791 repo.invalidate(clearfilecache=True)
1798 1792
1799 1793 tr = transaction.transaction(rp, self.svfs, vfsmap,
1800 1794 "journal",
1801 1795 "undo",
1802 1796 aftertrans(renames),
1803 1797 self.store.createmode,
1804 1798 validator=validate,
1805 1799 releasefn=releasefn,
1806 1800 checkambigfiles=_cachedfiles,
1807 1801 name=desc)
1808 1802 tr.changes['origrepolen'] = len(self)
1809 1803 tr.changes['obsmarkers'] = set()
1810 1804 tr.changes['phases'] = {}
1811 1805 tr.changes['bookmarks'] = {}
1812 1806
1813 1807 tr.hookargs['txnid'] = txnid
1814 1808 # note: writing the fncache only during finalize means that the file is
1815 1809 # outdated when running hooks. As fncache is used for streaming clones,
1816 1810 # this is not expected to break anything that happens during the hooks.
1817 1811 tr.addfinalize('flush-fncache', self.store.write)
1818 1812 def txnclosehook(tr2):
1819 1813 """To be run if transaction is successful, will schedule a hook run
1820 1814 """
1821 1815 # Don't reference tr2 in hook() so we don't hold a reference.
1822 1816 # This reduces memory consumption when there are multiple
1823 1817 # transactions per lock. This can likely go away if issue5045
1824 1818 # fixes the function accumulation.
1825 1819 hookargs = tr2.hookargs
1826 1820
1827 1821 def hookfunc():
1828 1822 repo = reporef()
1829 1823 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1830 1824 bmchanges = sorted(tr.changes['bookmarks'].items())
1831 1825 for name, (old, new) in bmchanges:
1832 1826 args = tr.hookargs.copy()
1833 1827 args.update(bookmarks.preparehookargs(name, old, new))
1834 1828 repo.hook('txnclose-bookmark', throw=False,
1835 1829 txnname=desc, **pycompat.strkwargs(args))
1836 1830
1837 1831 if hook.hashook(repo.ui, 'txnclose-phase'):
1838 1832 cl = repo.unfiltered().changelog
1839 1833 phasemv = sorted(tr.changes['phases'].items())
1840 1834 for rev, (old, new) in phasemv:
1841 1835 args = tr.hookargs.copy()
1842 1836 node = hex(cl.node(rev))
1843 1837 args.update(phases.preparehookargs(node, old, new))
1844 1838 repo.hook('txnclose-phase', throw=False, txnname=desc,
1845 1839 **pycompat.strkwargs(args))
1846 1840
1847 1841 repo.hook('txnclose', throw=False, txnname=desc,
1848 1842 **pycompat.strkwargs(hookargs))
1849 1843 reporef()._afterlock(hookfunc)
1850 1844 tr.addfinalize('txnclose-hook', txnclosehook)
1851 1845 # Include a leading "-" to make it happen before the transaction summary
1852 1846 # reports registered via scmutil.registersummarycallback() whose names
1853 1847 # are 00-txnreport etc. That way, the caches will be warm when the
1854 1848 # callbacks run.
1855 1849 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1856 1850 def txnaborthook(tr2):
1857 1851 """To be run if transaction is aborted
1858 1852 """
1859 1853 reporef().hook('txnabort', throw=False, txnname=desc,
1860 1854 **pycompat.strkwargs(tr2.hookargs))
1861 1855 tr.addabort('txnabort-hook', txnaborthook)
1862 1856 # avoid eager cache invalidation. in-memory data should be identical
1863 1857 # to stored data if transaction has no error.
1864 1858 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1865 1859 self._transref = weakref.ref(tr)
1866 1860 scmutil.registersummarycallback(self, tr, desc)
1867 1861 return tr
1868 1862
1869 1863 def _journalfiles(self):
1870 1864 return ((self.svfs, 'journal'),
1871 1865 (self.vfs, 'journal.dirstate'),
1872 1866 (self.vfs, 'journal.branch'),
1873 1867 (self.vfs, 'journal.desc'),
1874 1868 (self.vfs, 'journal.bookmarks'),
1875 1869 (self.svfs, 'journal.phaseroots'))
1876 1870
1877 1871 def undofiles(self):
1878 1872 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1879 1873
1880 1874 @unfilteredmethod
1881 1875 def _writejournal(self, desc):
1882 1876 self.dirstate.savebackup(None, 'journal.dirstate')
1883 1877 narrowspec.savebackup(self, 'journal.narrowspec')
1884 1878 self.vfs.write("journal.branch",
1885 1879 encoding.fromlocal(self.dirstate.branch()))
1886 1880 self.vfs.write("journal.desc",
1887 1881 "%d\n%s\n" % (len(self), desc))
1888 1882 self.vfs.write("journal.bookmarks",
1889 1883 self.vfs.tryread("bookmarks"))
1890 1884 self.svfs.write("journal.phaseroots",
1891 1885 self.svfs.tryread("phaseroots"))
1892 1886
1893 1887 def recover(self):
1894 1888 with self.lock():
1895 1889 if self.svfs.exists("journal"):
1896 1890 self.ui.status(_("rolling back interrupted transaction\n"))
1897 1891 vfsmap = {'': self.svfs,
1898 1892 'plain': self.vfs,}
1899 1893 transaction.rollback(self.svfs, vfsmap, "journal",
1900 1894 self.ui.warn,
1901 1895 checkambigfiles=_cachedfiles)
1902 1896 self.invalidate()
1903 1897 return True
1904 1898 else:
1905 1899 self.ui.warn(_("no interrupted transaction available\n"))
1906 1900 return False
1907 1901
1908 1902 def rollback(self, dryrun=False, force=False):
1909 1903 wlock = lock = dsguard = None
1910 1904 try:
1911 1905 wlock = self.wlock()
1912 1906 lock = self.lock()
1913 1907 if self.svfs.exists("undo"):
1914 1908 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1915 1909
1916 1910 return self._rollback(dryrun, force, dsguard)
1917 1911 else:
1918 1912 self.ui.warn(_("no rollback information available\n"))
1919 1913 return 1
1920 1914 finally:
1921 1915 release(dsguard, lock, wlock)
1922 1916
1923 1917 @unfilteredmethod # Until we get smarter cache management
1924 1918 def _rollback(self, dryrun, force, dsguard):
1925 1919 ui = self.ui
1926 1920 try:
1927 1921 args = self.vfs.read('undo.desc').splitlines()
1928 1922 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1929 1923 if len(args) >= 3:
1930 1924 detail = args[2]
1931 1925 oldtip = oldlen - 1
1932 1926
1933 1927 if detail and ui.verbose:
1934 1928 msg = (_('repository tip rolled back to revision %d'
1935 1929 ' (undo %s: %s)\n')
1936 1930 % (oldtip, desc, detail))
1937 1931 else:
1938 1932 msg = (_('repository tip rolled back to revision %d'
1939 1933 ' (undo %s)\n')
1940 1934 % (oldtip, desc))
1941 1935 except IOError:
1942 1936 msg = _('rolling back unknown transaction\n')
1943 1937 desc = None
1944 1938
1945 1939 if not force and self['.'] != self['tip'] and desc == 'commit':
1946 1940 raise error.Abort(
1947 1941 _('rollback of last commit while not checked out '
1948 1942 'may lose data'), hint=_('use -f to force'))
1949 1943
1950 1944 ui.status(msg)
1951 1945 if dryrun:
1952 1946 return 0
1953 1947
1954 1948 parents = self.dirstate.parents()
1955 1949 self.destroying()
1956 1950 vfsmap = {'plain': self.vfs, '': self.svfs}
1957 1951 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1958 1952 checkambigfiles=_cachedfiles)
1959 1953 if self.vfs.exists('undo.bookmarks'):
1960 1954 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1961 1955 if self.svfs.exists('undo.phaseroots'):
1962 1956 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1963 1957 self.invalidate()
1964 1958
1965 1959 parentgone = (parents[0] not in self.changelog.nodemap or
1966 1960 parents[1] not in self.changelog.nodemap)
1967 1961 if parentgone:
1968 1962 # prevent dirstateguard from overwriting the already restored one
1969 1963 dsguard.close()
1970 1964
1971 1965 narrowspec.restorebackup(self, 'undo.narrowspec')
1972 1966 self.dirstate.restorebackup(None, 'undo.dirstate')
1973 1967 try:
1974 1968 branch = self.vfs.read('undo.branch')
1975 1969 self.dirstate.setbranch(encoding.tolocal(branch))
1976 1970 except IOError:
1977 1971 ui.warn(_('named branch could not be reset: '
1978 1972 'current branch is still \'%s\'\n')
1979 1973 % self.dirstate.branch())
1980 1974
1981 1975 parents = tuple([p.rev() for p in self[None].parents()])
1982 1976 if len(parents) > 1:
1983 1977 ui.status(_('working directory now based on '
1984 1978 'revisions %d and %d\n') % parents)
1985 1979 else:
1986 1980 ui.status(_('working directory now based on '
1987 1981 'revision %d\n') % parents)
1988 1982 mergemod.mergestate.clean(self, self['.'].node())
1989 1983
1990 1984 # TODO: if we know which new heads may result from this rollback, pass
1991 1985 # them to destroy(), which will prevent the branchhead cache from being
1992 1986 # invalidated.
1993 1987 self.destroyed()
1994 1988 return 0
1995 1989
1996 1990 def _buildcacheupdater(self, newtransaction):
1997 1991 """called during a transaction to build the callback updating caches
1998 1992 
1999 1993 Lives on the repository to help extensions that might want to augment
2000 1994 this logic. For this purpose, the created transaction is passed to the
2001 1995 method.
2002 1996 """
2003 1997 # we must avoid a cyclic reference between repo and transaction.
2004 1998 reporef = weakref.ref(self)
2005 1999 def updater(tr):
2006 2000 repo = reporef()
2007 2001 repo.updatecaches(tr)
2008 2002 return updater
2009 2003
2010 2004 @unfilteredmethod
2011 2005 def updatecaches(self, tr=None, full=False):
2012 2006 """warm appropriate caches
2013 2007
2014 2008 If this function is called after a transaction has closed, the
2015 2009 transaction will be available in the 'tr' argument. This can be used
2016 2010 to selectively update caches relevant to the changes in that transaction.
2017 2011
2018 2012 If 'full' is set, make sure all caches the function knows about have
2019 2013 up-to-date data, even the ones usually loaded more lazily.
2020 2014 """
2021 2015 if tr is not None and tr.hookargs.get('source') == 'strip':
2022 2016 # During strip, many caches are invalid, but a
2023 2017 # later call to `destroyed` will refresh them.
2024 2018 return
2025 2019
2026 2020 if tr is None or tr.changes['origrepolen'] < len(self):
2027 2021 # updating the unfiltered branchmap should refresh all the others,
2028 2022 self.ui.debug('updating the branch cache\n')
2029 2023 branchmap.updatecache(self.filtered('served'))
2030 2024
2031 2025 if full:
2032 2026 rbc = self.revbranchcache()
2033 2027 for r in self.changelog:
2034 2028 rbc.branchinfo(r)
2035 2029 rbc.write()
2036 2030
2037 2031 # ensure the working copy parents are in the manifestfulltextcache
2038 2032 for ctx in self['.'].parents():
2039 2033 ctx.manifest() # accessing the manifest is enough
2040 2034
2041 2035 def invalidatecaches(self):
2042 2036
2043 2037 if '_tagscache' in vars(self):
2044 2038 # can't use delattr on proxy
2045 2039 del self.__dict__['_tagscache']
2046 2040
2047 2041 self.unfiltered()._branchcaches.clear()
2048 2042 self.invalidatevolatilesets()
2049 2043 self._sparsesignaturecache.clear()
2050 2044
2051 2045 def invalidatevolatilesets(self):
2052 2046 self.filteredrevcache.clear()
2053 2047 obsolete.clearobscaches(self)
2054 2048
2055 2049 def invalidatedirstate(self):
2056 2050 '''Invalidates the dirstate, causing the next call to dirstate
2057 2051 to check if it was modified since the last time it was read,
2058 2052 rereading it if it has been.
2059 2053
2060 2054 This is different from dirstate.invalidate() in that it doesn't
2061 2055 always reread the dirstate. Use dirstate.invalidate() if you want to
2062 2056 explicitly read the dirstate again (i.e. restoring it to a previous
2063 2057 known good state).'''
2064 2058 if hasunfilteredcache(self, 'dirstate'):
2065 2059 for k in self.dirstate._filecache:
2066 2060 try:
2067 2061 delattr(self.dirstate, k)
2068 2062 except AttributeError:
2069 2063 pass
2070 2064 delattr(self.unfiltered(), 'dirstate')
2071 2065
2072 2066 def invalidate(self, clearfilecache=False):
2073 2067 '''Invalidates both store and non-store parts other than dirstate
2074 2068
2075 2069 If a transaction is running, invalidation of store is omitted,
2076 2070 because discarding in-memory changes might cause inconsistency
2077 2071 (e.g. incomplete fncache causes unintentional failure, but
2078 2072 redundant one doesn't).
2079 2073 '''
2080 2074 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2081 2075 for k in list(self._filecache.keys()):
2082 2076 # dirstate is invalidated separately in invalidatedirstate()
2083 2077 if k == 'dirstate':
2084 2078 continue
2085 2079 if (k == 'changelog' and
2086 2080 self.currenttransaction() and
2087 2081 self.changelog._delayed):
2088 2082 # The changelog object may store unwritten revisions. We don't
2089 2083 # want to lose them.
2090 2084 # TODO: Solve the problem instead of working around it.
2091 2085 continue
2092 2086
2093 2087 if clearfilecache:
2094 2088 del self._filecache[k]
2095 2089 try:
2096 2090 delattr(unfiltered, k)
2097 2091 except AttributeError:
2098 2092 pass
2099 2093 self.invalidatecaches()
2100 2094 if not self.currenttransaction():
2101 2095 # TODO: Changing contents of store outside transaction
2102 2096 # causes inconsistency. We should make in-memory store
2103 2097 # changes detectable, and abort if changed.
2104 2098 self.store.invalidatecaches()
2105 2099
2106 2100 def invalidateall(self):
2107 2101 '''Fully invalidates both store and non-store parts, causing the
2108 2102 subsequent operation to reread any outside changes.'''
2109 2103 # extension should hook this to invalidate its caches
2110 2104 self.invalidate()
2111 2105 self.invalidatedirstate()
2112 2106
2113 2107 @unfilteredmethod
2114 2108 def _refreshfilecachestats(self, tr):
2115 2109 """Reload stats of cached files so that they are flagged as valid"""
2116 2110 for k, ce in self._filecache.items():
2117 2111 k = pycompat.sysstr(k)
2118 2112 if k == r'dirstate' or k not in self.__dict__:
2119 2113 continue
2120 2114 ce.refresh()
2121 2115
2122 2116 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
2123 2117 inheritchecker=None, parentenvvar=None):
2124 2118 parentlock = None
2125 2119 # the contents of parentenvvar are used by the underlying lock to
2126 2120 # determine whether it can be inherited
2127 2121 if parentenvvar is not None:
2128 2122 parentlock = encoding.environ.get(parentenvvar)
2129 2123
2130 2124 timeout = 0
2131 2125 warntimeout = 0
2132 2126 if wait:
2133 2127 timeout = self.ui.configint("ui", "timeout")
2134 2128 warntimeout = self.ui.configint("ui", "timeout.warn")
2135 2129 # internal config: ui.signal-safe-lock
2136 2130 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
2137 2131
2138 2132 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
2139 2133 releasefn=releasefn,
2140 2134 acquirefn=acquirefn, desc=desc,
2141 2135 inheritchecker=inheritchecker,
2142 2136 parentlock=parentlock,
2143 2137 signalsafe=signalsafe)
2144 2138 return l
2145 2139
2146 2140 def _afterlock(self, callback):
2147 2141 """add a callback to be run when the repository is fully unlocked
2148 2142
2149 2143 The callback will be executed when the outermost lock is released
2150 2144 (with wlock being higher level than 'lock')."""
2151 2145 for ref in (self._wlockref, self._lockref):
2152 2146 l = ref and ref()
2153 2147 if l and l.held:
2154 2148 l.postrelease.append(callback)
2155 2149 break
2156 2150 else: # no lock has been found.
2157 2151 callback()
2158 2152
2159 2153 def lock(self, wait=True):
2160 2154 '''Lock the repository store (.hg/store) and return a weak reference
2161 2155 to the lock. Use this before modifying the store (e.g. committing or
2162 2156 stripping). If you are opening a transaction, get a lock as well.
2163 2157 
2164 2158 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2165 2159 'wlock' first to avoid a deadlock hazard.'''
2166 2160 l = self._currentlock(self._lockref)
2167 2161 if l is not None:
2168 2162 l.lock()
2169 2163 return l
2170 2164
2171 2165 l = self._lock(self.svfs, "lock", wait, None,
2172 2166 self.invalidate, _('repository %s') % self.origroot)
2173 2167 self._lockref = weakref.ref(l)
2174 2168 return l
2175 2169
2176 2170 def _wlockchecktransaction(self):
2177 2171 if self.currenttransaction() is not None:
2178 2172 raise error.LockInheritanceContractViolation(
2179 2173 'wlock cannot be inherited in the middle of a transaction')
2180 2174
2181 2175 def wlock(self, wait=True):
2182 2176 '''Lock the non-store parts of the repository (everything under
2183 2177 .hg except .hg/store) and return a weak reference to the lock.
2184 2178
2185 2179 Use this before modifying files in .hg.
2186 2180
2187 2181 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2188 2182 'wlock' first to avoid a deadlock hazard.'''
2189 2183 l = self._wlockref and self._wlockref()
2190 2184 if l is not None and l.held:
2191 2185 l.lock()
2192 2186 return l
2193 2187
2194 2188 # We do not need to check for non-waiting lock acquisition. Such
2195 2189 # acquisitions would not cause a deadlock, as they would just fail.
2196 2190 if wait and (self.ui.configbool('devel', 'all-warnings')
2197 2191 or self.ui.configbool('devel', 'check-locks')):
2198 2192 if self._currentlock(self._lockref) is not None:
2199 2193 self.ui.develwarn('"wlock" acquired after "lock"')
2200 2194
2201 2195 def unlock():
2202 2196 if self.dirstate.pendingparentchange():
2203 2197 self.dirstate.invalidate()
2204 2198 else:
2205 2199 self.dirstate.write(None)
2206 2200
2207 2201 self._filecache['dirstate'].refresh()
2208 2202
2209 2203 l = self._lock(self.vfs, "wlock", wait, unlock,
2210 2204 self.invalidatedirstate, _('working directory of %s') %
2211 2205 self.origroot,
2212 2206 inheritchecker=self._wlockchecktransaction,
2213 2207 parentenvvar='HG_WLOCK_LOCKER')
2214 2208 self._wlockref = weakref.ref(l)
2215 2209 return l
2216 2210
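# Lock-ordering sketch: 'wlock' guards the working copy, 'lock' guards
# the store; when both are needed, always take 'wlock' first::
#
#   with repo.wlock():
#       with repo.lock():
#           ...  # safe to touch both store and working copy here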
2217 2211 def _currentlock(self, lockref):
2218 2212 """Returns the lock if it's held, or None if it's not."""
2219 2213 if lockref is None:
2220 2214 return None
2221 2215 l = lockref()
2222 2216 if l is None or not l.held:
2223 2217 return None
2224 2218 return l
2225 2219
2226 2220 def currentwlock(self):
2227 2221 """Returns the wlock if it's held, or None if it's not."""
2228 2222 return self._currentlock(self._wlockref)
2229 2223
2230 2224 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
2231 2225 """
2232 2226 commit an individual file as part of a larger transaction
2233 2227 """
2234 2228
2235 2229 fname = fctx.path()
2236 2230 fparent1 = manifest1.get(fname, nullid)
2237 2231 fparent2 = manifest2.get(fname, nullid)
2238 2232 if isinstance(fctx, context.filectx):
2239 2233 node = fctx.filenode()
2240 2234 if node in [fparent1, fparent2]:
2241 2235 self.ui.debug('reusing %s filelog entry\n' % fname)
2242 2236 if manifest1.flags(fname) != fctx.flags():
2243 2237 changelist.append(fname)
2244 2238 return node
2245 2239
2246 2240 flog = self.file(fname)
2247 2241 meta = {}
2248 2242 copy = fctx.renamed()
2249 2243 if copy and copy[0] != fname:
2250 2244 # Mark the new revision of this file as a copy of another
2251 2245 # file. This copy data will effectively act as a parent
2252 2246 # of this new revision. If this is a merge, the first
2253 2247 # parent will be the nullid (meaning "look up the copy data")
2254 2248 # and the second one will be the other parent. For example:
2255 2249 #
2256 2250 # 0 --- 1 --- 3 rev1 changes file foo
2257 2251 # \ / rev2 renames foo to bar and changes it
2258 2252 # \- 2 -/ rev3 should have bar with all changes and
2259 2253 # should record that bar descends from
2260 2254 # bar in rev2 and foo in rev1
2261 2255 #
2262 2256 # this allows this merge to succeed:
2263 2257 #
2264 2258 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
2265 2259 # \ / merging rev3 and rev4 should use bar@rev2
2266 2260 # \- 2 --- 4 as the merge base
2267 2261 #
2268 2262
2269 2263 cfname = copy[0]
2270 2264 crev = manifest1.get(cfname)
2271 2265 newfparent = fparent2
2272 2266
2273 2267 if manifest2: # branch merge
2274 2268 if fparent2 == nullid or crev is None: # copied on remote side
2275 2269 if cfname in manifest2:
2276 2270 crev = manifest2[cfname]
2277 2271 newfparent = fparent1
2278 2272
2279 2273 # Here, we used to search backwards through history to try to find
2280 2274 # where the file copy came from if the source of a copy was not in
2281 2275 # the parent directory. However, this doesn't actually make sense to
2282 2276 # do (what does a copy from something not in your working copy even
2283 2277 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
2284 2278 # the user that copy information was dropped, so if they didn't
2285 2279 # expect this outcome it can be fixed, but this is the correct
2286 2280 # behavior in this circumstance.
2287 2281
2288 2282 if crev:
2289 2283 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
2290 2284 meta["copy"] = cfname
2291 2285 meta["copyrev"] = hex(crev)
2292 2286 fparent1, fparent2 = nullid, newfparent
2293 2287 else:
2294 2288 self.ui.warn(_("warning: can't find ancestor for '%s' "
2295 2289 "copied from '%s'!\n") % (fname, cfname))
2296 2290
2297 2291 elif fparent1 == nullid:
2298 2292 fparent1, fparent2 = fparent2, nullid
2299 2293 elif fparent2 != nullid:
2300 2294 # is one parent an ancestor of the other?
2301 2295 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2302 2296 if fparent1 in fparentancestors:
2303 2297 fparent1, fparent2 = fparent2, nullid
2304 2298 elif fparent2 in fparentancestors:
2305 2299 fparent2 = nullid
2306 2300
2307 2301 # is the file changed?
2308 2302 text = fctx.data()
2309 2303 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
2310 2304 changelist.append(fname)
2311 2305 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2312 2306 # are just the flags changed during merge?
2313 2307 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2314 2308 changelist.append(fname)
2315 2309
2316 2310 return fparent1
2317 2311
2318 2312 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
2319 2313 """check for commit arguments that aren't committable"""
2320 2314 if match.isexact() or match.prefix():
2321 2315 matched = set(status.modified + status.added + status.removed)
2322 2316
2323 2317 for f in match.files():
2324 2318 f = self.dirstate.normalize(f)
2325 2319 if f == '.' or f in matched or f in wctx.substate:
2326 2320 continue
2327 2321 if f in status.deleted:
2328 2322 fail(f, _('file not found!'))
2329 2323 if f in vdirs: # visited directory
2330 2324 d = f + '/'
2331 2325 for mf in matched:
2332 2326 if mf.startswith(d):
2333 2327 break
2334 2328 else:
2335 2329 fail(f, _("no match under directory!"))
2336 2330 elif f not in self.dirstate:
2337 2331 fail(f, _("file not tracked!"))
2338 2332
2339 2333 @unfilteredmethod
2340 2334 def commit(self, text="", user=None, date=None, match=None, force=False,
2341 2335 editor=False, extra=None):
2342 2336 """Add a new revision to current repository.
2343 2337
2344 2338 Revision information is gathered from the working directory,
2345 2339 match can be used to filter the committed files. If editor is
2346 2340 supplied, it is called to get a commit message.
2347 2341 """
2348 2342 if extra is None:
2349 2343 extra = {}
2350 2344
2351 2345 def fail(f, msg):
2352 2346 raise error.Abort('%s: %s' % (f, msg))
2353 2347
2354 2348 if not match:
2355 2349 match = matchmod.always(self.root, '')
2356 2350
2357 2351 if not force:
2358 2352 vdirs = []
2359 2353 match.explicitdir = vdirs.append
2360 2354 match.bad = fail
2361 2355
2362 2356 wlock = lock = tr = None
2363 2357 try:
2364 2358 wlock = self.wlock()
2365 2359 lock = self.lock() # for recent changelog (see issue4368)
2366 2360
2367 2361 wctx = self[None]
2368 2362 merge = len(wctx.parents()) > 1
2369 2363
2370 2364 if not force and merge and not match.always():
2371 2365 raise error.Abort(_('cannot partially commit a merge '
2372 2366 '(do not specify files or patterns)'))
2373 2367
2374 2368 status = self.status(match=match, clean=force)
2375 2369 if force:
2376 2370 status.modified.extend(status.clean) # mq may commit clean files
2377 2371
2378 2372 # check subrepos
2379 2373 subs, commitsubs, newstate = subrepoutil.precommit(
2380 2374 self.ui, wctx, status, match, force=force)
2381 2375
2382 2376 # make sure all explicit patterns are matched
2383 2377 if not force:
2384 2378 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
2385 2379
2386 2380 cctx = context.workingcommitctx(self, status,
2387 2381 text, user, date, extra)
2388 2382
2389 2383 # internal config: ui.allowemptycommit
2390 2384 allowemptycommit = (wctx.branch() != wctx.p1().branch()
2391 2385 or extra.get('close') or merge or cctx.files()
2392 2386 or self.ui.configbool('ui', 'allowemptycommit'))
2393 2387 if not allowemptycommit:
2394 2388 return None
2395 2389
2396 2390 if merge and cctx.deleted():
2397 2391 raise error.Abort(_("cannot commit merge with missing files"))
2398 2392
2399 2393 ms = mergemod.mergestate.read(self)
2400 2394 mergeutil.checkunresolved(ms)
2401 2395
2402 2396 if editor:
2403 2397 cctx._text = editor(self, cctx, subs)
2404 2398 edited = (text != cctx._text)
2405 2399
2406 2400 # Save commit message in case this transaction gets rolled back
2407 2401 # (e.g. by a pretxncommit hook). Leave the content alone on
2408 2402 # the assumption that the user will use the same editor again.
2409 2403 msgfn = self.savecommitmessage(cctx._text)
2410 2404
2411 2405 # commit subs and write new state
2412 2406 if subs:
2413 2407 for s in sorted(commitsubs):
2414 2408 sub = wctx.sub(s)
2415 2409 self.ui.status(_('committing subrepository %s\n') %
2416 2410 subrepoutil.subrelpath(sub))
2417 2411 sr = sub.commit(cctx._text, user, date)
2418 2412 newstate[s] = (newstate[s][0], sr)
2419 2413 subrepoutil.writestate(self, newstate)
2420 2414
2421 2415 p1, p2 = self.dirstate.parents()
2422 2416 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2423 2417 try:
2424 2418 self.hook("precommit", throw=True, parent1=hookp1,
2425 2419 parent2=hookp2)
2426 2420 tr = self.transaction('commit')
2427 2421 ret = self.commitctx(cctx, True)
2428 2422 except: # re-raises
2429 2423 if edited:
2430 2424 self.ui.write(
2431 2425 _('note: commit message saved in %s\n') % msgfn)
2432 2426 raise
2433 2427 # update bookmarks, dirstate and mergestate
2434 2428 bookmarks.update(self, [p1, p2], ret)
2435 2429 cctx.markcommitted(ret)
2436 2430 ms.reset()
2437 2431 tr.close()
2438 2432
2439 2433 finally:
2440 2434 lockmod.release(tr, lock, wlock)
2441 2435
2442 2436 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2443 2437 # hack for commands that use a temporary commit (eg: histedit):
2444 2438 # the temporary commit may have been stripped before the hook runs
2445 2439 if self.changelog.hasnode(ret):
2446 2440 self.hook("commit", node=node, parent1=parent1,
2447 2441 parent2=parent2)
2448 2442 self._afterlock(commithook)
2449 2443 return ret
2450 2444
2451 2445 @unfilteredmethod
2452 2446 def commitctx(self, ctx, error=False):
2453 2447 """Add a new revision to current repository.
2454 2448 Revision information is passed via the context argument.
2455 2449
2456 2450 ctx.files() should list all files involved in this commit, i.e.
2457 2451 modified/added/removed files. On merge, it may be wider than the
2458 2452 ctx.files() to be committed, since any file nodes derived directly
2459 2453 from p1 or p2 are excluded from the committed ctx.files().
2460 2454 """
2461 2455
2462 2456 tr = None
2463 2457 p1, p2 = ctx.p1(), ctx.p2()
2464 2458 user = ctx.user()
2465 2459
2466 2460 lock = self.lock()
2467 2461 try:
2468 2462 tr = self.transaction("commit")
2469 2463 trp = weakref.proxy(tr)
2470 2464
2471 2465 if ctx.manifestnode():
2472 2466 # reuse an existing manifest revision
2473 2467 self.ui.debug('reusing known manifest\n')
2474 2468 mn = ctx.manifestnode()
2475 2469 files = ctx.files()
2476 2470 elif ctx.files():
2477 2471 m1ctx = p1.manifestctx()
2478 2472 m2ctx = p2.manifestctx()
2479 2473 mctx = m1ctx.copy()
2480 2474
2481 2475 m = mctx.read()
2482 2476 m1 = m1ctx.read()
2483 2477 m2 = m2ctx.read()
2484 2478
2485 2479 # check in files
2486 2480 added = []
2487 2481 changed = []
2488 2482 removed = list(ctx.removed())
2489 2483 linkrev = len(self)
2490 2484 self.ui.note(_("committing files:\n"))
2491 2485 for f in sorted(ctx.modified() + ctx.added()):
2492 2486 self.ui.note(f + "\n")
2493 2487 try:
2494 2488 fctx = ctx[f]
2495 2489 if fctx is None:
2496 2490 removed.append(f)
2497 2491 else:
2498 2492 added.append(f)
2499 2493 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2500 2494 trp, changed)
2501 2495 m.setflag(f, fctx.flags())
2502 2496 except OSError as inst:
2503 2497 self.ui.warn(_("trouble committing %s!\n") % f)
2504 2498 raise
2505 2499 except IOError as inst:
2506 2500 errcode = getattr(inst, 'errno', errno.ENOENT)
2507 2501 if error or errcode and errcode != errno.ENOENT:
2508 2502 self.ui.warn(_("trouble committing %s!\n") % f)
2509 2503 raise
2510 2504
2511 2505 # update manifest
2512 2506 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2513 2507 drop = [f for f in removed if f in m]
2514 2508 for f in drop:
2515 2509 del m[f]
2516 2510 files = changed + removed
2517 2511 md = None
2518 2512 if not files:
2519 2513 # if no "files" actually changed in terms of the changelog,
2520 2514 # try hard to detect an unmodified manifest entry so that the
2521 2515 # exact same commit can be reproduced later on convert.
2522 2516 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2523 2517 if not files and md:
2524 2518 self.ui.debug('not reusing manifest (no file change in '
2525 2519 'changelog, but manifest differs)\n')
2526 2520 if files or md:
2527 2521 self.ui.note(_("committing manifest\n"))
2528 2522 # we're using narrowmatch here since it's already applied at
2529 2523 # other stages (such as dirstate.walk), so we're already
2530 2524 # ignoring things outside of narrowspec in most cases. The
2531 2525 # one case where we might have files outside the narrowspec
2532 2526 # at this point is merges, and we already error out in the
2533 2527 # case where the merge has files outside of the narrowspec,
2534 2528 # so this is safe.
2535 2529 mn = mctx.write(trp, linkrev,
2536 2530 p1.manifestnode(), p2.manifestnode(),
2537 2531 added, drop, match=self.narrowmatch())
2538 2532 else:
2539 2533 self.ui.debug('reusing manifest from p1 (listed files '
2540 2534 'actually unchanged)\n')
2541 2535 mn = p1.manifestnode()
2542 2536 else:
2543 2537 self.ui.debug('reusing manifest from p1 (no file change)\n')
2544 2538 mn = p1.manifestnode()
2545 2539 files = []
2546 2540
2547 2541 # update changelog
2548 2542 self.ui.note(_("committing changelog\n"))
2549 2543 self.changelog.delayupdate(tr)
2550 2544 n = self.changelog.add(mn, files, ctx.description(),
2551 2545 trp, p1.node(), p2.node(),
2552 2546 user, ctx.date(), ctx.extra().copy())
2553 2547 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2554 2548 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2555 2549 parent2=xp2)
2556 2550 # set the new commit to its proper phase
2557 2551 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2558 2552 if targetphase:
2559 2553 # retracting the boundary does not alter parent changesets.
2560 2554 # if a parent has a higher phase, the resulting phase will
2561 2555 # be compliant anyway
2562 2556 #
2563 2557 # if the minimal phase was 0, we don't need to retract anything
2564 2558 phases.registernew(self, tr, targetphase, [n])
2565 2559 tr.close()
2566 2560 return n
2567 2561 finally:
2568 2562 if tr:
2569 2563 tr.release()
2570 2564 lock.release()
2571 2565
2572 2566 @unfilteredmethod
2573 2567 def destroying(self):
2574 2568 '''Inform the repository that nodes are about to be destroyed.
2575 2569 Intended for use by strip and rollback, so there's a common
2576 2570 place for anything that has to be done before destroying history.
2577 2571
2578 2572 This is mostly useful for saving state that is in memory and waiting
2579 2573 to be flushed when the current lock is released. Because a call to
2580 2574 destroyed is imminent, the repo will be invalidated causing those
2581 2575 changes to stay in memory (waiting for the next unlock), or vanish
2582 2576 completely.
2583 2577 '''
2584 2578 # When using the same lock to commit and strip, the phasecache is left
2585 2579 # dirty after committing. Then when we strip, the repo is invalidated,
2586 2580 # causing those changes to disappear.
2587 2581 if '_phasecache' in vars(self):
2588 2582 self._phasecache.write()
2589 2583
2590 2584 @unfilteredmethod
2591 2585 def destroyed(self):
2592 2586 '''Inform the repository that nodes have been destroyed.
2593 2587 Intended for use by strip and rollback, so there's a common
2594 2588 place for anything that has to be done after destroying history.
2595 2589 '''
2596 2590 # When one tries to:
2597 2591 # 1) destroy nodes thus calling this method (e.g. strip)
2598 2592 # 2) use phasecache somewhere (e.g. commit)
2599 2593 #
2600 2594 # then 2) will fail because the phasecache contains nodes that were
2601 2595 # removed. We can either remove phasecache from the filecache,
2602 2596 # causing it to reload next time it is accessed, or simply filter
2603 2597 # the removed nodes now and write the updated cache.
2604 2598 self._phasecache.filterunknown(self)
2605 2599 self._phasecache.write()
2606 2600
2607 2601 # refresh all repository caches
2608 2602 self.updatecaches()
2609 2603
2610 2604 # Ensure the persistent tag cache is updated. Doing it now
2611 2605 # means that the tag cache only has to worry about destroyed
2612 2606 # heads immediately after a strip/rollback. That in turn
2613 2607 # guarantees that "cachetip == currenttip" (comparing both rev
2614 2608 # and node) always means no nodes have been added or destroyed.
2615 2609
2616 2610 # XXX this is suboptimal when qrefresh'ing: we strip the current
2617 2611 # head, refresh the tag cache, then immediately add a new head.
2618 2612 # But I think doing it this way is necessary for the "instant
2619 2613 # tag cache retrieval" case to work.
2620 2614 self.invalidate()
2621 2615
2622 2616 def status(self, node1='.', node2=None, match=None,
2623 2617 ignored=False, clean=False, unknown=False,
2624 2618 listsubrepos=False):
2625 2619 '''a convenience method that calls node1.status(node2)'''
2626 2620 return self[node1].status(node2, match, ignored, clean, unknown,
2627 2621 listsubrepos)
2628 2622
2629 2623 def addpostdsstatus(self, ps):
2630 2624 """Add a callback to run within the wlock, at the point at which status
2631 2625 fixups happen.
2632 2626
2633 2627 On status completion, callback(wctx, status) will be called with the
2634 2628 wlock held, unless the dirstate has changed from underneath or the wlock
2635 2629 couldn't be grabbed.
2636 2630
2637 2631 Callbacks should not capture and use a cached copy of the dirstate --
2638 2632 it might change in the meanwhile. Instead, they should access the
2639 2633 dirstate via wctx.repo().dirstate.
2640 2634
2641 2635 This list is emptied out after each status run -- extensions should
2642 2636 make sure they add to this list each time dirstate.status is called.
2643 2637 Extensions should also make sure they don't call this for statuses
2644 2638 that don't involve the dirstate.
2645 2639 """
2646 2640
2647 2641 # The list is located here for uniqueness reasons -- it is actually
2648 2642 # managed by the workingctx, but that isn't unique per-repo.
2649 2643 self._postdsstatus.append(ps)
2650 2644
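# A hypothetical extension callback might look like this (sketch)::
#
#   def fixup(wctx, status):
#       # always go through wctx.repo().dirstate, never a cached copy
#       ...
#
#   repo.addpostdsstatus(fixup)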
2651 2645 def postdsstatus(self):
2652 2646 """Used by workingctx to get the list of post-dirstate-status hooks."""
2653 2647 return self._postdsstatus
2654 2648
2655 2649 def clearpostdsstatus(self):
2656 2650 """Used by workingctx to clear post-dirstate-status hooks."""
2657 2651 del self._postdsstatus[:]
2658 2652
2659 2653 def heads(self, start=None):
2660 2654 if start is None:
2661 2655 cl = self.changelog
2662 2656 headrevs = reversed(cl.headrevs())
2663 2657 return [cl.node(rev) for rev in headrevs]
2664 2658
2665 2659 heads = self.changelog.heads(start)
2666 2660 # sort the output in rev descending order
2667 2661 return sorted(heads, key=self.changelog.rev, reverse=True)
2668 2662
2669 2663 def branchheads(self, branch=None, start=None, closed=False):
2670 2664 '''return a (possibly filtered) list of heads for the given branch
2671 2665
2672 2666 Heads are returned in topological order, from newest to oldest.
2673 2667 If branch is None, use the dirstate branch.
2674 2668 If start is not None, return only heads reachable from start.
2675 2669 If closed is True, return heads that are marked as closed as well.
2676 2670 '''
2677 2671 if branch is None:
2678 2672 branch = self[None].branch()
2679 2673 branches = self.branchmap()
2680 2674 if branch not in branches:
2681 2675 return []
2682 2676 # the cache returns heads ordered lowest to highest
2683 2677 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2684 2678 if start is not None:
2685 2679 # filter out the heads that cannot be reached from startrev
2686 2680 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2687 2681 bheads = [h for h in bheads if h in fbheads]
2688 2682 return bheads
2689 2683
2690 2684 def branches(self, nodes):
2691 2685 if not nodes:
2692 2686 nodes = [self.changelog.tip()]
2693 2687 b = []
2694 2688 for n in nodes:
2695 2689 t = n
2696 2690 while True:
2697 2691 p = self.changelog.parents(n)
2698 2692 if p[1] != nullid or p[0] == nullid:
2699 2693 b.append((t, n, p[0], p[1]))
2700 2694 break
2701 2695 n = p[0]
2702 2696 return b
2703 2697
2704 2698 def between(self, pairs):
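# for each (top, bottom) pair, walk the first-parent chain from top
# towards bottom, sampling nodes at exponentially growing distances
# (1, 2, 4, 8, ...); this serves the 'between' wire protocol command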
2705 2699 r = []
2706 2700
2707 2701 for top, bottom in pairs:
2708 2702 n, l, i = top, [], 0
2709 2703 f = 1
2710 2704
2711 2705 while n != bottom and n != nullid:
2712 2706 p = self.changelog.parents(n)[0]
2713 2707 if i == f:
2714 2708 l.append(n)
2715 2709 f = f * 2
2716 2710 n = p
2717 2711 i += 1
2718 2712
2719 2713 r.append(l)
2720 2714
2721 2715 return r
2722 2716
2723 2717 def checkpush(self, pushop):
2724 2718 """Extensions can override this function if additional checks have
2725 2719 to be performed before pushing, or call it if they override push
2726 2720 command.
2727 2721 """
2728 2722
2729 2723 @unfilteredpropertycache
2730 2724 def prepushoutgoinghooks(self):
2731 2725 """Return a util.hooks object whose hooks are called with a pushop
2732 2726 exposing repo, remote and outgoing attributes, before pushing changesets.
2733 2727 """
2734 2728 return util.hooks()
2735 2729
2736 2730 def pushkey(self, namespace, key, old, new):
2737 2731 try:
2738 2732 tr = self.currenttransaction()
2739 2733 hookargs = {}
2740 2734 if tr is not None:
2741 2735 hookargs.update(tr.hookargs)
2742 2736 hookargs = pycompat.strkwargs(hookargs)
2743 2737 hookargs[r'namespace'] = namespace
2744 2738 hookargs[r'key'] = key
2745 2739 hookargs[r'old'] = old
2746 2740 hookargs[r'new'] = new
2747 2741 self.hook('prepushkey', throw=True, **hookargs)
2748 2742 except error.HookAbort as exc:
2749 2743 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2750 2744 if exc.hint:
2751 2745 self.ui.write_err(_("(%s)\n") % exc.hint)
2752 2746 return False
2753 2747 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2754 2748 ret = pushkey.push(self, namespace, key, old, new)
2755 2749 def runhook():
2756 2750 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2757 2751 ret=ret)
2758 2752 self._afterlock(runhook)
2759 2753 return ret
2760 2754
2761 2755 def listkeys(self, namespace):
2762 2756 self.hook('prelistkeys', throw=True, namespace=namespace)
2763 2757 self.ui.debug('listing keys for "%s"\n' % namespace)
2764 2758 values = pushkey.list(self, namespace)
2765 2759 self.hook('listkeys', namespace=namespace, values=values)
2766 2760 return values
2767 2761
2768 2762 def debugwireargs(self, one, two, three=None, four=None, five=None):
2769 2763 '''used to test argument passing over the wire'''
2770 2764 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2771 2765 pycompat.bytestr(four),
2772 2766 pycompat.bytestr(five))
2773 2767
2774 2768 def savecommitmessage(self, text):
2775 2769 fp = self.vfs('last-message.txt', 'wb')
2776 2770 try:
2777 2771 fp.write(text)
2778 2772 finally:
2779 2773 fp.close()
2780 2774 return self.pathto(fp.name[len(self.root) + 1:])
2781 2775
2782 2776 # used to avoid circular references so destructors work
2783 2777 def aftertrans(files):
2784 2778 renamefiles = [tuple(t) for t in files]
2785 2779 def a():
2786 2780 for vfs, src, dest in renamefiles:
2787 2781 # if src and dest refer to the same file, vfs.rename is a no-op,
2788 2782 # leaving both src and dest on disk. delete dest to make sure
2789 2783 # the rename couldn't be such a no-op.
2790 2784 vfs.tryunlink(dest)
2791 2785 try:
2792 2786 vfs.rename(src, dest)
2793 2787 except OSError: # journal file does not yet exist
2794 2788 pass
2795 2789 return a
2796 2790
2797 2791 def undoname(fn):
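# map a journal file name to its undo counterpart,
# e.g. 'journal.dirstate' -> 'undo.dirstate'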
2798 2792 base, name = os.path.split(fn)
2799 2793 assert name.startswith('journal')
2800 2794 return os.path.join(base, name.replace('journal', 'undo', 1))
2801 2795
2802 2796 def instance(ui, path, create, intents=None, createopts=None):
2803 2797 localpath = util.urllocalpath(path)
2804 2798 if create:
2805 2799 createrepository(ui, localpath, createopts=createopts)
2806 2800
2807 2801 return makelocalrepository(ui, localpath, intents=intents)
2808 2802
2809 2803 def islocal(path):
2810 2804 return True
2811 2805
2812 2806 def defaultcreateopts(ui, createopts=None):
2813 2807 """Populate the default creation options for a repository.
2814 2808
2815 2809 A dictionary of explicitly requested creation options can be passed
2816 2810 in. Missing keys will be populated.
2817 2811 """
2818 2812 createopts = dict(createopts or {})
2819 2813
2820 2814 if 'backend' not in createopts:
2821 2815 # experimental config: storage.new-repo-backend
2822 2816 createopts['backend'] = ui.config('storage', 'new-repo-backend')
2823 2817
2824 2818 return createopts
2825 2819
2826 2820 def newreporequirements(ui, createopts):
2827 2821 """Determine the set of requirements for a new local repository.
2828 2822
2829 2823 Extensions can wrap this function to specify custom requirements for
2830 2824 new repositories.
2831 2825 """
2832 2826 # If the repo is being created from a shared repository, we copy
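# An extension could wrap this function to inject its own requirement,
# e.g. (hypothetical sketch; 'exp-myextension' is a made-up name)::
#
#   def wrapped(orig, ui, createopts):
#       requirements = orig(ui, createopts)
#       requirements.add('exp-myextension')
#       return requirements
#
#   extensions.wrapfunction(localrepo, 'newreporequirements', wrapped)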
2833 2827 # its requirements.
2834 2828 if 'sharedrepo' in createopts:
2835 2829 requirements = set(createopts['sharedrepo'].requirements)
2836 2830 if createopts.get('sharedrelative'):
2837 2831 requirements.add('relshared')
2838 2832 else:
2839 2833 requirements.add('shared')
2840 2834
2841 2835 return requirements
2842 2836
2843 2837 if 'backend' not in createopts:
2844 2838 raise error.ProgrammingError('backend key not present in createopts; '
2845 2839 'was defaultcreateopts() called?')
2846 2840
2847 2841 if createopts['backend'] != 'revlogv1':
2848 2842 raise error.Abort(_('unable to determine repository requirements for '
2849 2843 'storage backend: %s') % createopts['backend'])
2850 2844
2851 2845 requirements = {'revlogv1'}
2852 2846 if ui.configbool('format', 'usestore'):
2853 2847 requirements.add('store')
2854 2848 if ui.configbool('format', 'usefncache'):
2855 2849 requirements.add('fncache')
2856 2850 if ui.configbool('format', 'dotencode'):
2857 2851 requirements.add('dotencode')
2858 2852
2859 2853 compengine = ui.config('experimental', 'format.compression')
2860 2854 if compengine not in util.compengines:
2861 2855 raise error.Abort(_('compression engine %s defined by '
2862 2856 'experimental.format.compression not available') %
2863 2857 compengine,
2864 2858 hint=_('run "hg debuginstall" to list available '
2865 2859 'compression engines'))
2866 2860
2867 2861 # zlib is the historical default and doesn't need an explicit requirement.
2868 2862 if compengine != 'zlib':
2869 2863 requirements.add('exp-compression-%s' % compengine)
2870 2864
2871 2865 if scmutil.gdinitconfig(ui):
2872 2866 requirements.add('generaldelta')
2873 2867 if ui.configbool('experimental', 'treemanifest'):
2874 2868 requirements.add('treemanifest')
2875 2869 # experimental config: format.sparse-revlog
2876 2870 if ui.configbool('format', 'sparse-revlog'):
2877 2871 requirements.add(SPARSEREVLOG_REQUIREMENT)
2878 2872
2879 2873 revlogv2 = ui.config('experimental', 'revlogv2')
2880 2874 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2881 2875 requirements.remove('revlogv1')
2882 2876 # generaldelta is implied by revlogv2.
2883 2877 requirements.discard('generaldelta')
2884 2878 requirements.add(REVLOGV2_REQUIREMENT)
2885 2879 # experimental config: format.internal-phase
2886 2880 if ui.configbool('format', 'internal-phase'):
2887 2881 requirements.add('internal-phase')
2888 2882
2889 2883 if createopts.get('narrowfiles'):
2890 2884 requirements.add(repository.NARROW_REQUIREMENT)
2891 2885
2892 2886 return requirements
2893 2887
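# Sketch of the extension hook described in the docstring above: an
# extension wraps newreporequirements() to add its own requirement. The
# requirement name 'exp-example' is invented for illustration.
def _examplerequirementswrapper(orig, ui, createopts):
    requirements = orig(ui, createopts)
    requirements.add('exp-example')
    return requirements
#
# An extension would register the wrapper in its uisetup(), e.g.:
#     extensions.wrapfunction(localrepo, 'newreporequirements',
#                             _examplerequirementswrapper)
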
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        'backend',
        'narrowfiles',
        'sharedrepo',
        'sharedrelative',
        'shareditems',
    }

    return {k: v for k, v in createopts.items() if k not in known}

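# Companion sketch for filterknowncreateopts(): an extension that handles
# a custom creation option removes it from the returned "unknown" dict so
# that createrepository() below allows creation to proceed. 'exampleopt'
# is a made-up option name, and the wrapper would be registered with
# extensions.wrapfunction() as in the previous sketch.
def _examplefilterwrapper(orig, ui, createopts):
    unknown = orig(ui, createopts)
    unknown.pop('exampleopt', None)
    return unknown
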
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share with the new repository (in addition to storage).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    if 'sharedrepo' in createopts:
        sharedpath = createopts['sharedrepo'].sharedpath

        if createopts.get('sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(_('cannot calculate relative path'),
                                  hint=stringutil.forcebytestr(e))

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)

    if b'store' in requirements and 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(b'00changelog.i',
                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
                     b'layout')

    scmutil.writerequires(hgvfs, requirements)

    # Write out a file telling readers where to find the shared store.
    if 'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get('shareditems'):
        shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)

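# Hypothetical end-to-end use of createrepository() for a shared clone,
# mirroring the docstring above; the paths and srcrepo are made up:
#
#     createrepository(ui, b'/srv/repos/copy',
#                      createopts={'sharedrepo': srcrepo,
#                                  'sharedrelative': False,
#                                  'shareditems': {b'bookmarks'}})
#
# Per the code above, the new repository copies srcrepo's requirements
# plus 'shared', writes .hg/sharedpath pointing at srcrepo's store, and
# records the shared items in .hg/shared.
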
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one where
    # every attribute lookup results in an error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == r'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError('repo instances should not be used '
                                         'after unshare')

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)
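# Sketch of the intended use, inferred from the error message above (the
# calling context shown is an assumption):
#
#     poisonrepository(repo)   # e.g. after hg.unshare() swaps the repo
#     repo.close()             # still permitted, as a no-op
#     repo.status()            # raises error.ProgrammingError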