localrepo: capture repo interface factory functions as lambdas...
Gregory Szorc
r40030:6962ebc8 default
@@ -1,3000 +1,3002 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 )
26 26 from . import (
27 27 bookmarks,
28 28 branchmap,
29 29 bundle2,
30 30 changegroup,
31 31 changelog,
32 32 color,
33 33 context,
34 34 dirstate,
35 35 dirstateguard,
36 36 discovery,
37 37 encoding,
38 38 error,
39 39 exchange,
40 40 extensions,
41 41 filelog,
42 42 hook,
43 43 lock as lockmod,
44 44 manifest,
45 45 match as matchmod,
46 46 merge as mergemod,
47 47 mergeutil,
48 48 namespaces,
49 49 narrowspec,
50 50 obsolete,
51 51 pathutil,
52 52 phases,
53 53 pushkey,
54 54 pycompat,
55 55 repository,
56 56 repoview,
57 57 revset,
58 58 revsetlang,
59 59 scmutil,
60 60 sparse,
61 61 store as storemod,
62 62 subrepoutil,
63 63 tags as tagsmod,
64 64 transaction,
65 65 txnutil,
66 66 util,
67 67 vfs as vfsmod,
68 68 )
69 69 from .utils import (
70 70 interfaceutil,
71 71 procutil,
72 72 stringutil,
73 73 )
74 74
75 75 from .revlogutils import (
76 76 constants as revlogconst,
77 77 )
78 78
79 79 release = lockmod.release
80 80 urlerr = util.urlerr
81 81 urlreq = util.urlreq
82 82
83 83 # set of (path, vfs-location) tuples. vfs-location is:
83 83 # - 'plain' for vfs relative paths
85 85 # - '' for svfs relative paths
86 86 _cachedfiles = set()
87 87
88 88 class _basefilecache(scmutil.filecache):
89 89 """All filecache usage on repo are done for logic that should be unfiltered
90 90 """
91 91 def __get__(self, repo, type=None):
92 92 if repo is None:
93 93 return self
94 94 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
95 95 def __set__(self, repo, value):
96 96 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
97 97 def __delete__(self, repo):
98 98 return super(_basefilecache, self).__delete__(repo.unfiltered())
99 99
100 100 class repofilecache(_basefilecache):
101 101 """filecache for files in .hg but outside of .hg/store"""
102 102 def __init__(self, *paths):
103 103 super(repofilecache, self).__init__(*paths)
104 104 for path in paths:
105 105 _cachedfiles.add((path, 'plain'))
106 106
107 107 def join(self, obj, fname):
108 108 return obj.vfs.join(fname)
109 109
110 110 class storecache(_basefilecache):
111 111 """filecache for files in the store"""
112 112 def __init__(self, *paths):
113 113 super(storecache, self).__init__(*paths)
114 114 for path in paths:
115 115 _cachedfiles.add((path, ''))
116 116
117 117 def join(self, obj, fname):
118 118 return obj.sjoin(fname)
119 119
120 120 def isfilecached(repo, name):
121 121 """check if a repo has already cached "name" filecache-ed property
122 122
123 123 This returns (cachedobj-or-None, iscached) tuple.
124 124 """
125 125 cacheentry = repo.unfiltered()._filecache.get(name, None)
126 126 if not cacheentry:
127 127 return None, False
128 128 return cacheentry.obj, True
129 129
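A hedged usage sketch of ``isfilecached()``: probing whether a filecache-backed property has been populated, without forcing it to load. The 'changelog' name matches the ``@storecache``-decorated property defined further down in this file; the surrounding ``repo`` object is assumed.

```python
obj, cached = isfilecached(repo, 'changelog')
if cached:
    # Reuse the already-loaded changelog; nothing is read from disk.
    cl = obj
```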
130 130 class unfilteredpropertycache(util.propertycache):
131 131 """propertycache that apply to unfiltered repo only"""
132 132
133 133 def __get__(self, repo, type=None):
134 134 unfi = repo.unfiltered()
135 135 if unfi is repo:
136 136 return super(unfilteredpropertycache, self).__get__(unfi)
137 137 return getattr(unfi, self.name)
138 138
139 139 class filteredpropertycache(util.propertycache):
140 140 """propertycache that must take filtering in account"""
141 141
142 142 def cachevalue(self, obj, value):
143 143 object.__setattr__(obj, self.name, value)
144 144
145 145
146 146 def hasunfilteredcache(repo, name):
147 147 """check if a repo has an unfilteredpropertycache value for <name>"""
148 148 return name in vars(repo.unfiltered())
149 149
150 150 def unfilteredmethod(orig):
151 151 """decorate method that always need to be run on unfiltered version"""
152 152 def wrapper(repo, *args, **kwargs):
153 153 return orig(repo.unfiltered(), *args, **kwargs)
154 154 return wrapper
155 155
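A hedged sketch of the decorator in use: a method wrapped with ``unfilteredmethod`` always receives the unfiltered repository as ``self``, so it never operates on a repoview (the method name here is illustrative):

```python
class myrepository(localrepository):
    @unfilteredmethod
    def rebuildallcaches(self):
        # 'self' is guaranteed to be the unfiltered repo here.
        self.invalidatecaches()
```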
156 156 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
157 157 'unbundle'}
158 158 legacycaps = moderncaps.union({'changegroupsubset'})
159 159
160 160 @interfaceutil.implementer(repository.ipeercommandexecutor)
161 161 class localcommandexecutor(object):
162 162 def __init__(self, peer):
163 163 self._peer = peer
164 164 self._sent = False
165 165 self._closed = False
166 166
167 167 def __enter__(self):
168 168 return self
169 169
170 170 def __exit__(self, exctype, excvalue, exctb):
171 171 self.close()
172 172
173 173 def callcommand(self, command, args):
174 174 if self._sent:
175 175 raise error.ProgrammingError('callcommand() cannot be used after '
176 176 'sendcommands()')
177 177
178 178 if self._closed:
179 179 raise error.ProgrammingError('callcommand() cannot be used after '
180 180 'close()')
181 181
182 182 # We don't need to support anything fancy. Just call the named
183 183 # method on the peer and return a resolved future.
184 184 fn = getattr(self._peer, pycompat.sysstr(command))
185 185
186 186 f = pycompat.futures.Future()
187 187
188 188 try:
189 189 result = fn(**pycompat.strkwargs(args))
190 190 except Exception:
191 191 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
192 192 else:
193 193 f.set_result(result)
194 194
195 195 return f
196 196
197 197 def sendcommands(self):
198 198 self._sent = True
199 199
200 200 def close(self):
201 201 self._closed = True
202 202
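A hedged usage sketch of the executor above. Callers obtain it via ``peer.commandexecutor()``, enqueue commands, then resolve the returned futures; for this local executor the future is already resolved because the peer method runs synchronously (``b'heads'`` mirrors the peer API defined below):

```python
with peer.commandexecutor() as executor:
    fut = executor.callcommand(b'heads', {})
    executor.sendcommands()
    heads = fut.result()  # the peer's head nodes, computed synchronously
```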
203 203 @interfaceutil.implementer(repository.ipeercommands)
204 204 class localpeer(repository.peer):
205 205 '''peer for a local repo; reflects only the most recent API'''
206 206
207 207 def __init__(self, repo, caps=None):
208 208 super(localpeer, self).__init__()
209 209
210 210 if caps is None:
211 211 caps = moderncaps.copy()
212 212 self._repo = repo.filtered('served')
213 213 self.ui = repo.ui
214 214 self._caps = repo._restrictcapabilities(caps)
215 215
216 216 # Begin of _basepeer interface.
217 217
218 218 def url(self):
219 219 return self._repo.url()
220 220
221 221 def local(self):
222 222 return self._repo
223 223
224 224 def peer(self):
225 225 return self
226 226
227 227 def canpush(self):
228 228 return True
229 229
230 230 def close(self):
231 231 self._repo.close()
232 232
233 233 # End of _basepeer interface.
234 234
235 235 # Begin of _basewirecommands interface.
236 236
237 237 def branchmap(self):
238 238 return self._repo.branchmap()
239 239
240 240 def capabilities(self):
241 241 return self._caps
242 242
243 243 def clonebundles(self):
244 244 return self._repo.tryread('clonebundles.manifest')
245 245
246 246 def debugwireargs(self, one, two, three=None, four=None, five=None):
247 247 """Used to test argument passing over the wire"""
248 248 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
249 249 pycompat.bytestr(four),
250 250 pycompat.bytestr(five))
251 251
252 252 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
253 253 **kwargs):
254 254 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
255 255 common=common, bundlecaps=bundlecaps,
256 256 **kwargs)[1]
257 257 cb = util.chunkbuffer(chunks)
258 258
259 259 if exchange.bundle2requested(bundlecaps):
260 260 # When requesting a bundle2, getbundle returns a stream to make the
261 261 # wire level function happier. We need to build a proper object
262 262 # from it in local peer.
263 263 return bundle2.getunbundler(self.ui, cb)
264 264 else:
265 265 return changegroup.getunbundler('01', cb, None)
266 266
267 267 def heads(self):
268 268 return self._repo.heads()
269 269
270 270 def known(self, nodes):
271 271 return self._repo.known(nodes)
272 272
273 273 def listkeys(self, namespace):
274 274 return self._repo.listkeys(namespace)
275 275
276 276 def lookup(self, key):
277 277 return self._repo.lookup(key)
278 278
279 279 def pushkey(self, namespace, key, old, new):
280 280 return self._repo.pushkey(namespace, key, old, new)
281 281
282 282 def stream_out(self):
283 283 raise error.Abort(_('cannot perform stream clone against local '
284 284 'peer'))
285 285
286 286 def unbundle(self, bundle, heads, url):
287 287 """apply a bundle on a repo
288 288
289 289 This function handles the repo locking itself."""
290 290 try:
291 291 try:
292 292 bundle = exchange.readbundle(self.ui, bundle, None)
293 293 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
294 294 if util.safehasattr(ret, 'getchunks'):
295 295 # This is a bundle20 object, turn it into an unbundler.
296 296 # This little dance should be dropped eventually when the
297 297 # API is finally improved.
298 298 stream = util.chunkbuffer(ret.getchunks())
299 299 ret = bundle2.getunbundler(self.ui, stream)
300 300 return ret
301 301 except Exception as exc:
302 302 # If the exception contains output salvaged from a bundle2
303 303 # reply, we need to make sure it is printed before continuing
304 304 # to fail. So we build a bundle2 with such output and consume
305 305 # it directly.
306 306 #
307 307 # This is not very elegant but allows a "simple" solution for
308 308 # issue4594
309 309 output = getattr(exc, '_bundle2salvagedoutput', ())
310 310 if output:
311 311 bundler = bundle2.bundle20(self._repo.ui)
312 312 for out in output:
313 313 bundler.addpart(out)
314 314 stream = util.chunkbuffer(bundler.getchunks())
315 315 b = bundle2.getunbundler(self.ui, stream)
316 316 bundle2.processbundle(self._repo, b)
317 317 raise
318 318 except error.PushRaced as exc:
319 319 raise error.ResponseError(_('push failed:'),
320 320 stringutil.forcebytestr(exc))
321 321
322 322 # End of _basewirecommands interface.
323 323
324 324 # Begin of peer interface.
325 325
326 326 def commandexecutor(self):
327 327 return localcommandexecutor(self)
328 328
329 329 # End of peer interface.
330 330
331 331 @interfaceutil.implementer(repository.ipeerlegacycommands)
332 332 class locallegacypeer(localpeer):
333 333 '''peer extension which implements legacy methods too; used for tests with
334 334 restricted capabilities'''
335 335
336 336 def __init__(self, repo):
337 337 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
338 338
339 339 # Begin of baselegacywirecommands interface.
340 340
341 341 def between(self, pairs):
342 342 return self._repo.between(pairs)
343 343
344 344 def branches(self, nodes):
345 345 return self._repo.branches(nodes)
346 346
347 347 def changegroup(self, nodes, source):
348 348 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
349 349 missingheads=self._repo.heads())
350 350 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
351 351
352 352 def changegroupsubset(self, bases, heads, source):
353 353 outgoing = discovery.outgoing(self._repo, missingroots=bases,
354 354 missingheads=heads)
355 355 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
356 356
357 357 # End of baselegacywirecommands interface.
358 358
359 359 # Increment the sub-version when the revlog v2 format changes to lock out old
360 360 # clients.
361 361 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
362 362
363 363 # A repository with the sparserevlog feature will have delta chains that
364 364 # can spread over a larger span. Sparse reading cuts these large spans into
365 365 # pieces, so that each piece isn't too big.
366 366 # Without the sparserevlog capability, reading from the repository could use
367 367 # huge amounts of memory, because the whole span would be read at once,
368 368 # including all the intermediate revisions that aren't pertinent for the chain.
369 369 # This is why once a repository has enabled sparse-read, it becomes required.
370 370 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
371 371
372 372 # Functions receiving (ui, features) that extensions can register to impact
373 373 # the ability to load repositories with custom requirements. Only
374 374 # functions defined in loaded extensions are called.
375 375 #
376 376 # The function receives a set of requirement strings that the repository
377 377 # is capable of opening. Functions will typically add elements to the
378 378 # set to reflect that the extension knows how to handle that requirements.
379 379 featuresetupfuncs = set()
380 380
381 381 def makelocalrepository(baseui, path, intents=None):
382 382 """Create a local repository object.
383 383
384 384 Given arguments needed to construct a local repository, this function
385 385 performs various early repository loading steps (such as
386 386 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
387 387 the repository can be opened, derives a type suitable for representing
388 388 that repository, and returns an instance of it.
389 389
390 390 The returned object conforms to the ``repository.completelocalrepository``
391 391 interface.
392 392
393 393 The repository type is derived by calling a series of factory functions
394 394 for each aspect/interface of the final repository. These are defined by
395 395 ``REPO_INTERFACES``.
396 396
397 397 Each factory function is called to produce a type implementing a specific
398 398 interface. The cumulative list of returned types will be combined into a
399 399 new type and that type will be instantiated to represent the local
400 400 repository.
401 401
402 402 The factory functions each receive various state that may be consulted
403 403 as part of deriving a type.
404 404
405 405 Extensions should wrap these factory functions to customize repository type
406 406 creation. Note that an extension's wrapped function may be called even if
407 407 that extension is not loaded for the repo being constructed. Extensions
408 408 should check if their ``__name__`` appears in the
409 409 ``extensionmodulenames`` set passed to the factory function and no-op if
410 410 not.
411 411 """
412 412 ui = baseui.copy()
413 413 # Prevent copying repo configuration.
414 414 ui.copy = baseui.copy
415 415
416 416 # Working directory VFS rooted at repository root.
417 417 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
418 418
419 419 # Main VFS for .hg/ directory.
420 420 hgpath = wdirvfs.join(b'.hg')
421 421 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
422 422
423 423 # The .hg/ path should exist and should be a directory. All other
424 424 # cases are errors.
425 425 if not hgvfs.isdir():
426 426 try:
427 427 hgvfs.stat()
428 428 except OSError as e:
429 429 if e.errno != errno.ENOENT:
430 430 raise
431 431
432 432 raise error.RepoError(_(b'repository %s not found') % path)
433 433
434 434 # .hg/requires file contains a newline-delimited list of
435 435 # features/capabilities the opener (us) must have in order to use
436 436 # the repository. This file was introduced in Mercurial 0.9.2,
437 437 # which means very old repositories may not have one. We assume
438 438 # a missing file translates to no requirements.
439 439 try:
440 440 requirements = set(hgvfs.read(b'requires').splitlines())
441 441 except IOError as e:
442 442 if e.errno != errno.ENOENT:
443 443 raise
444 444 requirements = set()
445 445
446 446 # The .hg/hgrc file may load extensions or contain config options
447 447 # that influence repository construction. Attempt to load it and
448 448 # process any new extensions that it may have pulled in.
449 449 try:
450 450 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
451 451 # Run this before extensions.loadall() so extensions can be
452 452 # automatically enabled.
453 453 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
454 454 except IOError:
455 455 pass
456 456 else:
457 457 extensions.loadall(ui)
458 458
459 459 # Set of module names of extensions loaded for this repository.
460 460 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
461 461
462 462 supportedrequirements = gathersupportedrequirements(ui)
463 463
464 464 # We first validate the requirements are known.
465 465 ensurerequirementsrecognized(requirements, supportedrequirements)
466 466
467 467 # Then we validate that the known set is reasonable to use together.
468 468 ensurerequirementscompatible(ui, requirements)
469 469
470 470 # TODO there are unhandled edge cases related to opening repositories with
471 471 # shared storage. If storage is shared, we should also test for requirements
472 472 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
473 473 # that repo, as that repo may load extensions needed to open it. This is a
474 474 # bit complicated because we don't want the other hgrc to overwrite settings
475 475 # in this hgrc.
476 476 #
477 477 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
478 478 # file when sharing repos. But if a requirement is added after the share is
479 479 # performed, thereby introducing a new requirement for the opener, we may
480 480 # will not see that and could encounter a run-time error interacting with
481 481 # that shared store since it has an unknown-to-us requirement.
482 482
483 483 # At this point, we know we should be capable of opening the repository.
484 484 # Now get on with doing that.
485 485
486 486 features = set()
487 487
488 488 # The "store" part of the repository holds versioned data. How it is
489 489 # accessed is determined by various requirements. The ``shared`` or
490 490 # ``relshared`` requirements indicate the store lives in the path contained
491 491 # in the ``.hg/sharedpath`` file. This is an absolute path for
492 492 # ``shared`` and relative to ``.hg/`` for ``relshared``.
493 493 if b'shared' in requirements or b'relshared' in requirements:
494 494 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
495 495 if b'relshared' in requirements:
496 496 sharedpath = hgvfs.join(sharedpath)
497 497
498 498 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
499 499
500 500 if not sharedvfs.exists():
501 501 raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
502 502 b'directory %s') % sharedvfs.base)
503 503
504 504 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
505 505
506 506 storebasepath = sharedvfs.base
507 507 cachepath = sharedvfs.join(b'cache')
508 508 else:
509 509 storebasepath = hgvfs.base
510 510 cachepath = hgvfs.join(b'cache')
511 511
512 512 # The store has changed over time and the exact layout is dictated by
513 513 # requirements. The store interface abstracts differences across all
514 514 # of them.
515 515 store = makestore(requirements, storebasepath,
516 516 lambda base: vfsmod.vfs(base, cacheaudited=True))
517 517 hgvfs.createmode = store.createmode
518 518
519 519 storevfs = store.vfs
520 520 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
521 521
522 522 # The cache vfs is used to manage cache files.
523 523 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
524 524 cachevfs.createmode = store.createmode
525 525
526 526 # Now resolve the type for the repository object. We do this by repeatedly
527 527 # calling a factory function to produce types for specific aspects of the
528 528 # repo's operation. The aggregate returned types are used as base classes
529 529 # for a dynamically-derived type, which will represent our new repository.
530 530
531 531 bases = []
532 532 extrastate = {}
533 533
534 534 for iface, fn in REPO_INTERFACES:
535 535 # We pass all potentially useful state to give extensions tons of
536 536 # flexibility.
537 typ = fn(ui=ui,
537 typ = fn()(ui=ui,
538 538 intents=intents,
539 539 requirements=requirements,
540 540 features=features,
541 541 wdirvfs=wdirvfs,
542 542 hgvfs=hgvfs,
543 543 store=store,
544 544 storevfs=storevfs,
545 545 storeoptions=storevfs.options,
546 546 cachevfs=cachevfs,
547 547 extensionmodulenames=extensionmodulenames,
548 548 extrastate=extrastate,
549 549 baseclasses=bases)
550 550
551 551 if not isinstance(typ, type):
552 552 raise error.ProgrammingError('unable to construct type for %s' %
553 553 iface)
554 554
555 555 bases.append(typ)
556 556
557 557 # type() allows you to use characters in type names that wouldn't be
558 558 # recognized as Python symbols in source code. We abuse that to add
559 559 # rich information about our constructed repo.
560 560 name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
561 561 wdirvfs.base,
562 562 b','.join(sorted(requirements))))
563 563
564 564 cls = type(name, tuple(bases), {})
565 565
566 566 return cls(
567 567 baseui=baseui,
568 568 ui=ui,
569 569 origroot=path,
570 570 wdirvfs=wdirvfs,
571 571 hgvfs=hgvfs,
572 572 requirements=requirements,
573 573 supportedrequirements=supportedrequirements,
574 574 sharedpath=storebasepath,
575 575 store=store,
576 576 cachevfs=cachevfs,
577 577 features=features,
578 578 intents=intents)
579 579
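Per the docstring of ``makelocalrepository()``, extensions customize the repository type by wrapping the factory functions. A hedged sketch of an extension doing so; the ``__name__`` check follows the docstring's advice, while the class body is illustrative:

```python
from mercurial import extensions, localrepo

def _wrappedmakemain(orig, **kwargs):
    cls = orig(**kwargs)
    if __name__ not in kwargs['extensionmodulenames']:
        return cls  # extension not enabled for this repo: no-op

    class extendedrepo(cls):
        pass  # extension-specific behavior would be mixed in here

    return extendedrepo

def uisetup(ui):
    extensions.wrapfunction(localrepo, 'makemain', _wrappedmakemain)
```

This pattern only takes effect because ``REPO_INTERFACES`` (below) captures the factories behind lambdas, which is exactly what this changeset introduces.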
580 580 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
581 581 """Perform additional actions after .hg/hgrc is loaded.
582 582
583 583 This function is called during repository loading immediately after
584 584 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
585 585
586 586 The function can be used to validate configs, automatically add
587 587 options (including extensions) based on requirements, etc.
588 588 """
589 589
590 590 # Map of requirements to list of extensions to load automatically when
591 591 # requirement is present.
592 592 autoextensions = {
593 593 b'largefiles': [b'largefiles'],
594 594 b'lfs': [b'lfs'],
595 595 }
596 596
597 597 for requirement, names in sorted(autoextensions.items()):
598 598 if requirement not in requirements:
599 599 continue
600 600
601 601 for name in names:
602 602 if not ui.hasconfig(b'extensions', name):
603 603 ui.setconfig(b'extensions', name, b'', source='autoload')
604 604
605 605 def gathersupportedrequirements(ui):
606 606 """Determine the complete set of recognized requirements."""
607 607 # Start with all requirements supported by this file.
608 608 supported = set(localrepository._basesupported)
609 609
610 610 # Execute ``featuresetupfuncs`` entries if they belong to an extension
611 611 # relevant to this ui instance.
612 612 modules = {m.__name__ for n, m in extensions.extensions(ui)}
613 613
614 614 for fn in featuresetupfuncs:
615 615 if fn.__module__ in modules:
616 616 fn(ui, supported)
617 617
618 618 # Add derived requirements from registered compression engines.
619 619 for name in util.compengines:
620 620 engine = util.compengines[name]
621 621 if engine.revlogheader():
622 622 supported.add(b'exp-compression-%s' % name)
623 623
624 624 return supported
625 625
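A hedged sketch of how an extension hooks ``featuresetupfuncs`` so that a custom requirement becomes recognized when repositories are opened (the requirement name is illustrative):

```python
from mercurial import localrepo

def featuresetup(ui, supported):
    # Advertise that this extension knows how to open such repos.
    supported.add(b'exp-myfeature')

def uisetup(ui):
    localrepo.featuresetupfuncs.add(featuresetup)
```

Because ``gathersupportedrequirements()`` only runs functions whose module is loaded for the given ui, the requirement stays unrecognized when the extension is disabled.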
626 626 def ensurerequirementsrecognized(requirements, supported):
627 627 """Validate that a set of local requirements is recognized.
628 628
629 629 Receives a set of requirements. Raises an ``error.RepoError`` if there
630 630 exists any requirement in that set that currently loaded code doesn't
631 631 recognize.
632 632
633 633 Returns a set of supported requirements.
634 634 """
635 635 missing = set()
636 636
637 637 for requirement in requirements:
638 638 if requirement in supported:
639 639 continue
640 640
641 641 if not requirement or not requirement[0:1].isalnum():
642 642 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
643 643
644 644 missing.add(requirement)
645 645
646 646 if missing:
647 647 raise error.RequirementError(
648 648 _(b'repository requires features unknown to this Mercurial: %s') %
649 649 b' '.join(sorted(missing)),
650 650 hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
651 651 b'for more information'))
652 652
653 653 def ensurerequirementscompatible(ui, requirements):
654 654 """Validates that a set of recognized requirements is mutually compatible.
655 655
656 656 Some requirements may not be compatible with others or require
657 657 config options that aren't enabled. This function is called during
658 658 repository opening to ensure that the set of requirements needed
659 659 to open a repository is sane and compatible with config options.
660 660
661 661 Extensions can monkeypatch this function to perform additional
662 662 checking.
663 663
664 664 ``error.RepoError`` should be raised on failure.
665 665 """
666 666 if b'exp-sparse' in requirements and not sparse.enabled:
667 667 raise error.RepoError(_(b'repository is using sparse feature but '
668 668 b'sparse is not enabled; enable the '
669 669 b'"sparse" extensions to access'))
670 670
671 671 def makestore(requirements, path, vfstype):
672 672 """Construct a storage object for a repository."""
673 673 if b'store' in requirements:
674 674 if b'fncache' in requirements:
675 675 return storemod.fncachestore(path, vfstype,
676 676 b'dotencode' in requirements)
677 677
678 678 return storemod.encodedstore(path, vfstype)
679 679
680 680 return storemod.basicstore(path, vfstype)
681 681
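Illustrating the dispatch above: a freshly created modern repository carries the 'store', 'fncache', and 'dotencode' requirements, so it receives an ``fncachestore`` with dot-encoding enabled. This mirrors the call site in ``makelocalrepository()``; ``storebasepath`` is assumed:

```python
reqs = {b'store', b'fncache', b'dotencode', b'revlogv1', b'generaldelta'}
store = makestore(reqs, storebasepath,
                  lambda base: vfsmod.vfs(base, cacheaudited=True))
# isinstance(store, storemod.fncachestore) -> True
```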
682 682 def resolvestorevfsoptions(ui, requirements, features):
683 683 """Resolve the options to pass to the store vfs opener.
684 684
685 685 The returned dict is used to influence behavior of the storage layer.
686 686 """
687 687 options = {}
688 688
689 689 if b'treemanifest' in requirements:
690 690 options[b'treemanifest'] = True
691 691
692 692 # experimental config: format.manifestcachesize
693 693 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
694 694 if manifestcachesize is not None:
695 695 options[b'manifestcachesize'] = manifestcachesize
696 696
697 697 # In the absence of another requirement superseding a revlog-related
698 698 # requirement, we have to assume the repo is using revlog version 0.
699 699 # This revlog format is super old and we don't bother trying to parse
700 700 # opener options for it because those options wouldn't do anything
701 701 # meaningful on such old repos.
702 702 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
703 703 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
704 704
705 705 return options
706 706
707 707 def resolverevlogstorevfsoptions(ui, requirements, features):
708 708 """Resolve opener options specific to revlogs."""
709 709
710 710 options = {}
711 711
712 712 if b'revlogv1' in requirements:
713 713 options[b'revlogv1'] = True
714 714 if REVLOGV2_REQUIREMENT in requirements:
715 715 options[b'revlogv2'] = True
716 716
717 717 if b'generaldelta' in requirements:
718 718 options[b'generaldelta'] = True
719 719
720 720 # experimental config: format.chunkcachesize
721 721 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
722 722 if chunkcachesize is not None:
723 723 options[b'chunkcachesize'] = chunkcachesize
724 724
725 725 deltabothparents = ui.configbool(b'storage',
726 726 b'revlog.optimize-delta-parent-choice')
727 727 options[b'deltabothparents'] = deltabothparents
728 728
729 729 options[b'lazydeltabase'] = not scmutil.gddeltaconfig(ui)
730 730
731 731 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
732 732 if 0 <= chainspan:
733 733 options[b'maxdeltachainspan'] = chainspan
734 734
735 735 mmapindexthreshold = ui.configbytes(b'experimental',
736 736 b'mmapindexthreshold')
737 737 if mmapindexthreshold is not None:
738 738 options[b'mmapindexthreshold'] = mmapindexthreshold
739 739
740 740 withsparseread = ui.configbool(b'experimental', b'sparse-read')
741 741 srdensitythres = float(ui.config(b'experimental',
742 742 b'sparse-read.density-threshold'))
743 743 srmingapsize = ui.configbytes(b'experimental',
744 744 b'sparse-read.min-gap-size')
745 745 options[b'with-sparse-read'] = withsparseread
746 746 options[b'sparse-read-density-threshold'] = srdensitythres
747 747 options[b'sparse-read-min-gap-size'] = srmingapsize
748 748
749 749 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
750 750 options[b'sparse-revlog'] = sparserevlog
751 751 if sparserevlog:
752 752 options[b'generaldelta'] = True
753 753
754 754 maxchainlen = None
755 755 if sparserevlog:
756 756 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
757 757 # experimental config: format.maxchainlen
758 758 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
759 759 if maxchainlen is not None:
760 760 options[b'maxchainlen'] = maxchainlen
761 761
762 762 for r in requirements:
763 763 if r.startswith(b'exp-compression-'):
764 764 options[b'compengine'] = r[len(b'exp-compression-'):]
765 765
766 766 if repository.NARROW_REQUIREMENT in requirements:
767 767 options[b'enableellipsis'] = True
768 768
769 769 return options
770 770
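Note the coupling encoded above: the sparse-revlog requirement forces ``generaldelta`` on and caps the delta chain length unless ``format.maxchainlen`` overrides it. A hedged illustration, assuming an in-tree ui instance with the default config registrations:

```python
from mercurial import ui as uimod

ui = uimod.ui.load()
opts = resolverevlogstorevfsoptions(
    ui, {b'revlogv1', SPARSEREVLOG_REQUIREMENT}, set())
assert opts[b'generaldelta'] and opts[b'sparse-revlog']
```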
771 771 def makemain(**kwargs):
772 772 """Produce a type conforming to ``ilocalrepositorymain``."""
773 773 return localrepository
774 774
775 775 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
776 776 class revlogfilestorage(object):
777 777 """File storage when using revlogs."""
778 778
779 779 def file(self, path):
780 780 if path[0] == b'/':
781 781 path = path[1:]
782 782
783 783 return filelog.filelog(self.svfs, path)
784 784
785 785 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
786 786 class revlognarrowfilestorage(object):
787 787 """File storage when using revlogs and narrow files."""
788 788
789 789 def file(self, path):
790 790 if path[0] == b'/':
791 791 path = path[1:]
792 792
793 793 return filelog.narrowfilelog(self.svfs, path, self.narrowmatch())
794 794
795 795 def makefilestorage(requirements, features, **kwargs):
796 796 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
797 797 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
798 798
799 799 if repository.NARROW_REQUIREMENT in requirements:
800 800 return revlognarrowfilestorage
801 801 else:
802 802 return revlogfilestorage
803 803
804 804 # List of repository interfaces and factory functions for them. Each
805 805 # will be called in order during ``makelocalrepository()`` to iteratively
806 # derive the final type for a local repository instance.
806 # derive the final type for a local repository instance. We capture the
807 # function as a lambda so we don't hold a reference and the module-level
808 # functions can be wrapped.
807 809 REPO_INTERFACES = [
808 (repository.ilocalrepositorymain, makemain),
809 (repository.ilocalrepositoryfilestorage, makefilestorage),
810 (repository.ilocalrepositorymain, lambda: makemain),
811 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
810 812 ]
811 813
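The comment above is the heart of this changeset: binding ``makemain``/``makefilestorage`` directly would freeze the function objects at import time, so a later ``extensions.wrapfunction`` on this module would be invisible to ``makelocalrepository()``. A minimal standalone sketch of the difference, with illustrative names rather than Mercurial code:

```python
def factory():
    return 'original'

DIRECT = [factory]        # binds the function object itself
LAZY = [lambda: factory]  # re-resolves the module-level name at call time

def wrapped():
    return 'wrapped'

factory = wrapped         # what wrapping the module-level function does

print(DIRECT[0]())        # 'original' -- the wrapper is never seen
print(LAZY[0]()())        # 'wrapped'  -- the lambda picks up the new binding
```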
812 814 @interfaceutil.implementer(repository.ilocalrepositorymain)
813 815 class localrepository(object):
814 816 """Main class for representing local repositories.
815 817
816 818 All local repositories are instances of this class.
817 819
818 820 Constructed on its own, instances of this class are not usable as
819 821 repository objects. To obtain a usable repository object, call
820 822 ``hg.repository()``, ``localrepo.instance()``, or
821 823 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
822 824 ``instance()`` adds support for creating new repositories.
823 825 ``hg.repository()`` adds more extension integration, including calling
824 826 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
825 827 used.
826 828 """
827 829
828 830 # obsolete experimental requirements:
829 831 # - manifestv2: An experimental new manifest format that allowed
830 832 # for stem compression of long paths. Experiment ended up not
831 833 # being successful (repository sizes went up due to worse delta
832 834 # chains), and the code was deleted in 4.6.
833 835 supportedformats = {
834 836 'revlogv1',
835 837 'generaldelta',
836 838 'treemanifest',
837 839 REVLOGV2_REQUIREMENT,
838 840 SPARSEREVLOG_REQUIREMENT,
839 841 }
840 842 _basesupported = supportedformats | {
841 843 'store',
842 844 'fncache',
843 845 'shared',
844 846 'relshared',
845 847 'dotencode',
846 848 'exp-sparse',
847 849 'internal-phase'
848 850 }
849 851
850 852 # list of prefix for file which can be written without 'wlock'
851 853 # Extensions should extend this list when needed
852 854 _wlockfreeprefix = {
853 855 # We might consider requiring 'wlock' for the next
854 856 # two, but pretty much all the existing code assumes
855 857 # wlock is not needed so we keep them excluded for
856 858 # now.
857 859 'hgrc',
858 860 'requires',
859 861 # XXX cache is a complicated business; someone
860 862 # should investigate this in depth at some point
861 863 'cache/',
862 864 # XXX shouldn't the dirstate be covered by the wlock?
863 865 'dirstate',
864 866 # XXX bisect was still a bit too messy at the time
865 867 # this changeset was introduced. Someone should fix
866 868 # the remaining bit and drop this line
867 869 'bisect.state',
868 870 }
869 871
870 872 def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
871 873 supportedrequirements, sharedpath, store, cachevfs,
872 874 features, intents=None):
873 875 """Create a new local repository instance.
874 876
875 877 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
876 878 or ``localrepo.makelocalrepository()`` for obtaining a new repository
877 879 object.
878 880
879 881 Arguments:
880 882
881 883 baseui
882 884 ``ui.ui`` instance that ``ui`` argument was based off of.
883 885
884 886 ui
885 887 ``ui.ui`` instance for use by the repository.
886 888
887 889 origroot
888 890 ``bytes`` path to working directory root of this repository.
889 891
890 892 wdirvfs
891 893 ``vfs.vfs`` rooted at the working directory.
892 894
893 895 hgvfs
894 896 ``vfs.vfs`` rooted at .hg/
895 897
896 898 requirements
897 899 ``set`` of bytestrings representing repository opening requirements.
898 900
899 901 supportedrequirements
900 902 ``set`` of bytestrings representing repository requirements that we
901 903 know how to open. May be a superset of ``requirements``.
902 904
903 905 sharedpath
904 906 ``bytes`` defining the path to the storage base directory. Points to a
905 907 ``.hg/`` directory somewhere.
906 908
907 909 store
908 910 ``store.basicstore`` (or derived) instance providing access to
909 911 versioned storage.
910 912
911 913 cachevfs
912 914 ``vfs.vfs`` used for cache files.
913 915
914 916 features
915 917 ``set`` of bytestrings defining features/capabilities of this
916 918 instance.
917 919
918 920 intents
919 921 ``set`` of system strings indicating what this repo will be used
920 922 for.
921 923 """
922 924 self.baseui = baseui
923 925 self.ui = ui
924 926 self.origroot = origroot
925 927 # vfs rooted at working directory.
926 928 self.wvfs = wdirvfs
927 929 self.root = wdirvfs.base
928 930 # vfs rooted at .hg/. Used to access most non-store paths.
929 931 self.vfs = hgvfs
930 932 self.path = hgvfs.base
931 933 self.requirements = requirements
932 934 self.supported = supportedrequirements
933 935 self.sharedpath = sharedpath
934 936 self.store = store
935 937 self.cachevfs = cachevfs
936 938 self.features = features
937 939
938 940 self.filtername = None
939 941
940 942 if (self.ui.configbool('devel', 'all-warnings') or
941 943 self.ui.configbool('devel', 'check-locks')):
942 944 self.vfs.audit = self._getvfsward(self.vfs.audit)
944 946 # A list of callbacks to shape the phase if no data were found.
945 947 # Callbacks are in the form: func(repo, roots) --> processed root.
946 948 # This list is to be filled by extensions during repo setup
946 948 self._phasedefaults = []
947 949
948 950 color.setup(self.ui)
949 951
950 952 self.spath = self.store.path
951 953 self.svfs = self.store.vfs
952 954 self.sjoin = self.store.join
953 955 if (self.ui.configbool('devel', 'all-warnings') or
954 956 self.ui.configbool('devel', 'check-locks')):
955 957 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
956 958 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
957 959 else: # standard vfs
958 960 self.svfs.audit = self._getsvfsward(self.svfs.audit)
959 961
960 962 self._dirstatevalidatewarned = False
961 963
962 964 self._branchcaches = {}
963 965 self._revbranchcache = None
964 966 self._filterpats = {}
965 967 self._datafilters = {}
966 968 self._transref = self._lockref = self._wlockref = None
967 969
968 970 # A cache for various files under .hg/ that tracks file changes,
969 971 # (used by the filecache decorator)
970 972 #
971 973 # Maps a property name to its util.filecacheentry
972 974 self._filecache = {}
973 975
974 976 # hold sets of revision to be filtered
975 977 # should be cleared when something might have changed the filter value:
976 978 # - new changesets,
977 979 # - phase change,
978 980 # - new obsolescence marker,
979 981 # - working directory parent change,
980 982 # - bookmark changes
981 983 self.filteredrevcache = {}
982 984
983 985 # post-dirstate-status hooks
984 986 self._postdsstatus = []
985 987
986 988 # generic mapping between names and nodes
987 989 self.names = namespaces.namespaces()
988 990
989 991 # Key to signature value.
990 992 self._sparsesignaturecache = {}
991 993 # Signature to cached matcher instance.
992 994 self._sparsematchercache = {}
993 995
994 996 def _getvfsward(self, origfunc):
995 997 """build a ward for self.vfs"""
996 998 rref = weakref.ref(self)
997 999 def checkvfs(path, mode=None):
998 1000 ret = origfunc(path, mode=mode)
999 1001 repo = rref()
1000 1002 if (repo is None
1001 1003 or not util.safehasattr(repo, '_wlockref')
1002 1004 or not util.safehasattr(repo, '_lockref')):
1003 1005 return
1004 1006 if mode in (None, 'r', 'rb'):
1005 1007 return
1006 1008 if path.startswith(repo.path):
1007 1009 # truncate name relative to the repository (.hg)
1008 1010 path = path[len(repo.path) + 1:]
1009 1011 if path.startswith('cache/'):
1010 1012 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
1011 1013 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
1012 1014 if path.startswith('journal.'):
1013 1015 # journal is covered by 'lock'
1014 1016 if repo._currentlock(repo._lockref) is None:
1015 1017 repo.ui.develwarn('write with no lock: "%s"' % path,
1016 1018 stacklevel=2, config='check-locks')
1017 1019 elif repo._currentlock(repo._wlockref) is None:
1018 1020 # rest of vfs files are covered by 'wlock'
1019 1021 #
1020 1022 # exclude special files
1021 1023 for prefix in self._wlockfreeprefix:
1022 1024 if path.startswith(prefix):
1023 1025 return
1024 1026 repo.ui.develwarn('write with no wlock: "%s"' % path,
1025 1027 stacklevel=2, config='check-locks')
1026 1028 return ret
1027 1029 return checkvfs
1028 1030
1029 1031 def _getsvfsward(self, origfunc):
1030 1032 """build a ward for self.svfs"""
1031 1033 rref = weakref.ref(self)
1032 1034 def checksvfs(path, mode=None):
1033 1035 ret = origfunc(path, mode=mode)
1034 1036 repo = rref()
1035 1037 if repo is None or not util.safehasattr(repo, '_lockref'):
1036 1038 return
1037 1039 if mode in (None, 'r', 'rb'):
1038 1040 return
1039 1041 if path.startswith(repo.sharedpath):
1040 1042 # truncate name relative to the repository (.hg)
1041 1043 path = path[len(repo.sharedpath) + 1:]
1042 1044 if repo._currentlock(repo._lockref) is None:
1043 1045 repo.ui.develwarn('write with no lock: "%s"' % path,
1044 1046 stacklevel=3)
1045 1047 return ret
1046 1048 return checksvfs
1047 1049
1048 1050 def close(self):
1049 1051 self._writecaches()
1050 1052
1051 1053 def _writecaches(self):
1052 1054 if self._revbranchcache:
1053 1055 self._revbranchcache.write()
1054 1056
1055 1057 def _restrictcapabilities(self, caps):
1056 1058 if self.ui.configbool('experimental', 'bundle2-advertise'):
1057 1059 caps = set(caps)
1058 1060 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
1059 1061 role='client'))
1060 1062 caps.add('bundle2=' + urlreq.quote(capsblob))
1061 1063 return caps
1062 1064
1063 1065 def _writerequirements(self):
1064 1066 scmutil.writerequires(self.vfs, self.requirements)
1065 1067
1066 1068 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1067 1069 # self -> auditor -> self._checknested -> self
1068 1070
1069 1071 @property
1070 1072 def auditor(self):
1071 1073 # This is only used by context.workingctx.match in order to
1072 1074 # detect files in subrepos.
1073 1075 return pathutil.pathauditor(self.root, callback=self._checknested)
1074 1076
1075 1077 @property
1076 1078 def nofsauditor(self):
1077 1079 # This is only used by context.basectx.match in order to detect
1078 1080 # files in subrepos.
1079 1081 return pathutil.pathauditor(self.root, callback=self._checknested,
1080 1082 realfs=False, cached=True)
1081 1083
1082 1084 def _checknested(self, path):
1083 1085 """Determine if path is a legal nested repository."""
1084 1086 if not path.startswith(self.root):
1085 1087 return False
1086 1088 subpath = path[len(self.root) + 1:]
1087 1089 normsubpath = util.pconvert(subpath)
1088 1090
1089 1091 # XXX: Checking against the current working copy is wrong in
1090 1092 # the sense that it can reject things like
1091 1093 #
1092 1094 # $ hg cat -r 10 sub/x.txt
1093 1095 #
1094 1096 # if sub/ is no longer a subrepository in the working copy
1095 1097 # parent revision.
1096 1098 #
1097 1099 # However, it can of course also allow things that would have
1098 1100 # been rejected before, such as the above cat command if sub/
1099 1101 # is a subrepository now, but was a normal directory before.
1100 1102 # The old path auditor would have rejected by mistake since it
1101 1103 # panics when it sees sub/.hg/.
1102 1104 #
1103 1105 # All in all, checking against the working copy seems sensible
1104 1106 # since we want to prevent access to nested repositories on
1105 1107 # the filesystem *now*.
1106 1108 ctx = self[None]
1107 1109 parts = util.splitpath(subpath)
1108 1110 while parts:
1109 1111 prefix = '/'.join(parts)
1110 1112 if prefix in ctx.substate:
1111 1113 if prefix == normsubpath:
1112 1114 return True
1113 1115 else:
1114 1116 sub = ctx.sub(prefix)
1115 1117 return sub.checknested(subpath[len(prefix) + 1:])
1116 1118 else:
1117 1119 parts.pop()
1118 1120 return False
1119 1121
1120 1122 def peer(self):
1121 1123 return localpeer(self) # not cached to avoid reference cycle
1122 1124
1123 1125 def unfiltered(self):
1124 1126 """Return unfiltered version of the repository
1125 1127
1126 1128 Intended to be overridden by filtered repos."""
1127 1129 return self
1128 1130
1129 1131 def filtered(self, name, visibilityexceptions=None):
1130 1132 """Return a filtered version of a repository"""
1131 1133 cls = repoview.newtype(self.unfiltered().__class__)
1132 1134 return cls(self, name, visibilityexceptions)
1133 1135
1134 1136 @repofilecache('bookmarks', 'bookmarks.current')
1135 1137 def _bookmarks(self):
1136 1138 return bookmarks.bmstore(self)
1137 1139
1138 1140 @property
1139 1141 def _activebookmark(self):
1140 1142 return self._bookmarks.active
1141 1143
1142 1144 # _phasesets depend on changelog. What we need is to call
1143 1145 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1144 1146 # can't be easily expressed in the filecache mechanism.
1145 1147 @storecache('phaseroots', '00changelog.i')
1146 1148 def _phasecache(self):
1147 1149 return phases.phasecache(self, self._phasedefaults)
1148 1150
1149 1151 @storecache('obsstore')
1150 1152 def obsstore(self):
1151 1153 return obsolete.makestore(self.ui, self)
1152 1154
1153 1155 @storecache('00changelog.i')
1154 1156 def changelog(self):
1155 1157 return changelog.changelog(self.svfs,
1156 1158 trypending=txnutil.mayhavepending(self.root))
1157 1159
1158 1160 @storecache('00manifest.i')
1159 1161 def manifestlog(self):
1160 1162 rootstore = manifest.manifestrevlog(self.svfs)
1161 1163 return manifest.manifestlog(self.svfs, self, rootstore)
1162 1164
1163 1165 @repofilecache('dirstate')
1164 1166 def dirstate(self):
1165 1167 return self._makedirstate()
1166 1168
1167 1169 def _makedirstate(self):
1168 1170 """Extension point for wrapping the dirstate per-repo."""
1169 1171 sparsematchfn = lambda: sparse.matcher(self)
1170 1172
1171 1173 return dirstate.dirstate(self.vfs, self.ui, self.root,
1172 1174 self._dirstatevalidate, sparsematchfn)
1173 1175
1174 1176 def _dirstatevalidate(self, node):
1175 1177 try:
1176 1178 self.changelog.rev(node)
1177 1179 return node
1178 1180 except error.LookupError:
1179 1181 if not self._dirstatevalidatewarned:
1180 1182 self._dirstatevalidatewarned = True
1181 1183 self.ui.warn(_("warning: ignoring unknown"
1182 1184 " working parent %s!\n") % short(node))
1183 1185 return nullid
1184 1186
1185 1187 @storecache(narrowspec.FILENAME)
1186 1188 def narrowpats(self):
1187 1189 """matcher patterns for this repository's narrowspec
1188 1190
1189 1191 A tuple of (includes, excludes).
1190 1192 """
1191 1193 return narrowspec.load(self)
1192 1194
1193 1195 @storecache(narrowspec.FILENAME)
1194 1196 def _narrowmatch(self):
1195 1197 if repository.NARROW_REQUIREMENT not in self.requirements:
1196 1198 return matchmod.always(self.root, '')
1197 1199 include, exclude = self.narrowpats
1198 1200 return narrowspec.match(self.root, include=include, exclude=exclude)
1199 1201
1200 1202 # TODO(martinvonz): make this property-like instead?
1201 1203 def narrowmatch(self):
1202 1204 return self._narrowmatch
1203 1205
1204 1206 def setnarrowpats(self, newincludes, newexcludes):
1205 1207 narrowspec.save(self, newincludes, newexcludes)
1206 1208 self.invalidate(clearfilecache=True)
1207 1209
1208 1210 def __getitem__(self, changeid):
1209 1211 if changeid is None:
1210 1212 return context.workingctx(self)
1211 1213 if isinstance(changeid, context.basectx):
1212 1214 return changeid
1213 1215 if isinstance(changeid, slice):
1214 1216 # wdirrev isn't contiguous so the slice shouldn't include it
1215 1217 return [self[i]
1216 1218 for i in pycompat.xrange(*changeid.indices(len(self)))
1217 1219 if i not in self.changelog.filteredrevs]
1218 1220 try:
1219 1221 if isinstance(changeid, int):
1220 1222 node = self.changelog.node(changeid)
1221 1223 rev = changeid
1222 1224 return context.changectx(self, rev, node)
1223 1225 elif changeid == 'null':
1224 1226 node = nullid
1225 1227 rev = nullrev
1226 1228 return context.changectx(self, rev, node)
1227 1229 elif changeid == 'tip':
1228 1230 node = self.changelog.tip()
1229 1231 rev = self.changelog.rev(node)
1230 1232 return context.changectx(self, rev, node)
1231 1233 elif changeid == '.':
1232 1234 # this is a hack to delay/avoid loading obsmarkers
1233 1235 # when we know that '.' won't be hidden
1234 1236 node = self.dirstate.p1()
1235 1237 rev = self.unfiltered().changelog.rev(node)
1236 1238 return context.changectx(self, rev, node)
1237 1239 elif len(changeid) == 20:
1238 1240 try:
1239 1241 node = changeid
1240 1242 rev = self.changelog.rev(changeid)
1241 1243 return context.changectx(self, rev, node)
1242 1244 except error.FilteredLookupError:
1243 1245 changeid = hex(changeid) # for the error message
1244 1246 raise
1245 1247 except LookupError:
1246 1248 # check if it might have come from damaged dirstate
1247 1249 #
1248 1250 # XXX we could avoid the unfiltered if we had a recognizable
1249 1251 # exception for filtered changeset access
1250 1252 if (self.local()
1251 1253 and changeid in self.unfiltered().dirstate.parents()):
1252 1254 msg = _("working directory has unknown parent '%s'!")
1253 1255 raise error.Abort(msg % short(changeid))
1254 1256 changeid = hex(changeid) # for the error message
1255 1257
1256 1258 elif len(changeid) == 40:
1257 1259 try:
1258 1260 node = bin(changeid)
1259 1261 rev = self.changelog.rev(node)
1260 1262 return context.changectx(self, rev, node)
1261 1263 except error.FilteredLookupError:
1262 1264 raise
1263 1265 except LookupError:
1264 1266 pass
1265 1267 else:
1266 1268 raise error.ProgrammingError(
1267 1269 "unsupported changeid '%s' of type %s" %
1268 1270 (changeid, type(changeid)))
1269 1271
1270 1272 except (error.FilteredIndexError, error.FilteredLookupError):
1271 1273 raise error.FilteredRepoLookupError(_("filtered revision '%s'")
1272 1274 % pycompat.bytestr(changeid))
1273 1275 except IndexError:
1274 1276 pass
1275 1277 except error.WdirUnsupported:
1276 1278 return context.workingctx(self)
1277 1279 raise error.RepoLookupError(
1278 1280 _("unknown revision '%s'") % changeid)
1279 1281
1280 1282 def __contains__(self, changeid):
1281 1283 """True if the given changeid exists
1282 1284
1283 1285 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1284 1286 is specified.
1285 1287 """
1286 1288 try:
1287 1289 self[changeid]
1288 1290 return True
1289 1291 except error.RepoLookupError:
1290 1292 return False
1291 1293
1292 1294 def __nonzero__(self):
1293 1295 return True
1294 1296
1295 1297 __bool__ = __nonzero__
1296 1298
1297 1299 def __len__(self):
1298 1300 # no need to pay the cost of repoview.changelog
1299 1301 unfi = self.unfiltered()
1300 1302 return len(unfi.changelog)
1301 1303
1302 1304 def __iter__(self):
1303 1305 return iter(self.changelog)
1304 1306
1305 1307 def revs(self, expr, *args):
1306 1308 '''Find revisions matching a revset.
1307 1309
1308 1310 The revset is specified as a string ``expr`` that may contain
1309 1311 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1310 1312
1311 1313 Revset aliases from the configuration are not expanded. To expand
1312 1314 user aliases, consider calling ``scmutil.revrange()`` or
1313 1315 ``repo.anyrevs([expr], user=True)``.
1314 1316
1315 1317 Returns a revset.abstractsmartset, which is a list-like interface
1316 1318 that contains integer revisions.
1317 1319 '''
1318 1320 expr = revsetlang.formatspec(expr, *args)
1319 1321 m = revset.match(None, expr)
1320 1322 return m(self)
1321 1323
1322 1324 def set(self, expr, *args):
1323 1325 '''Find revisions matching a revset and emit changectx instances.
1324 1326
1325 1327 This is a convenience wrapper around ``revs()`` that iterates the
1326 1328 result and is a generator of changectx instances.
1327 1329
1328 1330 Revset aliases from the configuration are not expanded. To expand
1329 1331 user aliases, consider calling ``scmutil.revrange()``.
1330 1332 '''
1331 1333 for r in self.revs(expr, *args):
1332 1334 yield self[r]
1333 1335
1334 1336 def anyrevs(self, specs, user=False, localalias=None):
1335 1337 '''Find revisions matching one of the given revsets.
1336 1338
1337 1339 Revset aliases from the configuration are not expanded by default. To
1338 1340 expand user aliases, specify ``user=True``. To provide some local
1339 1341 definitions overriding user aliases, set ``localalias`` to
1340 1342 ``{name: definitionstring}``.
1341 1343 '''
1342 1344 if user:
1343 1345 m = revset.matchany(self.ui, specs,
1344 1346 lookup=revset.lookupfn(self),
1345 1347 localalias=localalias)
1346 1348 else:
1347 1349 m = revset.matchany(None, specs, localalias=localalias)
1348 1350 return m(self)
1349 1351
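A hedged usage sketch of the three query helpers above (an existing ``repo`` is assumed; ``%s`` is a ``revsetlang.formatspec`` escape):

```python
# Integer revisions, lazily evaluated.
revs = repo.revs(b'merge() and branch(%s)', b'default')

# The same query, yielding changectx instances instead.
for ctx in repo.set(b'merge() and branch(%s)', b'default'):
    print(ctx.rev(), ctx.description())

# Expand user-configured revset aliases as well.
m = repo.anyrevs([b'myalias'], user=True)
```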
1350 1352 def url(self):
1351 1353 return 'file:' + self.root
1352 1354
1353 1355 def hook(self, name, throw=False, **args):
1354 1356 """Call a hook, passing this repo instance.
1355 1357
1356 1358 This is a convenience method to aid invoking hooks. Extensions likely
1357 1359 won't call this unless they have registered a custom hook or are
1358 1360 replacing code that is expected to call a hook.
1359 1361 """
1360 1362 return hook.hook(self.ui, self, name, throw, **args)
1361 1363
1362 1364 @filteredpropertycache
1363 1365 def _tagscache(self):
1364 1366 '''Returns a tagscache object that contains various tags related
1365 1367 caches.'''
1366 1368
1367 1369 # This simplifies its cache management by having one decorated
1368 1370 # function (this one) and the rest simply fetch things from it.
1369 1371 class tagscache(object):
1370 1372 def __init__(self):
1371 1373 # These two define the set of tags for this repository. tags
1372 1374 # maps tag name to node; tagtypes maps tag name to 'global' or
1373 1375 # 'local'. (Global tags are defined by .hgtags across all
1374 1376 # heads, and local tags are defined in .hg/localtags.)
1375 1377 # They constitute the in-memory cache of tags.
1376 1378 self.tags = self.tagtypes = None
1377 1379
1378 1380 self.nodetagscache = self.tagslist = None
1379 1381
1380 1382 cache = tagscache()
1381 1383 cache.tags, cache.tagtypes = self._findtags()
1382 1384
1383 1385 return cache
1384 1386
1385 1387 def tags(self):
1386 1388 '''return a mapping of tag to node'''
1387 1389 t = {}
1388 1390 if self.changelog.filteredrevs:
1389 1391 tags, tt = self._findtags()
1390 1392 else:
1391 1393 tags = self._tagscache.tags
1392 1394 for k, v in tags.iteritems():
1393 1395 try:
1394 1396 # ignore tags to unknown nodes
1395 1397 self.changelog.rev(v)
1396 1398 t[k] = v
1397 1399 except (error.LookupError, ValueError):
1398 1400 pass
1399 1401 return t
1400 1402
1401 1403 def _findtags(self):
1402 1404 '''Do the hard work of finding tags. Return a pair of dicts
1403 1405 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1404 1406 maps tag name to a string like \'global\' or \'local\'.
1405 1407 Subclasses or extensions are free to add their own tags, but
1406 1408 should be aware that the returned dicts will be retained for the
1407 1409 duration of the localrepo object.'''
1408 1410
1409 1411 # XXX what tagtype should subclasses/extensions use? Currently
1410 1412 # mq and bookmarks add tags, but do not set the tagtype at all.
1411 1413 # Should each extension invent its own tag type? Should there
1412 1414 # be one tagtype for all such "virtual" tags? Or is the status
1413 1415 # quo fine?
1414 1416
1415 1417
1416 1418 # map tag name to (node, hist)
1417 1419 alltags = tagsmod.findglobaltags(self.ui, self)
1418 1420 # map tag name to tag type
1419 1421 tagtypes = dict((tag, 'global') for tag in alltags)
1420 1422
1421 1423 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1422 1424
1423 1425 # Build the return dicts. Have to re-encode tag names because
1424 1426 # the tags module always uses UTF-8 (in order not to lose info
1425 1427 # writing to the cache), but the rest of Mercurial wants them in
1426 1428 # local encoding.
1427 1429 tags = {}
1428 1430 for (name, (node, hist)) in alltags.iteritems():
1429 1431 if node != nullid:
1430 1432 tags[encoding.tolocal(name)] = node
1431 1433 tags['tip'] = self.changelog.tip()
1432 1434 tagtypes = dict([(encoding.tolocal(name), value)
1433 1435 for (name, value) in tagtypes.iteritems()])
1434 1436 return (tags, tagtypes)
1435 1437
1436 1438 def tagtype(self, tagname):
1437 1439 '''
1438 1440 return the type of the given tag. result can be:
1439 1441
1440 1442 'local' : a local tag
1441 1443 'global' : a global tag
1442 1444 None : tag does not exist
1443 1445 '''
1444 1446
1445 1447 return self._tagscache.tagtypes.get(tagname)
1446 1448
1447 1449 def tagslist(self):
1448 1450 '''return a list of tags ordered by revision'''
1449 1451 if not self._tagscache.tagslist:
1450 1452 l = []
1451 1453 for t, n in self.tags().iteritems():
1452 1454 l.append((self.changelog.rev(n), t, n))
1453 1455 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1454 1456
1455 1457 return self._tagscache.tagslist
1456 1458
1457 1459 def nodetags(self, node):
1458 1460 '''return the tags associated with a node'''
1459 1461 if not self._tagscache.nodetagscache:
1460 1462 nodetagscache = {}
1461 1463 for t, n in self._tagscache.tags.iteritems():
1462 1464 nodetagscache.setdefault(n, []).append(t)
1463 1465 for tags in nodetagscache.itervalues():
1464 1466 tags.sort()
1465 1467 self._tagscache.nodetagscache = nodetagscache
1466 1468 return self._tagscache.nodetagscache.get(node, [])
1467 1469
1468 1470 def nodebookmarks(self, node):
1469 1471 """return the list of bookmarks pointing to the specified node"""
1470 1472 return self._bookmarks.names(node)
1471 1473
1472 1474 def branchmap(self):
1473 1475 '''returns a dictionary {branch: [branchheads]} with branchheads
1474 1476 ordered by increasing revision number'''
1475 1477 branchmap.updatecache(self)
1476 1478 return self._branchcaches[self.filtername]
1477 1479
1478 1480 @unfilteredmethod
1479 1481 def revbranchcache(self):
1480 1482 if not self._revbranchcache:
1481 1483 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1482 1484 return self._revbranchcache
1483 1485
1484 1486 def branchtip(self, branch, ignoremissing=False):
1485 1487 '''return the tip node for a given branch
1486 1488
1487 1489 If ignoremissing is True, then this method will not raise an error.
1488 1490 This is helpful for callers that only expect None for a missing branch
1489 1491 (e.g. namespace).
1490 1492
1491 1493 '''
1492 1494 try:
1493 1495 return self.branchmap().branchtip(branch)
1494 1496 except KeyError:
1495 1497 if not ignoremissing:
1496 1498 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1497 1499 else:
1498 1500 pass
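# A sketch of both calling modes (branch names are hypothetical):
#   repo.branchtip('default')                   -> tip node of 'default'
#   repo.branchtip('gone', ignoremissing=True)  -> None, no exception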
1499 1501
1500 1502 def lookup(self, key):
1501 1503 return scmutil.revsymbol(self, key).node()
1502 1504
1503 1505 def lookupbranch(self, key):
1504 1506 if key in self.branchmap():
1505 1507 return key
1506 1508
1507 1509 return scmutil.revsymbol(self, key).branch()
1508 1510
1509 1511 def known(self, nodes):
1510 1512 cl = self.changelog
1511 1513 nm = cl.nodemap
1512 1514 filtered = cl.filteredrevs
1513 1515 result = []
1514 1516 for n in nodes:
1515 1517 r = nm.get(n)
1516 1518 resp = not (r is None or r in filtered)
1517 1519 result.append(resp)
1518 1520 return result
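# For example, with n1 a visible local node and n2 unknown or filtered
# (both hypothetical), repo.known([n1, n2]) -> [True, False].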
1519 1521
1520 1522 def local(self):
1521 1523 return self
1522 1524
1523 1525 def publishing(self):
1524 1526 # it's safe (and desirable) to trust the publish flag unconditionally
1525 1527 # so that we don't finalize changes shared between users via ssh or nfs
1526 1528 return self.ui.configbool('phases', 'publish', untrusted=True)
1527 1529
1528 1530 def cancopy(self):
1529 1531 # so statichttprepo's override of local() works
1530 1532 if not self.local():
1531 1533 return False
1532 1534 if not self.publishing():
1533 1535 return True
1534 1536 # if publishing we can't copy if there is filtered content
1535 1537 return not self.filtered('visible').changelog.filteredrevs
1536 1538
1537 1539 def shared(self):
1538 1540 '''the type of shared repository (None if not shared)'''
1539 1541 if self.sharedpath != self.path:
1540 1542 return 'store'
1541 1543 return None
1542 1544
1543 1545 def wjoin(self, f, *insidef):
1544 1546 return self.vfs.reljoin(self.root, f, *insidef)
1545 1547
1546 1548 def setparents(self, p1, p2=nullid):
1547 1549 with self.dirstate.parentchange():
1548 1550 copies = self.dirstate.setparents(p1, p2)
1549 1551 pctx = self[p1]
1550 1552 if copies:
1551 1553 # Adjust copy records; the dirstate cannot do it, as it
1552 1554 # requires access to the parents' manifests. Preserve them
1553 1555 # only for entries added to the first parent.
1554 1556 for f in copies:
1555 1557 if f not in pctx and copies[f] in pctx:
1556 1558 self.dirstate.copy(copies[f], f)
1557 1559 if p2 == nullid:
1558 1560 for f, s in sorted(self.dirstate.copies().items()):
1559 1561 if f not in pctx and s not in pctx:
1560 1562 self.dirstate.copy(None, f)
1561 1563
1562 1564 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1563 1565 """changeid can be a changeset revision, node, or tag.
1564 1566 fileid can be a file revision or node."""
1565 1567 return context.filectx(self, path, changeid, fileid,
1566 1568 changectx=changectx)
1567 1569
1568 1570 def getcwd(self):
1569 1571 return self.dirstate.getcwd()
1570 1572
1571 1573 def pathto(self, f, cwd=None):
1572 1574 return self.dirstate.pathto(f, cwd)
1573 1575
1574 1576 def _loadfilter(self, filter):
1575 1577 if filter not in self._filterpats:
1576 1578 l = []
1577 1579 for pat, cmd in self.ui.configitems(filter):
1578 1580 if cmd == '!':
1579 1581 continue
1580 1582 mf = matchmod.match(self.root, '', [pat])
1581 1583 fn = None
1582 1584 params = cmd
1583 1585 for name, filterfn in self._datafilters.iteritems():
1584 1586 if cmd.startswith(name):
1585 1587 fn = filterfn
1586 1588 params = cmd[len(name):].lstrip()
1587 1589 break
1588 1590 if not fn:
1589 1591 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1590 1592 # Wrap old filters not supporting keyword arguments
1591 1593 if not pycompat.getargspec(fn)[2]:
1592 1594 oldfn = fn
1593 1595 fn = lambda s, c, **kwargs: oldfn(s, c)
1594 1596 l.append((mf, fn, params))
1595 1597 self._filterpats[filter] = l
1596 1598 return self._filterpats[filter]
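# The patterns above come from the hgrc section named after ``filter``
# ('encode' or 'decode'). A minimal, hypothetical configuration:
#   [encode]
#   **.txt = mycleaner --stdin
# Each value is run through procutil.filter as a shell command unless
# it starts with the name of a filter registered via adddatafilter().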
1597 1599
1598 1600 def _filter(self, filterpats, filename, data):
1599 1601 for mf, fn, cmd in filterpats:
1600 1602 if mf(filename):
1601 1603 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1602 1604 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1603 1605 break
1604 1606
1605 1607 return data
1606 1608
1607 1609 @unfilteredpropertycache
1608 1610 def _encodefilterpats(self):
1609 1611 return self._loadfilter('encode')
1610 1612
1611 1613 @unfilteredpropertycache
1612 1614 def _decodefilterpats(self):
1613 1615 return self._loadfilter('decode')
1614 1616
1615 1617 def adddatafilter(self, name, filter):
1616 1618 self._datafilters[name] = filter
1617 1619
1618 1620 def wread(self, filename):
1619 1621 if self.wvfs.islink(filename):
1620 1622 data = self.wvfs.readlink(filename)
1621 1623 else:
1622 1624 data = self.wvfs.read(filename)
1623 1625 return self._filter(self._encodefilterpats, filename, data)
1624 1626
1625 1627 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1626 1628 """write ``data`` into ``filename`` in the working directory
1627 1629
1628 1630 This returns the length of the written (possibly decoded) data.
1629 1631 """
1630 1632 data = self._filter(self._decodefilterpats, filename, data)
1631 1633 if 'l' in flags:
1632 1634 self.wvfs.symlink(data, filename)
1633 1635 else:
1634 1636 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1635 1637 **kwargs)
1636 1638 if 'x' in flags:
1637 1639 self.wvfs.setflags(filename, False, True)
1638 1640 else:
1639 1641 self.wvfs.setflags(filename, False, False)
1640 1642 return len(data)
1641 1643
1642 1644 def wwritedata(self, filename, data):
1643 1645 return self._filter(self._decodefilterpats, filename, data)
1644 1646
1645 1647 def currenttransaction(self):
1646 1648 """return the current transaction or None if non exists"""
1647 1649 if self._transref:
1648 1650 tr = self._transref()
1649 1651 else:
1650 1652 tr = None
1651 1653
1652 1654 if tr and tr.running():
1653 1655 return tr
1654 1656 return None
1655 1657
1656 1658 def transaction(self, desc, report=None):
1657 1659 if (self.ui.configbool('devel', 'all-warnings')
1658 1660 or self.ui.configbool('devel', 'check-locks')):
1659 1661 if self._currentlock(self._lockref) is None:
1660 1662 raise error.ProgrammingError('transaction requires locking')
1661 1663 tr = self.currenttransaction()
1662 1664 if tr is not None:
1663 1665 return tr.nest(name=desc)
1664 1666
1665 1667 # abort here if the journal already exists
1666 1668 if self.svfs.exists("journal"):
1667 1669 raise error.RepoError(
1668 1670 _("abandoned transaction found"),
1669 1671 hint=_("run 'hg recover' to clean up transaction"))
1670 1672
1671 1673 idbase = "%.40f#%f" % (random.random(), time.time())
1672 1674 ha = hex(hashlib.sha1(idbase).digest())
1673 1675 txnid = 'TXN:' + ha
1674 1676 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1675 1677
1676 1678 self._writejournal(desc)
1677 1679 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1678 1680 if report:
1679 1681 rp = report
1680 1682 else:
1681 1683 rp = self.ui.warn
1682 1684 vfsmap = {'plain': self.vfs} # root of .hg/
1683 1685 # we must avoid cyclic reference between repo and transaction.
1684 1686 reporef = weakref.ref(self)
1685 1687 # Code to track tag movement
1686 1688 #
1687 1689 # Since tags are all handled as file content, it is actually quite hard
1688 1690 # to track tag movement from a code perspective. So we fall back to
1689 1691 # tracking at the repository level. One could envision tracking
1690 1692 # changes to the '.hgtags' file through changegroup application, but
1691 1693 # that fails to cope with cases where a transaction exposes new heads
1692 1694 # without a changegroup being involved (eg: phase movement).
1693 1695 #
1694 1696 # For now, we gate the feature behind a flag since it likely comes
1695 1697 # with performance impacts. The current code runs more often than
1696 1698 # needed and does not use caches as much as it could. The current
1697 1699 # focus is on the behavior of the feature, so we disable it by default.
1698 1700 # The flag will be removed when we are happy with the performance impact.
1699 1701 #
1700 1702 # Once this feature is no longer experimental move the following
1701 1703 # documentation to the appropriate help section:
1702 1704 #
1703 1705 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1704 1706 # tags (new or changed or deleted tags). In addition the details of
1705 1707 # these changes are made available in a file at:
1706 1708 # ``REPOROOT/.hg/changes/tags.changes``.
1707 1709 # Make sure you check for HG_TAG_MOVED before reading that file as it
1708 1710 # might exist from a previous transaction even if no tags were touched
1709 1711 # in this one. Changes are recorded in a line-based format::
1710 1712 #
1711 1713 # <action> <hex-node> <tag-name>\n
1712 1714 #
1713 1715 # Actions are defined as follows:
1714 1716 # "-R": tag is removed,
1715 1717 # "+A": tag is added,
1716 1718 # "-M": tag is moved (old value),
1717 1719 # "+M": tag is moved (new value),
1718 1720 tracktags = lambda x: None
1719 1721 # experimental config: experimental.hook-track-tags
1720 1722 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1721 1723 if desc != 'strip' and shouldtracktags:
1722 1724 oldheads = self.changelog.headrevs()
1723 1725 def tracktags(tr2):
1724 1726 repo = reporef()
1725 1727 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1726 1728 newheads = repo.changelog.headrevs()
1727 1729 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1728 1730 # notes: we compare lists here.
1729 1731 # As we do it only once, building a set would not be cheaper
1730 1732 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1731 1733 if changes:
1732 1734 tr2.hookargs['tag_moved'] = '1'
1733 1735 with repo.vfs('changes/tags.changes', 'w',
1734 1736 atomictemp=True) as changesfile:
1735 1737 # note: we do not register the file to the transaction
1736 1738 # because we need it to still exist when the transaction
1737 1739 # is closed (for txnclose hooks)
1738 1740 tagsmod.writediff(changesfile, changes)
1739 1741 def validate(tr2):
1740 1742 """will run pre-closing hooks"""
1741 1743 # XXX the transaction API is a bit lacking here so we take a hacky
1742 1744 # path for now
1743 1745 #
1744 1746 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1745 1747 # dict is copied before these run. In addition we need the data
1746 1748 # available to in-memory hooks too.
1747 1749 #
1748 1750 # Moreover, we also need to make sure this runs before txnclose
1749 1751 # hooks and there is no "pending" mechanism that would execute
1750 1752 # logic only if hooks are about to run.
1751 1753 #
1752 1754 # Fixing this limitation of the transaction is also needed to track
1753 1755 # other families of changes (bookmarks, phases, obsolescence).
1754 1756 #
1755 1757 # This will have to be fixed before we remove the experimental
1756 1758 # gating.
1757 1759 tracktags(tr2)
1758 1760 repo = reporef()
1759 1761 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1760 1762 scmutil.enforcesinglehead(repo, tr2, desc)
1761 1763 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1762 1764 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1763 1765 args = tr.hookargs.copy()
1764 1766 args.update(bookmarks.preparehookargs(name, old, new))
1765 1767 repo.hook('pretxnclose-bookmark', throw=True,
1766 1768 txnname=desc,
1767 1769 **pycompat.strkwargs(args))
1768 1770 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1769 1771 cl = repo.unfiltered().changelog
1770 1772 for rev, (old, new) in tr.changes['phases'].items():
1771 1773 args = tr.hookargs.copy()
1772 1774 node = hex(cl.node(rev))
1773 1775 args.update(phases.preparehookargs(node, old, new))
1774 1776 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1775 1777 **pycompat.strkwargs(args))
1776 1778
1777 1779 repo.hook('pretxnclose', throw=True,
1778 1780 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1779 1781 def releasefn(tr, success):
1780 1782 repo = reporef()
1781 1783 if success:
1782 1784 # this should be explicitly invoked here, because
1783 1785 # in-memory changes aren't written out at closing
1784 1786 # transaction, if tr.addfilegenerator (via
1785 1787 # dirstate.write or so) isn't invoked while
1786 1788 # transaction running
1787 1789 repo.dirstate.write(None)
1788 1790 else:
1789 1791 # discard all changes (including ones already written
1790 1792 # out) in this transaction
1791 1793 narrowspec.restorebackup(self, 'journal.narrowspec')
1792 1794 repo.dirstate.restorebackup(None, 'journal.dirstate')
1793 1795
1794 1796 repo.invalidate(clearfilecache=True)
1795 1797
1796 1798 tr = transaction.transaction(rp, self.svfs, vfsmap,
1797 1799 "journal",
1798 1800 "undo",
1799 1801 aftertrans(renames),
1800 1802 self.store.createmode,
1801 1803 validator=validate,
1802 1804 releasefn=releasefn,
1803 1805 checkambigfiles=_cachedfiles,
1804 1806 name=desc)
1805 1807 tr.changes['origrepolen'] = len(self)
1806 1808 tr.changes['obsmarkers'] = set()
1807 1809 tr.changes['phases'] = {}
1808 1810 tr.changes['bookmarks'] = {}
1809 1811
1810 1812 tr.hookargs['txnid'] = txnid
1811 1813 # note: writing the fncache only during finalize means that the file is
1812 1814 # outdated when running hooks. As fncache is used for streaming clone,
1813 1815 # this is not expected to break anything that happens during the hooks.
1814 1816 tr.addfinalize('flush-fncache', self.store.write)
1815 1817 def txnclosehook(tr2):
1816 1818 """To be run if transaction is successful, will schedule a hook run
1817 1819 """
1818 1820 # Don't reference tr2 in hook() so we don't hold a reference.
1819 1821 # This reduces memory consumption when there are multiple
1820 1822 # transactions per lock. This can likely go away if issue5045
1821 1823 # fixes the function accumulation.
1822 1824 hookargs = tr2.hookargs
1823 1825
1824 1826 def hookfunc():
1825 1827 repo = reporef()
1826 1828 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1827 1829 bmchanges = sorted(tr.changes['bookmarks'].items())
1828 1830 for name, (old, new) in bmchanges:
1829 1831 args = tr.hookargs.copy()
1830 1832 args.update(bookmarks.preparehookargs(name, old, new))
1831 1833 repo.hook('txnclose-bookmark', throw=False,
1832 1834 txnname=desc, **pycompat.strkwargs(args))
1833 1835
1834 1836 if hook.hashook(repo.ui, 'txnclose-phase'):
1835 1837 cl = repo.unfiltered().changelog
1836 1838 phasemv = sorted(tr.changes['phases'].items())
1837 1839 for rev, (old, new) in phasemv:
1838 1840 args = tr.hookargs.copy()
1839 1841 node = hex(cl.node(rev))
1840 1842 args.update(phases.preparehookargs(node, old, new))
1841 1843 repo.hook('txnclose-phase', throw=False, txnname=desc,
1842 1844 **pycompat.strkwargs(args))
1843 1845
1844 1846 repo.hook('txnclose', throw=False, txnname=desc,
1845 1847 **pycompat.strkwargs(hookargs))
1846 1848 reporef()._afterlock(hookfunc)
1847 1849 tr.addfinalize('txnclose-hook', txnclosehook)
1848 1850 # Include a leading "-" to make it happen before the transaction summary
1849 1851 # reports registered via scmutil.registersummarycallback() whose names
1850 1852 # are 00-txnreport etc. That way, the caches will be warm when the
1851 1853 # callbacks run.
1852 1854 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1853 1855 def txnaborthook(tr2):
1854 1856 """To be run if transaction is aborted
1855 1857 """
1856 1858 reporef().hook('txnabort', throw=False, txnname=desc,
1857 1859 **pycompat.strkwargs(tr2.hookargs))
1858 1860 tr.addabort('txnabort-hook', txnaborthook)
1859 1861 # avoid eager cache invalidation. in-memory data should be identical
1860 1862 # to stored data if transaction has no error.
1861 1863 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1862 1864 self._transref = weakref.ref(tr)
1863 1865 scmutil.registersummarycallback(self, tr, desc)
1864 1866 return tr
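# A sketch of the typical calling pattern (the transaction object
# supports the context-manager protocol, closing on success and
# releasing on error):
#   with repo.lock():
#       with repo.transaction('my-operation') as tr:
#           ...  # mutate the store under ``tr``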
1865 1867
1866 1868 def _journalfiles(self):
1867 1869 return ((self.svfs, 'journal'),
1868 1870 (self.vfs, 'journal.dirstate'),
1869 1871 (self.vfs, 'journal.branch'),
1870 1872 (self.vfs, 'journal.desc'),
1871 1873 (self.vfs, 'journal.bookmarks'),
1872 1874 (self.svfs, 'journal.phaseroots'))
1873 1875
1874 1876 def undofiles(self):
1875 1877 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1876 1878
1877 1879 @unfilteredmethod
1878 1880 def _writejournal(self, desc):
1879 1881 self.dirstate.savebackup(None, 'journal.dirstate')
1880 1882 narrowspec.savebackup(self, 'journal.narrowspec')
1881 1883 self.vfs.write("journal.branch",
1882 1884 encoding.fromlocal(self.dirstate.branch()))
1883 1885 self.vfs.write("journal.desc",
1884 1886 "%d\n%s\n" % (len(self), desc))
1885 1887 self.vfs.write("journal.bookmarks",
1886 1888 self.vfs.tryread("bookmarks"))
1887 1889 self.svfs.write("journal.phaseroots",
1888 1890 self.svfs.tryread("phaseroots"))
1889 1891
1890 1892 def recover(self):
1891 1893 with self.lock():
1892 1894 if self.svfs.exists("journal"):
1893 1895 self.ui.status(_("rolling back interrupted transaction\n"))
1894 1896 vfsmap = {'': self.svfs,
1895 1897 'plain': self.vfs,}
1896 1898 transaction.rollback(self.svfs, vfsmap, "journal",
1897 1899 self.ui.warn,
1898 1900 checkambigfiles=_cachedfiles)
1899 1901 self.invalidate()
1900 1902 return True
1901 1903 else:
1902 1904 self.ui.warn(_("no interrupted transaction available\n"))
1903 1905 return False
1904 1906
1905 1907 def rollback(self, dryrun=False, force=False):
1906 1908 wlock = lock = dsguard = None
1907 1909 try:
1908 1910 wlock = self.wlock()
1909 1911 lock = self.lock()
1910 1912 if self.svfs.exists("undo"):
1911 1913 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1912 1914
1913 1915 return self._rollback(dryrun, force, dsguard)
1914 1916 else:
1915 1917 self.ui.warn(_("no rollback information available\n"))
1916 1918 return 1
1917 1919 finally:
1918 1920 release(dsguard, lock, wlock)
1919 1921
1920 1922 @unfilteredmethod # Until we get smarter cache management
1921 1923 def _rollback(self, dryrun, force, dsguard):
1922 1924 ui = self.ui
1923 1925 try:
1924 1926 args = self.vfs.read('undo.desc').splitlines()
1925 1927 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1926 1928 if len(args) >= 3:
1927 1929 detail = args[2]
1928 1930 oldtip = oldlen - 1
1929 1931
1930 1932 if detail and ui.verbose:
1931 1933 msg = (_('repository tip rolled back to revision %d'
1932 1934 ' (undo %s: %s)\n')
1933 1935 % (oldtip, desc, detail))
1934 1936 else:
1935 1937 msg = (_('repository tip rolled back to revision %d'
1936 1938 ' (undo %s)\n')
1937 1939 % (oldtip, desc))
1938 1940 except IOError:
1939 1941 msg = _('rolling back unknown transaction\n')
1940 1942 desc = None
1941 1943
1942 1944 if not force and self['.'] != self['tip'] and desc == 'commit':
1943 1945 raise error.Abort(
1944 1946 _('rollback of last commit while not checked out '
1945 1947 'may lose data'), hint=_('use -f to force'))
1946 1948
1947 1949 ui.status(msg)
1948 1950 if dryrun:
1949 1951 return 0
1950 1952
1951 1953 parents = self.dirstate.parents()
1952 1954 self.destroying()
1953 1955 vfsmap = {'plain': self.vfs, '': self.svfs}
1954 1956 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1955 1957 checkambigfiles=_cachedfiles)
1956 1958 if self.vfs.exists('undo.bookmarks'):
1957 1959 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1958 1960 if self.svfs.exists('undo.phaseroots'):
1959 1961 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1960 1962 self.invalidate()
1961 1963
1962 1964 parentgone = (parents[0] not in self.changelog.nodemap or
1963 1965 parents[1] not in self.changelog.nodemap)
1964 1966 if parentgone:
1965 1967 # prevent dirstateguard from overwriting already restored one
1966 1968 dsguard.close()
1967 1969
1968 1970 narrowspec.restorebackup(self, 'undo.narrowspec')
1969 1971 self.dirstate.restorebackup(None, 'undo.dirstate')
1970 1972 try:
1971 1973 branch = self.vfs.read('undo.branch')
1972 1974 self.dirstate.setbranch(encoding.tolocal(branch))
1973 1975 except IOError:
1974 1976 ui.warn(_('named branch could not be reset: '
1975 1977 'current branch is still \'%s\'\n')
1976 1978 % self.dirstate.branch())
1977 1979
1978 1980 parents = tuple([p.rev() for p in self[None].parents()])
1979 1981 if len(parents) > 1:
1980 1982 ui.status(_('working directory now based on '
1981 1983 'revisions %d and %d\n') % parents)
1982 1984 else:
1983 1985 ui.status(_('working directory now based on '
1984 1986 'revision %d\n') % parents)
1985 1987 mergemod.mergestate.clean(self, self['.'].node())
1986 1988
1987 1989 # TODO: if we know which new heads may result from this rollback, pass
1988 1990 # them to destroy(), which will prevent the branchhead cache from being
1989 1991 # invalidated.
1990 1992 self.destroyed()
1991 1993 return 0
1992 1994
1993 1995 def _buildcacheupdater(self, newtransaction):
1994 1996 """called during transaction to build the callback updating cache
1995 1997
1996 1998 Lives on the repository to help extensions that might want to augment
1997 1999 this logic. For this purpose, the created transaction is passed to the
1998 2000 method.
1999 2001 """
2000 2002 # we must avoid cyclic reference between repo and transaction.
2001 2003 reporef = weakref.ref(self)
2002 2004 def updater(tr):
2003 2005 repo = reporef()
2004 2006 repo.updatecaches(tr)
2005 2007 return updater
2006 2008
2007 2009 @unfilteredmethod
2008 2010 def updatecaches(self, tr=None, full=False):
2009 2011 """warm appropriate caches
2010 2012
2011 2013 If this function is called after a transaction closed, the transaction
2012 2014 will be available in the 'tr' argument. This can be used to selectively
2013 2015 update caches relevant to the changes in that transaction.
2014 2016
2015 2017 If 'full' is set, make sure all caches the function knows about have
2016 2018 up-to-date data. Even the ones usually loaded more lazily.
2017 2019 """
2018 2020 if tr is not None and tr.hookargs.get('source') == 'strip':
2019 2021 # During strip, many caches are invalid but
2020 2022 # later call to `destroyed` will refresh them.
2021 2023 return
2022 2024
2023 2025 if tr is None or tr.changes['origrepolen'] < len(self):
2024 2026 # updating the unfiltered branchmap should refresh all the others,
2025 2027 self.ui.debug('updating the branch cache\n')
2026 2028 branchmap.updatecache(self.filtered('served'))
2027 2029
2028 2030 if full:
2029 2031 rbc = self.revbranchcache()
2030 2032 for r in self.changelog:
2031 2033 rbc.branchinfo(r)
2032 2034 rbc.write()
2033 2035
2034 2036 # ensure the working copy parents are in the manifestfulltextcache
2035 2037 for ctx in self['.'].parents():
2036 2038 ctx.manifest() # accessing the manifest is enough
2037 2039
2038 2040 def invalidatecaches(self):
2039 2041
2040 2042 if '_tagscache' in vars(self):
2041 2043 # can't use delattr on proxy
2042 2044 del self.__dict__['_tagscache']
2043 2045
2044 2046 self.unfiltered()._branchcaches.clear()
2045 2047 self.invalidatevolatilesets()
2046 2048 self._sparsesignaturecache.clear()
2047 2049
2048 2050 def invalidatevolatilesets(self):
2049 2051 self.filteredrevcache.clear()
2050 2052 obsolete.clearobscaches(self)
2051 2053
2052 2054 def invalidatedirstate(self):
2053 2055 '''Invalidates the dirstate, causing the next call to dirstate
2054 2056 to check if it was modified since the last time it was read,
2055 2057 rereading it if it has.
2056 2058
2057 2059 This is different from dirstate.invalidate() in that it doesn't always
2058 2060 reread the dirstate. Use dirstate.invalidate() if you want to
2059 2061 explicitly read the dirstate again (i.e. restoring it to a previous
2060 2062 known good state).'''
2061 2063 if hasunfilteredcache(self, 'dirstate'):
2062 2064 for k in self.dirstate._filecache:
2063 2065 try:
2064 2066 delattr(self.dirstate, k)
2065 2067 except AttributeError:
2066 2068 pass
2067 2069 delattr(self.unfiltered(), 'dirstate')
2068 2070
2069 2071 def invalidate(self, clearfilecache=False):
2070 2072 '''Invalidates both store and non-store parts other than dirstate
2071 2073
2072 2074 If a transaction is running, invalidation of store is omitted,
2073 2075 because discarding in-memory changes might cause inconsistency
2074 2076 (e.g. incomplete fncache causes unintentional failure, but
2075 2077 redundant one doesn't).
2076 2078 '''
2077 2079 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2078 2080 for k in list(self._filecache.keys()):
2079 2081 # dirstate is invalidated separately in invalidatedirstate()
2080 2082 if k == 'dirstate':
2081 2083 continue
2082 2084 if (k == 'changelog' and
2083 2085 self.currenttransaction() and
2084 2086 self.changelog._delayed):
2085 2087 # The changelog object may store unwritten revisions. We don't
2086 2088 # want to lose them.
2087 2089 # TODO: Solve the problem instead of working around it.
2088 2090 continue
2089 2091
2090 2092 if clearfilecache:
2091 2093 del self._filecache[k]
2092 2094 try:
2093 2095 delattr(unfiltered, k)
2094 2096 except AttributeError:
2095 2097 pass
2096 2098 self.invalidatecaches()
2097 2099 if not self.currenttransaction():
2098 2100 # TODO: Changing contents of store outside transaction
2099 2101 # causes inconsistency. We should make in-memory store
2100 2102 # changes detectable, and abort if changed.
2101 2103 self.store.invalidatecaches()
2102 2104
2103 2105 def invalidateall(self):
2104 2106 '''Fully invalidates both store and non-store parts, causing the
2105 2107 subsequent operation to reread any outside changes.'''
2106 2108 # extension should hook this to invalidate its caches
2107 2109 self.invalidate()
2108 2110 self.invalidatedirstate()
2109 2111
2110 2112 @unfilteredmethod
2111 2113 def _refreshfilecachestats(self, tr):
2112 2114 """Reload stats of cached files so that they are flagged as valid"""
2113 2115 for k, ce in self._filecache.items():
2114 2116 k = pycompat.sysstr(k)
2115 2117 if k == r'dirstate' or k not in self.__dict__:
2116 2118 continue
2117 2119 ce.refresh()
2118 2120
2119 2121 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
2120 2122 inheritchecker=None, parentenvvar=None):
2121 2123 parentlock = None
2122 2124 # the contents of parentenvvar are used by the underlying lock to
2123 2125 # determine whether it can be inherited
2124 2126 if parentenvvar is not None:
2125 2127 parentlock = encoding.environ.get(parentenvvar)
2126 2128
2127 2129 timeout = 0
2128 2130 warntimeout = 0
2129 2131 if wait:
2130 2132 timeout = self.ui.configint("ui", "timeout")
2131 2133 warntimeout = self.ui.configint("ui", "timeout.warn")
2132 2134 # internal config: ui.signal-safe-lock
2133 2135 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
2134 2136
2135 2137 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
2136 2138 releasefn=releasefn,
2137 2139 acquirefn=acquirefn, desc=desc,
2138 2140 inheritchecker=inheritchecker,
2139 2141 parentlock=parentlock,
2140 2142 signalsafe=signalsafe)
2141 2143 return l
2142 2144
2143 2145 def _afterlock(self, callback):
2144 2146 """add a callback to be run when the repository is fully unlocked
2145 2147
2146 2148 The callback will be executed when the outermost lock is released
2147 2149 (with wlock being higher level than 'lock')."""
2148 2150 for ref in (self._wlockref, self._lockref):
2149 2151 l = ref and ref()
2150 2152 if l and l.held:
2151 2153 l.postrelease.append(callback)
2152 2154 break
2153 2155 else: # no lock has been found.
2154 2156 callback()
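# Illustrative use (commit() and pushkey() below do exactly this):
#   repo._afterlock(runhook)   # runhook: a zero-argument callable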
2155 2157
2156 2158 def lock(self, wait=True):
2157 2159 '''Lock the repository store (.hg/store) and return a weak reference
2158 2160 to the lock. Use this before modifying the store (e.g. committing or
2159 2161 stripping). If you are opening a transaction, get a lock as well.
2160 2162
2161 2163 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2162 2164 'wlock' first to avoid a dead-lock hazard.'''
2163 2165 l = self._currentlock(self._lockref)
2164 2166 if l is not None:
2165 2167 l.lock()
2166 2168 return l
2167 2169
2168 2170 l = self._lock(self.svfs, "lock", wait, None,
2169 2171 self.invalidate, _('repository %s') % self.origroot)
2170 2172 self._lockref = weakref.ref(l)
2171 2173 return l
2172 2174
2173 2175 def _wlockchecktransaction(self):
2174 2176 if self.currenttransaction() is not None:
2175 2177 raise error.LockInheritanceContractViolation(
2176 2178 'wlock cannot be inherited in the middle of a transaction')
2177 2179
2178 2180 def wlock(self, wait=True):
2179 2181 '''Lock the non-store parts of the repository (everything under
2180 2182 .hg except .hg/store) and return a weak reference to the lock.
2181 2183
2182 2184 Use this before modifying files in .hg.
2183 2185
2184 2186 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2185 2187 'wlock' first to avoid a dead-lock hazard.'''
2186 2188 l = self._wlockref and self._wlockref()
2187 2189 if l is not None and l.held:
2188 2190 l.lock()
2189 2191 return l
2190 2192
2191 2193 # We do not need to check for non-waiting lock acquisition. Such
2192 2194 # an acquisition would not cause a dead-lock, as it would just fail.
2193 2195 if wait and (self.ui.configbool('devel', 'all-warnings')
2194 2196 or self.ui.configbool('devel', 'check-locks')):
2195 2197 if self._currentlock(self._lockref) is not None:
2196 2198 self.ui.develwarn('"wlock" acquired after "lock"')
2197 2199
2198 2200 def unlock():
2199 2201 if self.dirstate.pendingparentchange():
2200 2202 self.dirstate.invalidate()
2201 2203 else:
2202 2204 self.dirstate.write(None)
2203 2205
2204 2206 self._filecache['dirstate'].refresh()
2205 2207
2206 2208 l = self._lock(self.vfs, "wlock", wait, unlock,
2207 2209 self.invalidatedirstate, _('working directory of %s') %
2208 2210 self.origroot,
2209 2211 inheritchecker=self._wlockchecktransaction,
2210 2212 parentenvvar='HG_WLOCK_LOCKER')
2211 2213 self._wlockref = weakref.ref(l)
2212 2214 return l
2213 2215
2214 2216 def _currentlock(self, lockref):
2215 2217 """Returns the lock if it's held, or None if it's not."""
2216 2218 if lockref is None:
2217 2219 return None
2218 2220 l = lockref()
2219 2221 if l is None or not l.held:
2220 2222 return None
2221 2223 return l
2222 2224
2223 2225 def currentwlock(self):
2224 2226 """Returns the wlock if it's held, or None if it's not."""
2225 2227 return self._currentlock(self._wlockref)
2226 2228
2227 2229 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
2228 2230 """
2229 2231 commit an individual file as part of a larger transaction
2230 2232 """
2231 2233
2232 2234 fname = fctx.path()
2233 2235 fparent1 = manifest1.get(fname, nullid)
2234 2236 fparent2 = manifest2.get(fname, nullid)
2235 2237 if isinstance(fctx, context.filectx):
2236 2238 node = fctx.filenode()
2237 2239 if node in [fparent1, fparent2]:
2238 2240 self.ui.debug('reusing %s filelog entry\n' % fname)
2239 2241 if manifest1.flags(fname) != fctx.flags():
2240 2242 changelist.append(fname)
2241 2243 return node
2242 2244
2243 2245 flog = self.file(fname)
2244 2246 meta = {}
2245 2247 copy = fctx.renamed()
2246 2248 if copy and copy[0] != fname:
2247 2249 # Mark the new revision of this file as a copy of another
2248 2250 # file. This copy data will effectively act as a parent
2249 2251 # of this new revision. If this is a merge, the first
2250 2252 # parent will be the nullid (meaning "look up the copy data")
2251 2253 # and the second one will be the other parent. For example:
2252 2254 #
2253 2255 # 0 --- 1 --- 3 rev1 changes file foo
2254 2256 # \ / rev2 renames foo to bar and changes it
2255 2257 # \- 2 -/ rev3 should have bar with all changes and
2256 2258 # should record that bar descends from
2257 2259 # bar in rev2 and foo in rev1
2258 2260 #
2259 2261 # this allows this merge to succeed:
2260 2262 #
2261 2263 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
2262 2264 # \ / merging rev3 and rev4 should use bar@rev2
2263 2265 # \- 2 --- 4 as the merge base
2264 2266 #
2265 2267
2266 2268 cfname = copy[0]
2267 2269 crev = manifest1.get(cfname)
2268 2270 newfparent = fparent2
2269 2271
2270 2272 if manifest2: # branch merge
2271 2273 if fparent2 == nullid or crev is None: # copied on remote side
2272 2274 if cfname in manifest2:
2273 2275 crev = manifest2[cfname]
2274 2276 newfparent = fparent1
2275 2277
2276 2278 # Here, we used to search backwards through history to try to find
2277 2279 # where the file copy came from if the source of a copy was not in
2278 2280 # the parent directory. However, this doesn't actually make sense to
2279 2281 # do (what does a copy from something not in your working copy even
2280 2282 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
2281 2283 # the user that copy information was dropped, so if they didn't
2282 2284 # expect this outcome it can be fixed, but this is the correct
2283 2285 # behavior in this circumstance.
2284 2286
2285 2287 if crev:
2286 2288 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
2287 2289 meta["copy"] = cfname
2288 2290 meta["copyrev"] = hex(crev)
2289 2291 fparent1, fparent2 = nullid, newfparent
2290 2292 else:
2291 2293 self.ui.warn(_("warning: can't find ancestor for '%s' "
2292 2294 "copied from '%s'!\n") % (fname, cfname))
2293 2295
2294 2296 elif fparent1 == nullid:
2295 2297 fparent1, fparent2 = fparent2, nullid
2296 2298 elif fparent2 != nullid:
2297 2299 # is one parent an ancestor of the other?
2298 2300 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2299 2301 if fparent1 in fparentancestors:
2300 2302 fparent1, fparent2 = fparent2, nullid
2301 2303 elif fparent2 in fparentancestors:
2302 2304 fparent2 = nullid
2303 2305
2304 2306 # is the file changed?
2305 2307 text = fctx.data()
2306 2308 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
2307 2309 changelist.append(fname)
2308 2310 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2309 2311 # are just the flags changed during merge?
2310 2312 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2311 2313 changelist.append(fname)
2312 2314
2313 2315 return fparent1
2314 2316
2315 2317 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
2316 2318 """check for commit arguments that aren't committable"""
2317 2319 if match.isexact() or match.prefix():
2318 2320 matched = set(status.modified + status.added + status.removed)
2319 2321
2320 2322 for f in match.files():
2321 2323 f = self.dirstate.normalize(f)
2322 2324 if f == '.' or f in matched or f in wctx.substate:
2323 2325 continue
2324 2326 if f in status.deleted:
2325 2327 fail(f, _('file not found!'))
2326 2328 if f in vdirs: # visited directory
2327 2329 d = f + '/'
2328 2330 for mf in matched:
2329 2331 if mf.startswith(d):
2330 2332 break
2331 2333 else:
2332 2334 fail(f, _("no match under directory!"))
2333 2335 elif f not in self.dirstate:
2334 2336 fail(f, _("file not tracked!"))
2335 2337
2336 2338 @unfilteredmethod
2337 2339 def commit(self, text="", user=None, date=None, match=None, force=False,
2338 2340 editor=False, extra=None):
2339 2341 """Add a new revision to current repository.
2340 2342
2341 2343 Revision information is gathered from the working directory,
2342 2344 match can be used to filter the committed files. If editor is
2343 2345 supplied, it is called to get a commit message.
2344 2346 """
2345 2347 if extra is None:
2346 2348 extra = {}
2347 2349
2348 2350 def fail(f, msg):
2349 2351 raise error.Abort('%s: %s' % (f, msg))
2350 2352
2351 2353 if not match:
2352 2354 match = matchmod.always(self.root, '')
2353 2355
2354 2356 if not force:
2355 2357 vdirs = []
2356 2358 match.explicitdir = vdirs.append
2357 2359 match.bad = fail
2358 2360
2359 2361 wlock = lock = tr = None
2360 2362 try:
2361 2363 wlock = self.wlock()
2362 2364 lock = self.lock() # for recent changelog (see issue4368)
2363 2365
2364 2366 wctx = self[None]
2365 2367 merge = len(wctx.parents()) > 1
2366 2368
2367 2369 if not force and merge and not match.always():
2368 2370 raise error.Abort(_('cannot partially commit a merge '
2369 2371 '(do not specify files or patterns)'))
2370 2372
2371 2373 status = self.status(match=match, clean=force)
2372 2374 if force:
2373 2375 status.modified.extend(status.clean) # mq may commit clean files
2374 2376
2375 2377 # check subrepos
2376 2378 subs, commitsubs, newstate = subrepoutil.precommit(
2377 2379 self.ui, wctx, status, match, force=force)
2378 2380
2379 2381 # make sure all explicit patterns are matched
2380 2382 if not force:
2381 2383 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
2382 2384
2383 2385 cctx = context.workingcommitctx(self, status,
2384 2386 text, user, date, extra)
2385 2387
2386 2388 # internal config: ui.allowemptycommit
2387 2389 allowemptycommit = (wctx.branch() != wctx.p1().branch()
2388 2390 or extra.get('close') or merge or cctx.files()
2389 2391 or self.ui.configbool('ui', 'allowemptycommit'))
2390 2392 if not allowemptycommit:
2391 2393 return None
2392 2394
2393 2395 if merge and cctx.deleted():
2394 2396 raise error.Abort(_("cannot commit merge with missing files"))
2395 2397
2396 2398 ms = mergemod.mergestate.read(self)
2397 2399 mergeutil.checkunresolved(ms)
2398 2400
2399 2401 if editor:
2400 2402 cctx._text = editor(self, cctx, subs)
2401 2403 edited = (text != cctx._text)
2402 2404
2403 2405 # Save commit message in case this transaction gets rolled back
2404 2406 # (e.g. by a pretxncommit hook). Leave the content alone on
2405 2407 # the assumption that the user will use the same editor again.
2406 2408 msgfn = self.savecommitmessage(cctx._text)
2407 2409
2408 2410 # commit subs and write new state
2409 2411 if subs:
2410 2412 for s in sorted(commitsubs):
2411 2413 sub = wctx.sub(s)
2412 2414 self.ui.status(_('committing subrepository %s\n') %
2413 2415 subrepoutil.subrelpath(sub))
2414 2416 sr = sub.commit(cctx._text, user, date)
2415 2417 newstate[s] = (newstate[s][0], sr)
2416 2418 subrepoutil.writestate(self, newstate)
2417 2419
2418 2420 p1, p2 = self.dirstate.parents()
2419 2421 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2420 2422 try:
2421 2423 self.hook("precommit", throw=True, parent1=hookp1,
2422 2424 parent2=hookp2)
2423 2425 tr = self.transaction('commit')
2424 2426 ret = self.commitctx(cctx, True)
2425 2427 except: # re-raises
2426 2428 if edited:
2427 2429 self.ui.write(
2428 2430 _('note: commit message saved in %s\n') % msgfn)
2429 2431 raise
2430 2432 # update bookmarks, dirstate and mergestate
2431 2433 bookmarks.update(self, [p1, p2], ret)
2432 2434 cctx.markcommitted(ret)
2433 2435 ms.reset()
2434 2436 tr.close()
2435 2437
2436 2438 finally:
2437 2439 lockmod.release(tr, lock, wlock)
2438 2440
2439 2441 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2440 2442 # hack for commands that use a temporary commit (eg: histedit):
2441 2443 # the temporary commit may have been stripped before the hook runs
2442 2444 if self.changelog.hasnode(ret):
2443 2445 self.hook("commit", node=node, parent1=parent1,
2444 2446 parent2=parent2)
2445 2447 self._afterlock(commithook)
2446 2448 return ret
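# A minimal illustrative call (message and user are hypothetical):
#   node = repo.commit(text='fix parser', user='alice <a@example.org>')
# ``node`` is None when there was nothing to commit and empty commits
# are not allowed.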
2447 2449
2448 2450 @unfilteredmethod
2449 2451 def commitctx(self, ctx, error=False):
2450 2452 """Add a new revision to current repository.
2451 2453 Revision information is passed via the context argument.
2452 2454
2453 2455 ctx.files() should list all files involved in this commit, i.e.
2454 2456 modified/added/removed files. On merge, it may be wider than the
2455 2457 ctx.files() to be committed, since any file nodes derived directly
2456 2458 from p1 or p2 are excluded from the committed ctx.files().
2457 2459 """
2458 2460
2459 2461 tr = None
2460 2462 p1, p2 = ctx.p1(), ctx.p2()
2461 2463 user = ctx.user()
2462 2464
2463 2465 lock = self.lock()
2464 2466 try:
2465 2467 tr = self.transaction("commit")
2466 2468 trp = weakref.proxy(tr)
2467 2469
2468 2470 if ctx.manifestnode():
2469 2471 # reuse an existing manifest revision
2470 2472 self.ui.debug('reusing known manifest\n')
2471 2473 mn = ctx.manifestnode()
2472 2474 files = ctx.files()
2473 2475 elif ctx.files():
2474 2476 m1ctx = p1.manifestctx()
2475 2477 m2ctx = p2.manifestctx()
2476 2478 mctx = m1ctx.copy()
2477 2479
2478 2480 m = mctx.read()
2479 2481 m1 = m1ctx.read()
2480 2482 m2 = m2ctx.read()
2481 2483
2482 2484 # check in files
2483 2485 added = []
2484 2486 changed = []
2485 2487 removed = list(ctx.removed())
2486 2488 linkrev = len(self)
2487 2489 self.ui.note(_("committing files:\n"))
2488 2490 for f in sorted(ctx.modified() + ctx.added()):
2489 2491 self.ui.note(f + "\n")
2490 2492 try:
2491 2493 fctx = ctx[f]
2492 2494 if fctx is None:
2493 2495 removed.append(f)
2494 2496 else:
2495 2497 added.append(f)
2496 2498 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2497 2499 trp, changed)
2498 2500 m.setflag(f, fctx.flags())
2499 2501 except OSError as inst:
2500 2502 self.ui.warn(_("trouble committing %s!\n") % f)
2501 2503 raise
2502 2504 except IOError as inst:
2503 2505 errcode = getattr(inst, 'errno', errno.ENOENT)
2504 2506 if error or errcode and errcode != errno.ENOENT:
2505 2507 self.ui.warn(_("trouble committing %s!\n") % f)
2506 2508 raise
2507 2509
2508 2510 # update manifest
2509 2511 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2510 2512 drop = [f for f in removed if f in m]
2511 2513 for f in drop:
2512 2514 del m[f]
2513 2515 files = changed + removed
2514 2516 md = None
2515 2517 if not files:
2516 2518 # if no "files" actually changed in terms of the changelog,
2517 2519 # try hard to detect unmodified manifest entry so that the
2518 2520 # exact same commit can be reproduced later on convert.
2519 2521 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2520 2522 if not files and md:
2521 2523 self.ui.debug('not reusing manifest (no file change in '
2522 2524 'changelog, but manifest differs)\n')
2523 2525 if files or md:
2524 2526 self.ui.note(_("committing manifest\n"))
2525 2527 # we're using narrowmatch here since it's already applied at
2526 2528 # other stages (such as dirstate.walk), so we're already
2527 2529 # ignoring things outside of narrowspec in most cases. The
2528 2530 # one case where we might have files outside the narrowspec
2529 2531 # at this point is merges, and we already error out in the
2530 2532 # case where the merge has files outside of the narrowspec,
2531 2533 # so this is safe.
2532 2534 mn = mctx.write(trp, linkrev,
2533 2535 p1.manifestnode(), p2.manifestnode(),
2534 2536 added, drop, match=self.narrowmatch())
2535 2537 else:
2536 2538 self.ui.debug('reusing manifest from p1 (listed files '
2537 2539 'actually unchanged)\n')
2538 2540 mn = p1.manifestnode()
2539 2541 else:
2540 2542 self.ui.debug('reusing manifest from p1 (no file change)\n')
2541 2543 mn = p1.manifestnode()
2542 2544 files = []
2543 2545
2544 2546 # update changelog
2545 2547 self.ui.note(_("committing changelog\n"))
2546 2548 self.changelog.delayupdate(tr)
2547 2549 n = self.changelog.add(mn, files, ctx.description(),
2548 2550 trp, p1.node(), p2.node(),
2549 2551 user, ctx.date(), ctx.extra().copy())
2550 2552 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2551 2553 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2552 2554 parent2=xp2)
2553 2555 # set the new commit in its proper phase
2554 2556 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2555 2557 if targetphase:
2556 2558 # retract boundary does not alter parent changesets.
2557 2559 # if a parent has a higher phase, the resulting phase will
2558 2560 # be compliant anyway
2559 2561 #
2560 2562 # if minimal phase was 0 we don't need to retract anything
2561 2563 phases.registernew(self, tr, targetphase, [n])
2562 2564 tr.close()
2563 2565 return n
2564 2566 finally:
2565 2567 if tr:
2566 2568 tr.release()
2567 2569 lock.release()
2568 2570
2569 2571 @unfilteredmethod
2570 2572 def destroying(self):
2571 2573 '''Inform the repository that nodes are about to be destroyed.
2572 2574 Intended for use by strip and rollback, so there's a common
2573 2575 place for anything that has to be done before destroying history.
2574 2576
2575 2577 This is mostly useful for saving state that is in memory and waiting
2576 2578 to be flushed when the current lock is released. Because a call to
2577 2579 destroyed is imminent, the repo will be invalidated causing those
2578 2580 changes to stay in memory (waiting for the next unlock), or vanish
2579 2581 completely.
2580 2582 '''
2581 2583 # When using the same lock to commit and strip, the phasecache is left
2582 2584 # dirty after committing. Then when we strip, the repo is invalidated,
2583 2585 # causing those changes to disappear.
2584 2586 if '_phasecache' in vars(self):
2585 2587 self._phasecache.write()
2586 2588
2587 2589 @unfilteredmethod
2588 2590 def destroyed(self):
2589 2591 '''Inform the repository that nodes have been destroyed.
2590 2592 Intended for use by strip and rollback, so there's a common
2591 2593 place for anything that has to be done after destroying history.
2592 2594 '''
2593 2595 # When one tries to:
2594 2596 # 1) destroy nodes thus calling this method (e.g. strip)
2595 2597 # 2) use phasecache somewhere (e.g. commit)
2596 2598 #
2597 2599 # then 2) will fail because the phasecache contains nodes that were
2598 2600 # removed. We can either remove phasecache from the filecache,
2599 2601 # causing it to reload next time it is accessed, or simply filter
2600 2602 # the removed nodes now and write the updated cache.
2601 2603 self._phasecache.filterunknown(self)
2602 2604 self._phasecache.write()
2603 2605
2604 2606 # refresh all repository caches
2605 2607 self.updatecaches()
2606 2608
2607 2609 # Ensure the persistent tag cache is updated. Doing it now
2608 2610 # means that the tag cache only has to worry about destroyed
2609 2611 # heads immediately after a strip/rollback. That in turn
2610 2612 # guarantees that "cachetip == currenttip" (comparing both rev
2611 2613 # and node) always means no nodes have been added or destroyed.
2612 2614
2613 2615 # XXX this is suboptimal when qrefresh'ing: we strip the current
2614 2616 # head, refresh the tag cache, then immediately add a new head.
2615 2617 # But I think doing it this way is necessary for the "instant
2616 2618 # tag cache retrieval" case to work.
2617 2619 self.invalidate()
2618 2620
2619 2621 def status(self, node1='.', node2=None, match=None,
2620 2622 ignored=False, clean=False, unknown=False,
2621 2623 listsubrepos=False):
2622 2624 '''a convenience method that calls node1.status(node2)'''
2623 2625 return self[node1].status(node2, match, ignored, clean, unknown,
2624 2626 listsubrepos)
2625 2627
2626 2628 def addpostdsstatus(self, ps):
2627 2629 """Add a callback to run within the wlock, at the point at which status
2628 2630 fixups happen.
2629 2631
2630 2632 On status completion, callback(wctx, status) will be called with the
2631 2633 wlock held, unless the dirstate has changed from underneath or the wlock
2632 2634 couldn't be grabbed.
2633 2635
2634 2636 Callbacks should not capture and use a cached copy of the dirstate --
2635 2637 it might change in the meanwhile. Instead, they should access the
2636 2638 dirstate via wctx.repo().dirstate.
2637 2639
2638 2640 This list is emptied out after each status run -- extensions should
2639 2641 make sure they add to this list each time dirstate.status is called.
2640 2642 Extensions should also make sure they don't call this for statuses
2641 2643 that don't involve the dirstate.
2642 2644 """
2643 2645
2644 2646 # The list is located here for uniqueness reasons -- it is actually
2645 2647 # managed by the workingctx, but that isn't unique per-repo.
2646 2648 self._postdsstatus.append(ps)
2647 2649
2648 2650 def postdsstatus(self):
2649 2651 """Used by workingctx to get the list of post-dirstate-status hooks."""
2650 2652 return self._postdsstatus
2651 2653
2652 2654 def clearpostdsstatus(self):
2653 2655 """Used by workingctx to clear post-dirstate-status hooks."""
2654 2656 del self._postdsstatus[:]
2655 2657
2656 2658 def heads(self, start=None):
2657 2659 if start is None:
2658 2660 cl = self.changelog
2659 2661 headrevs = reversed(cl.headrevs())
2660 2662 return [cl.node(rev) for rev in headrevs]
2661 2663
2662 2664 heads = self.changelog.heads(start)
2663 2665 # sort the output in rev descending order
2664 2666 return sorted(heads, key=self.changelog.rev, reverse=True)
2665 2667
2666 2668 def branchheads(self, branch=None, start=None, closed=False):
2667 2669 '''return a (possibly filtered) list of heads for the given branch
2668 2670
2669 2671 Heads are returned in topological order, from newest to oldest.
2670 2672 If branch is None, use the dirstate branch.
2671 2673 If start is not None, return only heads reachable from start.
2672 2674 If closed is True, return heads that are marked as closed as well.
2673 2675 '''
2674 2676 if branch is None:
2675 2677 branch = self[None].branch()
2676 2678 branches = self.branchmap()
2677 2679 if branch not in branches:
2678 2680 return []
2679 2681 # the cache returns heads ordered lowest to highest
2680 2682 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2681 2683 if start is not None:
2682 2684 # filter out the heads that cannot be reached from startrev
2683 2685 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2684 2686 bheads = [h for h in bheads if h in fbheads]
2685 2687 return bheads
2686 2688
2687 2689 def branches(self, nodes):
2688 2690 if not nodes:
2689 2691 nodes = [self.changelog.tip()]
2690 2692 b = []
2691 2693 for n in nodes:
2692 2694 t = n
2693 2695 while True:
2694 2696 p = self.changelog.parents(n)
2695 2697 if p[1] != nullid or p[0] == nullid:
2696 2698 b.append((t, n, p[0], p[1]))
2697 2699 break
2698 2700 n = p[0]
2699 2701 return b
2700 2702
2701 2703 def between(self, pairs):
2702 2704 r = []
2703 2705
2704 2706 for top, bottom in pairs:
2705 2707 n, l, i = top, [], 0
2706 2708 f = 1
2707 2709
2708 2710 while n != bottom and n != nullid:
2709 2711 p = self.changelog.parents(n)[0]
2710 2712 if i == f:
2711 2713 l.append(n)
2712 2714 f = f * 2
2713 2715 n = p
2714 2716 i += 1
2715 2717
2716 2718 r.append(l)
2717 2719
2718 2720 return r
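# Note on the loop above: since ``f`` doubles every time ``i`` reaches
# it, nodes are sampled at distances 1, 2, 4, 8, ... from ``top`` along
# the first-parent chain, i.e. exponentially spaced samples.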
2719 2721
2720 2722 def checkpush(self, pushop):
2721 2723 """Extensions can override this function if additional checks have
2722 2724 to be performed before pushing, or call it if they override push
2723 2725 command.
2724 2726 """
2725 2727
2726 2728 @unfilteredpropertycache
2727 2729 def prepushoutgoinghooks(self):
2728 2730 """Return util.hooks consists of a pushop with repo, remote, outgoing
2729 2731 methods, which are called before pushing changesets.
2730 2732 """
2731 2733 return util.hooks()
2732 2734
2733 2735 def pushkey(self, namespace, key, old, new):
2734 2736 try:
2735 2737 tr = self.currenttransaction()
2736 2738 hookargs = {}
2737 2739 if tr is not None:
2738 2740 hookargs.update(tr.hookargs)
2739 2741 hookargs = pycompat.strkwargs(hookargs)
2740 2742 hookargs[r'namespace'] = namespace
2741 2743 hookargs[r'key'] = key
2742 2744 hookargs[r'old'] = old
2743 2745 hookargs[r'new'] = new
2744 2746 self.hook('prepushkey', throw=True, **hookargs)
2745 2747 except error.HookAbort as exc:
2746 2748 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2747 2749 if exc.hint:
2748 2750 self.ui.write_err(_("(%s)\n") % exc.hint)
2749 2751 return False
2750 2752 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2751 2753 ret = pushkey.push(self, namespace, key, old, new)
2752 2754 def runhook():
2753 2755 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2754 2756 ret=ret)
2755 2757 self._afterlock(runhook)
2756 2758 return ret
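# Illustrative call (namespace 'bookmarks' is a standard pushkey
# namespace; the hex node strings are hypothetical):
#   repo.pushkey('bookmarks', 'stable', oldhexnode, newhexnode)
# Returns the namespace's push result; False when the prepushkey hook
# aborts.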
2757 2759
2758 2760 def listkeys(self, namespace):
2759 2761 self.hook('prelistkeys', throw=True, namespace=namespace)
2760 2762 self.ui.debug('listing keys for "%s"\n' % namespace)
2761 2763 values = pushkey.list(self, namespace)
2762 2764 self.hook('listkeys', namespace=namespace, values=values)
2763 2765 return values
2764 2766
2765 2767 def debugwireargs(self, one, two, three=None, four=None, five=None):
2766 2768 '''used to test argument passing over the wire'''
2767 2769 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2768 2770 pycompat.bytestr(four),
2769 2771 pycompat.bytestr(five))
2770 2772
2771 2773 def savecommitmessage(self, text):
2772 2774 fp = self.vfs('last-message.txt', 'wb')
2773 2775 try:
2774 2776 fp.write(text)
2775 2777 finally:
2776 2778 fp.close()
2777 2779 return self.pathto(fp.name[len(self.root) + 1:])
2778 2780
2779 2781 # used to avoid circular references so destructors work
2780 2782 def aftertrans(files):
2781 2783 renamefiles = [tuple(t) for t in files]
2782 2784 def a():
2783 2785 for vfs, src, dest in renamefiles:
2784 2786 # if src and dest refer to the same file, vfs.rename is a no-op,
2785 2787 # leaving both src and dest on disk. delete dest to make sure
2786 2788 # the rename couldn't be such a no-op.
2787 2789 vfs.tryunlink(dest)
2788 2790 try:
2789 2791 vfs.rename(src, dest)
2790 2792 except OSError: # journal file does not yet exist
2791 2793 pass
2792 2794 return a
2793 2795
2794 2796 def undoname(fn):
2795 2797 base, name = os.path.split(fn)
2796 2798 assert name.startswith('journal')
2797 2799 return os.path.join(base, name.replace('journal', 'undo', 1))
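# e.g. undoname('journal.dirstate') -> 'undo.dirstate'; only the first
# 'journal' in the basename is replaced.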
2798 2800
2799 2801 def instance(ui, path, create, intents=None, createopts=None):
2800 2802 localpath = util.urllocalpath(path)
2801 2803 if create:
2802 2804 createrepository(ui, localpath, createopts=createopts)
2803 2805
2804 2806 return makelocalrepository(ui, localpath, intents=intents)
2805 2807
2806 2808 def islocal(path):
2807 2809 return True
2808 2810
2809 2811 def newreporequirements(ui, createopts=None):
2810 2812 """Determine the set of requirements for a new local repository.
2811 2813
2812 2814 Extensions can wrap this function to specify custom requirements for
2813 2815 new repositories.
2814 2816 """
2815 2817 createopts = createopts or {}
2816 2818
2817 2819 # If the repo is being created from a shared repository, we copy
2818 2820 # its requirements.
2819 2821 if 'sharedrepo' in createopts:
2820 2822 requirements = set(createopts['sharedrepo'].requirements)
2821 2823 if createopts.get('sharedrelative'):
2822 2824 requirements.add('relshared')
2823 2825 else:
2824 2826 requirements.add('shared')
2825 2827
2826 2828 return requirements
2827 2829
2828 2830 requirements = {'revlogv1'}
2829 2831 if ui.configbool('format', 'usestore'):
2830 2832 requirements.add('store')
2831 2833 if ui.configbool('format', 'usefncache'):
2832 2834 requirements.add('fncache')
2833 2835 if ui.configbool('format', 'dotencode'):
2834 2836 requirements.add('dotencode')
2835 2837
2836 2838 compengine = ui.config('experimental', 'format.compression')
2837 2839 if compengine not in util.compengines:
2838 2840 raise error.Abort(_('compression engine %s defined by '
2839 2841 'experimental.format.compression not available') %
2840 2842 compengine,
2841 2843 hint=_('run "hg debuginstall" to list available '
2842 2844 'compression engines'))
2843 2845
2844 2846 # zlib is the historical default and doesn't need an explicit requirement.
2845 2847 if compengine != 'zlib':
2846 2848 requirements.add('exp-compression-%s' % compengine)
2847 2849
2848 2850 if scmutil.gdinitconfig(ui):
2849 2851 requirements.add('generaldelta')
2850 2852 if ui.configbool('experimental', 'treemanifest'):
2851 2853 requirements.add('treemanifest')
2852 2854 # experimental config: format.sparse-revlog
2853 2855 if ui.configbool('format', 'sparse-revlog'):
2854 2856 requirements.add(SPARSEREVLOG_REQUIREMENT)
2855 2857
2856 2858 revlogv2 = ui.config('experimental', 'revlogv2')
2857 2859 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2858 2860 requirements.remove('revlogv1')
2859 2861 # generaldelta is implied by revlogv2.
2860 2862 requirements.discard('generaldelta')
2861 2863 requirements.add(REVLOGV2_REQUIREMENT)
2862 2864 # experimental config: format.internal-phase
2863 2865 if ui.configbool('format', 'internal-phase'):
2864 2866 requirements.add('internal-phase')
2865 2867
2866 2868 if createopts.get('narrowfiles'):
2867 2869 requirements.add(repository.NARROW_REQUIREMENT)
2868 2870
2869 2871 return requirements
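# A sketch of the result under a stock configuration (store, fncache,
# dotencode and generaldelta all enabled by default):
#   {'revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta'}
# The exact set depends on the config values read above.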
2870 2872
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle (see the sketch after this function).
    """
    known = {
        'narrowfiles',
        'sharedrepo',
        'sharedrelative',
        'shareditems',
    }

    return {k: v for k, v in createopts.items() if k not in known}

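# Example (illustrative sketch): an extension that understands a custom
# creation option can wrap filterknowncreateopts() and drop the keys it
# handles from the returned dict so repository creation is allowed. The
# 'myfeature' key is hypothetical.
#
#   def _filterknowncreateopts(orig, ui, createopts):
#       unknown = orig(ui, createopts)
#       unknown.pop('myfeature', None)
#       return unknown
#
#   extensions.wrapfunction(localrepo, 'filterknowncreateopts',
#                           _filterknowncreateopts)
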
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    narrowfiles
        Set up repository to support narrow file storage.
    sharedrepo
        Repository object from which storage should be shared.
    sharedrelative
        Boolean indicating if the path to the shared repo should be
        stored as relative. By default, the pointer to the "parent" repo
        is stored as an absolute path.
    shareditems
        Set of items to share to the new repository (in addition to storage).
    """
    createopts = createopts or {}

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    if 'sharedrepo' in createopts:
        sharedpath = createopts['sharedrepo'].sharedpath

        if createopts.get('sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(_('cannot calculate relative path'),
                                  hint=stringutil.forcebytestr(e))

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)

    if b'store' in requirements and 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
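        # (The first four bytes of a revlog index encode its version and
        # flags; clients that predate the requirements file only know
        # revlog versions 0 and 1, so they fail with an unknown-format
        # error when they read the \0\0\0\2 header written below.)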
        hgvfs.append(b'00changelog.i',
                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
                     b'layout')

    scmutil.writerequires(hgvfs, requirements)

    # Write out file telling readers where to find the shared store.
    if 'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get('shareditems'):
        shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)

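# Example (illustrative): driving createrepository() programmatically. The
# target path is hypothetical; 'narrowfiles' is one of the createopts keys
# documented above.
#
#   from mercurial import localrepo, ui as uimod
#
#   ui = uimod.ui.load()
#   localrepo.createrepository(ui, b'/tmp/newrepo',
#                              createopts={'narrowfiles': True})
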
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one where
    # all attribute lookups result in an error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == r'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError('repo instances should not be used '
                                         'after unshare')

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)
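
# Example (illustrative): once poisoned, only close() remains usable.
#
#   localrepo.poisonrepository(repo)
#   repo.close()        # still allowed (and a no-op)
#   repo.changelog      # raises error.ProgrammingError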