localrepo: resolve store and cachevfs in makelocalrepository()...
Gregory Szorc
r39733:98ca9078 default
@@ -1,2678 +1,2703
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 )
24 24 from . import (
25 25 bookmarks,
26 26 branchmap,
27 27 bundle2,
28 28 changegroup,
29 29 changelog,
30 30 color,
31 31 context,
32 32 dirstate,
33 33 dirstateguard,
34 34 discovery,
35 35 encoding,
36 36 error,
37 37 exchange,
38 38 extensions,
39 39 filelog,
40 40 hook,
41 41 lock as lockmod,
42 42 manifest,
43 43 match as matchmod,
44 44 merge as mergemod,
45 45 mergeutil,
46 46 namespaces,
47 47 narrowspec,
48 48 obsolete,
49 49 pathutil,
50 50 phases,
51 51 pushkey,
52 52 pycompat,
53 53 repository,
54 54 repoview,
55 55 revset,
56 56 revsetlang,
57 57 scmutil,
58 58 sparse,
59 store,
59 store as storemod,
60 60 subrepoutil,
61 61 tags as tagsmod,
62 62 transaction,
63 63 txnutil,
64 64 util,
65 65 vfs as vfsmod,
66 66 )
67 67 from .utils import (
68 68 interfaceutil,
69 69 procutil,
70 70 stringutil,
71 71 )
72 72
73 73 from .revlogutils import (
74 74 constants as revlogconst,
75 75 )
76 76
77 77 release = lockmod.release
78 78 urlerr = util.urlerr
79 79 urlreq = util.urlreq
80 80
81 81 # set of (path, vfs-location) tuples. vfs-location is:
82 82 # - 'plain' for vfs relative paths
83 83 # - '' for svfs relative paths
84 84 _cachedfiles = set()
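# Editor's note, an illustrative sketch: given the decorators used later in
# this module, ``_cachedfiles`` ends up holding tuples such as
# ('bookmarks', 'plain') for .hg/bookmarks (via repofilecache) and
# ('phaseroots', '') for .hg/store/phaseroots (via storecache).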
85 85
86 86 class _basefilecache(scmutil.filecache):
87 87 """All filecache usage on repo are done for logic that should be unfiltered
88 88 """
89 89 def __get__(self, repo, type=None):
90 90 if repo is None:
91 91 return self
92 92 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
93 93 def __set__(self, repo, value):
94 94 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
95 95 def __delete__(self, repo):
96 96 return super(_basefilecache, self).__delete__(repo.unfiltered())
97 97
98 98 class repofilecache(_basefilecache):
99 99 """filecache for files in .hg but outside of .hg/store"""
100 100 def __init__(self, *paths):
101 101 super(repofilecache, self).__init__(*paths)
102 102 for path in paths:
103 103 _cachedfiles.add((path, 'plain'))
104 104
105 105 def join(self, obj, fname):
106 106 return obj.vfs.join(fname)
107 107
108 108 class storecache(_basefilecache):
109 109 """filecache for files in the store"""
110 110 def __init__(self, *paths):
111 111 super(storecache, self).__init__(*paths)
112 112 for path in paths:
113 113 _cachedfiles.add((path, ''))
114 114
115 115 def join(self, obj, fname):
116 116 return obj.sjoin(fname)
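# Editor's note, a minimal usage sketch (property and file names
# hypothetical): a filecache-ed property is computed once and then reused
# until the stat of its backing file changes:
#
#   class myrepo(localrepository):
#       @repofilecache('myfile')
#       def _myprop(self):
#           return self.vfs.read('myfile')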
117 117
118 118 def isfilecached(repo, name):
119 119 """check if a repo has already cached "name" filecache-ed property
120 120
121 121 This returns a (cachedobj-or-None, iscached) tuple.
122 122 """
123 123 cacheentry = repo.unfiltered()._filecache.get(name, None)
124 124 if not cacheentry:
125 125 return None, False
126 126 return cacheentry.obj, True
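# Editor's note, a brief usage sketch: this lets callers peek at a
# filecache-ed property without triggering its (possibly expensive)
# computation:
#
#   obj, cached = isfilecached(repo, 'dirstate')
#   if cached:
#       pass  # reuse obj; the dirstate was already loaded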
127 127
128 128 class unfilteredpropertycache(util.propertycache):
129 129 """propertycache that apply to unfiltered repo only"""
130 130
131 131 def __get__(self, repo, type=None):
132 132 unfi = repo.unfiltered()
133 133 if unfi is repo:
134 134 return super(unfilteredpropertycache, self).__get__(unfi)
135 135 return getattr(unfi, self.name)
136 136
137 137 class filteredpropertycache(util.propertycache):
138 138 """propertycache that must take filtering in account"""
139 139
140 140 def cachevalue(self, obj, value):
141 141 object.__setattr__(obj, self.name, value)
142 142
143 143
144 144 def hasunfilteredcache(repo, name):
145 145 """check if a repo has an unfilteredpropertycache value for <name>"""
146 146 return name in vars(repo.unfiltered())
147 147
148 148 def unfilteredmethod(orig):
149 149 """decorate method that always need to be run on unfiltered version"""
150 150 def wrapper(repo, *args, **kwargs):
151 151 return orig(repo.unfiltered(), *args, **kwargs)
152 152 return wrapper
153 153
154 154 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
155 155 'unbundle'}
156 156 legacycaps = moderncaps.union({'changegroupsubset'})
157 157
158 158 @interfaceutil.implementer(repository.ipeercommandexecutor)
159 159 class localcommandexecutor(object):
160 160 def __init__(self, peer):
161 161 self._peer = peer
162 162 self._sent = False
163 163 self._closed = False
164 164
165 165 def __enter__(self):
166 166 return self
167 167
168 168 def __exit__(self, exctype, excvalue, exctb):
169 169 self.close()
170 170
171 171 def callcommand(self, command, args):
172 172 if self._sent:
173 173 raise error.ProgrammingError('callcommand() cannot be used after '
174 174 'sendcommands()')
175 175
176 176 if self._closed:
177 177 raise error.ProgrammingError('callcommand() cannot be used after '
178 178 'close()')
179 179
180 180 # We don't need to support anything fancy. Just call the named
181 181 # method on the peer and return a resolved future.
182 182 fn = getattr(self._peer, pycompat.sysstr(command))
183 183
184 184 f = pycompat.futures.Future()
185 185
186 186 try:
187 187 result = fn(**pycompat.strkwargs(args))
188 188 except Exception:
189 189 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
190 190 else:
191 191 f.set_result(result)
192 192
193 193 return f
194 194
195 195 def sendcommands(self):
196 196 self._sent = True
197 197
198 198 def close(self):
199 199 self._closed = True
200 200
201 201 @interfaceutil.implementer(repository.ipeercommands)
202 202 class localpeer(repository.peer):
203 203 '''peer for a local repo; reflects only the most recent API'''
204 204
205 205 def __init__(self, repo, caps=None):
206 206 super(localpeer, self).__init__()
207 207
208 208 if caps is None:
209 209 caps = moderncaps.copy()
210 210 self._repo = repo.filtered('served')
211 211 self.ui = repo.ui
212 212 self._caps = repo._restrictcapabilities(caps)
213 213
214 214 # Begin of _basepeer interface.
215 215
216 216 def url(self):
217 217 return self._repo.url()
218 218
219 219 def local(self):
220 220 return self._repo
221 221
222 222 def peer(self):
223 223 return self
224 224
225 225 def canpush(self):
226 226 return True
227 227
228 228 def close(self):
229 229 self._repo.close()
230 230
231 231 # End of _basepeer interface.
232 232
233 233 # Begin of _basewirecommands interface.
234 234
235 235 def branchmap(self):
236 236 return self._repo.branchmap()
237 237
238 238 def capabilities(self):
239 239 return self._caps
240 240
241 241 def clonebundles(self):
242 242 return self._repo.tryread('clonebundles.manifest')
243 243
244 244 def debugwireargs(self, one, two, three=None, four=None, five=None):
245 245 """Used to test argument passing over the wire"""
246 246 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
247 247 pycompat.bytestr(four),
248 248 pycompat.bytestr(five))
249 249
250 250 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
251 251 **kwargs):
252 252 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
253 253 common=common, bundlecaps=bundlecaps,
254 254 **kwargs)[1]
255 255 cb = util.chunkbuffer(chunks)
256 256
257 257 if exchange.bundle2requested(bundlecaps):
258 258 # When requesting a bundle2, getbundle returns a stream to make the
259 259 # wire level function happier. We need to build a proper object
260 260 # from it in local peer.
261 261 return bundle2.getunbundler(self.ui, cb)
262 262 else:
263 263 return changegroup.getunbundler('01', cb, None)
264 264
265 265 def heads(self):
266 266 return self._repo.heads()
267 267
268 268 def known(self, nodes):
269 269 return self._repo.known(nodes)
270 270
271 271 def listkeys(self, namespace):
272 272 return self._repo.listkeys(namespace)
273 273
274 274 def lookup(self, key):
275 275 return self._repo.lookup(key)
276 276
277 277 def pushkey(self, namespace, key, old, new):
278 278 return self._repo.pushkey(namespace, key, old, new)
279 279
280 280 def stream_out(self):
281 281 raise error.Abort(_('cannot perform stream clone against local '
282 282 'peer'))
283 283
284 284 def unbundle(self, bundle, heads, url):
285 285 """apply a bundle on a repo
286 286
287 287 This function handles the repo locking itself."""
288 288 try:
289 289 try:
290 290 bundle = exchange.readbundle(self.ui, bundle, None)
291 291 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
292 292 if util.safehasattr(ret, 'getchunks'):
293 293 # This is a bundle20 object, turn it into an unbundler.
294 294 # This little dance should be dropped eventually when the
295 295 # API is finally improved.
296 296 stream = util.chunkbuffer(ret.getchunks())
297 297 ret = bundle2.getunbundler(self.ui, stream)
298 298 return ret
299 299 except Exception as exc:
300 300 # If the exception contains output salvaged from a bundle2
301 301 # reply, we need to make sure it is printed before continuing
302 302 # to fail. So we build a bundle2 with such output and consume
303 303 # it directly.
304 304 #
305 305 # This is not very elegant but allows a "simple" solution for
306 306 # issue4594
307 307 output = getattr(exc, '_bundle2salvagedoutput', ())
308 308 if output:
309 309 bundler = bundle2.bundle20(self._repo.ui)
310 310 for out in output:
311 311 bundler.addpart(out)
312 312 stream = util.chunkbuffer(bundler.getchunks())
313 313 b = bundle2.getunbundler(self.ui, stream)
314 314 bundle2.processbundle(self._repo, b)
315 315 raise
316 316 except error.PushRaced as exc:
317 317 raise error.ResponseError(_('push failed:'),
318 318 stringutil.forcebytestr(exc))
319 319
320 320 # End of _basewirecommands interface.
321 321
322 322 # Begin of peer interface.
323 323
324 324 def commandexecutor(self):
325 325 return localcommandexecutor(self)
326 326
327 327 # End of peer interface.
328 328
329 329 @interfaceutil.implementer(repository.ipeerlegacycommands)
330 330 class locallegacypeer(localpeer):
331 331 '''peer extension which implements legacy methods too; used for tests with
332 332 restricted capabilities'''
333 333
334 334 def __init__(self, repo):
335 335 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
336 336
337 337 # Begin of baselegacywirecommands interface.
338 338
339 339 def between(self, pairs):
340 340 return self._repo.between(pairs)
341 341
342 342 def branches(self, nodes):
343 343 return self._repo.branches(nodes)
344 344
345 345 def changegroup(self, nodes, source):
346 346 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
347 347 missingheads=self._repo.heads())
348 348 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
349 349
350 350 def changegroupsubset(self, bases, heads, source):
351 351 outgoing = discovery.outgoing(self._repo, missingroots=bases,
352 352 missingheads=heads)
353 353 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
354 354
355 355 # End of baselegacywirecommands interface.
356 356
357 357 # Increment the sub-version when the revlog v2 format changes to lock out old
358 358 # clients.
359 359 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
360 360
361 361 # A repository with the sparserevlog feature will have delta chains that
362 362 # can spread over a larger span. Sparse reading cuts these large spans into
363 363 # pieces, so that each piece isn't too big.
364 364 # Without the sparserevlog capability, reading from the repository could use
365 365 # huge amounts of memory, because the whole span would be read at once,
366 366 # including all the intermediate revisions that aren't pertinent for the chain.
367 367 # This is why once a repository has enabled sparse-read, it becomes required.
368 368 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
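# Editor's note, an illustrative example: if a delta chain's revisions sit at
# scattered offsets across a very large span of a revlog data file, a
# non-sparse reader pulls in the entire span at once; sparse reading instead
# reads each densely populated slice separately and skips the gaps.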
369 369
370 370 # Functions receiving (ui, features) that extensions can register to impact
371 371 # the ability to load repositories with custom requirements. Only
372 372 # functions defined in loaded extensions are called.
373 373 #
374 374 # The function receives a set of requirement strings that the repository
375 375 # is capable of opening. Functions will typically add elements to the
376 376 # set to reflect that the extension knows how to handle those requirements.
377 377 featuresetupfuncs = set()
378 378
379 379 def makelocalrepository(baseui, path, intents=None):
380 380 """Create a local repository object.
381 381
382 382 Given arguments needed to construct a local repository, this function
383 383 derives a type suitable for representing that repository and returns an
384 384 instance of it.
385 385
386 386 The returned object conforms to the ``repository.completelocalrepository``
387 387 interface.
388 388 """
389 389 ui = baseui.copy()
390 390 # Prevent copying repo configuration.
391 391 ui.copy = baseui.copy
392 392
393 393 # Working directory VFS rooted at repository root.
394 394 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
395 395
396 396 # Main VFS for .hg/ directory.
397 397 hgpath = wdirvfs.join(b'.hg')
398 398 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
399 399
400 400 # The .hg/ path should exist and should be a directory. All other
401 401 # cases are errors.
402 402 if not hgvfs.isdir():
403 403 try:
404 404 hgvfs.stat()
405 405 except OSError as e:
406 406 if e.errno != errno.ENOENT:
407 407 raise
408 408
409 409 raise error.RepoError(_(b'repository %s not found') % path)
410 410
411 411 # .hg/requires file contains a newline-delimited list of
412 412 # features/capabilities the opener (us) must have in order to use
413 413 # the repository. This file was introduced in Mercurial 0.9.2,
414 414 # which means very old repositories may not have one. We assume
415 415 # a missing file translates to no requirements.
416 416 try:
417 417 requirements = set(hgvfs.read(b'requires').splitlines())
418 418 except IOError as e:
419 419 if e.errno != errno.ENOENT:
420 420 raise
421 421 requirements = set()
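# Editor's note, an illustrative example: a typical .hg/requires for a
# freshly created repository contains one feature name per line, e.g.:
#
#   dotencode
#   fncache
#   generaldelta
#   revlogv1
#   store
#
# each of which appears in ``localrepository._basesupported`` below.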
422 422
423 423 # The .hg/hgrc file may load extensions or contain config options
424 424 # that influence repository construction. Attempt to load it and
425 425 # process any new extensions that it may have pulled in.
426 426 try:
427 427 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
428 428 except IOError:
429 429 pass
430 430 else:
431 431 extensions.loadall(ui)
432 432
433 433 supportedrequirements = gathersupportedrequirements(ui)
434 434
435 435 # We first validate the requirements are known.
436 436 ensurerequirementsrecognized(requirements, supportedrequirements)
437 437
438 438 # Then we validate that the known set is reasonable to use together.
439 439 ensurerequirementscompatible(ui, requirements)
440 440
441 441 # TODO there are unhandled edge cases related to opening repositories with
442 442 # shared storage. If storage is shared, we should also test for requirements
443 443 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
444 444 # that repo, as that repo may load extensions needed to open it. This is a
445 445 # bit complicated because we don't want the other hgrc to overwrite settings
446 446 # in this hgrc.
447 447 #
448 448 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
449 449 # file when sharing repos. But if a requirement is added after the share is
450 450 # performed, thereby introducing a new requirement for the opener, we may
450 450 # not see that and could encounter a run-time error interacting with
452 452 # that shared store since it has an unknown-to-us requirement.
453 453
454 454 # At this point, we know we should be capable of opening the repository.
455 455 # Now get on with doing that.
456 456
457 # The "store" part of the repository holds versioned data. How it is
458 # accessed is determined by various requirements. The ``shared`` or
459 # ``relshared`` requirements indicate the store lives in the path contained
460 # in the ``.hg/sharedpath`` file. This is an absolute path for
461 # ``shared`` and relative to ``.hg/`` for ``relshared``.
462 if b'shared' in requirements or b'relshared' in requirements:
463 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
464 if b'relshared' in requirements:
465 sharedpath = hgvfs.join(sharedpath)
466
467 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
468
469 if not sharedvfs.exists():
470 raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
471 b'directory %s') % sharedvfs.base)
472
473 storebasepath = sharedvfs.base
474 cachepath = sharedvfs.join(b'cache')
475 else:
476 storebasepath = hgvfs.base
477 cachepath = hgvfs.join(b'cache')
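# Editor's note, an illustrative example (paths hypothetical): for a
# repository created with ``hg share --relative ../main repo``,
# .hg/sharedpath contains something like b'../main/.hg', which the
# hgvfs.join() above resolves relative to this repository's .hg/ directory.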
478
479 # The store has changed over time and the exact layout is dictated by
480 # requirements. The store interface abstracts differences across all
481 # of them.
482 store = storemod.store(requirements, storebasepath,
483 lambda base: vfsmod.vfs(base, cacheaudited=True))
484
485 hgvfs.createmode = store.createmode
486
487 # The cache vfs is used to manage cache files.
488 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
489 cachevfs.createmode = store.createmode
490
457 491 return localrepository(
458 492 baseui=baseui,
459 493 ui=ui,
460 494 origroot=path,
461 495 wdirvfs=wdirvfs,
462 496 hgvfs=hgvfs,
463 497 requirements=requirements,
464 498 supportedrequirements=supportedrequirements,
499 sharedpath=storebasepath,
500 store=store,
501 cachevfs=cachevfs,
465 502 intents=intents)
466 503
467 504 def gathersupportedrequirements(ui):
468 505 """Determine the complete set of recognized requirements."""
469 506 # Start with all requirements supported by this file.
470 507 supported = set(localrepository._basesupported)
471 508
472 509 # Execute ``featuresetupfuncs`` entries if they belong to an extension
473 510 # relevant to this ui instance.
474 511 modules = {m.__name__ for n, m in extensions.extensions(ui)}
475 512
476 513 for fn in featuresetupfuncs:
477 514 if fn.__module__ in modules:
478 515 fn(ui, supported)
479 516
480 517 # Add derived requirements from registered compression engines.
481 518 for name in util.compengines:
482 519 engine = util.compengines[name]
483 520 if engine.revlogheader():
484 521 supported.add(b'exp-compression-%s' % name)
485 522
486 523 return supported
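# Editor's note, an illustrative sketch of how an extension plugs into the
# ``featuresetupfuncs`` mechanism described above (names hypothetical):
#
#   from mercurial import localrepo
#
#   def featuresetup(ui, features):
#       features.add(b'exp-myfeature')
#
#   def uisetup(ui):
#       localrepo.featuresetupfuncs.add(featuresetup)
#
# Only functions whose defining module is a loaded extension are invoked.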
487 524
488 525 def ensurerequirementsrecognized(requirements, supported):
489 526 """Validate that a set of local requirements is recognized.
490 527
491 528 Receives a set of requirements. Raises an ``error.RepoError`` if there
492 529 exists any requirement in that set that currently loaded code doesn't
493 530 recognize.
496 533 """
497 534 missing = set()
498 535
499 536 for requirement in requirements:
500 537 if requirement in supported:
501 538 continue
502 539
503 540 if not requirement or not requirement[0:1].isalnum():
504 541 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
505 542
506 543 missing.add(requirement)
507 544
508 545 if missing:
509 546 raise error.RequirementError(
510 547 _(b'repository requires features unknown to this Mercurial: %s') %
511 548 b' '.join(sorted(missing)),
512 549 hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
513 550 b'for more information'))
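# Editor's note, an illustrative failure (requirement name hypothetical): a
# .hg/requires listing b'exp-unknownfeature' under a Mercurial that does not
# support it aborts roughly as:
#
#   abort: repository requires features unknown to this Mercurial: exp-unknownfeature
#   (see https://mercurial-scm.org/wiki/MissingRequirement for more information)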
514 551
515 552 def ensurerequirementscompatible(ui, requirements):
516 553 """Validates that a set of recognized requirements is mutually compatible.
517 554
518 555 Some requirements may not be compatible with others or require
519 556 config options that aren't enabled. This function is called during
520 557 repository opening to ensure that the set of requirements needed
521 558 to open a repository is sane and compatible with config options.
522 559
523 560 Extensions can monkeypatch this function to perform additional
524 561 checking.
525 562
526 563 ``error.RepoError`` should be raised on failure.
527 564 """
528 565 if b'exp-sparse' in requirements and not sparse.enabled:
529 566 raise error.RepoError(_(b'repository is using sparse feature but '
530 567 b'sparse is not enabled; enable the '
531 568 b'"sparse" extensions to access'))
532 569
533 570 @interfaceutil.implementer(repository.completelocalrepository)
534 571 class localrepository(object):
535 572
536 573 # obsolete experimental requirements:
537 574 # - manifestv2: An experimental new manifest format that allowed
538 575 # for stem compression of long paths. Experiment ended up not
539 576 # being successful (repository sizes went up due to worse delta
540 577 # chains), and the code was deleted in 4.6.
541 578 supportedformats = {
542 579 'revlogv1',
543 580 'generaldelta',
544 581 'treemanifest',
545 582 REVLOGV2_REQUIREMENT,
546 583 SPARSEREVLOG_REQUIREMENT,
547 584 }
548 585 _basesupported = supportedformats | {
549 586 'store',
550 587 'fncache',
551 588 'shared',
552 589 'relshared',
553 590 'dotencode',
554 591 'exp-sparse',
555 592 'internal-phase'
556 593 }
557 594 openerreqs = {
558 595 'revlogv1',
559 596 'generaldelta',
560 597 'treemanifest',
561 598 }
562 599
563 600 # list of prefixes for files which can be written without 'wlock'
564 601 # Extensions should extend this list when needed
565 602 _wlockfreeprefix = {
566 603 # We might consider requiring 'wlock' for the next
567 604 # two, but pretty much all the existing code assumes
568 605 # wlock is not needed, so we keep them excluded for
569 606 # now.
570 607 'hgrc',
571 608 'requires',
572 609 # XXX cache is a complicated business; someone
573 610 # should investigate this in depth at some point
574 611 'cache/',
575 612 # XXX shouldn't dirstate be covered by the wlock?
576 613 'dirstate',
577 614 # XXX bisect was still a bit too messy at the time
578 615 # this changeset was introduced. Someone should fix
579 616 # the remaining bit and drop this line
580 617 'bisect.state',
581 618 }
582 619
583 620 def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
584 supportedrequirements, intents=None):
621 supportedrequirements, sharedpath, store, cachevfs,
622 intents=None):
585 623 """Create a new local repository instance.
586 624
587 625 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
588 626 or ``localrepo.makelocalrepository()`` for obtaining a new repository
589 627 object.
590 628
591 629 Arguments:
592 630
593 631 baseui
594 632 ``ui.ui`` instance that ``ui`` argument was based off of.
595 633
596 634 ui
597 635 ``ui.ui`` instance for use by the repository.
598 636
599 637 origroot
600 638 ``bytes`` path to working directory root of this repository.
601 639
602 640 wdirvfs
603 641 ``vfs.vfs`` rooted at the working directory.
604 642
605 643 hgvfs
606 644 ``vfs.vfs`` rooted at .hg/
607 645
608 646 requirements
609 647 ``set`` of bytestrings representing repository opening requirements.
610 648
611 649 supportedrequirements
612 650 ``set`` of bytestrings representing repository requirements that we
613 651 know how to open. May be a superset of ``requirements``.
614 652
653 sharedpath
654 ``bytes`` path to the storage base directory. Points to a
655 ``.hg/`` directory somewhere.
656
657 store
658 ``store.basicstore`` (or derived) instance providing access to
659 versioned storage.
660
661 cachevfs
662 ``vfs.vfs`` used for cache files.
663
615 664 intents
616 665 ``set`` of system strings indicating what this repo will be used
617 666 for.
618 667 """
619 668 self.baseui = baseui
620 669 self.ui = ui
621 670 self.origroot = origroot
622 671 # vfs rooted at working directory.
623 672 self.wvfs = wdirvfs
624 673 self.root = wdirvfs.base
625 674 # vfs rooted at .hg/. Used to access most non-store paths.
626 675 self.vfs = hgvfs
627 676 self.path = hgvfs.base
628 677 self.requirements = requirements
629 678 self.supported = supportedrequirements
679 self.sharedpath = sharedpath
680 self.store = store
681 self.cachevfs = cachevfs
630 682
631 683 self.filtername = None
632 # svfs: usually rooted at .hg/store, used to access repository history
633 # If this is a shared repository, this vfs may point to another
634 # repository's .hg/store directory.
635 self.svfs = None
636 684
637 685 if (self.ui.configbool('devel', 'all-warnings') or
638 686 self.ui.configbool('devel', 'check-locks')):
639 687 self.vfs.audit = self._getvfsward(self.vfs.audit)
640 688 # A list of callbacks to shape the phase if no data were found.
641 689 # Callbacks are in the form: func(repo, roots) --> processed root.
642 690 # This list is to be filled by extensions during repo setup
643 691 self._phasedefaults = []
644 692
645 693 color.setup(self.ui)
646 694
647 cachepath = self.vfs.join('cache')
648 self.sharedpath = self.path
649 try:
650 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
651 if 'relshared' in self.requirements:
652 sharedpath = self.vfs.join(sharedpath)
653 vfs = vfsmod.vfs(sharedpath, realpath=True)
654 cachepath = vfs.join('cache')
655 s = vfs.base
656 if not vfs.exists():
657 raise error.RepoError(
658 _('.hg/sharedpath points to nonexistent directory %s') % s)
659 self.sharedpath = s
660 except IOError as inst:
661 if inst.errno != errno.ENOENT:
662 raise
663
664 self.store = store.store(
665 self.requirements, self.sharedpath,
666 lambda base: vfsmod.vfs(base, cacheaudited=True))
667 695 self.spath = self.store.path
668 696 self.svfs = self.store.vfs
669 697 self.sjoin = self.store.join
670 self.vfs.createmode = self.store.createmode
671 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
672 self.cachevfs.createmode = self.store.createmode
673 698 if (self.ui.configbool('devel', 'all-warnings') or
674 699 self.ui.configbool('devel', 'check-locks')):
675 700 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
676 701 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
677 702 else: # standard vfs
678 703 self.svfs.audit = self._getsvfsward(self.svfs.audit)
679 704 self._applyopenerreqs()
680 705
681 706 self._dirstatevalidatewarned = False
682 707
683 708 self._branchcaches = {}
684 709 self._revbranchcache = None
685 710 self._filterpats = {}
686 711 self._datafilters = {}
687 712 self._transref = self._lockref = self._wlockref = None
688 713
689 714 # A cache for various files under .hg/ that tracks file changes,
690 715 # (used by the filecache decorator)
691 716 #
692 717 # Maps a property name to its util.filecacheentry
693 718 self._filecache = {}
694 719
695 720 # holds sets of revisions to be filtered
696 721 # should be cleared when something might have changed the filter value:
697 722 # - new changesets,
698 723 # - phase change,
699 724 # - new obsolescence marker,
700 725 # - working directory parent change,
701 726 # - bookmark changes
702 727 self.filteredrevcache = {}
703 728
704 729 # post-dirstate-status hooks
705 730 self._postdsstatus = []
706 731
707 732 # generic mapping between names and nodes
708 733 self.names = namespaces.namespaces()
709 734
710 735 # Key to signature value.
711 736 self._sparsesignaturecache = {}
712 737 # Signature to cached matcher instance.
713 738 self._sparsematchercache = {}
714 739
715 740 def _getvfsward(self, origfunc):
716 741 """build a ward for self.vfs"""
717 742 rref = weakref.ref(self)
718 743 def checkvfs(path, mode=None):
719 744 ret = origfunc(path, mode=mode)
720 745 repo = rref()
721 746 if (repo is None
722 747 or not util.safehasattr(repo, '_wlockref')
723 748 or not util.safehasattr(repo, '_lockref')):
724 749 return
725 750 if mode in (None, 'r', 'rb'):
726 751 return
727 752 if path.startswith(repo.path):
728 753 # truncate name relative to the repository (.hg)
729 754 path = path[len(repo.path) + 1:]
730 755 if path.startswith('cache/'):
731 756 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
732 757 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
733 758 if path.startswith('journal.'):
734 759 # journal is covered by 'lock'
735 760 if repo._currentlock(repo._lockref) is None:
736 761 repo.ui.develwarn('write with no lock: "%s"' % path,
737 762 stacklevel=2, config='check-locks')
738 763 elif repo._currentlock(repo._wlockref) is None:
739 764 # rest of vfs files are covered by 'wlock'
740 765 #
741 766 # exclude special files
742 767 for prefix in self._wlockfreeprefix:
743 768 if path.startswith(prefix):
744 769 return
745 770 repo.ui.develwarn('write with no wlock: "%s"' % path,
746 771 stacklevel=2, config='check-locks')
747 772 return ret
748 773 return checkvfs
749 774
750 775 def _getsvfsward(self, origfunc):
751 776 """build a ward for self.svfs"""
752 777 rref = weakref.ref(self)
753 778 def checksvfs(path, mode=None):
754 779 ret = origfunc(path, mode=mode)
755 780 repo = rref()
756 781 if repo is None or not util.safehasattr(repo, '_lockref'):
757 782 return
758 783 if mode in (None, 'r', 'rb'):
759 784 return
760 785 if path.startswith(repo.sharedpath):
761 786 # truncate name relative to the repository (.hg)
762 787 path = path[len(repo.sharedpath) + 1:]
763 788 if repo._currentlock(repo._lockref) is None:
764 789 repo.ui.develwarn('write with no lock: "%s"' % path,
765 790 stacklevel=3)
766 791 return ret
767 792 return checksvfs
768 793
769 794 def close(self):
770 795 self._writecaches()
771 796
772 797 def _writecaches(self):
773 798 if self._revbranchcache:
774 799 self._revbranchcache.write()
775 800
776 801 def _restrictcapabilities(self, caps):
777 802 if self.ui.configbool('experimental', 'bundle2-advertise'):
778 803 caps = set(caps)
779 804 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
780 805 role='client'))
781 806 caps.add('bundle2=' + urlreq.quote(capsblob))
782 807 return caps
783 808
784 809 def _applyopenerreqs(self):
785 810 self.svfs.options = dict((r, 1) for r in self.requirements
786 811 if r in self.openerreqs)
787 812 # experimental config: format.chunkcachesize
788 813 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
789 814 if chunkcachesize is not None:
790 815 self.svfs.options['chunkcachesize'] = chunkcachesize
791 816 # experimental config: format.manifestcachesize
792 817 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
793 818 if manifestcachesize is not None:
794 819 self.svfs.options['manifestcachesize'] = manifestcachesize
795 820 deltabothparents = self.ui.configbool('storage',
796 821 'revlog.optimize-delta-parent-choice')
797 822 self.svfs.options['deltabothparents'] = deltabothparents
798 823 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
799 824 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
800 825 if 0 <= chainspan:
801 826 self.svfs.options['maxdeltachainspan'] = chainspan
802 827 mmapindexthreshold = self.ui.configbytes('experimental',
803 828 'mmapindexthreshold')
804 829 if mmapindexthreshold is not None:
805 830 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
806 831 withsparseread = self.ui.configbool('experimental', 'sparse-read')
807 832 srdensitythres = float(self.ui.config('experimental',
808 833 'sparse-read.density-threshold'))
809 834 srmingapsize = self.ui.configbytes('experimental',
810 835 'sparse-read.min-gap-size')
811 836 self.svfs.options['with-sparse-read'] = withsparseread
812 837 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
813 838 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
814 839 sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
815 840 self.svfs.options['sparse-revlog'] = sparserevlog
816 841 if sparserevlog:
817 842 self.svfs.options['generaldelta'] = True
818 843 maxchainlen = None
819 844 if sparserevlog:
820 845 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
821 846 # experimental config: format.maxchainlen
822 847 maxchainlen = self.ui.configint('format', 'maxchainlen', maxchainlen)
823 848 if maxchainlen is not None:
824 849 self.svfs.options['maxchainlen'] = maxchainlen
825 850
826 851 for r in self.requirements:
827 852 if r.startswith('exp-compression-'):
828 853 self.svfs.options['compengine'] = r[len('exp-compression-'):]
829 854
830 855 # TODO move "revlogv2" to openerreqs once finalized.
831 856 if REVLOGV2_REQUIREMENT in self.requirements:
832 857 self.svfs.options['revlogv2'] = True
833 858
834 859 def _writerequirements(self):
835 860 scmutil.writerequires(self.vfs, self.requirements)
836 861
837 862 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
838 863 # self -> auditor -> self._checknested -> self
839 864
840 865 @property
841 866 def auditor(self):
842 867 # This is only used by context.workingctx.match in order to
843 868 # detect files in subrepos.
844 869 return pathutil.pathauditor(self.root, callback=self._checknested)
845 870
846 871 @property
847 872 def nofsauditor(self):
848 873 # This is only used by context.basectx.match in order to detect
849 874 # files in subrepos.
850 875 return pathutil.pathauditor(self.root, callback=self._checknested,
851 876 realfs=False, cached=True)
852 877
853 878 def _checknested(self, path):
854 879 """Determine if path is a legal nested repository."""
855 880 if not path.startswith(self.root):
856 881 return False
857 882 subpath = path[len(self.root) + 1:]
858 883 normsubpath = util.pconvert(subpath)
859 884
860 885 # XXX: Checking against the current working copy is wrong in
861 886 # the sense that it can reject things like
862 887 #
863 888 # $ hg cat -r 10 sub/x.txt
864 889 #
865 890 # if sub/ is no longer a subrepository in the working copy
866 891 # parent revision.
867 892 #
868 893 # However, it can of course also allow things that would have
869 894 # been rejected before, such as the above cat command if sub/
870 895 # is a subrepository now, but was a normal directory before.
871 896 # The old path auditor would have rejected by mistake since it
872 897 # panics when it sees sub/.hg/.
873 898 #
874 899 # All in all, checking against the working copy seems sensible
875 900 # since we want to prevent access to nested repositories on
876 901 # the filesystem *now*.
877 902 ctx = self[None]
878 903 parts = util.splitpath(subpath)
879 904 while parts:
880 905 prefix = '/'.join(parts)
881 906 if prefix in ctx.substate:
882 907 if prefix == normsubpath:
883 908 return True
884 909 else:
885 910 sub = ctx.sub(prefix)
886 911 return sub.checknested(subpath[len(prefix) + 1:])
887 912 else:
888 913 parts.pop()
889 914 return False
890 915
891 916 def peer(self):
892 917 return localpeer(self) # not cached to avoid reference cycle
893 918
894 919 def unfiltered(self):
895 920 """Return unfiltered version of the repository
896 921
897 922 Intended to be overwritten by filtered repo."""
898 923 return self
899 924
900 925 def filtered(self, name, visibilityexceptions=None):
901 926 """Return a filtered version of a repository"""
902 927 cls = repoview.newtype(self.unfiltered().__class__)
903 928 return cls(self, name, visibilityexceptions)
904 929
905 930 @repofilecache('bookmarks', 'bookmarks.current')
906 931 def _bookmarks(self):
907 932 return bookmarks.bmstore(self)
908 933
909 934 @property
910 935 def _activebookmark(self):
911 936 return self._bookmarks.active
912 937
913 938 # _phasesets depend on the changelog. What we need is to call
914 939 # _phasecache.invalidate() if '00changelog.i' was changed, but it
915 940 # can't be easily expressed in the filecache mechanism.
916 941 @storecache('phaseroots', '00changelog.i')
917 942 def _phasecache(self):
918 943 return phases.phasecache(self, self._phasedefaults)
919 944
920 945 @storecache('obsstore')
921 946 def obsstore(self):
922 947 return obsolete.makestore(self.ui, self)
923 948
924 949 @storecache('00changelog.i')
925 950 def changelog(self):
926 951 return changelog.changelog(self.svfs,
927 952 trypending=txnutil.mayhavepending(self.root))
928 953
929 954 def _constructmanifest(self):
930 955 # This is a temporary function while we migrate from manifest to
931 956 # manifestlog. It allows bundlerepo and unionrepo to intercept the
932 957 # manifest creation.
933 958 return manifest.manifestrevlog(self.svfs)
934 959
935 960 @storecache('00manifest.i')
936 961 def manifestlog(self):
937 962 return manifest.manifestlog(self.svfs, self)
938 963
939 964 @repofilecache('dirstate')
940 965 def dirstate(self):
941 966 return self._makedirstate()
942 967
943 968 def _makedirstate(self):
944 969 """Extension point for wrapping the dirstate per-repo."""
945 970 sparsematchfn = lambda: sparse.matcher(self)
946 971
947 972 return dirstate.dirstate(self.vfs, self.ui, self.root,
948 973 self._dirstatevalidate, sparsematchfn)
949 974
950 975 def _dirstatevalidate(self, node):
951 976 try:
952 977 self.changelog.rev(node)
953 978 return node
954 979 except error.LookupError:
955 980 if not self._dirstatevalidatewarned:
956 981 self._dirstatevalidatewarned = True
957 982 self.ui.warn(_("warning: ignoring unknown"
958 983 " working parent %s!\n") % short(node))
959 984 return nullid
960 985
961 986 @storecache(narrowspec.FILENAME)
962 987 def narrowpats(self):
963 988 """matcher patterns for this repository's narrowspec
964 989
965 990 A tuple of (includes, excludes).
966 991 """
967 992 source = self
968 993 if self.shared():
969 994 from . import hg
970 995 source = hg.sharedreposource(self)
971 996 return narrowspec.load(source)
972 997
973 998 @storecache(narrowspec.FILENAME)
974 999 def _narrowmatch(self):
975 1000 if repository.NARROW_REQUIREMENT not in self.requirements:
976 1001 return matchmod.always(self.root, '')
977 1002 include, exclude = self.narrowpats
978 1003 return narrowspec.match(self.root, include=include, exclude=exclude)
979 1004
980 1005 # TODO(martinvonz): make this property-like instead?
981 1006 def narrowmatch(self):
982 1007 return self._narrowmatch
983 1008
984 1009 def setnarrowpats(self, newincludes, newexcludes):
985 1010 narrowspec.save(self, newincludes, newexcludes)
986 1011 self.invalidate(clearfilecache=True)
987 1012
988 1013 def __getitem__(self, changeid):
989 1014 if changeid is None:
990 1015 return context.workingctx(self)
991 1016 if isinstance(changeid, context.basectx):
992 1017 return changeid
993 1018 if isinstance(changeid, slice):
994 1019 # wdirrev isn't contiguous so the slice shouldn't include it
995 1020 return [context.changectx(self, i)
996 1021 for i in pycompat.xrange(*changeid.indices(len(self)))
997 1022 if i not in self.changelog.filteredrevs]
998 1023 try:
999 1024 return context.changectx(self, changeid)
1000 1025 except error.WdirUnsupported:
1001 1026 return context.workingctx(self)
1002 1027
1003 1028 def __contains__(self, changeid):
1004 1029 """True if the given changeid exists
1005 1030
1006 1031 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1007 1032 is specified.
1008 1033 """
1009 1034 try:
1010 1035 self[changeid]
1011 1036 return True
1012 1037 except error.RepoLookupError:
1013 1038 return False
1014 1039
1015 1040 def __nonzero__(self):
1016 1041 return True
1017 1042
1018 1043 __bool__ = __nonzero__
1019 1044
1020 1045 def __len__(self):
1021 1046 # no need to pay the cost of repoview.changelog
1022 1047 unfi = self.unfiltered()
1023 1048 return len(unfi.changelog)
1024 1049
1025 1050 def __iter__(self):
1026 1051 return iter(self.changelog)
1027 1052
1028 1053 def revs(self, expr, *args):
1029 1054 '''Find revisions matching a revset.
1030 1055
1031 1056 The revset is specified as a string ``expr`` that may contain
1032 1057 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1033 1058
1034 1059 Revset aliases from the configuration are not expanded. To expand
1035 1060 user aliases, consider calling ``scmutil.revrange()`` or
1036 1061 ``repo.anyrevs([expr], user=True)``.
1037 1062
1038 1063 Returns a revset.abstractsmartset, which is a list-like interface
1039 1064 that contains integer revisions.
1040 1065 '''
1041 1066 expr = revsetlang.formatspec(expr, *args)
1042 1067 m = revset.match(None, expr)
1043 1068 return m(self)
1044 1069
1045 1070 def set(self, expr, *args):
1046 1071 '''Find revisions matching a revset and emit changectx instances.
1047 1072
1048 1073 This is a convenience wrapper around ``revs()`` that iterates the
1049 1074 result and is a generator of changectx instances.
1050 1075
1051 1076 Revset aliases from the configuration are not expanded. To expand
1052 1077 user aliases, consider calling ``scmutil.revrange()``.
1053 1078 '''
1054 1079 for r in self.revs(expr, *args):
1055 1080 yield self[r]
1056 1081
1057 1082 def anyrevs(self, specs, user=False, localalias=None):
1058 1083 '''Find revisions matching one of the given revsets.
1059 1084
1060 1085 Revset aliases from the configuration are not expanded by default. To
1061 1086 expand user aliases, specify ``user=True``. To provide some local
1062 1087 definitions overriding user aliases, set ``localalias`` to
1063 1088 ``{name: definitionstring}``.
1064 1089 '''
1065 1090 if user:
1066 1091 m = revset.matchany(self.ui, specs,
1067 1092 lookup=revset.lookupfn(self),
1068 1093 localalias=localalias)
1069 1094 else:
1070 1095 m = revset.matchany(None, specs, localalias=localalias)
1071 1096 return m(self)
1072 1097
1073 1098 def url(self):
1074 1099 return 'file:' + self.root
1075 1100
1076 1101 def hook(self, name, throw=False, **args):
1077 1102 """Call a hook, passing this repo instance.
1078 1103
1079 1104 This is a convenience method to aid invoking hooks. Extensions likely
1080 1105 won't call this unless they have registered a custom hook or are
1081 1106 replacing code that is expected to call a hook.
1082 1107 """
1083 1108 return hook.hook(self.ui, self, name, throw, **args)
1084 1109
1085 1110 @filteredpropertycache
1086 1111 def _tagscache(self):
1087 1112 '''Returns a tagscache object that contains various tags-related
1088 1113 caches.'''
1089 1114
1090 1115 # This simplifies its cache management by having one decorated
1091 1116 # function (this one) and the rest simply fetch things from it.
1092 1117 class tagscache(object):
1093 1118 def __init__(self):
1094 1119 # These two define the set of tags for this repository. tags
1095 1120 # maps tag name to node; tagtypes maps tag name to 'global' or
1096 1121 # 'local'. (Global tags are defined by .hgtags across all
1097 1122 # heads, and local tags are defined in .hg/localtags.)
1098 1123 # They constitute the in-memory cache of tags.
1099 1124 self.tags = self.tagtypes = None
1100 1125
1101 1126 self.nodetagscache = self.tagslist = None
1102 1127
1103 1128 cache = tagscache()
1104 1129 cache.tags, cache.tagtypes = self._findtags()
1105 1130
1106 1131 return cache
1107 1132
1108 1133 def tags(self):
1109 1134 '''return a mapping of tag to node'''
1110 1135 t = {}
1111 1136 if self.changelog.filteredrevs:
1112 1137 tags, tt = self._findtags()
1113 1138 else:
1114 1139 tags = self._tagscache.tags
1115 1140 for k, v in tags.iteritems():
1116 1141 try:
1117 1142 # ignore tags to unknown nodes
1118 1143 self.changelog.rev(v)
1119 1144 t[k] = v
1120 1145 except (error.LookupError, ValueError):
1121 1146 pass
1122 1147 return t
1123 1148
1124 1149 def _findtags(self):
1125 1150 '''Do the hard work of finding tags. Return a pair of dicts
1126 1151 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1127 1152 maps tag name to a string like \'global\' or \'local\'.
1128 1153 Subclasses or extensions are free to add their own tags, but
1129 1154 should be aware that the returned dicts will be retained for the
1130 1155 duration of the localrepo object.'''
1131 1156
1132 1157 # XXX what tagtype should subclasses/extensions use? Currently
1133 1158 # mq and bookmarks add tags, but do not set the tagtype at all.
1134 1159 # Should each extension invent its own tag type? Should there
1135 1160 # be one tagtype for all such "virtual" tags? Or is the status
1136 1161 # quo fine?
1137 1162
1138 1163
1139 1164 # map tag name to (node, hist)
1140 1165 alltags = tagsmod.findglobaltags(self.ui, self)
1141 1166 # map tag name to tag type
1142 1167 tagtypes = dict((tag, 'global') for tag in alltags)
1143 1168
1144 1169 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1145 1170
1146 1171 # Build the return dicts. Have to re-encode tag names because
1147 1172 # the tags module always uses UTF-8 (in order not to lose info
1148 1173 # writing to the cache), but the rest of Mercurial wants them in
1149 1174 # local encoding.
1150 1175 tags = {}
1151 1176 for (name, (node, hist)) in alltags.iteritems():
1152 1177 if node != nullid:
1153 1178 tags[encoding.tolocal(name)] = node
1154 1179 tags['tip'] = self.changelog.tip()
1155 1180 tagtypes = dict([(encoding.tolocal(name), value)
1156 1181 for (name, value) in tagtypes.iteritems()])
1157 1182 return (tags, tagtypes)
1158 1183
1159 1184 def tagtype(self, tagname):
1160 1185 '''
1161 1186 return the type of the given tag. result can be:
1162 1187
1163 1188 'local' : a local tag
1164 1189 'global' : a global tag
1165 1190 None : tag does not exist
1166 1191 '''
1167 1192
1168 1193 return self._tagscache.tagtypes.get(tagname)
1169 1194
1170 1195 def tagslist(self):
1171 1196 '''return a list of tags ordered by revision'''
1172 1197 if not self._tagscache.tagslist:
1173 1198 l = []
1174 1199 for t, n in self.tags().iteritems():
1175 1200 l.append((self.changelog.rev(n), t, n))
1176 1201 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1177 1202
1178 1203 return self._tagscache.tagslist
1179 1204
1180 1205 def nodetags(self, node):
1181 1206 '''return the tags associated with a node'''
1182 1207 if not self._tagscache.nodetagscache:
1183 1208 nodetagscache = {}
1184 1209 for t, n in self._tagscache.tags.iteritems():
1185 1210 nodetagscache.setdefault(n, []).append(t)
1186 1211 for tags in nodetagscache.itervalues():
1187 1212 tags.sort()
1188 1213 self._tagscache.nodetagscache = nodetagscache
1189 1214 return self._tagscache.nodetagscache.get(node, [])
1190 1215
1191 1216 def nodebookmarks(self, node):
1192 1217 """return the list of bookmarks pointing to the specified node"""
1193 1218 return self._bookmarks.names(node)
1194 1219
1195 1220 def branchmap(self):
1196 1221 '''returns a dictionary {branch: [branchheads]} with branchheads
1197 1222 ordered by increasing revision number'''
1198 1223 branchmap.updatecache(self)
1199 1224 return self._branchcaches[self.filtername]
1200 1225
1201 1226 @unfilteredmethod
1202 1227 def revbranchcache(self):
1203 1228 if not self._revbranchcache:
1204 1229 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1205 1230 return self._revbranchcache
1206 1231
1207 1232 def branchtip(self, branch, ignoremissing=False):
1208 1233 '''return the tip node for a given branch
1209 1234
1210 1235 If ignoremissing is True, then this method will not raise an error.
1211 1236 This is helpful for callers that only expect None for a missing branch
1212 1237 (e.g. namespace).
1213 1238
1214 1239 '''
1215 1240 try:
1216 1241 return self.branchmap().branchtip(branch)
1217 1242 except KeyError:
1218 1243 if not ignoremissing:
1219 1244 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1220 1245 else:
1221 1246 pass
1222 1247
1223 1248 def lookup(self, key):
1224 1249 return scmutil.revsymbol(self, key).node()
1225 1250
1226 1251 def lookupbranch(self, key):
1227 1252 if key in self.branchmap():
1228 1253 return key
1229 1254
1230 1255 return scmutil.revsymbol(self, key).branch()
1231 1256
1232 1257 def known(self, nodes):
1233 1258 cl = self.changelog
1234 1259 nm = cl.nodemap
1235 1260 filtered = cl.filteredrevs
1236 1261 result = []
1237 1262 for n in nodes:
1238 1263 r = nm.get(n)
1239 1264 resp = not (r is None or r in filtered)
1240 1265 result.append(resp)
1241 1266 return result
1242 1267
1243 1268 def local(self):
1244 1269 return self
1245 1270
1246 1271 def publishing(self):
1247 1272 # it's safe (and desirable) to trust the publish flag unconditionally
1248 1273 # so that we don't finalize changes shared between users via ssh or nfs
1249 1274 return self.ui.configbool('phases', 'publish', untrusted=True)
1250 1275
1251 1276 def cancopy(self):
1252 1277 # so statichttprepo's override of local() works
1253 1278 if not self.local():
1254 1279 return False
1255 1280 if not self.publishing():
1256 1281 return True
1257 1282 # if publishing we can't copy if there is filtered content
1258 1283 return not self.filtered('visible').changelog.filteredrevs
1259 1284
1260 1285 def shared(self):
1261 1286 '''the type of shared repository (None if not shared)'''
1262 1287 if self.sharedpath != self.path:
1263 1288 return 'store'
1264 1289 return None
1265 1290
1266 1291 def wjoin(self, f, *insidef):
1267 1292 return self.vfs.reljoin(self.root, f, *insidef)
1268 1293
1269 1294 def file(self, f):
1270 1295 if f[0] == '/':
1271 1296 f = f[1:]
1272 1297 return filelog.filelog(self.svfs, f)
1273 1298
1274 1299 def setparents(self, p1, p2=nullid):
1275 1300 with self.dirstate.parentchange():
1276 1301 copies = self.dirstate.setparents(p1, p2)
1277 1302 pctx = self[p1]
1278 1303 if copies:
1279 1304 # Adjust copy records; the dirstate cannot do it, as it
1280 1305 # requires access to the parents' manifests. Preserve them
1281 1306 # only for entries added to the first parent.
1282 1307 for f in copies:
1283 1308 if f not in pctx and copies[f] in pctx:
1284 1309 self.dirstate.copy(copies[f], f)
1285 1310 if p2 == nullid:
1286 1311 for f, s in sorted(self.dirstate.copies().items()):
1287 1312 if f not in pctx and s not in pctx:
1288 1313 self.dirstate.copy(None, f)
1289 1314
1290 1315 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1291 1316 """changeid can be a changeset revision, node, or tag.
1292 1317 fileid can be a file revision or node."""
1293 1318 return context.filectx(self, path, changeid, fileid,
1294 1319 changectx=changectx)
1295 1320
1296 1321 def getcwd(self):
1297 1322 return self.dirstate.getcwd()
1298 1323
1299 1324 def pathto(self, f, cwd=None):
1300 1325 return self.dirstate.pathto(f, cwd)
1301 1326
1302 1327 def _loadfilter(self, filter):
1303 1328 if filter not in self._filterpats:
1304 1329 l = []
1305 1330 for pat, cmd in self.ui.configitems(filter):
1306 1331 if cmd == '!':
1307 1332 continue
1308 1333 mf = matchmod.match(self.root, '', [pat])
1309 1334 fn = None
1310 1335 params = cmd
1311 1336 for name, filterfn in self._datafilters.iteritems():
1312 1337 if cmd.startswith(name):
1313 1338 fn = filterfn
1314 1339 params = cmd[len(name):].lstrip()
1315 1340 break
1316 1341 if not fn:
1317 1342 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1318 1343 # Wrap old filters not supporting keyword arguments
1319 1344 if not pycompat.getargspec(fn)[2]:
1320 1345 oldfn = fn
1321 1346 fn = lambda s, c, **kwargs: oldfn(s, c)
1322 1347 l.append((mf, fn, params))
1323 1348 self._filterpats[filter] = l
1324 1349 return self._filterpats[filter]
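# Editor's note, an illustrative example (commands hypothetical): the
# patterns above come from hgrc sections named after ``filter``, i.e.
# [encode] and [decode], mapping file patterns to commands:
#
#   [encode]
#   **.txt = strip-cr
#   [decode]
#   **.txt = add-cr
#
# A command of '!' disables filtering for that pattern; a command whose name
# starts with a registered data filter (see adddatafilter below) is
# dispatched to that Python function instead of a shell pipe.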
1325 1350
1326 1351 def _filter(self, filterpats, filename, data):
1327 1352 for mf, fn, cmd in filterpats:
1328 1353 if mf(filename):
1329 1354 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1330 1355 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1331 1356 break
1332 1357
1333 1358 return data
1334 1359
1335 1360 @unfilteredpropertycache
1336 1361 def _encodefilterpats(self):
1337 1362 return self._loadfilter('encode')
1338 1363
1339 1364 @unfilteredpropertycache
1340 1365 def _decodefilterpats(self):
1341 1366 return self._loadfilter('decode')
1342 1367
1343 1368 def adddatafilter(self, name, filter):
1344 1369 self._datafilters[name] = filter
1345 1370
1346 1371 def wread(self, filename):
1347 1372 if self.wvfs.islink(filename):
1348 1373 data = self.wvfs.readlink(filename)
1349 1374 else:
1350 1375 data = self.wvfs.read(filename)
1351 1376 return self._filter(self._encodefilterpats, filename, data)
1352 1377
1353 1378 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1354 1379 """write ``data`` into ``filename`` in the working directory
1355 1380
1356 1381 This returns the length of the written (maybe decoded) data.
1357 1382 """
1358 1383 data = self._filter(self._decodefilterpats, filename, data)
1359 1384 if 'l' in flags:
1360 1385 self.wvfs.symlink(data, filename)
1361 1386 else:
1362 1387 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1363 1388 **kwargs)
1364 1389 if 'x' in flags:
1365 1390 self.wvfs.setflags(filename, False, True)
1366 1391 else:
1367 1392 self.wvfs.setflags(filename, False, False)
1368 1393 return len(data)
1369 1394
1370 1395 def wwritedata(self, filename, data):
1371 1396 return self._filter(self._decodefilterpats, filename, data)
1372 1397
1373 1398 def currenttransaction(self):
1374 1399 """return the current transaction or None if non exists"""
1375 1400 if self._transref:
1376 1401 tr = self._transref()
1377 1402 else:
1378 1403 tr = None
1379 1404
1380 1405 if tr and tr.running():
1381 1406 return tr
1382 1407 return None
1383 1408
1384 1409 def transaction(self, desc, report=None):
1385 1410 if (self.ui.configbool('devel', 'all-warnings')
1386 1411 or self.ui.configbool('devel', 'check-locks')):
1387 1412 if self._currentlock(self._lockref) is None:
1388 1413 raise error.ProgrammingError('transaction requires locking')
1389 1414 tr = self.currenttransaction()
1390 1415 if tr is not None:
1391 1416 return tr.nest(name=desc)
1392 1417
1393 1418 # abort here if the journal already exists
1394 1419 if self.svfs.exists("journal"):
1395 1420 raise error.RepoError(
1396 1421 _("abandoned transaction found"),
1397 1422 hint=_("run 'hg recover' to clean up transaction"))
1398 1423
1399 1424 idbase = "%.40f#%f" % (random.random(), time.time())
1400 1425 ha = hex(hashlib.sha1(idbase).digest())
1401 1426 txnid = 'TXN:' + ha
1402 1427 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1403 1428
1404 1429 self._writejournal(desc)
1405 1430 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1406 1431 if report:
1407 1432 rp = report
1408 1433 else:
1409 1434 rp = self.ui.warn
1410 1435 vfsmap = {'plain': self.vfs} # root of .hg/
1411 1436 # we must avoid cyclic reference between repo and transaction.
1412 1437 reporef = weakref.ref(self)
1413 1438 # Code to track tag movement
1414 1439 #
1415 1440 # Since tags are all handled as file content, it is actually quite hard
1416 1441 # to track their movement from a code perspective. So we fall back to
1417 1442 # tracking at the repository level. One could envision tracking changes
1418 1443 # to the '.hgtags' file through changegroup application, but that fails to
1419 1444 # cope with cases where a transaction exposes new heads without a
1420 1445 # changegroup being involved (e.g. phase movement).
1421 1446 #
1422 1447 # For now, we gate the feature behind a flag since it likely comes
1423 1448 # with performance impacts. The current code runs more often than needed
1424 1449 # and does not use caches as much as it could. The current focus is on
1425 1450 # the behavior of the feature, so we disable it by default. The flag
1426 1451 # will be removed when we are happy with the performance impact.
1427 1452 #
1428 1453 # Once this feature is no longer experimental move the following
1429 1454 # documentation to the appropriate help section:
1430 1455 #
1431 1456 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1432 1457 # tags (new or changed or deleted tags). In addition the details of
1433 1458 # these changes are made available in a file at:
1434 1459 # ``REPOROOT/.hg/changes/tags.changes``.
1435 1460 # Make sure you check for HG_TAG_MOVED before reading that file as it
1436 1461 # might exist from a previous transaction even if no tags were touched
1437 1462 # in this one. Changes are recorded in a line-based format::
1438 1463 #
1439 1464 # <action> <hex-node> <tag-name>\n
1440 1465 #
1441 1466 # Actions are defined as follows:
1442 1467 # "-R": tag is removed,
1443 1468 # "+A": tag is added,
1444 1469 # "-M": tag is moved (old value),
1445 1470 # "+M": tag is moved (new value),
1446 1471 tracktags = lambda x: None
1447 1472 # experimental config: experimental.hook-track-tags
1448 1473 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1449 1474 if desc != 'strip' and shouldtracktags:
1450 1475 oldheads = self.changelog.headrevs()
1451 1476 def tracktags(tr2):
1452 1477 repo = reporef()
1453 1478 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1454 1479 newheads = repo.changelog.headrevs()
1455 1480 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1456 1481 # note: we compare lists here.
1457 1482 # As we do it only once, building a set would not be cheaper
1458 1483 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1459 1484 if changes:
1460 1485 tr2.hookargs['tag_moved'] = '1'
1461 1486 with repo.vfs('changes/tags.changes', 'w',
1462 1487 atomictemp=True) as changesfile:
1463 1488 # note: we do not register the file with the transaction
1464 1489 # because we need it to still exist when the transaction
1465 1490 # is closed (for txnclose hooks)
1466 1491 tagsmod.writediff(changesfile, changes)
1467 1492 def validate(tr2):
1468 1493 """will run pre-closing hooks"""
1469 1494 # XXX the transaction API is a bit lacking here so we take a hacky
1470 1495 # path for now
1471 1496 #
1472 1497 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1473 1498 # dict is copied before these run. In addition we need the data
1474 1499 # available to in-memory hooks too.
1475 1500 #
1476 1501 # Moreover, we also need to make sure this runs before txnclose
1477 1502 # hooks and there is no "pending" mechanism that would execute
1478 1503 # logic only if hooks are about to run.
1479 1504 #
1480 1505 # Fixing this limitation of the transaction is also needed to track
1481 1506 # other families of changes (bookmarks, phases, obsolescence).
1482 1507 #
1483 1508 # This will have to be fixed before we remove the experimental
1484 1509 # gating.
1485 1510 tracktags(tr2)
1486 1511 repo = reporef()
1487 1512 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1488 1513 scmutil.enforcesinglehead(repo, tr2, desc)
1489 1514 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1490 1515 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1491 1516 args = tr.hookargs.copy()
1492 1517 args.update(bookmarks.preparehookargs(name, old, new))
1493 1518 repo.hook('pretxnclose-bookmark', throw=True,
1494 1519 txnname=desc,
1495 1520 **pycompat.strkwargs(args))
1496 1521 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1497 1522 cl = repo.unfiltered().changelog
1498 1523 for rev, (old, new) in tr.changes['phases'].items():
1499 1524 args = tr.hookargs.copy()
1500 1525 node = hex(cl.node(rev))
1501 1526 args.update(phases.preparehookargs(node, old, new))
1502 1527 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1503 1528 **pycompat.strkwargs(args))
1504 1529
1505 1530 repo.hook('pretxnclose', throw=True,
1506 1531 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1507 1532 def releasefn(tr, success):
1508 1533 repo = reporef()
1509 1534 if success:
1510 1535 # this should be explicitly invoked here, because
1511 1536 # in-memory changes aren't written out when closing
1512 1537 # the transaction if tr.addfilegenerator (via
1513 1538 # dirstate.write or so) isn't invoked while the
1514 1539 # transaction is running
1515 1540 repo.dirstate.write(None)
1516 1541 else:
1517 1542 # discard all changes (including ones already written
1518 1543 # out) in this transaction
1519 1544 narrowspec.restorebackup(self, 'journal.narrowspec')
1520 1545 repo.dirstate.restorebackup(None, 'journal.dirstate')
1521 1546
1522 1547 repo.invalidate(clearfilecache=True)
1523 1548
1524 1549 tr = transaction.transaction(rp, self.svfs, vfsmap,
1525 1550 "journal",
1526 1551 "undo",
1527 1552 aftertrans(renames),
1528 1553 self.store.createmode,
1529 1554 validator=validate,
1530 1555 releasefn=releasefn,
1531 1556 checkambigfiles=_cachedfiles,
1532 1557 name=desc)
1533 1558 tr.changes['origrepolen'] = len(self)
1534 1559 tr.changes['obsmarkers'] = set()
1535 1560 tr.changes['phases'] = {}
1536 1561 tr.changes['bookmarks'] = {}
1537 1562
1538 1563 tr.hookargs['txnid'] = txnid
1539 1564 # note: writing the fncache only during finalize means that the file is
1540 1565 # outdated when running hooks. As fncache is used for streaming clone,
1541 1566 # this is not expected to break anything that happens during the hooks.
1542 1567 tr.addfinalize('flush-fncache', self.store.write)
1543 1568 def txnclosehook(tr2):
1544 1569 """To be run if transaction is successful, will schedule a hook run
1545 1570 """
1546 1571 # Don't reference tr2 in hook() so we don't hold a reference.
1547 1572 # This reduces memory consumption when there are multiple
1548 1573 # transactions per lock. This can likely go away if issue5045
1549 1574 # fixes the function accumulation.
1550 1575 hookargs = tr2.hookargs
1551 1576
1552 1577 def hookfunc():
1553 1578 repo = reporef()
1554 1579 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1555 1580 bmchanges = sorted(tr.changes['bookmarks'].items())
1556 1581 for name, (old, new) in bmchanges:
1557 1582 args = tr.hookargs.copy()
1558 1583 args.update(bookmarks.preparehookargs(name, old, new))
1559 1584 repo.hook('txnclose-bookmark', throw=False,
1560 1585 txnname=desc, **pycompat.strkwargs(args))
1561 1586
1562 1587 if hook.hashook(repo.ui, 'txnclose-phase'):
1563 1588 cl = repo.unfiltered().changelog
1564 1589 phasemv = sorted(tr.changes['phases'].items())
1565 1590 for rev, (old, new) in phasemv:
1566 1591 args = tr.hookargs.copy()
1567 1592 node = hex(cl.node(rev))
1568 1593 args.update(phases.preparehookargs(node, old, new))
1569 1594 repo.hook('txnclose-phase', throw=False, txnname=desc,
1570 1595 **pycompat.strkwargs(args))
1571 1596
1572 1597 repo.hook('txnclose', throw=False, txnname=desc,
1573 1598 **pycompat.strkwargs(hookargs))
1574 1599 reporef()._afterlock(hookfunc)
1575 1600 tr.addfinalize('txnclose-hook', txnclosehook)
1576 1601 # Include a leading "-" to make it happen before the transaction summary
1577 1602 # reports registered via scmutil.registersummarycallback() whose names
1578 1603 # are 00-txnreport etc. That way, the caches will be warm when the
1579 1604 # callbacks run.
1580 1605 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1581 1606 def txnaborthook(tr2):
1582 1607 """To be run if transaction is aborted
1583 1608 """
1584 1609 reporef().hook('txnabort', throw=False, txnname=desc,
1585 1610 **pycompat.strkwargs(tr2.hookargs))
1586 1611 tr.addabort('txnabort-hook', txnaborthook)
1587 1612 # avoid eager cache invalidation. in-memory data should be identical
1588 1613 # to stored data if the transaction has no error.
1589 1614 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1590 1615 self._transref = weakref.ref(tr)
1591 1616 scmutil.registersummarycallback(self, tr, desc)
1592 1617 return tr
1593 1618
1594 1619 def _journalfiles(self):
1595 1620 return ((self.svfs, 'journal'),
1596 1621 (self.vfs, 'journal.dirstate'),
1597 1622 (self.vfs, 'journal.branch'),
1598 1623 (self.vfs, 'journal.desc'),
1599 1624 (self.vfs, 'journal.bookmarks'),
1600 1625 (self.svfs, 'journal.phaseroots'))
1601 1626
1602 1627 def undofiles(self):
1603 1628 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
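# For illustration: while a transaction is open, files such as
# 'journal.dirstate' exist; on successful close, aftertrans() renames
# each of them to its undoname() counterpart (e.g. 'undo.dirstate'),
# which is what rollback() later reads.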
1604 1629
1605 1630 @unfilteredmethod
1606 1631 def _writejournal(self, desc):
1607 1632 self.dirstate.savebackup(None, 'journal.dirstate')
1608 1633 narrowspec.savebackup(self, 'journal.narrowspec')
1609 1634 self.vfs.write("journal.branch",
1610 1635 encoding.fromlocal(self.dirstate.branch()))
1611 1636 self.vfs.write("journal.desc",
1612 1637 "%d\n%s\n" % (len(self), desc))
1613 1638 self.vfs.write("journal.bookmarks",
1614 1639 self.vfs.tryread("bookmarks"))
1615 1640 self.svfs.write("journal.phaseroots",
1616 1641 self.svfs.tryread("phaseroots"))
1617 1642
1618 1643 def recover(self):
1619 1644 with self.lock():
1620 1645 if self.svfs.exists("journal"):
1621 1646 self.ui.status(_("rolling back interrupted transaction\n"))
1622 1647 vfsmap = {'': self.svfs,
1623 1648 'plain': self.vfs,}
1624 1649 transaction.rollback(self.svfs, vfsmap, "journal",
1625 1650 self.ui.warn,
1626 1651 checkambigfiles=_cachedfiles)
1627 1652 self.invalidate()
1628 1653 return True
1629 1654 else:
1630 1655 self.ui.warn(_("no interrupted transaction available\n"))
1631 1656 return False
1632 1657
1633 1658 def rollback(self, dryrun=False, force=False):
1634 1659 wlock = lock = dsguard = None
1635 1660 try:
1636 1661 wlock = self.wlock()
1637 1662 lock = self.lock()
1638 1663 if self.svfs.exists("undo"):
1639 1664 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1640 1665
1641 1666 return self._rollback(dryrun, force, dsguard)
1642 1667 else:
1643 1668 self.ui.warn(_("no rollback information available\n"))
1644 1669 return 1
1645 1670 finally:
1646 1671 release(dsguard, lock, wlock)
1647 1672
1648 1673 @unfilteredmethod # Until we get smarter cache management
1649 1674 def _rollback(self, dryrun, force, dsguard):
1650 1675 ui = self.ui
1651 1676 try:
1652 1677 args = self.vfs.read('undo.desc').splitlines()
1653 1678 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1654 1679 if len(args) >= 3:
1655 1680 detail = args[2]
1656 1681 oldtip = oldlen - 1
1657 1682
1658 1683 if detail and ui.verbose:
1659 1684 msg = (_('repository tip rolled back to revision %d'
1660 1685 ' (undo %s: %s)\n')
1661 1686 % (oldtip, desc, detail))
1662 1687 else:
1663 1688 msg = (_('repository tip rolled back to revision %d'
1664 1689 ' (undo %s)\n')
1665 1690 % (oldtip, desc))
1666 1691 except IOError:
1667 1692 msg = _('rolling back unknown transaction\n')
1668 1693 desc = None
1669 1694
1670 1695 if not force and self['.'] != self['tip'] and desc == 'commit':
1671 1696 raise error.Abort(
1672 1697 _('rollback of last commit while not checked out '
1673 1698 'may lose data'), hint=_('use -f to force'))
1674 1699
1675 1700 ui.status(msg)
1676 1701 if dryrun:
1677 1702 return 0
1678 1703
1679 1704 parents = self.dirstate.parents()
1680 1705 self.destroying()
1681 1706 vfsmap = {'plain': self.vfs, '': self.svfs}
1682 1707 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1683 1708 checkambigfiles=_cachedfiles)
1684 1709 if self.vfs.exists('undo.bookmarks'):
1685 1710 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1686 1711 if self.svfs.exists('undo.phaseroots'):
1687 1712 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1688 1713 self.invalidate()
1689 1714
1690 1715 parentgone = (parents[0] not in self.changelog.nodemap or
1691 1716 parents[1] not in self.changelog.nodemap)
1692 1717 if parentgone:
1693 1718 # prevent dirstateguard from overwriting already restored one
1694 1719 dsguard.close()
1695 1720
1696 1721 narrowspec.restorebackup(self, 'undo.narrowspec')
1697 1722 self.dirstate.restorebackup(None, 'undo.dirstate')
1698 1723 try:
1699 1724 branch = self.vfs.read('undo.branch')
1700 1725 self.dirstate.setbranch(encoding.tolocal(branch))
1701 1726 except IOError:
1702 1727 ui.warn(_('named branch could not be reset: '
1703 1728 'current branch is still \'%s\'\n')
1704 1729 % self.dirstate.branch())
1705 1730
1706 1731 parents = tuple([p.rev() for p in self[None].parents()])
1707 1732 if len(parents) > 1:
1708 1733 ui.status(_('working directory now based on '
1709 1734 'revisions %d and %d\n') % parents)
1710 1735 else:
1711 1736 ui.status(_('working directory now based on '
1712 1737 'revision %d\n') % parents)
1713 1738 mergemod.mergestate.clean(self, self['.'].node())
1714 1739
1715 1740 # TODO: if we know which new heads may result from this rollback, pass
1716 1741 # them to destroy(), which will prevent the branchhead cache from being
1717 1742 # invalidated.
1718 1743 self.destroyed()
1719 1744 return 0
1720 1745
1721 1746 def _buildcacheupdater(self, newtransaction):
1722 1747 """called during transaction to build the callback updating cache
1723 1748
1724 1749 Lives on the repository to help extensions that might want to augment
1725 1750 this logic. For this purpose, the created transaction is passed to the
1726 1751 method.
1727 1752 """
1728 1753 # we must avoid cyclic reference between repo and transaction.
1729 1754 reporef = weakref.ref(self)
1730 1755 def updater(tr):
1731 1756 repo = reporef()
1732 1757 repo.updatecaches(tr)
1733 1758 return updater
1734 1759
1735 1760 @unfilteredmethod
1736 1761 def updatecaches(self, tr=None, full=False):
1737 1762 """warm appropriate caches
1738 1763
1739 1764 If this function is called after a transaction has closed, the
1740 1765 transaction will be available in the 'tr' argument. This can be used to
1741 1766 selectively update caches relevant to the changes in that transaction.
1742 1767
1743 1768 If 'full' is set, make sure all caches the function knows about have
1744 1769 up-to-date data, even the ones usually loaded more lazily.
1745 1770 """
1746 1771 if tr is not None and tr.hookargs.get('source') == 'strip':
1747 1772 # During strip, many caches are invalid but
1748 1773 # later call to `destroyed` will refresh them.
1749 1774 return
1750 1775
1751 1776 if tr is None or tr.changes['origrepolen'] < len(self):
1752 1777 # updating the unfiltered branchmap should refresh all the others,
1753 1778 self.ui.debug('updating the branch cache\n')
1754 1779 branchmap.updatecache(self.filtered('served'))
1755 1780
1756 1781 if full:
1757 1782 rbc = self.revbranchcache()
1758 1783 for r in self.changelog:
1759 1784 rbc.branchinfo(r)
1760 1785 rbc.write()
1761 1786
1762 1787 # ensure the working copy parents are in the manifestfulltextcache
1763 1788 for ctx in self['.'].parents():
1764 1789 ctx.manifest() # accessing the manifest is enough
1765 1790
1766 1791 def invalidatecaches(self):
1767 1792
1768 1793 if '_tagscache' in vars(self):
1769 1794 # can't use delattr on proxy
1770 1795 del self.__dict__['_tagscache']
1771 1796
1772 1797 self.unfiltered()._branchcaches.clear()
1773 1798 self.invalidatevolatilesets()
1774 1799 self._sparsesignaturecache.clear()
1775 1800
1776 1801 def invalidatevolatilesets(self):
1777 1802 self.filteredrevcache.clear()
1778 1803 obsolete.clearobscaches(self)
1779 1804
1780 1805 def invalidatedirstate(self):
1781 1806 '''Invalidates the dirstate, causing the next call to dirstate
1782 1807 to check if it was modified since the last time it was read,
1783 1808 rereading it if it has.
1784 1809
1785 1810 This is different from dirstate.invalidate() in that it doesn't always
1786 1811 reread the dirstate. Use dirstate.invalidate() if you want to
1787 1812 explicitly read the dirstate again (i.e. restoring it to a previous
1788 1813 known good state).'''
1789 1814 if hasunfilteredcache(self, 'dirstate'):
1790 1815 for k in self.dirstate._filecache:
1791 1816 try:
1792 1817 delattr(self.dirstate, k)
1793 1818 except AttributeError:
1794 1819 pass
1795 1820 delattr(self.unfiltered(), 'dirstate')
1796 1821
1797 1822 def invalidate(self, clearfilecache=False):
1798 1823 '''Invalidates both store and non-store parts other than dirstate
1799 1824
1800 1825 If a transaction is running, invalidation of store is omitted,
1801 1826 because discarding in-memory changes might cause inconsistency
1802 1827 (e.g. incomplete fncache causes unintentional failure, but
1803 1828 redundant one doesn't).
1804 1829 '''
1805 1830 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1806 1831 for k in list(self._filecache.keys()):
1807 1832 # dirstate is invalidated separately in invalidatedirstate()
1808 1833 if k == 'dirstate':
1809 1834 continue
1810 1835 if (k == 'changelog' and
1811 1836 self.currenttransaction() and
1812 1837 self.changelog._delayed):
1813 1838 # The changelog object may store unwritten revisions. We don't
1814 1839 # want to lose them.
1815 1840 # TODO: Solve the problem instead of working around it.
1816 1841 continue
1817 1842
1818 1843 if clearfilecache:
1819 1844 del self._filecache[k]
1820 1845 try:
1821 1846 delattr(unfiltered, k)
1822 1847 except AttributeError:
1823 1848 pass
1824 1849 self.invalidatecaches()
1825 1850 if not self.currenttransaction():
1826 1851 # TODO: Changing contents of store outside transaction
1827 1852 # causes inconsistency. We should make in-memory store
1828 1853 # changes detectable, and abort if changed.
1829 1854 self.store.invalidatecaches()
1830 1855
1831 1856 def invalidateall(self):
1832 1857 '''Fully invalidates both store and non-store parts, causing the
1833 1858 subsequent operation to reread any outside changes.'''
1834 1859 # extension should hook this to invalidate its caches
1835 1860 self.invalidate()
1836 1861 self.invalidatedirstate()
1837 1862
1838 1863 @unfilteredmethod
1839 1864 def _refreshfilecachestats(self, tr):
1840 1865 """Reload stats of cached files so that they are flagged as valid"""
1841 1866 for k, ce in self._filecache.items():
1842 1867 k = pycompat.sysstr(k)
1843 1868 if k == r'dirstate' or k not in self.__dict__:
1844 1869 continue
1845 1870 ce.refresh()
1846 1871
1847 1872 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1848 1873 inheritchecker=None, parentenvvar=None):
1849 1874 parentlock = None
1850 1875 # the contents of parentenvvar are used by the underlying lock to
1851 1876 # determine whether it can be inherited
1852 1877 if parentenvvar is not None:
1853 1878 parentlock = encoding.environ.get(parentenvvar)
1854 1879
1855 1880 timeout = 0
1856 1881 warntimeout = 0
1857 1882 if wait:
1858 1883 timeout = self.ui.configint("ui", "timeout")
1859 1884 warntimeout = self.ui.configint("ui", "timeout.warn")
1860 1885 # internal config: ui.signal-safe-lock
1861 1886 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
1862 1887
1863 1888 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1864 1889 releasefn=releasefn,
1865 1890 acquirefn=acquirefn, desc=desc,
1866 1891 inheritchecker=inheritchecker,
1867 1892 parentlock=parentlock,
1868 1893 signalsafe=signalsafe)
1869 1894 return l
1870 1895
1871 1896 def _afterlock(self, callback):
1872 1897 """add a callback to be run when the repository is fully unlocked
1873 1898
1874 1899 The callback will be executed when the outermost lock is released
1875 1900 (with wlock being higher level than 'lock')."""
1876 1901 for ref in (self._wlockref, self._lockref):
1877 1902 l = ref and ref()
1878 1903 if l and l.held:
1879 1904 l.postrelease.append(callback)
1880 1905 break
1881 1906 else: # no lock has been found.
1882 1907 callback()
1883 1908
1884 1909 def lock(self, wait=True):
1885 1910 '''Lock the repository store (.hg/store) and return a weak reference
1886 1911 to the lock. Use this before modifying the store (e.g. committing or
1887 1912 stripping). If you are opening a transaction, get a lock as well.
1888 1913
1889 1914 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1890 1915 'wlock' first to avoid a dead-lock hazard.'''
1891 1916 l = self._currentlock(self._lockref)
1892 1917 if l is not None:
1893 1918 l.lock()
1894 1919 return l
1895 1920
1896 1921 l = self._lock(self.svfs, "lock", wait, None,
1897 1922 self.invalidate, _('repository %s') % self.origroot)
1898 1923 self._lockref = weakref.ref(l)
1899 1924 return l
1900 1925
1901 1926 def _wlockchecktransaction(self):
1902 1927 if self.currenttransaction() is not None:
1903 1928 raise error.LockInheritanceContractViolation(
1904 1929 'wlock cannot be inherited in the middle of a transaction')
1905 1930
1906 1931 def wlock(self, wait=True):
1907 1932 '''Lock the non-store parts of the repository (everything under
1908 1933 .hg except .hg/store) and return a weak reference to the lock.
1909 1934
1910 1935 Use this before modifying files in .hg.
1911 1936
1912 1937 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1913 1938 'wlock' first to avoid a dead-lock hazard.'''
1914 1939 l = self._wlockref and self._wlockref()
1915 1940 if l is not None and l.held:
1916 1941 l.lock()
1917 1942 return l
1918 1943
1919 1944 # We do not need to check for non-waiting lock acquisition. Such
1920 1945 # acquisitions would not cause a dead-lock as they would just fail.
1921 1946 if wait and (self.ui.configbool('devel', 'all-warnings')
1922 1947 or self.ui.configbool('devel', 'check-locks')):
1923 1948 if self._currentlock(self._lockref) is not None:
1924 1949 self.ui.develwarn('"wlock" acquired after "lock"')
1925 1950
1926 1951 def unlock():
1927 1952 if self.dirstate.pendingparentchange():
1928 1953 self.dirstate.invalidate()
1929 1954 else:
1930 1955 self.dirstate.write(None)
1931 1956
1932 1957 self._filecache['dirstate'].refresh()
1933 1958
1934 1959 l = self._lock(self.vfs, "wlock", wait, unlock,
1935 1960 self.invalidatedirstate, _('working directory of %s') %
1936 1961 self.origroot,
1937 1962 inheritchecker=self._wlockchecktransaction,
1938 1963 parentenvvar='HG_WLOCK_LOCKER')
1939 1964 self._wlockref = weakref.ref(l)
1940 1965 return l
1941 1966
1942 1967 def _currentlock(self, lockref):
1943 1968 """Returns the lock if it's held, or None if it's not."""
1944 1969 if lockref is None:
1945 1970 return None
1946 1971 l = lockref()
1947 1972 if l is None or not l.held:
1948 1973 return None
1949 1974 return l
1950 1975
1951 1976 def currentwlock(self):
1952 1977 """Returns the wlock if it's held, or None if it's not."""
1953 1978 return self._currentlock(self._wlockref)
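# A minimal usage sketch (illustrative; 'my-operation' is a made-up
# transaction name) of the lock ordering documented above:
#
#   with repo.wlock():        # always acquire wlock first...
#       with repo.lock():     # ...then the store lock
#           with repo.transaction('my-operation'):
#               pass          # mutate the store here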
1954 1979
1955 1980 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1956 1981 """
1957 1982 commit an individual file as part of a larger transaction
1958 1983 """
1959 1984
1960 1985 fname = fctx.path()
1961 1986 fparent1 = manifest1.get(fname, nullid)
1962 1987 fparent2 = manifest2.get(fname, nullid)
1963 1988 if isinstance(fctx, context.filectx):
1964 1989 node = fctx.filenode()
1965 1990 if node in [fparent1, fparent2]:
1966 1991 self.ui.debug('reusing %s filelog entry\n' % fname)
1967 1992 if manifest1.flags(fname) != fctx.flags():
1968 1993 changelist.append(fname)
1969 1994 return node
1970 1995
1971 1996 flog = self.file(fname)
1972 1997 meta = {}
1973 1998 copy = fctx.renamed()
1974 1999 if copy and copy[0] != fname:
1975 2000 # Mark the new revision of this file as a copy of another
1976 2001 # file. This copy data will effectively act as a parent
1977 2002 # of this new revision. If this is a merge, the first
1978 2003 # parent will be the nullid (meaning "look up the copy data")
1979 2004 # and the second one will be the other parent. For example:
1980 2005 #
1981 2006 # 0 --- 1 --- 3 rev1 changes file foo
1982 2007 # \ / rev2 renames foo to bar and changes it
1983 2008 # \- 2 -/ rev3 should have bar with all changes and
1984 2009 # should record that bar descends from
1985 2010 # bar in rev2 and foo in rev1
1986 2011 #
1987 2012 # this allows this merge to succeed:
1988 2013 #
1989 2014 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1990 2015 # \ / merging rev3 and rev4 should use bar@rev2
1991 2016 # \- 2 --- 4 as the merge base
1992 2017 #
1993 2018
1994 2019 cfname = copy[0]
1995 2020 crev = manifest1.get(cfname)
1996 2021 newfparent = fparent2
1997 2022
1998 2023 if manifest2: # branch merge
1999 2024 if fparent2 == nullid or crev is None: # copied on remote side
2000 2025 if cfname in manifest2:
2001 2026 crev = manifest2[cfname]
2002 2027 newfparent = fparent1
2003 2028
2004 2029 # Here, we used to search backwards through history to try to find
2005 2030 # where the file copy came from if the source of a copy was not in
2006 2031 # the parent directory. However, this doesn't actually make sense to
2007 2032 # do (what does a copy from something not in your working copy even
2008 2033 # mean?) and it causes bugs (e.g. issue4476). Instead, we will warn
2009 2034 # the user that copy information was dropped, so if they didn't
2010 2035 # expect this outcome it can be fixed, but this is the correct
2011 2036 # behavior in this circumstance.
2012 2037
2013 2038 if crev:
2014 2039 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
2015 2040 meta["copy"] = cfname
2016 2041 meta["copyrev"] = hex(crev)
2017 2042 fparent1, fparent2 = nullid, newfparent
2018 2043 else:
2019 2044 self.ui.warn(_("warning: can't find ancestor for '%s' "
2020 2045 "copied from '%s'!\n") % (fname, cfname))
2021 2046
2022 2047 elif fparent1 == nullid:
2023 2048 fparent1, fparent2 = fparent2, nullid
2024 2049 elif fparent2 != nullid:
2025 2050 # is one parent an ancestor of the other?
2026 2051 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2027 2052 if fparent1 in fparentancestors:
2028 2053 fparent1, fparent2 = fparent2, nullid
2029 2054 elif fparent2 in fparentancestors:
2030 2055 fparent2 = nullid
2031 2056
2032 2057 # is the file changed?
2033 2058 text = fctx.data()
2034 2059 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
2035 2060 changelist.append(fname)
2036 2061 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2037 2062 # are just the flags changed during merge?
2038 2063 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2039 2064 changelist.append(fname)
2040 2065
2041 2066 return fparent1
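# For illustration, a rename recorded by the copy branch above ends up
# as filelog metadata of the form (hypothetical values):
#
#   meta = {'copy': 'foo', 'copyrev': '<40 hex digits of foo@rev1>'}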
2042 2067
2043 2068 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
2044 2069 """check for commit arguments that aren't committable"""
2045 2070 if match.isexact() or match.prefix():
2046 2071 matched = set(status.modified + status.added + status.removed)
2047 2072
2048 2073 for f in match.files():
2049 2074 f = self.dirstate.normalize(f)
2050 2075 if f == '.' or f in matched or f in wctx.substate:
2051 2076 continue
2052 2077 if f in status.deleted:
2053 2078 fail(f, _('file not found!'))
2054 2079 if f in vdirs: # visited directory
2055 2080 d = f + '/'
2056 2081 for mf in matched:
2057 2082 if mf.startswith(d):
2058 2083 break
2059 2084 else:
2060 2085 fail(f, _("no match under directory!"))
2061 2086 elif f not in self.dirstate:
2062 2087 fail(f, _("file not tracked!"))
2063 2088
2064 2089 @unfilteredmethod
2065 2090 def commit(self, text="", user=None, date=None, match=None, force=False,
2066 2091 editor=False, extra=None):
2067 2092 """Add a new revision to current repository.
2068 2093
2069 2094 Revision information is gathered from the working directory,
2070 2095 match can be used to filter the committed files. If editor is
2071 2096 supplied, it is called to get a commit message.
2072 2097 """
2073 2098 if extra is None:
2074 2099 extra = {}
2075 2100
2076 2101 def fail(f, msg):
2077 2102 raise error.Abort('%s: %s' % (f, msg))
2078 2103
2079 2104 if not match:
2080 2105 match = matchmod.always(self.root, '')
2081 2106
2082 2107 if not force:
2083 2108 vdirs = []
2084 2109 match.explicitdir = vdirs.append
2085 2110 match.bad = fail
2086 2111
2087 2112 wlock = lock = tr = None
2088 2113 try:
2089 2114 wlock = self.wlock()
2090 2115 lock = self.lock() # for recent changelog (see issue4368)
2091 2116
2092 2117 wctx = self[None]
2093 2118 merge = len(wctx.parents()) > 1
2094 2119
2095 2120 if not force and merge and not match.always():
2096 2121 raise error.Abort(_('cannot partially commit a merge '
2097 2122 '(do not specify files or patterns)'))
2098 2123
2099 2124 status = self.status(match=match, clean=force)
2100 2125 if force:
2101 2126 status.modified.extend(status.clean) # mq may commit clean files
2102 2127
2103 2128 # check subrepos
2104 2129 subs, commitsubs, newstate = subrepoutil.precommit(
2105 2130 self.ui, wctx, status, match, force=force)
2106 2131
2107 2132 # make sure all explicit patterns are matched
2108 2133 if not force:
2109 2134 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
2110 2135
2111 2136 cctx = context.workingcommitctx(self, status,
2112 2137 text, user, date, extra)
2113 2138
2114 2139 # internal config: ui.allowemptycommit
2115 2140 allowemptycommit = (wctx.branch() != wctx.p1().branch()
2116 2141 or extra.get('close') or merge or cctx.files()
2117 2142 or self.ui.configbool('ui', 'allowemptycommit'))
2118 2143 if not allowemptycommit:
2119 2144 return None
2120 2145
2121 2146 if merge and cctx.deleted():
2122 2147 raise error.Abort(_("cannot commit merge with missing files"))
2123 2148
2124 2149 ms = mergemod.mergestate.read(self)
2125 2150 mergeutil.checkunresolved(ms)
2126 2151
2127 2152 if editor:
2128 2153 cctx._text = editor(self, cctx, subs)
2129 2154 edited = (text != cctx._text)
2130 2155
2131 2156 # Save commit message in case this transaction gets rolled back
2132 2157 # (e.g. by a pretxncommit hook). Leave the content alone on
2133 2158 # the assumption that the user will use the same editor again.
2134 2159 msgfn = self.savecommitmessage(cctx._text)
2135 2160
2136 2161 # commit subs and write new state
2137 2162 if subs:
2138 2163 for s in sorted(commitsubs):
2139 2164 sub = wctx.sub(s)
2140 2165 self.ui.status(_('committing subrepository %s\n') %
2141 2166 subrepoutil.subrelpath(sub))
2142 2167 sr = sub.commit(cctx._text, user, date)
2143 2168 newstate[s] = (newstate[s][0], sr)
2144 2169 subrepoutil.writestate(self, newstate)
2145 2170
2146 2171 p1, p2 = self.dirstate.parents()
2147 2172 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2148 2173 try:
2149 2174 self.hook("precommit", throw=True, parent1=hookp1,
2150 2175 parent2=hookp2)
2151 2176 tr = self.transaction('commit')
2152 2177 ret = self.commitctx(cctx, True)
2153 2178 except: # re-raises
2154 2179 if edited:
2155 2180 self.ui.write(
2156 2181 _('note: commit message saved in %s\n') % msgfn)
2157 2182 raise
2158 2183 # update bookmarks, dirstate and mergestate
2159 2184 bookmarks.update(self, [p1, p2], ret)
2160 2185 cctx.markcommitted(ret)
2161 2186 ms.reset()
2162 2187 tr.close()
2163 2188
2164 2189 finally:
2165 2190 lockmod.release(tr, lock, wlock)
2166 2191
2167 2192 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2168 2193 # hack for commands that use a temporary commit (e.g. histedit):
2169 2194 # the temporary commit may have been stripped before the hook runs
2170 2195 if self.changelog.hasnode(ret):
2171 2196 self.hook("commit", node=node, parent1=parent1,
2172 2197 parent2=parent2)
2173 2198 self._afterlock(commithook)
2174 2199 return ret
2175 2200
2176 2201 @unfilteredmethod
2177 2202 def commitctx(self, ctx, error=False):
2178 2203 """Add a new revision to current repository.
2179 2204 Revision information is passed via the context argument.
2180 2205
2181 2206 ctx.files() should list all files involved in this commit, i.e.
2182 2207 modified/added/removed files. On merge, it may be wider than the
2183 2208 ctx.files() to be committed, since any file nodes derived directly
2184 2209 from p1 or p2 are excluded from the committed ctx.files().
2185 2210 """
2186 2211
2187 2212 tr = None
2188 2213 p1, p2 = ctx.p1(), ctx.p2()
2189 2214 user = ctx.user()
2190 2215
2191 2216 lock = self.lock()
2192 2217 try:
2193 2218 tr = self.transaction("commit")
2194 2219 trp = weakref.proxy(tr)
2195 2220
2196 2221 if ctx.manifestnode():
2197 2222 # reuse an existing manifest revision
2198 2223 self.ui.debug('reusing known manifest\n')
2199 2224 mn = ctx.manifestnode()
2200 2225 files = ctx.files()
2201 2226 elif ctx.files():
2202 2227 m1ctx = p1.manifestctx()
2203 2228 m2ctx = p2.manifestctx()
2204 2229 mctx = m1ctx.copy()
2205 2230
2206 2231 m = mctx.read()
2207 2232 m1 = m1ctx.read()
2208 2233 m2 = m2ctx.read()
2209 2234
2210 2235 # check in files
2211 2236 added = []
2212 2237 changed = []
2213 2238 removed = list(ctx.removed())
2214 2239 linkrev = len(self)
2215 2240 self.ui.note(_("committing files:\n"))
2216 2241 for f in sorted(ctx.modified() + ctx.added()):
2217 2242 self.ui.note(f + "\n")
2218 2243 try:
2219 2244 fctx = ctx[f]
2220 2245 if fctx is None:
2221 2246 removed.append(f)
2222 2247 else:
2223 2248 added.append(f)
2224 2249 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2225 2250 trp, changed)
2226 2251 m.setflag(f, fctx.flags())
2227 2252 except OSError as inst:
2228 2253 self.ui.warn(_("trouble committing %s!\n") % f)
2229 2254 raise
2230 2255 except IOError as inst:
2231 2256 errcode = getattr(inst, 'errno', errno.ENOENT)
2232 2257 if error or errcode and errcode != errno.ENOENT:
2233 2258 self.ui.warn(_("trouble committing %s!\n") % f)
2234 2259 raise
2235 2260
2236 2261 # update manifest
2237 2262 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2238 2263 drop = [f for f in removed if f in m]
2239 2264 for f in drop:
2240 2265 del m[f]
2241 2266 files = changed + removed
2242 2267 md = None
2243 2268 if not files:
2244 2269 # if no "files" actually changed in terms of the changelog,
2245 2270 # try hard to detect an unmodified manifest entry so that the
2246 2271 # exact same commit can be reproduced later on convert.
2247 2272 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2248 2273 if not files and md:
2249 2274 self.ui.debug('not reusing manifest (no file change in '
2250 2275 'changelog, but manifest differs)\n')
2251 2276 if files or md:
2252 2277 self.ui.note(_("committing manifest\n"))
2253 2278 # we're using narrowmatch here since it's already applied at
2254 2279 # other stages (such as dirstate.walk), so we're already
2255 2280 # ignoring things outside of narrowspec in most cases. The
2256 2281 # one case where we might have files outside the narrowspec
2257 2282 # at this point is merges, and we already error out in the
2258 2283 # case where the merge has files outside of the narrowspec,
2259 2284 # so this is safe.
2260 2285 mn = mctx.write(trp, linkrev,
2261 2286 p1.manifestnode(), p2.manifestnode(),
2262 2287 added, drop, match=self.narrowmatch())
2263 2288 else:
2264 2289 self.ui.debug('reusing manifest from p1 (listed files '
2265 2290 'actually unchanged)\n')
2266 2291 mn = p1.manifestnode()
2267 2292 else:
2268 2293 self.ui.debug('reusing manifest from p1 (no file change)\n')
2269 2294 mn = p1.manifestnode()
2270 2295 files = []
2271 2296
2272 2297 # update changelog
2273 2298 self.ui.note(_("committing changelog\n"))
2274 2299 self.changelog.delayupdate(tr)
2275 2300 n = self.changelog.add(mn, files, ctx.description(),
2276 2301 trp, p1.node(), p2.node(),
2277 2302 user, ctx.date(), ctx.extra().copy())
2278 2303 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2279 2304 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2280 2305 parent2=xp2)
2281 2306 # set the new commit in its proper phase
2282 2307 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2283 2308 if targetphase:
2284 2309 # retract boundary does not alter the parent changeset.
2285 2310 # if a parent has a higher phase, the resulting phase will
2286 2311 # be compliant anyway
2287 2312 #
2288 2313 # if minimal phase was 0 we don't need to retract anything
2289 2314 phases.registernew(self, tr, targetphase, [n])
2290 2315 tr.close()
2291 2316 return n
2292 2317 finally:
2293 2318 if tr:
2294 2319 tr.release()
2295 2320 lock.release()
2296 2321
2297 2322 @unfilteredmethod
2298 2323 def destroying(self):
2299 2324 '''Inform the repository that nodes are about to be destroyed.
2300 2325 Intended for use by strip and rollback, so there's a common
2301 2326 place for anything that has to be done before destroying history.
2302 2327
2303 2328 This is mostly useful for saving state that is in memory and waiting
2304 2329 to be flushed when the current lock is released. Because a call to
2305 2330 destroyed is imminent, the repo will be invalidated causing those
2306 2331 changes to stay in memory (waiting for the next unlock), or vanish
2307 2332 completely.
2308 2333 '''
2309 2334 # When using the same lock to commit and strip, the phasecache is left
2310 2335 # dirty after committing. Then when we strip, the repo is invalidated,
2311 2336 # causing those changes to disappear.
2312 2337 if '_phasecache' in vars(self):
2313 2338 self._phasecache.write()
2314 2339
2315 2340 @unfilteredmethod
2316 2341 def destroyed(self):
2317 2342 '''Inform the repository that nodes have been destroyed.
2318 2343 Intended for use by strip and rollback, so there's a common
2319 2344 place for anything that has to be done after destroying history.
2320 2345 '''
2321 2346 # When one tries to:
2322 2347 # 1) destroy nodes thus calling this method (e.g. strip)
2323 2348 # 2) use phasecache somewhere (e.g. commit)
2324 2349 #
2325 2350 # then 2) will fail because the phasecache contains nodes that were
2326 2351 # removed. We can either remove phasecache from the filecache,
2327 2352 # causing it to reload next time it is accessed, or simply filter
2328 2353 # the removed nodes now and write the updated cache.
2329 2354 self._phasecache.filterunknown(self)
2330 2355 self._phasecache.write()
2331 2356
2332 2357 # refresh all repository caches
2333 2358 self.updatecaches()
2334 2359
2335 2360 # Ensure the persistent tag cache is updated. Doing it now
2336 2361 # means that the tag cache only has to worry about destroyed
2337 2362 # heads immediately after a strip/rollback. That in turn
2338 2363 # guarantees that "cachetip == currenttip" (comparing both rev
2339 2364 # and node) always means no nodes have been added or destroyed.
2340 2365
2341 2366 # XXX this is suboptimal when qrefresh'ing: we strip the current
2342 2367 # head, refresh the tag cache, then immediately add a new head.
2343 2368 # But I think doing it this way is necessary for the "instant
2344 2369 # tag cache retrieval" case to work.
2345 2370 self.invalidate()
2346 2371
2347 2372 def status(self, node1='.', node2=None, match=None,
2348 2373 ignored=False, clean=False, unknown=False,
2349 2374 listsubrepos=False):
2350 2375 '''a convenience method that calls node1.status(node2)'''
2351 2376 return self[node1].status(node2, match, ignored, clean, unknown,
2352 2377 listsubrepos)
2353 2378
2354 2379 def addpostdsstatus(self, ps):
2355 2380 """Add a callback to run within the wlock, at the point at which status
2356 2381 fixups happen.
2357 2382
2358 2383 On status completion, callback(wctx, status) will be called with the
2359 2384 wlock held, unless the dirstate has changed from underneath or the wlock
2360 2385 couldn't be grabbed.
2361 2386
2362 2387 Callbacks should not capture and use a cached copy of the dirstate --
2363 2388 it might change in the meanwhile. Instead, they should access the
2364 2389 dirstate via wctx.repo().dirstate.
2365 2390
2366 2391 This list is emptied out after each status run -- extensions should
2367 2392 make sure it adds to this list each time dirstate.status is called.
2368 2393 Extensions should also make sure they don't call this for statuses
2369 2394 that don't involve the dirstate.
2370 2395 """
2371 2396
2372 2397 # The list is located here for uniqueness reasons -- it is actually
2373 2398 # managed by the workingctx, but that isn't unique per-repo.
2374 2399 self._postdsstatus.append(ps)
2375 2400
2376 2401 def postdsstatus(self):
2377 2402 """Used by workingctx to get the list of post-dirstate-status hooks."""
2378 2403 return self._postdsstatus
2379 2404
2380 2405 def clearpostdsstatus(self):
2381 2406 """Used by workingctx to clear post-dirstate-status hooks."""
2382 2407 del self._postdsstatus[:]
2383 2408
2384 2409 def heads(self, start=None):
2385 2410 if start is None:
2386 2411 cl = self.changelog
2387 2412 headrevs = reversed(cl.headrevs())
2388 2413 return [cl.node(rev) for rev in headrevs]
2389 2414
2390 2415 heads = self.changelog.heads(start)
2391 2416 # sort the output in rev descending order
2392 2417 return sorted(heads, key=self.changelog.rev, reverse=True)
2393 2418
2394 2419 def branchheads(self, branch=None, start=None, closed=False):
2395 2420 '''return a (possibly filtered) list of heads for the given branch
2396 2421
2397 2422 Heads are returned in topological order, from newest to oldest.
2398 2423 If branch is None, use the dirstate branch.
2399 2424 If start is not None, return only heads reachable from start.
2400 2425 If closed is True, return heads that are marked as closed as well.
2401 2426 '''
2402 2427 if branch is None:
2403 2428 branch = self[None].branch()
2404 2429 branches = self.branchmap()
2405 2430 if branch not in branches:
2406 2431 return []
2407 2432 # the cache returns heads ordered lowest to highest
2408 2433 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2409 2434 if start is not None:
2410 2435 # filter out the heads that cannot be reached from startrev
2411 2436 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2412 2437 bheads = [h for h in bheads if h in fbheads]
2413 2438 return bheads
2414 2439
2415 2440 def branches(self, nodes):
2416 2441 if not nodes:
2417 2442 nodes = [self.changelog.tip()]
2418 2443 b = []
2419 2444 for n in nodes:
2420 2445 t = n
2421 2446 while True:
2422 2447 p = self.changelog.parents(n)
2423 2448 if p[1] != nullid or p[0] == nullid:
2424 2449 b.append((t, n, p[0], p[1]))
2425 2450 break
2426 2451 n = p[0]
2427 2452 return b
2428 2453
2429 2454 def between(self, pairs):
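# note: for each (top, bottom) pair this walks first parents from top
# towards bottom, sampling nodes at exponentially growing distances
# (1, 2, 4, ...); the legacy wire protocol's "between" command uses
# this to narrow down ranges cheaply.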
2430 2455 r = []
2431 2456
2432 2457 for top, bottom in pairs:
2433 2458 n, l, i = top, [], 0
2434 2459 f = 1
2435 2460
2436 2461 while n != bottom and n != nullid:
2437 2462 p = self.changelog.parents(n)[0]
2438 2463 if i == f:
2439 2464 l.append(n)
2440 2465 f = f * 2
2441 2466 n = p
2442 2467 i += 1
2443 2468
2444 2469 r.append(l)
2445 2470
2446 2471 return r
2447 2472
2448 2473 def checkpush(self, pushop):
2449 2474 """Extensions can override this function if additional checks have
2450 2475 to be performed before pushing, or call it if they override push
2451 2476 command.
2452 2477 """
2453 2478
2454 2479 @unfilteredpropertycache
2455 2480 def prepushoutgoinghooks(self):
2456 2481 """Return util.hooks consists of a pushop with repo, remote, outgoing
2457 2482 methods, which are called before pushing changesets.
2458 2483 """
2459 2484 return util.hooks()
2460 2485
2461 2486 def pushkey(self, namespace, key, old, new):
2462 2487 try:
2463 2488 tr = self.currenttransaction()
2464 2489 hookargs = {}
2465 2490 if tr is not None:
2466 2491 hookargs.update(tr.hookargs)
2467 2492 hookargs = pycompat.strkwargs(hookargs)
2468 2493 hookargs[r'namespace'] = namespace
2469 2494 hookargs[r'key'] = key
2470 2495 hookargs[r'old'] = old
2471 2496 hookargs[r'new'] = new
2472 2497 self.hook('prepushkey', throw=True, **hookargs)
2473 2498 except error.HookAbort as exc:
2474 2499 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2475 2500 if exc.hint:
2476 2501 self.ui.write_err(_("(%s)\n") % exc.hint)
2477 2502 return False
2478 2503 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2479 2504 ret = pushkey.push(self, namespace, key, old, new)
2480 2505 def runhook():
2481 2506 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2482 2507 ret=ret)
2483 2508 self._afterlock(runhook)
2484 2509 return ret
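# A minimal usage sketch (illustrative; 'mybook' is a hypothetical
# bookmark) of moving a bookmark through the pushkey protocol:
#
#   new = hex(repo['tip'].node())
#   repo.pushkey('bookmarks', 'mybook', '', new)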
2485 2510
2486 2511 def listkeys(self, namespace):
2487 2512 self.hook('prelistkeys', throw=True, namespace=namespace)
2488 2513 self.ui.debug('listing keys for "%s"\n' % namespace)
2489 2514 values = pushkey.list(self, namespace)
2490 2515 self.hook('listkeys', namespace=namespace, values=values)
2491 2516 return values
2492 2517
2493 2518 def debugwireargs(self, one, two, three=None, four=None, five=None):
2494 2519 '''used to test argument passing over the wire'''
2495 2520 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2496 2521 pycompat.bytestr(four),
2497 2522 pycompat.bytestr(five))
2498 2523
2499 2524 def savecommitmessage(self, text):
2500 2525 fp = self.vfs('last-message.txt', 'wb')
2501 2526 try:
2502 2527 fp.write(text)
2503 2528 finally:
2504 2529 fp.close()
2505 2530 return self.pathto(fp.name[len(self.root) + 1:])
2506 2531
2507 2532 # used to avoid circular references so destructors work
2508 2533 def aftertrans(files):
2509 2534 renamefiles = [tuple(t) for t in files]
2510 2535 def a():
2511 2536 for vfs, src, dest in renamefiles:
2512 2537 # if src and dest refer to the same file, vfs.rename is a no-op,
2513 2538 # leaving both src and dest on disk. delete dest to make sure
2514 2539 # the rename cannot be such a no-op.
2515 2540 vfs.tryunlink(dest)
2516 2541 try:
2517 2542 vfs.rename(src, dest)
2518 2543 except OSError: # journal file does not yet exist
2519 2544 pass
2520 2545 return a
2521 2546
2522 2547 def undoname(fn):
2523 2548 base, name = os.path.split(fn)
2524 2549 assert name.startswith('journal')
2525 2550 return os.path.join(base, name.replace('journal', 'undo', 1))
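# For example (illustrative): undoname('journal.dirstate') returns
# 'undo.dirstate', matching the journal-to-undo renames scheduled by
# aftertrans() above.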
2526 2551
2527 2552 def instance(ui, path, create, intents=None, createopts=None):
2528 2553 localpath = util.urllocalpath(path)
2529 2554 if create:
2530 2555 createrepository(ui, localpath, createopts=createopts)
2531 2556
2532 2557 return makelocalrepository(ui, localpath, intents=intents)
2533 2558
2534 2559 def islocal(path):
2535 2560 return True
2536 2561
2537 2562 def newreporequirements(ui, createopts=None):
2538 2563 """Determine the set of requirements for a new local repository.
2539 2564
2540 2565 Extensions can wrap this function to specify custom requirements for
2541 2566 new repositories.
2542 2567 """
2543 2568 createopts = createopts or {}
2544 2569
2545 2570 requirements = {'revlogv1'}
2546 2571 if ui.configbool('format', 'usestore'):
2547 2572 requirements.add('store')
2548 2573 if ui.configbool('format', 'usefncache'):
2549 2574 requirements.add('fncache')
2550 2575 if ui.configbool('format', 'dotencode'):
2551 2576 requirements.add('dotencode')
2552 2577
2553 2578 compengine = ui.config('experimental', 'format.compression')
2554 2579 if compengine not in util.compengines:
2555 2580 raise error.Abort(_('compression engine %s defined by '
2556 2581 'experimental.format.compression not available') %
2557 2582 compengine,
2558 2583 hint=_('run "hg debuginstall" to list available '
2559 2584 'compression engines'))
2560 2585
2561 2586 # zlib is the historical default and doesn't need an explicit requirement.
2562 2587 if compengine != 'zlib':
2563 2588 requirements.add('exp-compression-%s' % compengine)
2564 2589
2565 2590 if scmutil.gdinitconfig(ui):
2566 2591 requirements.add('generaldelta')
2567 2592 if ui.configbool('experimental', 'treemanifest'):
2568 2593 requirements.add('treemanifest')
2569 2594 # experimental config: format.sparse-revlog
2570 2595 if ui.configbool('format', 'sparse-revlog'):
2571 2596 requirements.add(SPARSEREVLOG_REQUIREMENT)
2572 2597
2573 2598 revlogv2 = ui.config('experimental', 'revlogv2')
2574 2599 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2575 2600 requirements.remove('revlogv1')
2576 2601 # generaldelta is implied by revlogv2.
2577 2602 requirements.discard('generaldelta')
2578 2603 requirements.add(REVLOGV2_REQUIREMENT)
2579 2604 # experimental config: format.internal-phase
2580 2605 if ui.configbool('format', 'internal-phase'):
2581 2606 requirements.add('internal-phase')
2582 2607
2583 2608 if createopts.get('narrowfiles'):
2584 2609 requirements.add(repository.NARROW_REQUIREMENT)
2585 2610
2586 2611 return requirements
2587 2612
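# A minimal sketch (hypothetical extension code, not part of this
# module) of wrapping newreporequirements() above to add a custom
# requirement; 'exp-myfeature' is a made-up requirement name:
#
#   from mercurial import extensions, localrepo
#
#   def _newreporequirements(orig, ui, createopts=None):
#       requirements = orig(ui, createopts=createopts)
#       requirements.add('exp-myfeature')
#       return requirements
#
#   def extsetup(ui):
#       extensions.wrapfunction(localrepo, 'newreporequirements',
#                               _newreporequirements)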
2588 2613 def filterknowncreateopts(ui, createopts):
2589 2614 """Filters a dict of repo creation options against options that are known.
2590 2615
2591 2616 Receives a dict of repo creation options and returns a dict of those
2592 2617 options that we don't know how to handle.
2593 2618
2594 2619 This function is called as part of repository creation. If the
2595 2620 returned dict contains any items, repository creation will not
2596 2621 be allowed, as it means there was a request to create a repository
2597 2622 with options not recognized by loaded code.
2598 2623
2599 2624 Extensions can wrap this function to filter out creation options
2600 2625 they know how to handle.
2601 2626 """
2602 2627 known = {'narrowfiles'}
2603 2628
2604 2629 return {k: v for k, v in createopts.items() if k not in known}
2605 2630
2606 2631 def createrepository(ui, path, createopts=None):
2607 2632 """Create a new repository in a vfs.
2608 2633
2609 2634 ``path`` path to the new repo's working directory.
2610 2635 ``createopts`` options for the new repository.
2611 2636 """
2612 2637 createopts = createopts or {}
2613 2638
2614 2639 unknownopts = filterknowncreateopts(ui, createopts)
2615 2640
2616 2641 if not isinstance(unknownopts, dict):
2617 2642 raise error.ProgrammingError('filterknowncreateopts() did not return '
2618 2643 'a dict')
2619 2644
2620 2645 if unknownopts:
2621 2646 raise error.Abort(_('unable to create repository because of unknown '
2622 2647 'creation option: %s') %
2623 2648 ', '.join(sorted(unknownopts)),
2624 2649 hint=_('is a required extension not loaded?'))
2625 2650
2626 2651 requirements = newreporequirements(ui, createopts=createopts)
2627 2652
2628 2653 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
2629 2654 if not wdirvfs.exists():
2630 2655 wdirvfs.makedirs()
2631 2656
2632 2657 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
2633 2658 if hgvfs.exists():
2634 2659 raise error.RepoError(_('repository %s already exists') % path)
2635 2660
2636 2661 hgvfs.makedir(notindexed=True)
2637 2662
2638 2663 if b'store' in requirements:
2639 2664 hgvfs.mkdir(b'store')
2640 2665
2641 2666 # We create an invalid changelog outside the store so very old
2642 2667 # Mercurial versions (which didn't know about the requirements
2643 2668 # file) encounter an error on reading the changelog. This
2644 2669 # effectively locks out old clients and prevents them from
2645 2670 # mucking with a repo in an unknown format.
2646 2671 #
2647 2672 # The revlog header has version 2, which won't be recognized by
2648 2673 # such old clients.
2649 2674 hgvfs.append(b'00changelog.i',
2650 2675 b'\0\0\0\2 dummy changelog to prevent using the old repo '
2651 2676 b'layout')
2652 2677
2653 2678 scmutil.writerequires(hgvfs, requirements)
2654 2679
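# A minimal usage sketch (illustrative; the path is made up) tying
# createrepository() and instance() together:
#
#   from mercurial import ui as uimod
#   u = uimod.ui.load()
#   createrepository(u, '/tmp/newrepo')
#   repo = instance(u, '/tmp/newrepo', create=False)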
2655 2680 def poisonrepository(repo):
2656 2681 """Poison a repository instance so it can no longer be used."""
2657 2682 # Perform any cleanup on the instance.
2658 2683 repo.close()
2659 2684
2660 2685 # Our strategy is to replace the type of the object with one that
2661 2686 # has all attribute lookups result in error.
2662 2687 #
2663 2688 # But we have to allow the close() method because some constructors
2664 2689 # of repos call close() on repo references.
2665 2690 class poisonedrepository(object):
2666 2691 def __getattribute__(self, item):
2667 2692 if item == r'close':
2668 2693 return object.__getattribute__(self, item)
2669 2694
2670 2695 raise error.ProgrammingError('repo instances should not be used '
2671 2696 'after unshare')
2672 2697
2673 2698 def close(self):
2674 2699 pass
2675 2700
2676 2701 # We may have a repoview, which intercepts __setattr__. So be sure
2677 2702 # we operate at the lowest level possible.
2678 2703 object.__setattr__(repo, r'__class__', poisonedrepository)
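# Minimal illustration of the effect:
#
#   poisonrepository(repo)
#   repo.close()   # still permitted
#   repo.root      # raises error.ProgrammingError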