localrepo: read requirements file in makelocalrepository()...
Gregory Szorc
r39728:6a3162ed default
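
This changeset continues the breakup of repository opening: makelocalrepository() now reads and parses the .hg/requires file itself and hands the resulting set to the localrepository constructor, which previously loaded it via scmutil.readrequires(). Behavior is preserved: a missing file (repositories created before Mercurial 0.9.2) still yields an empty requirement set, and validation against the supported feature set still happens in __init__().
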
@@ -1,2591 +1,2619 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 )
24 24 from . import (
25 25 bookmarks,
26 26 branchmap,
27 27 bundle2,
28 28 changegroup,
29 29 changelog,
30 30 color,
31 31 context,
32 32 dirstate,
33 33 dirstateguard,
34 34 discovery,
35 35 encoding,
36 36 error,
37 37 exchange,
38 38 extensions,
39 39 filelog,
40 40 hook,
41 41 lock as lockmod,
42 42 manifest,
43 43 match as matchmod,
44 44 merge as mergemod,
45 45 mergeutil,
46 46 namespaces,
47 47 narrowspec,
48 48 obsolete,
49 49 pathutil,
50 50 phases,
51 51 pushkey,
52 52 pycompat,
53 53 repository,
54 54 repoview,
55 55 revset,
56 56 revsetlang,
57 57 scmutil,
58 58 sparse,
59 59 store,
60 60 subrepoutil,
61 61 tags as tagsmod,
62 62 transaction,
63 63 txnutil,
64 64 util,
65 65 vfs as vfsmod,
66 66 )
67 67 from .utils import (
68 68 interfaceutil,
69 69 procutil,
70 70 stringutil,
71 71 )
72 72
73 73 from .revlogutils import (
74 74 constants as revlogconst,
75 75 )
76 76
77 77 release = lockmod.release
78 78 urlerr = util.urlerr
79 79 urlreq = util.urlreq
80 80
81 81 # set of (path, vfs-location) tuples. vfs-location is:
82 82 # - 'plain' for vfs relative paths
83 83 # - '' for svfs relative paths
84 84 _cachedfiles = set()
85 85
86 86 class _basefilecache(scmutil.filecache):
87 87     """All filecache usage on a repo is done for logic that should be unfiltered
88 88 """
89 89 def __get__(self, repo, type=None):
90 90 if repo is None:
91 91 return self
92 92 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
93 93 def __set__(self, repo, value):
94 94 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
95 95 def __delete__(self, repo):
96 96 return super(_basefilecache, self).__delete__(repo.unfiltered())
97 97
98 98 class repofilecache(_basefilecache):
99 99 """filecache for files in .hg but outside of .hg/store"""
100 100 def __init__(self, *paths):
101 101 super(repofilecache, self).__init__(*paths)
102 102 for path in paths:
103 103 _cachedfiles.add((path, 'plain'))
104 104
105 105 def join(self, obj, fname):
106 106 return obj.vfs.join(fname)
107 107
108 108 class storecache(_basefilecache):
109 109 """filecache for files in the store"""
110 110 def __init__(self, *paths):
111 111 super(storecache, self).__init__(*paths)
112 112 for path in paths:
113 113 _cachedfiles.add((path, ''))
114 114
115 115 def join(self, obj, fname):
116 116 return obj.sjoin(fname)
117 117
118 118 def isfilecached(repo, name):
119 119     """check if a repo has already cached the "name" filecache-ed property
120 120
121 121 This returns (cachedobj-or-None, iscached) tuple.
122 122 """
123 123 cacheentry = repo.unfiltered()._filecache.get(name, None)
124 124 if not cacheentry:
125 125 return None, False
126 126 return cacheentry.obj, True
127 127
128 128 class unfilteredpropertycache(util.propertycache):
129 129     """propertycache that applies to the unfiltered repo only"""
130 130
131 131 def __get__(self, repo, type=None):
132 132 unfi = repo.unfiltered()
133 133 if unfi is repo:
134 134 return super(unfilteredpropertycache, self).__get__(unfi)
135 135 return getattr(unfi, self.name)
136 136
137 137 class filteredpropertycache(util.propertycache):
138 138     """propertycache that must take filtering into account"""
139 139
140 140 def cachevalue(self, obj, value):
141 141 object.__setattr__(obj, self.name, value)
142 142
143 143
144 144 def hasunfilteredcache(repo, name):
145 145 """check if a repo has an unfilteredpropertycache value for <name>"""
146 146 return name in vars(repo.unfiltered())
147 147
148 148 def unfilteredmethod(orig):
149 149     """decorate a method that always needs to be run on the unfiltered version"""
150 150 def wrapper(repo, *args, **kwargs):
151 151 return orig(repo.unfiltered(), *args, **kwargs)
152 152 return wrapper
153 153
154 154 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
155 155 'unbundle'}
156 156 legacycaps = moderncaps.union({'changegroupsubset'})
157 157
158 158 @interfaceutil.implementer(repository.ipeercommandexecutor)
159 159 class localcommandexecutor(object):
160 160 def __init__(self, peer):
161 161 self._peer = peer
162 162 self._sent = False
163 163 self._closed = False
164 164
165 165 def __enter__(self):
166 166 return self
167 167
168 168 def __exit__(self, exctype, excvalue, exctb):
169 169 self.close()
170 170
171 171 def callcommand(self, command, args):
172 172 if self._sent:
173 173 raise error.ProgrammingError('callcommand() cannot be used after '
174 174 'sendcommands()')
175 175
176 176 if self._closed:
177 177 raise error.ProgrammingError('callcommand() cannot be used after '
178 178 'close()')
179 179
180 180 # We don't need to support anything fancy. Just call the named
181 181 # method on the peer and return a resolved future.
182 182 fn = getattr(self._peer, pycompat.sysstr(command))
183 183
184 184 f = pycompat.futures.Future()
185 185
186 186 try:
187 187 result = fn(**pycompat.strkwargs(args))
188 188 except Exception:
189 189 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
190 190 else:
191 191 f.set_result(result)
192 192
193 193 return f
194 194
195 195 def sendcommands(self):
196 196 self._sent = True
197 197
198 198 def close(self):
199 199 self._closed = True
200 200
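
Usage-wise, an executor is obtained from a peer and driven as a context manager; with this local implementation every returned future is already resolved. A minimal sketch using the 'heads' wire command:

    with peer.commandexecutor() as e:
        # callcommand() returns a future; the local executor resolves
        # it immediately by calling the peer method directly.
        f = e.callcommand(b'heads', {})
        heads = f.result()
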
201 201 @interfaceutil.implementer(repository.ipeercommands)
202 202 class localpeer(repository.peer):
203 203 '''peer for a local repo; reflects only the most recent API'''
204 204
205 205 def __init__(self, repo, caps=None):
206 206 super(localpeer, self).__init__()
207 207
208 208 if caps is None:
209 209 caps = moderncaps.copy()
210 210 self._repo = repo.filtered('served')
211 211 self.ui = repo.ui
212 212 self._caps = repo._restrictcapabilities(caps)
213 213
214 214 # Begin of _basepeer interface.
215 215
216 216 def url(self):
217 217 return self._repo.url()
218 218
219 219 def local(self):
220 220 return self._repo
221 221
222 222 def peer(self):
223 223 return self
224 224
225 225 def canpush(self):
226 226 return True
227 227
228 228 def close(self):
229 229 self._repo.close()
230 230
231 231 # End of _basepeer interface.
232 232
233 233 # Begin of _basewirecommands interface.
234 234
235 235 def branchmap(self):
236 236 return self._repo.branchmap()
237 237
238 238 def capabilities(self):
239 239 return self._caps
240 240
241 241 def clonebundles(self):
242 242 return self._repo.tryread('clonebundles.manifest')
243 243
244 244 def debugwireargs(self, one, two, three=None, four=None, five=None):
245 245 """Used to test argument passing over the wire"""
246 246 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
247 247 pycompat.bytestr(four),
248 248 pycompat.bytestr(five))
249 249
250 250 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
251 251 **kwargs):
252 252 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
253 253 common=common, bundlecaps=bundlecaps,
254 254 **kwargs)[1]
255 255 cb = util.chunkbuffer(chunks)
256 256
257 257 if exchange.bundle2requested(bundlecaps):
258 258 # When requesting a bundle2, getbundle returns a stream to make the
259 259 # wire level function happier. We need to build a proper object
260 260 # from it in local peer.
261 261 return bundle2.getunbundler(self.ui, cb)
262 262 else:
263 263 return changegroup.getunbundler('01', cb, None)
264 264
265 265 def heads(self):
266 266 return self._repo.heads()
267 267
268 268 def known(self, nodes):
269 269 return self._repo.known(nodes)
270 270
271 271 def listkeys(self, namespace):
272 272 return self._repo.listkeys(namespace)
273 273
274 274 def lookup(self, key):
275 275 return self._repo.lookup(key)
276 276
277 277 def pushkey(self, namespace, key, old, new):
278 278 return self._repo.pushkey(namespace, key, old, new)
279 279
280 280 def stream_out(self):
281 281 raise error.Abort(_('cannot perform stream clone against local '
282 282 'peer'))
283 283
284 284 def unbundle(self, bundle, heads, url):
285 285 """apply a bundle on a repo
286 286
287 287 This function handles the repo locking itself."""
288 288 try:
289 289 try:
290 290 bundle = exchange.readbundle(self.ui, bundle, None)
291 291 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
292 292 if util.safehasattr(ret, 'getchunks'):
293 293 # This is a bundle20 object, turn it into an unbundler.
294 294 # This little dance should be dropped eventually when the
295 295 # API is finally improved.
296 296 stream = util.chunkbuffer(ret.getchunks())
297 297 ret = bundle2.getunbundler(self.ui, stream)
298 298 return ret
299 299 except Exception as exc:
300 300 # If the exception contains output salvaged from a bundle2
301 301 # reply, we need to make sure it is printed before continuing
302 302 # to fail. So we build a bundle2 with such output and consume
303 303 # it directly.
304 304 #
305 305 # This is not very elegant but allows a "simple" solution for
306 306 # issue4594
307 307 output = getattr(exc, '_bundle2salvagedoutput', ())
308 308 if output:
309 309 bundler = bundle2.bundle20(self._repo.ui)
310 310 for out in output:
311 311 bundler.addpart(out)
312 312 stream = util.chunkbuffer(bundler.getchunks())
313 313 b = bundle2.getunbundler(self.ui, stream)
314 314 bundle2.processbundle(self._repo, b)
315 315 raise
316 316 except error.PushRaced as exc:
317 317 raise error.ResponseError(_('push failed:'),
318 318 stringutil.forcebytestr(exc))
319 319
320 320 # End of _basewirecommands interface.
321 321
322 322 # Begin of peer interface.
323 323
324 324 def commandexecutor(self):
325 325 return localcommandexecutor(self)
326 326
327 327 # End of peer interface.
328 328
329 329 @interfaceutil.implementer(repository.ipeerlegacycommands)
330 330 class locallegacypeer(localpeer):
331 331 '''peer extension which implements legacy methods too; used for tests with
332 332 restricted capabilities'''
333 333
334 334 def __init__(self, repo):
335 335 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
336 336
337 337 # Begin of baselegacywirecommands interface.
338 338
339 339 def between(self, pairs):
340 340 return self._repo.between(pairs)
341 341
342 342 def branches(self, nodes):
343 343 return self._repo.branches(nodes)
344 344
345 345 def changegroup(self, nodes, source):
346 346 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
347 347 missingheads=self._repo.heads())
348 348 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
349 349
350 350 def changegroupsubset(self, bases, heads, source):
351 351 outgoing = discovery.outgoing(self._repo, missingroots=bases,
352 352 missingheads=heads)
353 353 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
354 354
355 355 # End of baselegacywirecommands interface.
356 356
357 357 # Increment the sub-version when the revlog v2 format changes to lock out old
358 358 # clients.
359 359 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
360 360
361 361 # A repository with the sparserevlog feature will have delta chains that
362 362 # can spread over a larger span. Sparse reading cuts these large spans into
363 363 # pieces, so that each piece isn't too big.
364 364 # Without the sparserevlog capability, reading from the repository could use
365 365 # huge amounts of memory, because the whole span would be read at once,
366 366 # including all the intermediate revisions that aren't pertinent for the chain.
367 367 # This is why once a repository has enabled sparse-read, it becomes required.
368 368 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
369 369
370 370 # Functions receiving (ui, features) that extensions can register to impact
371 371 # the ability to load repositories with custom requirements. Only
372 372 # functions defined in loaded extensions are called.
373 373 #
374 374 # The function receives a set of requirement strings that the repository
375 375 # is capable of opening. Functions will typically add elements to the
376 376 # set to reflect that the extension knows how to handle those requirements.
377 377 featuresetupfuncs = set()
378 378
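
As a point of reference, an extension opts into a custom requirement by registering one of these functions at extension load time. A minimal sketch, assuming a hypothetical 'exp-myfeature' requirement:

    from mercurial import localrepo

    def featuresetup(ui, features):
        # Declare that this extension can open repositories carrying
        # the (hypothetical) requirement below.
        features.add(b'exp-myfeature')

    def uisetup(ui):
        localrepo.featuresetupfuncs.add(featuresetup)
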
379 379 def makelocalrepository(baseui, path, intents=None):
380 380 """Create a local repository object.
381 381
382 382 Given arguments needed to construct a local repository, this function
383 383 derives a type suitable for representing that repository and returns an
384 384 instance of it.
385 385
386 386 The returned object conforms to the ``repository.completelocalrepository``
387 387 interface.
388 388 """
389 389 ui = baseui.copy()
390 390 # Prevent copying repo configuration.
391 391 ui.copy = baseui.copy
392 392
393 393 # Working directory VFS rooted at repository root.
394 394 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
395 395
396 396 # Main VFS for .hg/ directory.
397 397 hgpath = wdirvfs.join(b'.hg')
398 398 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
399 399
400 400 # The .hg/ path should exist and should be a directory. All other
401 401 # cases are errors.
402 402 if not hgvfs.isdir():
403 403 try:
404 404 hgvfs.stat()
405 405 except OSError as e:
406 406 if e.errno != errno.ENOENT:
407 407 raise
408 408
409 409 raise error.RepoError(_(b'repository %s not found') % path)
410 410
411 # .hg/requires file contains a newline-delimited list of
412 # features/capabilities the opener (us) must have in order to use
413 # the repository. This file was introduced in Mercurial 0.9.2,
414 # which means very old repositories may not have one. We assume
415 # a missing file translates to no requirements.
416 try:
417 requirements = set(hgvfs.read(b'requires').splitlines())
418 except IOError as e:
419 if e.errno != errno.ENOENT:
420 raise
421 requirements = set()
422
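
The added block inlines what the constructor previously obtained from scmutil.readrequires(), with validation deferred to __init__() (see below). In isolation, the parsing amounts to this minimal sketch (not the Mercurial API; path handling is simplified):

    import errno

    def readrequirements(repopath):
        '''Return the requirement set from <repopath>/.hg/requires.

        The file holds newline-delimited feature names; a missing
        file means no requirements.
        '''
        try:
            with open(repopath + '/.hg/requires', 'rb') as fp:
                return set(fp.read().splitlines())
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            return set()

A typical modern requires file lists entries such as dotencode, fncache, generaldelta, revlogv1, and store, one per line.
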
411 423 # The .hg/hgrc file may load extensions or contain config options
412 424 # that influence repository construction. Attempt to load it and
413 425 # process any new extensions that it may have pulled in.
414 426 try:
415 427 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
416 428 except IOError:
417 429 pass
418 430 else:
419 431 extensions.loadall(ui)
420 432
421 433 return localrepository(
422 434 baseui=baseui,
423 435 ui=ui,
424 436 origroot=path,
425 437 wdirvfs=wdirvfs,
426 438 hgvfs=hgvfs,
439 requirements=requirements,
427 440 intents=intents)
428 441
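
Most callers reach this factory through the hg module rather than invoking it directly; a sketch of the usual entry point (assuming a loaded ui object):

    from mercurial import hg, ui as uimod

    ui = uimod.ui.load()
    repo = hg.repository(ui, b'/path/to/repo')
    # The requirements parsed above are attached during construction.
    print(repo.requirements)
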
429 442 @interfaceutil.implementer(repository.completelocalrepository)
430 443 class localrepository(object):
431 444
432 445 # obsolete experimental requirements:
433 446 # - manifestv2: An experimental new manifest format that allowed
434 447 # for stem compression of long paths. Experiment ended up not
435 448 # being successful (repository sizes went up due to worse delta
436 449 # chains), and the code was deleted in 4.6.
437 450 supportedformats = {
438 451 'revlogv1',
439 452 'generaldelta',
440 453 'treemanifest',
441 454 REVLOGV2_REQUIREMENT,
442 455 SPARSEREVLOG_REQUIREMENT,
443 456 }
444 457 _basesupported = supportedformats | {
445 458 'store',
446 459 'fncache',
447 460 'shared',
448 461 'relshared',
449 462 'dotencode',
450 463 'exp-sparse',
451 464 'internal-phase'
452 465 }
453 466 openerreqs = {
454 467 'revlogv1',
455 468 'generaldelta',
456 469 'treemanifest',
457 470 }
458 471
459 472     # list of prefixes for files which can be written without 'wlock'
460 473 # Extensions should extend this list when needed
461 474 _wlockfreeprefix = {
462 475         # We might consider requiring 'wlock' for the next
463 476         # two, but pretty much all the existing code assumes
464 477         # wlock is not needed, so we keep them excluded for
465 478 # now.
466 479 'hgrc',
467 480 'requires',
468 481         # XXX cache is a complicated business; someone
469 482 # should investigate this in depth at some point
470 483 'cache/',
471 484         # XXX shouldn't the dirstate be covered by the wlock?
472 485 'dirstate',
473 486 # XXX bisect was still a bit too messy at the time
474 487 # this changeset was introduced. Someone should fix
475 488         # the remaining bit and drop this line
476 489 'bisect.state',
477 490 }
478 491
479 def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, intents=None):
492 def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
493 intents=None):
480 494 """Create a new local repository instance.
481 495
482 496 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
483 497 or ``localrepo.makelocalrepository()`` for obtaining a new repository
484 498 object.
485 499
486 500 Arguments:
487 501
488 502 baseui
489 503 ``ui.ui`` instance that ``ui`` argument was based off of.
490 504
491 505 ui
492 506 ``ui.ui`` instance for use by the repository.
493 507
494 508 origroot
495 509 ``bytes`` path to working directory root of this repository.
496 510
497 511 wdirvfs
498 512 ``vfs.vfs`` rooted at the working directory.
499 513
500 514 hgvfs
501 515 ``vfs.vfs`` rooted at .hg/
502 516
517 requirements
518 ``set`` of bytestrings representing repository opening requirements.
519
503 520 intents
504 521 ``set`` of system strings indicating what this repo will be used
505 522 for.
506 523 """
507 524 self.baseui = baseui
508 525 self.ui = ui
509 526 self.origroot = origroot
510 527 # vfs rooted at working directory.
511 528 self.wvfs = wdirvfs
512 529 self.root = wdirvfs.base
513 530 # vfs rooted at .hg/. Used to access most non-store paths.
514 531 self.vfs = hgvfs
515 532 self.path = hgvfs.base
516 533
517 534 self.filtername = None
518 535 # svfs: usually rooted at .hg/store, used to access repository history
519 536 # If this is a shared repository, this vfs may point to another
520 537 # repository's .hg/store directory.
521 538 self.svfs = None
522 539
523 540 if (self.ui.configbool('devel', 'all-warnings') or
524 541 self.ui.configbool('devel', 'check-locks')):
525 542 self.vfs.audit = self._getvfsward(self.vfs.audit)
526 543         # A list of callbacks to shape the phase if no data were found.
527 544         # Callbacks are in the form: func(repo, roots) --> processed root.
528 545         # This list is to be filled by extensions during repo setup
529 546 self._phasedefaults = []
530 547
531 548 if featuresetupfuncs:
532 549 self.supported = set(self._basesupported) # use private copy
533 550 extmods = set(m.__name__ for n, m
534 551 in extensions.extensions(self.ui))
535 552 for setupfunc in featuresetupfuncs:
536 553 if setupfunc.__module__ in extmods:
537 554 setupfunc(self.ui, self.supported)
538 555 else:
539 556 self.supported = self._basesupported
540 557 color.setup(self.ui)
541 558
542 559 # Add compression engines.
543 560 for name in util.compengines:
544 561 engine = util.compengines[name]
545 562 if engine.revlogheader():
546 563 self.supported.add('exp-compression-%s' % name)
547 564
548 try:
549 self.requirements = scmutil.readrequires(self.vfs, self.supported)
550 except IOError as inst:
551 if inst.errno != errno.ENOENT:
552 raise
553 self.requirements = set()
565 # Validate that all seen repository requirements are supported.
566 missingrequirements = []
567 for r in requirements:
568 if r not in self.supported:
569 if not r or not r[0:1].isalnum():
570 raise error.RequirementError(
571 _(".hg/requires file is corrupt"))
572 missingrequirements.append(r)
573 missingrequirements.sort()
574 if missingrequirements:
575 raise error.RequirementError(
576 _("repository requires features unknown to this Mercurial: %s")
577 % " ".join(missingrequirements),
578 hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
579 " for more information"))
580
581 self.requirements = requirements
554 582
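
Note the two failure modes in the validation above: an entry that is empty or does not start with an alphanumeric character marks the requires file itself as corrupt, while a well-formed but unrecognized entry aborts with the missing-features message. For example, opening a repository whose requires file names a hypothetical fancy-feature entry fails with output along the lines of:

    abort: repository requires features unknown to this Mercurial: fancy-feature
    (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
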
555 583 cachepath = self.vfs.join('cache')
556 584 self.sharedpath = self.path
557 585 try:
558 586 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
559 587 if 'relshared' in self.requirements:
560 588 sharedpath = self.vfs.join(sharedpath)
561 589 vfs = vfsmod.vfs(sharedpath, realpath=True)
562 590 cachepath = vfs.join('cache')
563 591 s = vfs.base
564 592 if not vfs.exists():
565 593 raise error.RepoError(
566 594 _('.hg/sharedpath points to nonexistent directory %s') % s)
567 595 self.sharedpath = s
568 596 except IOError as inst:
569 597 if inst.errno != errno.ENOENT:
570 598 raise
571 599
572 600 if 'exp-sparse' in self.requirements and not sparse.enabled:
573 601 raise error.RepoError(_('repository is using sparse feature but '
574 602 'sparse is not enabled; enable the '
575 603                                     '"sparse" extension to access'))
576 604
577 605 self.store = store.store(
578 606 self.requirements, self.sharedpath,
579 607 lambda base: vfsmod.vfs(base, cacheaudited=True))
580 608 self.spath = self.store.path
581 609 self.svfs = self.store.vfs
582 610 self.sjoin = self.store.join
583 611 self.vfs.createmode = self.store.createmode
584 612 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
585 613 self.cachevfs.createmode = self.store.createmode
586 614 if (self.ui.configbool('devel', 'all-warnings') or
587 615 self.ui.configbool('devel', 'check-locks')):
588 616 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
589 617 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
590 618 else: # standard vfs
591 619 self.svfs.audit = self._getsvfsward(self.svfs.audit)
592 620 self._applyopenerreqs()
593 621
594 622 self._dirstatevalidatewarned = False
595 623
596 624 self._branchcaches = {}
597 625 self._revbranchcache = None
598 626 self._filterpats = {}
599 627 self._datafilters = {}
600 628 self._transref = self._lockref = self._wlockref = None
601 629
602 630 # A cache for various files under .hg/ that tracks file changes,
603 631 # (used by the filecache decorator)
604 632 #
605 633 # Maps a property name to its util.filecacheentry
606 634 self._filecache = {}
607 635
608 636         # holds sets of revisions to be filtered
609 637 # should be cleared when something might have changed the filter value:
610 638 # - new changesets,
611 639 # - phase change,
612 640 # - new obsolescence marker,
613 641 # - working directory parent change,
614 642 # - bookmark changes
615 643 self.filteredrevcache = {}
616 644
617 645 # post-dirstate-status hooks
618 646 self._postdsstatus = []
619 647
620 648 # generic mapping between names and nodes
621 649 self.names = namespaces.namespaces()
622 650
623 651 # Key to signature value.
624 652 self._sparsesignaturecache = {}
625 653 # Signature to cached matcher instance.
626 654 self._sparsematchercache = {}
627 655
628 656 def _getvfsward(self, origfunc):
629 657 """build a ward for self.vfs"""
630 658 rref = weakref.ref(self)
631 659 def checkvfs(path, mode=None):
632 660 ret = origfunc(path, mode=mode)
633 661 repo = rref()
634 662 if (repo is None
635 663 or not util.safehasattr(repo, '_wlockref')
636 664 or not util.safehasattr(repo, '_lockref')):
637 665 return
638 666 if mode in (None, 'r', 'rb'):
639 667 return
640 668 if path.startswith(repo.path):
641 669 # truncate name relative to the repository (.hg)
642 670 path = path[len(repo.path) + 1:]
643 671 if path.startswith('cache/'):
644 672 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
645 673 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
646 674 if path.startswith('journal.'):
647 675 # journal is covered by 'lock'
648 676 if repo._currentlock(repo._lockref) is None:
649 677 repo.ui.develwarn('write with no lock: "%s"' % path,
650 678 stacklevel=2, config='check-locks')
651 679 elif repo._currentlock(repo._wlockref) is None:
652 680 # rest of vfs files are covered by 'wlock'
653 681 #
654 682 # exclude special files
655 683 for prefix in self._wlockfreeprefix:
656 684 if path.startswith(prefix):
657 685 return
658 686 repo.ui.develwarn('write with no wlock: "%s"' % path,
659 687 stacklevel=2, config='check-locks')
660 688 return ret
661 689 return checkvfs
662 690
663 691 def _getsvfsward(self, origfunc):
664 692 """build a ward for self.svfs"""
665 693 rref = weakref.ref(self)
666 694 def checksvfs(path, mode=None):
667 695 ret = origfunc(path, mode=mode)
668 696 repo = rref()
669 697 if repo is None or not util.safehasattr(repo, '_lockref'):
670 698 return
671 699 if mode in (None, 'r', 'rb'):
672 700 return
673 701 if path.startswith(repo.sharedpath):
674 702 # truncate name relative to the repository (.hg)
675 703 path = path[len(repo.sharedpath) + 1:]
676 704 if repo._currentlock(repo._lockref) is None:
677 705 repo.ui.develwarn('write with no lock: "%s"' % path,
678 706 stacklevel=3)
679 707 return ret
680 708 return checksvfs
681 709
682 710 def close(self):
683 711 self._writecaches()
684 712
685 713 def _writecaches(self):
686 714 if self._revbranchcache:
687 715 self._revbranchcache.write()
688 716
689 717 def _restrictcapabilities(self, caps):
690 718 if self.ui.configbool('experimental', 'bundle2-advertise'):
691 719 caps = set(caps)
692 720 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
693 721 role='client'))
694 722 caps.add('bundle2=' + urlreq.quote(capsblob))
695 723 return caps
696 724
697 725 def _applyopenerreqs(self):
698 726 self.svfs.options = dict((r, 1) for r in self.requirements
699 727 if r in self.openerreqs)
700 728 # experimental config: format.chunkcachesize
701 729 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
702 730 if chunkcachesize is not None:
703 731 self.svfs.options['chunkcachesize'] = chunkcachesize
704 732 # experimental config: format.manifestcachesize
705 733 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
706 734 if manifestcachesize is not None:
707 735 self.svfs.options['manifestcachesize'] = manifestcachesize
708 736 deltabothparents = self.ui.configbool('storage',
709 737 'revlog.optimize-delta-parent-choice')
710 738 self.svfs.options['deltabothparents'] = deltabothparents
711 739 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
712 740 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
713 741 if 0 <= chainspan:
714 742 self.svfs.options['maxdeltachainspan'] = chainspan
715 743 mmapindexthreshold = self.ui.configbytes('experimental',
716 744 'mmapindexthreshold')
717 745 if mmapindexthreshold is not None:
718 746 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
719 747 withsparseread = self.ui.configbool('experimental', 'sparse-read')
720 748 srdensitythres = float(self.ui.config('experimental',
721 749 'sparse-read.density-threshold'))
722 750 srmingapsize = self.ui.configbytes('experimental',
723 751 'sparse-read.min-gap-size')
724 752 self.svfs.options['with-sparse-read'] = withsparseread
725 753 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
726 754 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
727 755 sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
728 756 self.svfs.options['sparse-revlog'] = sparserevlog
729 757 if sparserevlog:
730 758 self.svfs.options['generaldelta'] = True
731 759 maxchainlen = None
732 760 if sparserevlog:
733 761 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
734 762 # experimental config: format.maxchainlen
735 763 maxchainlen = self.ui.configint('format', 'maxchainlen', maxchainlen)
736 764 if maxchainlen is not None:
737 765 self.svfs.options['maxchainlen'] = maxchainlen
738 766
739 767 for r in self.requirements:
740 768 if r.startswith('exp-compression-'):
741 769 self.svfs.options['compengine'] = r[len('exp-compression-'):]
742 770
743 771 # TODO move "revlogv2" to openerreqs once finalized.
744 772 if REVLOGV2_REQUIREMENT in self.requirements:
745 773 self.svfs.options['revlogv2'] = True
746 774
747 775 def _writerequirements(self):
748 776 scmutil.writerequires(self.vfs, self.requirements)
749 777
750 778 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
751 779 # self -> auditor -> self._checknested -> self
752 780
753 781 @property
754 782 def auditor(self):
755 783 # This is only used by context.workingctx.match in order to
756 784 # detect files in subrepos.
757 785 return pathutil.pathauditor(self.root, callback=self._checknested)
758 786
759 787 @property
760 788 def nofsauditor(self):
761 789 # This is only used by context.basectx.match in order to detect
762 790 # files in subrepos.
763 791 return pathutil.pathauditor(self.root, callback=self._checknested,
764 792 realfs=False, cached=True)
765 793
766 794 def _checknested(self, path):
767 795 """Determine if path is a legal nested repository."""
768 796 if not path.startswith(self.root):
769 797 return False
770 798 subpath = path[len(self.root) + 1:]
771 799 normsubpath = util.pconvert(subpath)
772 800
773 801 # XXX: Checking against the current working copy is wrong in
774 802 # the sense that it can reject things like
775 803 #
776 804 # $ hg cat -r 10 sub/x.txt
777 805 #
778 806 # if sub/ is no longer a subrepository in the working copy
779 807 # parent revision.
780 808 #
781 809 # However, it can of course also allow things that would have
782 810 # been rejected before, such as the above cat command if sub/
783 811 # is a subrepository now, but was a normal directory before.
784 812 # The old path auditor would have rejected by mistake since it
785 813 # panics when it sees sub/.hg/.
786 814 #
787 815 # All in all, checking against the working copy seems sensible
788 816 # since we want to prevent access to nested repositories on
789 817 # the filesystem *now*.
790 818 ctx = self[None]
791 819 parts = util.splitpath(subpath)
792 820 while parts:
793 821 prefix = '/'.join(parts)
794 822 if prefix in ctx.substate:
795 823 if prefix == normsubpath:
796 824 return True
797 825 else:
798 826 sub = ctx.sub(prefix)
799 827 return sub.checknested(subpath[len(prefix) + 1:])
800 828 else:
801 829 parts.pop()
802 830 return False
803 831
804 832 def peer(self):
805 833 return localpeer(self) # not cached to avoid reference cycle
806 834
807 835 def unfiltered(self):
808 836 """Return unfiltered version of the repository
809 837
810 838 Intended to be overwritten by filtered repo."""
811 839 return self
812 840
813 841 def filtered(self, name, visibilityexceptions=None):
814 842 """Return a filtered version of a repository"""
815 843 cls = repoview.newtype(self.unfiltered().__class__)
816 844 return cls(self, name, visibilityexceptions)
817 845
818 846 @repofilecache('bookmarks', 'bookmarks.current')
819 847 def _bookmarks(self):
820 848 return bookmarks.bmstore(self)
821 849
822 850 @property
823 851 def _activebookmark(self):
824 852 return self._bookmarks.active
825 853
826 854 # _phasesets depend on changelog. what we need is to call
827 855 # _phasecache.invalidate() if '00changelog.i' was changed, but it
828 856 # can't be easily expressed in filecache mechanism.
829 857 @storecache('phaseroots', '00changelog.i')
830 858 def _phasecache(self):
831 859 return phases.phasecache(self, self._phasedefaults)
832 860
833 861 @storecache('obsstore')
834 862 def obsstore(self):
835 863 return obsolete.makestore(self.ui, self)
836 864
837 865 @storecache('00changelog.i')
838 866 def changelog(self):
839 867 return changelog.changelog(self.svfs,
840 868 trypending=txnutil.mayhavepending(self.root))
841 869
842 870 def _constructmanifest(self):
843 871 # This is a temporary function while we migrate from manifest to
844 872 # manifestlog. It allows bundlerepo and unionrepo to intercept the
845 873 # manifest creation.
846 874 return manifest.manifestrevlog(self.svfs)
847 875
848 876 @storecache('00manifest.i')
849 877 def manifestlog(self):
850 878 return manifest.manifestlog(self.svfs, self)
851 879
852 880 @repofilecache('dirstate')
853 881 def dirstate(self):
854 882 return self._makedirstate()
855 883
856 884 def _makedirstate(self):
857 885 """Extension point for wrapping the dirstate per-repo."""
858 886 sparsematchfn = lambda: sparse.matcher(self)
859 887
860 888 return dirstate.dirstate(self.vfs, self.ui, self.root,
861 889 self._dirstatevalidate, sparsematchfn)
862 890
863 891 def _dirstatevalidate(self, node):
864 892 try:
865 893 self.changelog.rev(node)
866 894 return node
867 895 except error.LookupError:
868 896 if not self._dirstatevalidatewarned:
869 897 self._dirstatevalidatewarned = True
870 898 self.ui.warn(_("warning: ignoring unknown"
871 899 " working parent %s!\n") % short(node))
872 900 return nullid
873 901
874 902 @storecache(narrowspec.FILENAME)
875 903 def narrowpats(self):
876 904 """matcher patterns for this repository's narrowspec
877 905
878 906 A tuple of (includes, excludes).
879 907 """
880 908 source = self
881 909 if self.shared():
882 910 from . import hg
883 911 source = hg.sharedreposource(self)
884 912 return narrowspec.load(source)
885 913
886 914 @storecache(narrowspec.FILENAME)
887 915 def _narrowmatch(self):
888 916 if repository.NARROW_REQUIREMENT not in self.requirements:
889 917 return matchmod.always(self.root, '')
890 918 include, exclude = self.narrowpats
891 919 return narrowspec.match(self.root, include=include, exclude=exclude)
892 920
893 921 # TODO(martinvonz): make this property-like instead?
894 922 def narrowmatch(self):
895 923 return self._narrowmatch
896 924
897 925 def setnarrowpats(self, newincludes, newexcludes):
898 926 narrowspec.save(self, newincludes, newexcludes)
899 927 self.invalidate(clearfilecache=True)
900 928
901 929 def __getitem__(self, changeid):
902 930 if changeid is None:
903 931 return context.workingctx(self)
904 932 if isinstance(changeid, context.basectx):
905 933 return changeid
906 934 if isinstance(changeid, slice):
907 935 # wdirrev isn't contiguous so the slice shouldn't include it
908 936 return [context.changectx(self, i)
909 937 for i in pycompat.xrange(*changeid.indices(len(self)))
910 938 if i not in self.changelog.filteredrevs]
911 939 try:
912 940 return context.changectx(self, changeid)
913 941 except error.WdirUnsupported:
914 942 return context.workingctx(self)
915 943
916 944 def __contains__(self, changeid):
917 945 """True if the given changeid exists
918 946
919 947         error.AmbiguousPrefixLookupError is raised if an ambiguous node
920 948         is specified.
921 949 """
922 950 try:
923 951 self[changeid]
924 952 return True
925 953 except error.RepoLookupError:
926 954 return False
927 955
928 956 def __nonzero__(self):
929 957 return True
930 958
931 959 __bool__ = __nonzero__
932 960
933 961 def __len__(self):
934 962 # no need to pay the cost of repoview.changelog
935 963 unfi = self.unfiltered()
936 964 return len(unfi.changelog)
937 965
938 966 def __iter__(self):
939 967 return iter(self.changelog)
940 968
941 969 def revs(self, expr, *args):
942 970 '''Find revisions matching a revset.
943 971
944 972 The revset is specified as a string ``expr`` that may contain
945 973 %-formatting to escape certain types. See ``revsetlang.formatspec``.
946 974
947 975 Revset aliases from the configuration are not expanded. To expand
948 976 user aliases, consider calling ``scmutil.revrange()`` or
949 977 ``repo.anyrevs([expr], user=True)``.
950 978
951 979 Returns a revset.abstractsmartset, which is a list-like interface
952 980 that contains integer revisions.
953 981 '''
954 982 expr = revsetlang.formatspec(expr, *args)
955 983 m = revset.match(None, expr)
956 984 return m(self)
957 985
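
For illustration, the %-formatting mentioned above lets callers splice values into a revset safely; a small sketch ('%d' interpolates an integer revision, the revision number here is hypothetical):

    for rev in repo.revs(b'draft() and ancestors(%d)', 42):
        print(repo[rev].hex())
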
958 986 def set(self, expr, *args):
959 987 '''Find revisions matching a revset and emit changectx instances.
960 988
961 989 This is a convenience wrapper around ``revs()`` that iterates the
962 990 result and is a generator of changectx instances.
963 991
964 992 Revset aliases from the configuration are not expanded. To expand
965 993 user aliases, consider calling ``scmutil.revrange()``.
966 994 '''
967 995 for r in self.revs(expr, *args):
968 996 yield self[r]
969 997
970 998 def anyrevs(self, specs, user=False, localalias=None):
971 999 '''Find revisions matching one of the given revsets.
972 1000
973 1001 Revset aliases from the configuration are not expanded by default. To
974 1002 expand user aliases, specify ``user=True``. To provide some local
975 1003 definitions overriding user aliases, set ``localalias`` to
976 1004 ``{name: definitionstring}``.
977 1005 '''
978 1006 if user:
979 1007 m = revset.matchany(self.ui, specs,
980 1008 lookup=revset.lookupfn(self),
981 1009 localalias=localalias)
982 1010 else:
983 1011 m = revset.matchany(None, specs, localalias=localalias)
984 1012 return m(self)
985 1013
986 1014 def url(self):
987 1015 return 'file:' + self.root
988 1016
989 1017 def hook(self, name, throw=False, **args):
990 1018 """Call a hook, passing this repo instance.
991 1019
992 1020         This is a convenience method to aid invoking hooks. Extensions likely
993 1021 won't call this unless they have registered a custom hook or are
994 1022 replacing code that is expected to call a hook.
995 1023 """
996 1024 return hook.hook(self.ui, self, name, throw, **args)
997 1025
998 1026 @filteredpropertycache
999 1027 def _tagscache(self):
1000 1028 '''Returns a tagscache object that contains various tags related
1001 1029 caches.'''
1002 1030
1003 1031 # This simplifies its cache management by having one decorated
1004 1032 # function (this one) and the rest simply fetch things from it.
1005 1033 class tagscache(object):
1006 1034 def __init__(self):
1007 1035 # These two define the set of tags for this repository. tags
1008 1036 # maps tag name to node; tagtypes maps tag name to 'global' or
1009 1037 # 'local'. (Global tags are defined by .hgtags across all
1010 1038 # heads, and local tags are defined in .hg/localtags.)
1011 1039 # They constitute the in-memory cache of tags.
1012 1040 self.tags = self.tagtypes = None
1013 1041
1014 1042 self.nodetagscache = self.tagslist = None
1015 1043
1016 1044 cache = tagscache()
1017 1045 cache.tags, cache.tagtypes = self._findtags()
1018 1046
1019 1047 return cache
1020 1048
1021 1049 def tags(self):
1022 1050 '''return a mapping of tag to node'''
1023 1051 t = {}
1024 1052 if self.changelog.filteredrevs:
1025 1053 tags, tt = self._findtags()
1026 1054 else:
1027 1055 tags = self._tagscache.tags
1028 1056 for k, v in tags.iteritems():
1029 1057 try:
1030 1058 # ignore tags to unknown nodes
1031 1059 self.changelog.rev(v)
1032 1060 t[k] = v
1033 1061 except (error.LookupError, ValueError):
1034 1062 pass
1035 1063 return t
1036 1064
1037 1065 def _findtags(self):
1038 1066 '''Do the hard work of finding tags. Return a pair of dicts
1039 1067 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1040 1068 maps tag name to a string like \'global\' or \'local\'.
1041 1069 Subclasses or extensions are free to add their own tags, but
1042 1070 should be aware that the returned dicts will be retained for the
1043 1071 duration of the localrepo object.'''
1044 1072
1045 1073 # XXX what tagtype should subclasses/extensions use? Currently
1046 1074 # mq and bookmarks add tags, but do not set the tagtype at all.
1047 1075 # Should each extension invent its own tag type? Should there
1048 1076 # be one tagtype for all such "virtual" tags? Or is the status
1049 1077 # quo fine?
1050 1078
1051 1079
1052 1080 # map tag name to (node, hist)
1053 1081 alltags = tagsmod.findglobaltags(self.ui, self)
1054 1082 # map tag name to tag type
1055 1083 tagtypes = dict((tag, 'global') for tag in alltags)
1056 1084
1057 1085 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1058 1086
1059 1087 # Build the return dicts. Have to re-encode tag names because
1060 1088 # the tags module always uses UTF-8 (in order not to lose info
1061 1089 # writing to the cache), but the rest of Mercurial wants them in
1062 1090 # local encoding.
1063 1091 tags = {}
1064 1092 for (name, (node, hist)) in alltags.iteritems():
1065 1093 if node != nullid:
1066 1094 tags[encoding.tolocal(name)] = node
1067 1095 tags['tip'] = self.changelog.tip()
1068 1096 tagtypes = dict([(encoding.tolocal(name), value)
1069 1097 for (name, value) in tagtypes.iteritems()])
1070 1098 return (tags, tagtypes)
1071 1099
1072 1100 def tagtype(self, tagname):
1073 1101 '''
1074 1102 return the type of the given tag. result can be:
1075 1103
1076 1104 'local' : a local tag
1077 1105 'global' : a global tag
1078 1106 None : tag does not exist
1079 1107 '''
1080 1108
1081 1109 return self._tagscache.tagtypes.get(tagname)
1082 1110
1083 1111 def tagslist(self):
1084 1112 '''return a list of tags ordered by revision'''
1085 1113 if not self._tagscache.tagslist:
1086 1114 l = []
1087 1115 for t, n in self.tags().iteritems():
1088 1116 l.append((self.changelog.rev(n), t, n))
1089 1117 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1090 1118
1091 1119 return self._tagscache.tagslist
1092 1120
1093 1121 def nodetags(self, node):
1094 1122 '''return the tags associated with a node'''
1095 1123 if not self._tagscache.nodetagscache:
1096 1124 nodetagscache = {}
1097 1125 for t, n in self._tagscache.tags.iteritems():
1098 1126 nodetagscache.setdefault(n, []).append(t)
1099 1127 for tags in nodetagscache.itervalues():
1100 1128 tags.sort()
1101 1129 self._tagscache.nodetagscache = nodetagscache
1102 1130 return self._tagscache.nodetagscache.get(node, [])
1103 1131
1104 1132 def nodebookmarks(self, node):
1105 1133 """return the list of bookmarks pointing to the specified node"""
1106 1134 return self._bookmarks.names(node)
1107 1135
1108 1136 def branchmap(self):
1109 1137 '''returns a dictionary {branch: [branchheads]} with branchheads
1110 1138 ordered by increasing revision number'''
1111 1139 branchmap.updatecache(self)
1112 1140 return self._branchcaches[self.filtername]
1113 1141
1114 1142 @unfilteredmethod
1115 1143 def revbranchcache(self):
1116 1144 if not self._revbranchcache:
1117 1145 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1118 1146 return self._revbranchcache
1119 1147
1120 1148 def branchtip(self, branch, ignoremissing=False):
1121 1149 '''return the tip node for a given branch
1122 1150
1123 1151 If ignoremissing is True, then this method will not raise an error.
1124 1152 This is helpful for callers that only expect None for a missing branch
1125 1153 (e.g. namespace).
1126 1154
1127 1155 '''
1128 1156 try:
1129 1157 return self.branchmap().branchtip(branch)
1130 1158 except KeyError:
1131 1159 if not ignoremissing:
1132 1160 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1133 1161 else:
1134 1162 pass
1135 1163
1136 1164 def lookup(self, key):
1137 1165 return scmutil.revsymbol(self, key).node()
1138 1166
1139 1167 def lookupbranch(self, key):
1140 1168 if key in self.branchmap():
1141 1169 return key
1142 1170
1143 1171 return scmutil.revsymbol(self, key).branch()
1144 1172
1145 1173 def known(self, nodes):
1146 1174 cl = self.changelog
1147 1175 nm = cl.nodemap
1148 1176 filtered = cl.filteredrevs
1149 1177 result = []
1150 1178 for n in nodes:
1151 1179 r = nm.get(n)
1152 1180 resp = not (r is None or r in filtered)
1153 1181 result.append(resp)
1154 1182 return result
1155 1183
1156 1184 def local(self):
1157 1185 return self
1158 1186
1159 1187 def publishing(self):
1160 1188 # it's safe (and desirable) to trust the publish flag unconditionally
1161 1189 # so that we don't finalize changes shared between users via ssh or nfs
1162 1190 return self.ui.configbool('phases', 'publish', untrusted=True)
1163 1191
1164 1192 def cancopy(self):
1165 1193 # so statichttprepo's override of local() works
1166 1194 if not self.local():
1167 1195 return False
1168 1196 if not self.publishing():
1169 1197 return True
1170 1198 # if publishing we can't copy if there is filtered content
1171 1199 return not self.filtered('visible').changelog.filteredrevs
1172 1200
1173 1201 def shared(self):
1174 1202 '''the type of shared repository (None if not shared)'''
1175 1203 if self.sharedpath != self.path:
1176 1204 return 'store'
1177 1205 return None
1178 1206
1179 1207 def wjoin(self, f, *insidef):
1180 1208 return self.vfs.reljoin(self.root, f, *insidef)
1181 1209
1182 1210 def file(self, f):
1183 1211 if f[0] == '/':
1184 1212 f = f[1:]
1185 1213 return filelog.filelog(self.svfs, f)
1186 1214
1187 1215 def setparents(self, p1, p2=nullid):
1188 1216 with self.dirstate.parentchange():
1189 1217 copies = self.dirstate.setparents(p1, p2)
1190 1218 pctx = self[p1]
1191 1219 if copies:
1192 1220                 # Adjust copy records; the dirstate cannot do it, as it
1193 1221                 # requires access to the parents' manifests. Preserve them
1194 1222                 # only for entries added to the first parent.
1195 1223 for f in copies:
1196 1224 if f not in pctx and copies[f] in pctx:
1197 1225 self.dirstate.copy(copies[f], f)
1198 1226 if p2 == nullid:
1199 1227 for f, s in sorted(self.dirstate.copies().items()):
1200 1228 if f not in pctx and s not in pctx:
1201 1229 self.dirstate.copy(None, f)
1202 1230
1203 1231 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1204 1232 """changeid can be a changeset revision, node, or tag.
1205 1233 fileid can be a file revision or node."""
1206 1234 return context.filectx(self, path, changeid, fileid,
1207 1235 changectx=changectx)
1208 1236
1209 1237 def getcwd(self):
1210 1238 return self.dirstate.getcwd()
1211 1239
1212 1240 def pathto(self, f, cwd=None):
1213 1241 return self.dirstate.pathto(f, cwd)
1214 1242
1215 1243 def _loadfilter(self, filter):
1216 1244 if filter not in self._filterpats:
1217 1245 l = []
1218 1246 for pat, cmd in self.ui.configitems(filter):
1219 1247 if cmd == '!':
1220 1248 continue
1221 1249 mf = matchmod.match(self.root, '', [pat])
1222 1250 fn = None
1223 1251 params = cmd
1224 1252 for name, filterfn in self._datafilters.iteritems():
1225 1253 if cmd.startswith(name):
1226 1254 fn = filterfn
1227 1255 params = cmd[len(name):].lstrip()
1228 1256 break
1229 1257 if not fn:
1230 1258 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1231 1259 # Wrap old filters not supporting keyword arguments
1232 1260 if not pycompat.getargspec(fn)[2]:
1233 1261 oldfn = fn
1234 1262 fn = lambda s, c, **kwargs: oldfn(s, c)
1235 1263 l.append((mf, fn, params))
1236 1264 self._filterpats[filter] = l
1237 1265 return self._filterpats[filter]
1238 1266
1239 1267 def _filter(self, filterpats, filename, data):
1240 1268 for mf, fn, cmd in filterpats:
1241 1269 if mf(filename):
1242 1270 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1243 1271 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1244 1272 break
1245 1273
1246 1274 return data
1247 1275
1248 1276 @unfilteredpropertycache
1249 1277 def _encodefilterpats(self):
1250 1278 return self._loadfilter('encode')
1251 1279
1252 1280 @unfilteredpropertycache
1253 1281 def _decodefilterpats(self):
1254 1282 return self._loadfilter('decode')
1255 1283
1256 1284 def adddatafilter(self, name, filter):
1257 1285 self._datafilters[name] = filter
1258 1286
1259 1287 def wread(self, filename):
1260 1288 if self.wvfs.islink(filename):
1261 1289 data = self.wvfs.readlink(filename)
1262 1290 else:
1263 1291 data = self.wvfs.read(filename)
1264 1292 return self._filter(self._encodefilterpats, filename, data)
1265 1293
1266 1294 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1267 1295 """write ``data`` into ``filename`` in the working directory
1268 1296
1269 1297 This returns length of written (maybe decoded) data.
1270 1298 """
1271 1299 data = self._filter(self._decodefilterpats, filename, data)
1272 1300 if 'l' in flags:
1273 1301 self.wvfs.symlink(data, filename)
1274 1302 else:
1275 1303 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1276 1304 **kwargs)
1277 1305 if 'x' in flags:
1278 1306 self.wvfs.setflags(filename, False, True)
1279 1307 else:
1280 1308 self.wvfs.setflags(filename, False, False)
1281 1309 return len(data)
1282 1310
1283 1311 def wwritedata(self, filename, data):
1284 1312 return self._filter(self._decodefilterpats, filename, data)
1285 1313
1286 1314 def currenttransaction(self):
1287 1315         """return the current transaction or None if none exists"""
1288 1316 if self._transref:
1289 1317 tr = self._transref()
1290 1318 else:
1291 1319 tr = None
1292 1320
1293 1321 if tr and tr.running():
1294 1322 return tr
1295 1323 return None
1296 1324
1297 1325 def transaction(self, desc, report=None):
1298 1326 if (self.ui.configbool('devel', 'all-warnings')
1299 1327 or self.ui.configbool('devel', 'check-locks')):
1300 1328 if self._currentlock(self._lockref) is None:
1301 1329 raise error.ProgrammingError('transaction requires locking')
1302 1330 tr = self.currenttransaction()
1303 1331 if tr is not None:
1304 1332 return tr.nest(name=desc)
1305 1333
1306 1334 # abort here if the journal already exists
1307 1335 if self.svfs.exists("journal"):
1308 1336 raise error.RepoError(
1309 1337 _("abandoned transaction found"),
1310 1338 hint=_("run 'hg recover' to clean up transaction"))
1311 1339
1312 1340 idbase = "%.40f#%f" % (random.random(), time.time())
1313 1341 ha = hex(hashlib.sha1(idbase).digest())
1314 1342 txnid = 'TXN:' + ha
1315 1343 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1316 1344
1317 1345 self._writejournal(desc)
1318 1346 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1319 1347 if report:
1320 1348 rp = report
1321 1349 else:
1322 1350 rp = self.ui.warn
1323 1351 vfsmap = {'plain': self.vfs} # root of .hg/
1324 1352 # we must avoid cyclic reference between repo and transaction.
1325 1353 reporef = weakref.ref(self)
1326 1354 # Code to track tag movement
1327 1355 #
1328 1356         # Since tags are all handled as file content, it is actually quite hard
1329 1357         # to track their movement from a code perspective. So we fall back to
1330 1358         # tracking at the repository level. One could envision tracking changes
1331 1359         # to the '.hgtags' file through changegroup apply, but that fails to
1332 1360         # cope with cases where a transaction exposes new heads without a
1333 1361         # changegroup being involved (eg: phase movement).
1334 1362         #
1335 1363         # For now, we gate the feature behind a flag since it likely comes
1336 1364         # with performance impacts. The current code runs more often than needed
1337 1365         # and does not use caches as much as it could. The current focus is on
1338 1366         # the behavior of the feature, so we disable it by default. The flag
1339 1367         # will be removed when we are happy with the performance impact.
1340 1368 #
1341 1369 # Once this feature is no longer experimental move the following
1342 1370 # documentation to the appropriate help section:
1343 1371 #
1344 1372 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1345 1373 # tags (new or changed or deleted tags). In addition the details of
1346 1374 # these changes are made available in a file at:
1347 1375 # ``REPOROOT/.hg/changes/tags.changes``.
1348 1376 # Make sure you check for HG_TAG_MOVED before reading that file as it
1349 1377         # might exist from a previous transaction even if no tags were touched
1350 1378         # in this one. Changes are recorded in a line-based format::
1351 1379 #
1352 1380 # <action> <hex-node> <tag-name>\n
1353 1381 #
1354 1382         # Actions are defined as follows:
1355 1383 # "-R": tag is removed,
1356 1384 # "+A": tag is added,
1357 1385 # "-M": tag is moved (old value),
1358 1386 # "+M": tag is moved (new value),
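
Given that documented format, a consumer (for example a txnclose hook) could parse the file along these lines; a sketch assuming only the line format described above:

    def parsetagmoves(data):
        '''Parse tags.changes content: '<action> <hex-node> <tag-name>' lines.'''
        moves = []
        for line in data.splitlines():
            # maxsplit=2 keeps tag names containing spaces intact.
            action, hexnode, tag = line.split(b' ', 2)
            assert action in (b'-R', b'+A', b'-M', b'+M')
            moves.append((action, hexnode, tag))
        return moves
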
1359 1387 tracktags = lambda x: None
1360 1388 # experimental config: experimental.hook-track-tags
1361 1389 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1362 1390 if desc != 'strip' and shouldtracktags:
1363 1391 oldheads = self.changelog.headrevs()
1364 1392 def tracktags(tr2):
1365 1393 repo = reporef()
1366 1394 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1367 1395 newheads = repo.changelog.headrevs()
1368 1396 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1369 1397 # notes: we compare lists here.
1370 1398             # As we do it only once, building a set would not be cheaper
1371 1399 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1372 1400 if changes:
1373 1401 tr2.hookargs['tag_moved'] = '1'
1374 1402 with repo.vfs('changes/tags.changes', 'w',
1375 1403 atomictemp=True) as changesfile:
1376 1404                     # note: we do not register the file with the transaction
1377 1405                     # because we need it to still exist when the transaction
1378 1406                     # is closed (for txnclose hooks)
1379 1407 tagsmod.writediff(changesfile, changes)
1380 1408 def validate(tr2):
1381 1409 """will run pre-closing hooks"""
1382 1410 # XXX the transaction API is a bit lacking here so we take a hacky
1383 1411 # path for now
1384 1412 #
1385 1413             # We cannot add this as a "pending" hook since the 'tr.hookargs'
1386 1414             # dict is copied before these run. In addition we need the data
1387 1415             # available to in-memory hooks too.
1388 1416 #
1389 1417 # Moreover, we also need to make sure this runs before txnclose
1390 1418 # hooks and there is no "pending" mechanism that would execute
1391 1419 # logic only if hooks are about to run.
1392 1420 #
1393 1421 # Fixing this limitation of the transaction is also needed to track
1394 1422 # other families of changes (bookmarks, phases, obsolescence).
1395 1423 #
1396 1424 # This will have to be fixed before we remove the experimental
1397 1425 # gating.
1398 1426 tracktags(tr2)
1399 1427 repo = reporef()
1400 1428 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1401 1429 scmutil.enforcesinglehead(repo, tr2, desc)
1402 1430 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1403 1431 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1404 1432 args = tr.hookargs.copy()
1405 1433 args.update(bookmarks.preparehookargs(name, old, new))
1406 1434 repo.hook('pretxnclose-bookmark', throw=True,
1407 1435 txnname=desc,
1408 1436 **pycompat.strkwargs(args))
1409 1437 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1410 1438 cl = repo.unfiltered().changelog
1411 1439 for rev, (old, new) in tr.changes['phases'].items():
1412 1440 args = tr.hookargs.copy()
1413 1441 node = hex(cl.node(rev))
1414 1442 args.update(phases.preparehookargs(node, old, new))
1415 1443 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1416 1444 **pycompat.strkwargs(args))
1417 1445
1418 1446 repo.hook('pretxnclose', throw=True,
1419 1447 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1420 1448 def releasefn(tr, success):
1421 1449 repo = reporef()
1422 1450 if success:
1423 1451 # this should be explicitly invoked here, because
1424 1452                 # in-memory changes aren't written out when closing
1425 1453                 # the transaction, if tr.addfilegenerator (via
1426 1454                 # dirstate.write or so) isn't invoked while the
1427 1455                 # transaction is running
1428 1456 repo.dirstate.write(None)
1429 1457 else:
1430 1458 # discard all changes (including ones already written
1431 1459 # out) in this transaction
1432 1460 narrowspec.restorebackup(self, 'journal.narrowspec')
1433 1461 repo.dirstate.restorebackup(None, 'journal.dirstate')
1434 1462
1435 1463 repo.invalidate(clearfilecache=True)
1436 1464
1437 1465 tr = transaction.transaction(rp, self.svfs, vfsmap,
1438 1466 "journal",
1439 1467 "undo",
1440 1468 aftertrans(renames),
1441 1469 self.store.createmode,
1442 1470 validator=validate,
1443 1471 releasefn=releasefn,
1444 1472 checkambigfiles=_cachedfiles,
1445 1473 name=desc)
1446 1474 tr.changes['origrepolen'] = len(self)
1447 1475 tr.changes['obsmarkers'] = set()
1448 1476 tr.changes['phases'] = {}
1449 1477 tr.changes['bookmarks'] = {}
1450 1478
1451 1479 tr.hookargs['txnid'] = txnid
1452 1480         # note: writing the fncache only during finalize means that the file is
1453 1481         # outdated when running hooks. As fncache is used for streaming clones,
1454 1482         # this is not expected to break anything that happens during the hooks.
1455 1483 tr.addfinalize('flush-fncache', self.store.write)
1456 1484 def txnclosehook(tr2):
1457 1485 """To be run if transaction is successful, will schedule a hook run
1458 1486 """
1459 1487 # Don't reference tr2 in hook() so we don't hold a reference.
1460 1488 # This reduces memory consumption when there are multiple
1461 1489 # transactions per lock. This can likely go away if issue5045
1462 1490 # fixes the function accumulation.
1463 1491 hookargs = tr2.hookargs
1464 1492
1465 1493 def hookfunc():
1466 1494 repo = reporef()
1467 1495 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1468 1496 bmchanges = sorted(tr.changes['bookmarks'].items())
1469 1497 for name, (old, new) in bmchanges:
1470 1498 args = tr.hookargs.copy()
1471 1499 args.update(bookmarks.preparehookargs(name, old, new))
1472 1500 repo.hook('txnclose-bookmark', throw=False,
1473 1501 txnname=desc, **pycompat.strkwargs(args))
1474 1502
1475 1503 if hook.hashook(repo.ui, 'txnclose-phase'):
1476 1504 cl = repo.unfiltered().changelog
1477 1505 phasemv = sorted(tr.changes['phases'].items())
1478 1506 for rev, (old, new) in phasemv:
1479 1507 args = tr.hookargs.copy()
1480 1508 node = hex(cl.node(rev))
1481 1509 args.update(phases.preparehookargs(node, old, new))
1482 1510 repo.hook('txnclose-phase', throw=False, txnname=desc,
1483 1511 **pycompat.strkwargs(args))
1484 1512
1485 1513 repo.hook('txnclose', throw=False, txnname=desc,
1486 1514 **pycompat.strkwargs(hookargs))
1487 1515 reporef()._afterlock(hookfunc)
1488 1516 tr.addfinalize('txnclose-hook', txnclosehook)
1489 1517 # Include a leading "-" to make it happen before the transaction summary
1490 1518 # reports registered via scmutil.registersummarycallback() whose names
1491 1519 # are 00-txnreport etc. That way, the caches will be warm when the
1492 1520 # callbacks run.
1493 1521 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1494 1522 def txnaborthook(tr2):
1495 1523 """To be run if transaction is aborted
1496 1524 """
1497 1525 reporef().hook('txnabort', throw=False, txnname=desc,
1498 1526 **pycompat.strkwargs(tr2.hookargs))
1499 1527 tr.addabort('txnabort-hook', txnaborthook)
1500 1528 # avoid eager cache invalidation. in-memory data should be identical
1501 1529 # to stored data if transaction has no error.
1502 1530 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1503 1531 self._transref = weakref.ref(tr)
1504 1532 scmutil.registersummarycallback(self, tr, desc)
1505 1533 return tr
1506 1534
1507 1535 def _journalfiles(self):
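        """Return (vfs, name) pairs for the files making up the
        transaction journal."""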
1508 1536 return ((self.svfs, 'journal'),
1509 1537 (self.vfs, 'journal.dirstate'),
1510 1538 (self.vfs, 'journal.branch'),
1511 1539 (self.vfs, 'journal.desc'),
1512 1540 (self.vfs, 'journal.bookmarks'),
1513 1541 (self.svfs, 'journal.phaseroots'))
1514 1542
1515 1543 def undofiles(self):
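        """Return (vfs, name) pairs for the undo counterparts of the
        journal files (journal.* mapped to undo.* via undoname())."""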
1516 1544 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1517 1545
1518 1546 @unfilteredmethod
1519 1547 def _writejournal(self, desc):
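        """Back up dirstate, narrowspec, branch, bookmarks and phaseroots
        to their journal.* counterparts, and record the current repo
        length and 'desc' in journal.desc."""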
1520 1548 self.dirstate.savebackup(None, 'journal.dirstate')
1521 1549 narrowspec.savebackup(self, 'journal.narrowspec')
1522 1550 self.vfs.write("journal.branch",
1523 1551 encoding.fromlocal(self.dirstate.branch()))
1524 1552 self.vfs.write("journal.desc",
1525 1553 "%d\n%s\n" % (len(self), desc))
1526 1554 self.vfs.write("journal.bookmarks",
1527 1555 self.vfs.tryread("bookmarks"))
1528 1556 self.svfs.write("journal.phaseroots",
1529 1557 self.svfs.tryread("phaseroots"))
1530 1558
1531 1559 def recover(self):
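        '''Roll back an interrupted transaction, if any.

        Returns True if a journal was found and rolled back, False
        otherwise.'''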
1532 1560 with self.lock():
1533 1561 if self.svfs.exists("journal"):
1534 1562 self.ui.status(_("rolling back interrupted transaction\n"))
1535 1563 vfsmap = {'': self.svfs,
1536 1564 'plain': self.vfs,}
1537 1565 transaction.rollback(self.svfs, vfsmap, "journal",
1538 1566 self.ui.warn,
1539 1567 checkambigfiles=_cachedfiles)
1540 1568 self.invalidate()
1541 1569 return True
1542 1570 else:
1543 1571 self.ui.warn(_("no interrupted transaction available\n"))
1544 1572 return False
1545 1573
1546 1574 def rollback(self, dryrun=False, force=False):
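        '''Undo the effects of the last transaction.

        With 'dryrun', only report what would be undone. With 'force',
        roll back even when doing so may lose data (e.g. rolling back a
        commit while a different revision is checked out). Returns 0 on
        success and 1 when no rollback information is available.'''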
1547 1575 wlock = lock = dsguard = None
1548 1576 try:
1549 1577 wlock = self.wlock()
1550 1578 lock = self.lock()
1551 1579 if self.svfs.exists("undo"):
1552 1580 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1553 1581
1554 1582 return self._rollback(dryrun, force, dsguard)
1555 1583 else:
1556 1584 self.ui.warn(_("no rollback information available\n"))
1557 1585 return 1
1558 1586 finally:
1559 1587 release(dsguard, lock, wlock)
1560 1588
1561 1589 @unfilteredmethod # Until we get smarter cache management
1562 1590 def _rollback(self, dryrun, force, dsguard):
1563 1591 ui = self.ui
1564 1592 try:
1565 1593 args = self.vfs.read('undo.desc').splitlines()
1566 1594 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1567 1595 if len(args) >= 3:
1568 1596 detail = args[2]
1569 1597 oldtip = oldlen - 1
1570 1598
1571 1599 if detail and ui.verbose:
1572 1600 msg = (_('repository tip rolled back to revision %d'
1573 1601 ' (undo %s: %s)\n')
1574 1602 % (oldtip, desc, detail))
1575 1603 else:
1576 1604 msg = (_('repository tip rolled back to revision %d'
1577 1605 ' (undo %s)\n')
1578 1606 % (oldtip, desc))
1579 1607 except IOError:
1580 1608 msg = _('rolling back unknown transaction\n')
1581 1609 desc = None
1582 1610
1583 1611 if not force and self['.'] != self['tip'] and desc == 'commit':
1584 1612 raise error.Abort(
1585 1613 _('rollback of last commit while not checked out '
1586 1614 'may lose data'), hint=_('use -f to force'))
1587 1615
1588 1616 ui.status(msg)
1589 1617 if dryrun:
1590 1618 return 0
1591 1619
1592 1620 parents = self.dirstate.parents()
1593 1621 self.destroying()
1594 1622 vfsmap = {'plain': self.vfs, '': self.svfs}
1595 1623 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1596 1624 checkambigfiles=_cachedfiles)
1597 1625 if self.vfs.exists('undo.bookmarks'):
1598 1626 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1599 1627 if self.svfs.exists('undo.phaseroots'):
1600 1628 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1601 1629 self.invalidate()
1602 1630
1603 1631 parentgone = (parents[0] not in self.changelog.nodemap or
1604 1632 parents[1] not in self.changelog.nodemap)
1605 1633 if parentgone:
1606 1634 # prevent dirstateguard from overwriting already restored one
1607 1635 dsguard.close()
1608 1636
1609 1637 narrowspec.restorebackup(self, 'undo.narrowspec')
1610 1638 self.dirstate.restorebackup(None, 'undo.dirstate')
1611 1639 try:
1612 1640 branch = self.vfs.read('undo.branch')
1613 1641 self.dirstate.setbranch(encoding.tolocal(branch))
1614 1642 except IOError:
1615 1643 ui.warn(_('named branch could not be reset: '
1616 1644 'current branch is still \'%s\'\n')
1617 1645 % self.dirstate.branch())
1618 1646
1619 1647 parents = tuple([p.rev() for p in self[None].parents()])
1620 1648 if len(parents) > 1:
1621 1649 ui.status(_('working directory now based on '
1622 1650 'revisions %d and %d\n') % parents)
1623 1651 else:
1624 1652 ui.status(_('working directory now based on '
1625 1653 'revision %d\n') % parents)
1626 1654 mergemod.mergestate.clean(self, self['.'].node())
1627 1655
1628 1656 # TODO: if we know which new heads may result from this rollback, pass
1629 1657 # them to destroy(), which will prevent the branchhead cache from being
1630 1658 # invalidated.
1631 1659 self.destroyed()
1632 1660 return 0
1633 1661
1634 1662 def _buildcacheupdater(self, newtransaction):
1635 1663 """called during transaction to build the callback updating cache
1636 1664
1637 1665 Lives on the repository to help extensions that might want to augment
1638 1666 this logic. For this purpose, the created transaction is passed to the
1639 1667 method.
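
        A minimal sketch of an extension wrapping this method (the
        'warmmycache' helper is hypothetical)::

            def wrapped(orig, self, newtransaction):
                updater = orig(self, newtransaction)
                def extendedupdater(tr):
                    updater(tr)
                    warmmycache(self, tr)  # hypothetical extra warming
                return extendedupdater
            extensions.wrapfunction(localrepo.localrepository,
                                    '_buildcacheupdater', wrapped)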
1640 1668 """
1641 1669 # we must avoid cyclic reference between repo and transaction.
1642 1670 reporef = weakref.ref(self)
1643 1671 def updater(tr):
1644 1672 repo = reporef()
1645 1673 repo.updatecaches(tr)
1646 1674 return updater
1647 1675
1648 1676 @unfilteredmethod
1649 1677 def updatecaches(self, tr=None, full=False):
1650 1678 """warm appropriate caches
1651 1679
1652 1680 If this function is called after a transaction closed, the transaction
1653 1681 will be available in the 'tr' argument. This can be used to selectively
1654 1682 update caches relevant to the changes in that transaction.
1655 1683
1656 1684 If 'full' is set, make sure all caches the function knows about have
1657 1685 up-to-date data, even the ones usually loaded more lazily.
1658 1686 """
1659 1687 if tr is not None and tr.hookargs.get('source') == 'strip':
1660 1688 # During strip, many caches are invalid but
1661 1689 # later call to `destroyed` will refresh them.
1662 1690 return
1663 1691
1664 1692 if tr is None or tr.changes['origrepolen'] < len(self):
1665 1693 # updating the unfiltered branchmap should refresh all the others,
1666 1694 self.ui.debug('updating the branch cache\n')
1667 1695 branchmap.updatecache(self.filtered('served'))
1668 1696
1669 1697 if full:
1670 1698 rbc = self.revbranchcache()
1671 1699 for r in self.changelog:
1672 1700 rbc.branchinfo(r)
1673 1701 rbc.write()
1674 1702
1675 1703 # ensure the working copy parents are in the manifestfulltextcache
1676 1704 for ctx in self['.'].parents():
1677 1705 ctx.manifest() # accessing the manifest is enough
1678 1706
1679 1707 def invalidatecaches(self):
1680 1708
1681 1709 if '_tagscache' in vars(self):
1682 1710 # can't use delattr on proxy
1683 1711 del self.__dict__['_tagscache']
1684 1712
1685 1713 self.unfiltered()._branchcaches.clear()
1686 1714 self.invalidatevolatilesets()
1687 1715 self._sparsesignaturecache.clear()
1688 1716
1689 1717 def invalidatevolatilesets(self):
1690 1718 self.filteredrevcache.clear()
1691 1719 obsolete.clearobscaches(self)
1692 1720
1693 1721 def invalidatedirstate(self):
1694 1722 '''Invalidates the dirstate, causing the next call to dirstate
1695 1723 to check if it was modified since the last time it was read,
1696 1724 rereading it if it has.
1697 1725
1698 1726 This is different from dirstate.invalidate() in that it doesn't always
1699 1727 reread the dirstate. Use dirstate.invalidate() if you want to
1700 1728 explicitly read the dirstate again (i.e. restoring it to a previous
1701 1729 known good state).'''
1702 1730 if hasunfilteredcache(self, 'dirstate'):
1703 1731 for k in self.dirstate._filecache:
1704 1732 try:
1705 1733 delattr(self.dirstate, k)
1706 1734 except AttributeError:
1707 1735 pass
1708 1736 delattr(self.unfiltered(), 'dirstate')
1709 1737
1710 1738 def invalidate(self, clearfilecache=False):
1711 1739 '''Invalidates both store and non-store parts other than dirstate
1712 1740
1713 1741 If a transaction is running, invalidation of store is omitted,
1714 1742 because discarding in-memory changes might cause inconsistency
1715 1743 (e.g. incomplete fncache causes unintentional failure, but
1716 1744 redundant one doesn't).
1717 1745 '''
1718 1746 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1719 1747 for k in list(self._filecache.keys()):
1720 1748 # dirstate is invalidated separately in invalidatedirstate()
1721 1749 if k == 'dirstate':
1722 1750 continue
1723 1751 if (k == 'changelog' and
1724 1752 self.currenttransaction() and
1725 1753 self.changelog._delayed):
1726 1754 # The changelog object may store unwritten revisions. We don't
1727 1755 # want to lose them.
1728 1756 # TODO: Solve the problem instead of working around it.
1729 1757 continue
1730 1758
1731 1759 if clearfilecache:
1732 1760 del self._filecache[k]
1733 1761 try:
1734 1762 delattr(unfiltered, k)
1735 1763 except AttributeError:
1736 1764 pass
1737 1765 self.invalidatecaches()
1738 1766 if not self.currenttransaction():
1739 1767 # TODO: Changing contents of store outside transaction
1740 1768 # causes inconsistency. We should make in-memory store
1741 1769 # changes detectable, and abort if changed.
1742 1770 self.store.invalidatecaches()
1743 1771
1744 1772 def invalidateall(self):
1745 1773 '''Fully invalidates both store and non-store parts, causing the
1746 1774 subsequent operation to reread any outside changes.'''
1747 1775 # extension should hook this to invalidate its caches
1748 1776 self.invalidate()
1749 1777 self.invalidatedirstate()
1750 1778
1751 1779 @unfilteredmethod
1752 1780 def _refreshfilecachestats(self, tr):
1753 1781 """Reload stats of cached files so that they are flagged as valid"""
1754 1782 for k, ce in self._filecache.items():
1755 1783 k = pycompat.sysstr(k)
1756 1784 if k == r'dirstate' or k not in self.__dict__:
1757 1785 continue
1758 1786 ce.refresh()
1759 1787
1760 1788 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1761 1789 inheritchecker=None, parentenvvar=None):
1762 1790 parentlock = None
1763 1791 # the contents of parentenvvar are used by the underlying lock to
1764 1792 # determine whether it can be inherited
1765 1793 if parentenvvar is not None:
1766 1794 parentlock = encoding.environ.get(parentenvvar)
1767 1795
1768 1796 timeout = 0
1769 1797 warntimeout = 0
1770 1798 if wait:
1771 1799 timeout = self.ui.configint("ui", "timeout")
1772 1800 warntimeout = self.ui.configint("ui", "timeout.warn")
1773 1801 # internal config: ui.signal-safe-lock
1774 1802 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
1775 1803
1776 1804 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1777 1805 releasefn=releasefn,
1778 1806 acquirefn=acquirefn, desc=desc,
1779 1807 inheritchecker=inheritchecker,
1780 1808 parentlock=parentlock,
1781 1809 signalsafe=signalsafe)
1782 1810 return l
1783 1811
1784 1812 def _afterlock(self, callback):
1785 1813 """add a callback to be run when the repository is fully unlocked
1786 1814
1787 1815 The callback will be executed when the outermost lock is released
1788 1816 (with wlock being higher level than 'lock')."""
1789 1817 for ref in (self._wlockref, self._lockref):
1790 1818 l = ref and ref()
1791 1819 if l and l.held:
1792 1820 l.postrelease.append(callback)
1793 1821 break
1794 1822 else: # no lock has been found.
1795 1823 callback()
1796 1824
1797 1825 def lock(self, wait=True):
1798 1826 '''Lock the repository store (.hg/store) and return the lock. Use
1799 1827 this before modifying the store (e.g. committing or stripping). If
1800 1828 you are opening a transaction, get a lock as well.
1801 1829
1802 1830 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1803 1831 'wlock' first to avoid a dead-lock hazard.'''
1804 1832 l = self._currentlock(self._lockref)
1805 1833 if l is not None:
1806 1834 l.lock()
1807 1835 return l
1808 1836
1809 1837 l = self._lock(self.svfs, "lock", wait, None,
1810 1838 self.invalidate, _('repository %s') % self.origroot)
1811 1839 self._lockref = weakref.ref(l)
1812 1840 return l
1813 1841
1814 1842 def _wlockchecktransaction(self):
1815 1843 if self.currenttransaction() is not None:
1816 1844 raise error.LockInheritanceContractViolation(
1817 1845 'wlock cannot be inherited in the middle of a transaction')
1818 1846
1819 1847 def wlock(self, wait=True):
1820 1848 '''Lock the non-store parts of the repository (everything under
1821 1849 .hg except .hg/store) and return the lock.
1822 1850
1823 1851 Use this before modifying files in .hg.
1824 1852
1825 1853 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1826 1854 'wlock' first to avoid a dead-lock hazard.'''
1827 1855 l = self._wlockref and self._wlockref()
1828 1856 if l is not None and l.held:
1829 1857 l.lock()
1830 1858 return l
1831 1859
1832 1860 # We do not need to check for non-waiting lock acquisition. Such
1833 1861 # acquisition would not cause a dead-lock as it would just fail.
1834 1862 if wait and (self.ui.configbool('devel', 'all-warnings')
1835 1863 or self.ui.configbool('devel', 'check-locks')):
1836 1864 if self._currentlock(self._lockref) is not None:
1837 1865 self.ui.develwarn('"wlock" acquired after "lock"')
1838 1866
1839 1867 def unlock():
1840 1868 if self.dirstate.pendingparentchange():
1841 1869 self.dirstate.invalidate()
1842 1870 else:
1843 1871 self.dirstate.write(None)
1844 1872
1845 1873 self._filecache['dirstate'].refresh()
1846 1874
1847 1875 l = self._lock(self.vfs, "wlock", wait, unlock,
1848 1876 self.invalidatedirstate, _('working directory of %s') %
1849 1877 self.origroot,
1850 1878 inheritchecker=self._wlockchecktransaction,
1851 1879 parentenvvar='HG_WLOCK_LOCKER')
1852 1880 self._wlockref = weakref.ref(l)
1853 1881 return l
1854 1882
1855 1883 def _currentlock(self, lockref):
1856 1884 """Returns the lock if it's held, or None if it's not."""
1857 1885 if lockref is None:
1858 1886 return None
1859 1887 l = lockref()
1860 1888 if l is None or not l.held:
1861 1889 return None
1862 1890 return l
1863 1891
1864 1892 def currentwlock(self):
1865 1893 """Returns the wlock if it's held, or None if it's not."""
1866 1894 return self._currentlock(self._wlockref)
1867 1895
1868 1896 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1869 1897 """
1870 1898 commit an individual file as part of a larger transaction
1871 1899 """
1872 1900
1873 1901 fname = fctx.path()
1874 1902 fparent1 = manifest1.get(fname, nullid)
1875 1903 fparent2 = manifest2.get(fname, nullid)
1876 1904 if isinstance(fctx, context.filectx):
1877 1905 node = fctx.filenode()
1878 1906 if node in [fparent1, fparent2]:
1879 1907 self.ui.debug('reusing %s filelog entry\n' % fname)
1880 1908 if manifest1.flags(fname) != fctx.flags():
1881 1909 changelist.append(fname)
1882 1910 return node
1883 1911
1884 1912 flog = self.file(fname)
1885 1913 meta = {}
1886 1914 copy = fctx.renamed()
1887 1915 if copy and copy[0] != fname:
1888 1916 # Mark the new revision of this file as a copy of another
1889 1917 # file. This copy data will effectively act as a parent
1890 1918 # of this new revision. If this is a merge, the first
1891 1919 # parent will be the nullid (meaning "look up the copy data")
1892 1920 # and the second one will be the other parent. For example:
1893 1921 #
1894 1922 # 0 --- 1 --- 3 rev1 changes file foo
1895 1923 # \ / rev2 renames foo to bar and changes it
1896 1924 # \- 2 -/ rev3 should have bar with all changes and
1897 1925 # should record that bar descends from
1898 1926 # bar in rev2 and foo in rev1
1899 1927 #
1900 1928 # this allows this merge to succeed:
1901 1929 #
1902 1930 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1903 1931 # \ / merging rev3 and rev4 should use bar@rev2
1904 1932 # \- 2 --- 4 as the merge base
1905 1933 #
1906 1934
1907 1935 cfname = copy[0]
1908 1936 crev = manifest1.get(cfname)
1909 1937 newfparent = fparent2
1910 1938
1911 1939 if manifest2: # branch merge
1912 1940 if fparent2 == nullid or crev is None: # copied on remote side
1913 1941 if cfname in manifest2:
1914 1942 crev = manifest2[cfname]
1915 1943 newfparent = fparent1
1916 1944
1917 1945 # Here, we used to search backwards through history to try to find
1918 1946 # where the file copy came from if the source of a copy was not in
1919 1947 # the parent directory. However, this doesn't actually make sense to
1920 1948 # do (what does a copy from something not in your working copy even
1921 1949 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1922 1950 # the user that copy information was dropped, so if they didn't
1923 1951 # expect this outcome it can be fixed, but this is the correct
1924 1952 # behavior in this circumstance.
1925 1953
1926 1954 if crev:
1927 1955 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1928 1956 meta["copy"] = cfname
1929 1957 meta["copyrev"] = hex(crev)
1930 1958 fparent1, fparent2 = nullid, newfparent
1931 1959 else:
1932 1960 self.ui.warn(_("warning: can't find ancestor for '%s' "
1933 1961 "copied from '%s'!\n") % (fname, cfname))
1934 1962
1935 1963 elif fparent1 == nullid:
1936 1964 fparent1, fparent2 = fparent2, nullid
1937 1965 elif fparent2 != nullid:
1938 1966 # is one parent an ancestor of the other?
1939 1967 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1940 1968 if fparent1 in fparentancestors:
1941 1969 fparent1, fparent2 = fparent2, nullid
1942 1970 elif fparent2 in fparentancestors:
1943 1971 fparent2 = nullid
1944 1972
1945 1973 # is the file changed?
1946 1974 text = fctx.data()
1947 1975 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1948 1976 changelist.append(fname)
1949 1977 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1950 1978 # are just the flags changed during merge?
1951 1979 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1952 1980 changelist.append(fname)
1953 1981
1954 1982 return fparent1
1955 1983
1956 1984 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1957 1985 """check for commit arguments that aren't committable"""
1958 1986 if match.isexact() or match.prefix():
1959 1987 matched = set(status.modified + status.added + status.removed)
1960 1988
1961 1989 for f in match.files():
1962 1990 f = self.dirstate.normalize(f)
1963 1991 if f == '.' or f in matched or f in wctx.substate:
1964 1992 continue
1965 1993 if f in status.deleted:
1966 1994 fail(f, _('file not found!'))
1967 1995 if f in vdirs: # visited directory
1968 1996 d = f + '/'
1969 1997 for mf in matched:
1970 1998 if mf.startswith(d):
1971 1999 break
1972 2000 else:
1973 2001 fail(f, _("no match under directory!"))
1974 2002 elif f not in self.dirstate:
1975 2003 fail(f, _("file not tracked!"))
1976 2004
1977 2005 @unfilteredmethod
1978 2006 def commit(self, text="", user=None, date=None, match=None, force=False,
1979 2007 editor=False, extra=None):
1980 2008 """Add a new revision to current repository.
1981 2009
1982 2010 Revision information is gathered from the working directory,
1983 2011 match can be used to filter the committed files. If editor is
1984 2012 supplied, it is called to get a commit message.
1985 2013 """
1986 2014 if extra is None:
1987 2015 extra = {}
1988 2016
1989 2017 def fail(f, msg):
1990 2018 raise error.Abort('%s: %s' % (f, msg))
1991 2019
1992 2020 if not match:
1993 2021 match = matchmod.always(self.root, '')
1994 2022
1995 2023 if not force:
1996 2024 vdirs = []
1997 2025 match.explicitdir = vdirs.append
1998 2026 match.bad = fail
1999 2027
2000 2028 wlock = lock = tr = None
2001 2029 try:
2002 2030 wlock = self.wlock()
2003 2031 lock = self.lock() # for recent changelog (see issue4368)
2004 2032
2005 2033 wctx = self[None]
2006 2034 merge = len(wctx.parents()) > 1
2007 2035
2008 2036 if not force and merge and not match.always():
2009 2037 raise error.Abort(_('cannot partially commit a merge '
2010 2038 '(do not specify files or patterns)'))
2011 2039
2012 2040 status = self.status(match=match, clean=force)
2013 2041 if force:
2014 2042 status.modified.extend(status.clean) # mq may commit clean files
2015 2043
2016 2044 # check subrepos
2017 2045 subs, commitsubs, newstate = subrepoutil.precommit(
2018 2046 self.ui, wctx, status, match, force=force)
2019 2047
2020 2048 # make sure all explicit patterns are matched
2021 2049 if not force:
2022 2050 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
2023 2051
2024 2052 cctx = context.workingcommitctx(self, status,
2025 2053 text, user, date, extra)
2026 2054
2027 2055 # internal config: ui.allowemptycommit
2028 2056 allowemptycommit = (wctx.branch() != wctx.p1().branch()
2029 2057 or extra.get('close') or merge or cctx.files()
2030 2058 or self.ui.configbool('ui', 'allowemptycommit'))
2031 2059 if not allowemptycommit:
2032 2060 return None
2033 2061
2034 2062 if merge and cctx.deleted():
2035 2063 raise error.Abort(_("cannot commit merge with missing files"))
2036 2064
2037 2065 ms = mergemod.mergestate.read(self)
2038 2066 mergeutil.checkunresolved(ms)
2039 2067
2040 2068 if editor:
2041 2069 cctx._text = editor(self, cctx, subs)
2042 2070 edited = (text != cctx._text)
2043 2071
2044 2072 # Save commit message in case this transaction gets rolled back
2045 2073 # (e.g. by a pretxncommit hook). Leave the content alone on
2046 2074 # the assumption that the user will use the same editor again.
2047 2075 msgfn = self.savecommitmessage(cctx._text)
2048 2076
2049 2077 # commit subs and write new state
2050 2078 if subs:
2051 2079 for s in sorted(commitsubs):
2052 2080 sub = wctx.sub(s)
2053 2081 self.ui.status(_('committing subrepository %s\n') %
2054 2082 subrepoutil.subrelpath(sub))
2055 2083 sr = sub.commit(cctx._text, user, date)
2056 2084 newstate[s] = (newstate[s][0], sr)
2057 2085 subrepoutil.writestate(self, newstate)
2058 2086
2059 2087 p1, p2 = self.dirstate.parents()
2060 2088 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2061 2089 try:
2062 2090 self.hook("precommit", throw=True, parent1=hookp1,
2063 2091 parent2=hookp2)
2064 2092 tr = self.transaction('commit')
2065 2093 ret = self.commitctx(cctx, True)
2066 2094 except: # re-raises
2067 2095 if edited:
2068 2096 self.ui.write(
2069 2097 _('note: commit message saved in %s\n') % msgfn)
2070 2098 raise
2071 2099 # update bookmarks, dirstate and mergestate
2072 2100 bookmarks.update(self, [p1, p2], ret)
2073 2101 cctx.markcommitted(ret)
2074 2102 ms.reset()
2075 2103 tr.close()
2076 2104
2077 2105 finally:
2078 2106 lockmod.release(tr, lock, wlock)
2079 2107
2080 2108 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2081 2109 # hack for commands that use a temporary commit (eg: histedit):
2082 2110 # the temporary commit may have been stripped before the hook runs
2083 2111 if self.changelog.hasnode(ret):
2084 2112 self.hook("commit", node=node, parent1=parent1,
2085 2113 parent2=parent2)
2086 2114 self._afterlock(commithook)
2087 2115 return ret
2088 2116
2089 2117 @unfilteredmethod
2090 2118 def commitctx(self, ctx, error=False):
2091 2119 """Add a new revision to current repository.
2092 2120 Revision information is passed via the context argument.
2093 2121
2094 2122 ctx.files() should list all files involved in this commit, i.e.
2095 2123 modified/added/removed files. On merge, it may be wider than the
2096 2124 set of files recorded in the committed changeset, since any file
2097 2125 nodes derived directly from p1 or p2 are excluded from it.
2098 2126 """
2099 2127
2100 2128 tr = None
2101 2129 p1, p2 = ctx.p1(), ctx.p2()
2102 2130 user = ctx.user()
2103 2131
2104 2132 lock = self.lock()
2105 2133 try:
2106 2134 tr = self.transaction("commit")
2107 2135 trp = weakref.proxy(tr)
2108 2136
2109 2137 if ctx.manifestnode():
2110 2138 # reuse an existing manifest revision
2111 2139 self.ui.debug('reusing known manifest\n')
2112 2140 mn = ctx.manifestnode()
2113 2141 files = ctx.files()
2114 2142 elif ctx.files():
2115 2143 m1ctx = p1.manifestctx()
2116 2144 m2ctx = p2.manifestctx()
2117 2145 mctx = m1ctx.copy()
2118 2146
2119 2147 m = mctx.read()
2120 2148 m1 = m1ctx.read()
2121 2149 m2 = m2ctx.read()
2122 2150
2123 2151 # check in files
2124 2152 added = []
2125 2153 changed = []
2126 2154 removed = list(ctx.removed())
2127 2155 linkrev = len(self)
2128 2156 self.ui.note(_("committing files:\n"))
2129 2157 for f in sorted(ctx.modified() + ctx.added()):
2130 2158 self.ui.note(f + "\n")
2131 2159 try:
2132 2160 fctx = ctx[f]
2133 2161 if fctx is None:
2134 2162 removed.append(f)
2135 2163 else:
2136 2164 added.append(f)
2137 2165 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2138 2166 trp, changed)
2139 2167 m.setflag(f, fctx.flags())
2140 2168 except OSError as inst:
2141 2169 self.ui.warn(_("trouble committing %s!\n") % f)
2142 2170 raise
2143 2171 except IOError as inst:
2144 2172 errcode = getattr(inst, 'errno', errno.ENOENT)
2145 2173 if error or errcode and errcode != errno.ENOENT:
2146 2174 self.ui.warn(_("trouble committing %s!\n") % f)
2147 2175 raise
2148 2176
2149 2177 # update manifest
2150 2178 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2151 2179 drop = [f for f in removed if f in m]
2152 2180 for f in drop:
2153 2181 del m[f]
2154 2182 files = changed + removed
2155 2183 md = None
2156 2184 if not files:
2157 2185 # if no "files" actually changed in terms of the changelog,
2158 2186 # try hard to detect an unmodified manifest entry so that the
2159 2187 # exact same commit can be reproduced later on convert.
2160 2188 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2161 2189 if not files and md:
2162 2190 self.ui.debug('not reusing manifest (no file change in '
2163 2191 'changelog, but manifest differs)\n')
2164 2192 if files or md:
2165 2193 self.ui.note(_("committing manifest\n"))
2166 2194 # we're using narrowmatch here since it's already applied at
2167 2195 # other stages (such as dirstate.walk), so we're already
2168 2196 # ignoring things outside of narrowspec in most cases. The
2169 2197 # one case where we might have files outside the narrowspec
2170 2198 # at this point is merges, and we already error out in the
2171 2199 # case where the merge has files outside of the narrowspec,
2172 2200 # so this is safe.
2173 2201 mn = mctx.write(trp, linkrev,
2174 2202 p1.manifestnode(), p2.manifestnode(),
2175 2203 added, drop, match=self.narrowmatch())
2176 2204 else:
2177 2205 self.ui.debug('reusing manifest from p1 (listed files '
2178 2206 'actually unchanged)\n')
2179 2207 mn = p1.manifestnode()
2180 2208 else:
2181 2209 self.ui.debug('reusing manifest from p1 (no file change)\n')
2182 2210 mn = p1.manifestnode()
2183 2211 files = []
2184 2212
2185 2213 # update changelog
2186 2214 self.ui.note(_("committing changelog\n"))
2187 2215 self.changelog.delayupdate(tr)
2188 2216 n = self.changelog.add(mn, files, ctx.description(),
2189 2217 trp, p1.node(), p2.node(),
2190 2218 user, ctx.date(), ctx.extra().copy())
2191 2219 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2192 2220 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2193 2221 parent2=xp2)
2194 2222 # set the new commit to its proper phase
2195 2223 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2196 2224 if targetphase:
2197 2225 # retracting the boundary does not alter parent changesets.
2198 2226 # if a parent has a higher phase, the resulting phase will
2199 2227 # be compliant anyway
2200 2228 #
2201 2229 # if the minimal phase was 0 we don't need to retract anything
2202 2230 phases.registernew(self, tr, targetphase, [n])
2203 2231 tr.close()
2204 2232 return n
2205 2233 finally:
2206 2234 if tr:
2207 2235 tr.release()
2208 2236 lock.release()
2209 2237
2210 2238 @unfilteredmethod
2211 2239 def destroying(self):
2212 2240 '''Inform the repository that nodes are about to be destroyed.
2213 2241 Intended for use by strip and rollback, so there's a common
2214 2242 place for anything that has to be done before destroying history.
2215 2243
2216 2244 This is mostly useful for saving state that is in memory and waiting
2217 2245 to be flushed when the current lock is released. Because a call to
2218 2246 destroyed is imminent, the repo will be invalidated causing those
2219 2247 changes to stay in memory (waiting for the next unlock), or vanish
2220 2248 completely.
2221 2249 '''
2222 2250 # When using the same lock to commit and strip, the phasecache is left
2223 2251 # dirty after committing. Then when we strip, the repo is invalidated,
2224 2252 # causing those changes to disappear.
2225 2253 if '_phasecache' in vars(self):
2226 2254 self._phasecache.write()
2227 2255
2228 2256 @unfilteredmethod
2229 2257 def destroyed(self):
2230 2258 '''Inform the repository that nodes have been destroyed.
2231 2259 Intended for use by strip and rollback, so there's a common
2232 2260 place for anything that has to be done after destroying history.
2233 2261 '''
2234 2262 # When one tries to:
2235 2263 # 1) destroy nodes thus calling this method (e.g. strip)
2236 2264 # 2) use phasecache somewhere (e.g. commit)
2237 2265 #
2238 2266 # then 2) will fail because the phasecache contains nodes that were
2239 2267 # removed. We can either remove phasecache from the filecache,
2240 2268 # causing it to reload next time it is accessed, or simply filter
2241 2269 # the removed nodes now and write the updated cache.
2242 2270 self._phasecache.filterunknown(self)
2243 2271 self._phasecache.write()
2244 2272
2245 2273 # refresh all repository caches
2246 2274 self.updatecaches()
2247 2275
2248 2276 # Ensure the persistent tag cache is updated. Doing it now
2249 2277 # means that the tag cache only has to worry about destroyed
2250 2278 # heads immediately after a strip/rollback. That in turn
2251 2279 # guarantees that "cachetip == currenttip" (comparing both rev
2252 2280 # and node) always means no nodes have been added or destroyed.
2253 2281
2254 2282 # XXX this is suboptimal when qrefresh'ing: we strip the current
2255 2283 # head, refresh the tag cache, then immediately add a new head.
2256 2284 # But I think doing it this way is necessary for the "instant
2257 2285 # tag cache retrieval" case to work.
2258 2286 self.invalidate()
2259 2287
2260 2288 def status(self, node1='.', node2=None, match=None,
2261 2289 ignored=False, clean=False, unknown=False,
2262 2290 listsubrepos=False):
2263 2291 '''a convenience method that calls node1.status(node2)'''
2264 2292 return self[node1].status(node2, match, ignored, clean, unknown,
2265 2293 listsubrepos)
2266 2294
2267 2295 def addpostdsstatus(self, ps):
2268 2296 """Add a callback to run within the wlock, at the point at which status
2269 2297 fixups happen.
2270 2298
2271 2299 On status completion, callback(wctx, status) will be called with the
2272 2300 wlock held, unless the dirstate has changed from underneath or the wlock
2273 2301 couldn't be grabbed.
2274 2302
2275 2303 Callbacks should not capture and use a cached copy of the dirstate --
2276 2304 it might change in the meantime. Instead, they should access the
2277 2305 dirstate via wctx.repo().dirstate.
2278 2306
2279 2307 This list is emptied out after each status run -- extensions should
2280 2308 make sure they add to this list each time dirstate.status is called.
2281 2309 Extensions should also make sure they don't call this for statuses
2282 2310 that don't involve the dirstate.
2283 2311 """
2284 2312
2285 2313 # The list is located here for uniqueness reasons -- it is actually
2286 2314 # managed by the workingctx, but that isn't unique per-repo.
2287 2315 self._postdsstatus.append(ps)
2288 2316
2289 2317 def postdsstatus(self):
2290 2318 """Used by workingctx to get the list of post-dirstate-status hooks."""
2291 2319 return self._postdsstatus
2292 2320
2293 2321 def clearpostdsstatus(self):
2294 2322 """Used by workingctx to clear post-dirstate-status hooks."""
2295 2323 del self._postdsstatus[:]
2296 2324
2297 2325 def heads(self, start=None):
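        '''Return the heads of the repository as a list of nodes, newest
        first. If 'start' is given, only heads reachable from 'start'
        are returned.'''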
2298 2326 if start is None:
2299 2327 cl = self.changelog
2300 2328 headrevs = reversed(cl.headrevs())
2301 2329 return [cl.node(rev) for rev in headrevs]
2302 2330
2303 2331 heads = self.changelog.heads(start)
2304 2332 # sort the output in rev descending order
2305 2333 return sorted(heads, key=self.changelog.rev, reverse=True)
2306 2334
2307 2335 def branchheads(self, branch=None, start=None, closed=False):
2308 2336 '''return a (possibly filtered) list of heads for the given branch
2309 2337
2310 2338 Heads are returned in topological order, from newest to oldest.
2311 2339 If branch is None, use the dirstate branch.
2312 2340 If start is not None, return only heads reachable from start.
2313 2341 If closed is True, return heads that are marked as closed as well.
2314 2342 '''
2315 2343 if branch is None:
2316 2344 branch = self[None].branch()
2317 2345 branches = self.branchmap()
2318 2346 if branch not in branches:
2319 2347 return []
2320 2348 # the cache returns heads ordered lowest to highest
2321 2349 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2322 2350 if start is not None:
2323 2351 # filter out the heads that cannot be reached from startrev
2324 2352 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2325 2353 bheads = [h for h in bheads if h in fbheads]
2326 2354 return bheads
2327 2355
2328 2356 def branches(self, nodes):
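        '''For each requested node, walk first parents until reaching a
        merge or root changeset, and return one (tip, base, p0, p1)
        tuple per node, where 'base' is the changeset the walk stopped
        at and p0/p1 are its parents. Historically used by the legacy
        'branches' wire protocol command.'''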
2329 2357 if not nodes:
2330 2358 nodes = [self.changelog.tip()]
2331 2359 b = []
2332 2360 for n in nodes:
2333 2361 t = n
2334 2362 while True:
2335 2363 p = self.changelog.parents(n)
2336 2364 if p[1] != nullid or p[0] == nullid:
2337 2365 b.append((t, n, p[0], p[1]))
2338 2366 break
2339 2367 n = p[0]
2340 2368 return b
2341 2369
2342 2370 def between(self, pairs):
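        '''For each (top, bottom) pair, return a list of nodes found by
        following first parents from 'top' towards 'bottom', sampled at
        exponentially growing distances from 'top' (1, 2, 4, 8, ...).
        Historically used by the legacy 'between' wire protocol command
        for discovery.'''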
2343 2371 r = []
2344 2372
2345 2373 for top, bottom in pairs:
2346 2374 n, l, i = top, [], 0
2347 2375 f = 1
2348 2376
2349 2377 while n != bottom and n != nullid:
2350 2378 p = self.changelog.parents(n)[0]
2351 2379 if i == f:
2352 2380 l.append(n)
2353 2381 f = f * 2
2354 2382 n = p
2355 2383 i += 1
2356 2384
2357 2385 r.append(l)
2358 2386
2359 2387 return r
2360 2388
2361 2389 def checkpush(self, pushop):
2362 2390 """Extensions can override this function if additional checks have
2363 2391 to be performed before pushing, or call it if they override the push
2364 2392 command.
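
        A minimal sketch of such an override via wrapping (the policy
        check is hypothetical)::

            def checkpush(orig, self, pushop):
                orig(self, pushop)
                if not pushop.force and not policyallows(pushop.outgoing):
                    raise error.Abort(_('push rejected by policy'))
            extensions.wrapfunction(localrepo.localrepository,
                                    'checkpush', checkpush)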
2365 2393 """
2366 2394
2367 2395 @unfilteredpropertycache
2368 2396 def prepushoutgoinghooks(self):
2369 2397 """Return util.hooks consists of a pushop with repo, remote, outgoing
2370 2398 methods, which are called before pushing changesets.
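
        A hook can be registered like so (names are illustrative)::

            def mycheck(pushop):
                # pushop.repo, pushop.remote and pushop.outgoing are
                # available for inspection here
                pass
            repo.prepushoutgoinghooks.add('myextension', mycheck)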
2371 2399 """
2372 2400 return util.hooks()
2373 2401
2374 2402 def pushkey(self, namespace, key, old, new):
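        """Update 'key' in 'namespace' from 'old' to 'new' via the
        pushkey protocol, running the 'prepushkey' hook first and
        scheduling the 'pushkey' hook to run once the repository is
        fully unlocked. Returns the pushkey.push() result, or False if
        the pre-hook aborted."""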
2375 2403 try:
2376 2404 tr = self.currenttransaction()
2377 2405 hookargs = {}
2378 2406 if tr is not None:
2379 2407 hookargs.update(tr.hookargs)
2380 2408 hookargs = pycompat.strkwargs(hookargs)
2381 2409 hookargs[r'namespace'] = namespace
2382 2410 hookargs[r'key'] = key
2383 2411 hookargs[r'old'] = old
2384 2412 hookargs[r'new'] = new
2385 2413 self.hook('prepushkey', throw=True, **hookargs)
2386 2414 except error.HookAbort as exc:
2387 2415 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2388 2416 if exc.hint:
2389 2417 self.ui.write_err(_("(%s)\n") % exc.hint)
2390 2418 return False
2391 2419 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2392 2420 ret = pushkey.push(self, namespace, key, old, new)
2393 2421 def runhook():
2394 2422 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2395 2423 ret=ret)
2396 2424 self._afterlock(runhook)
2397 2425 return ret
2398 2426
2399 2427 def listkeys(self, namespace):
2400 2428 self.hook('prelistkeys', throw=True, namespace=namespace)
2401 2429 self.ui.debug('listing keys for "%s"\n' % namespace)
2402 2430 values = pushkey.list(self, namespace)
2403 2431 self.hook('listkeys', namespace=namespace, values=values)
2404 2432 return values
2405 2433
2406 2434 def debugwireargs(self, one, two, three=None, four=None, five=None):
2407 2435 '''used to test argument passing over the wire'''
2408 2436 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2409 2437 pycompat.bytestr(four),
2410 2438 pycompat.bytestr(five))
2411 2439
2412 2440 def savecommitmessage(self, text):
2413 2441 fp = self.vfs('last-message.txt', 'wb')
2414 2442 try:
2415 2443 fp.write(text)
2416 2444 finally:
2417 2445 fp.close()
2418 2446 return self.pathto(fp.name[len(self.root) + 1:])
2419 2447
2420 2448 # used to avoid circular references so destructors work
2421 2449 def aftertrans(files):
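    """Return a callable that renames each (vfs, src, dest) triple in
    'files'; used as a transaction callback to turn journal files into
    undo files."""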
2422 2450 renamefiles = [tuple(t) for t in files]
2423 2451 def a():
2424 2452 for vfs, src, dest in renamefiles:
2425 2453 # if src and dest refer to the same file, vfs.rename is a no-op,
2426 2454 # leaving both src and dest on disk. delete dest to make sure
2427 2455 # the rename couldn't be such a no-op.
2428 2456 vfs.tryunlink(dest)
2429 2457 try:
2430 2458 vfs.rename(src, dest)
2431 2459 except OSError: # journal file does not yet exist
2432 2460 pass
2433 2461 return a
2434 2462
2435 2463 def undoname(fn):
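    """Map a journal file name to its undo counterpart, e.g.
    'journal.dirstate' -> 'undo.dirstate'."""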
2436 2464 base, name = os.path.split(fn)
2437 2465 assert name.startswith('journal')
2438 2466 return os.path.join(base, name.replace('journal', 'undo', 1))
2439 2467
2440 2468 def instance(ui, path, create, intents=None, createopts=None):
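    """Obtain a repository instance for the local path 'path' (given as
    a local path or URL), creating it first when 'create' is set."""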
2441 2469 localpath = util.urllocalpath(path)
2442 2470 if create:
2443 2471 createrepository(ui, localpath, createopts=createopts)
2444 2472
2445 2473 return makelocalrepository(ui, localpath, intents=intents)
2446 2474
2447 2475 def islocal(path):
2448 2476 return True
2449 2477
2450 2478 def newreporequirements(ui, createopts=None):
2451 2479 """Determine the set of requirements for a new local repository.
2452 2480
2453 2481 Extensions can wrap this function to specify custom requirements for
2454 2482 new repositories.
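
    For instance, a wrapper could add a requirement of its own (the
    'exp-myfeature' name is illustrative)::

        def newreporequirements(orig, ui, createopts=None):
            requirements = orig(ui, createopts=createopts)
            requirements.add('exp-myfeature')
            return requirements
        extensions.wrapfunction(localrepo, 'newreporequirements',
                                newreporequirements)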
2455 2483 """
2456 2484 createopts = createopts or {}
2457 2485
2458 2486 requirements = {'revlogv1'}
2459 2487 if ui.configbool('format', 'usestore'):
2460 2488 requirements.add('store')
2461 2489 if ui.configbool('format', 'usefncache'):
2462 2490 requirements.add('fncache')
2463 2491 if ui.configbool('format', 'dotencode'):
2464 2492 requirements.add('dotencode')
2465 2493
2466 2494 compengine = ui.config('experimental', 'format.compression')
2467 2495 if compengine not in util.compengines:
2468 2496 raise error.Abort(_('compression engine %s defined by '
2469 2497 'experimental.format.compression not available') %
2470 2498 compengine,
2471 2499 hint=_('run "hg debuginstall" to list available '
2472 2500 'compression engines'))
2473 2501
2474 2502 # zlib is the historical default and doesn't need an explicit requirement.
2475 2503 if compengine != 'zlib':
2476 2504 requirements.add('exp-compression-%s' % compengine)
2477 2505
2478 2506 if scmutil.gdinitconfig(ui):
2479 2507 requirements.add('generaldelta')
2480 2508 if ui.configbool('experimental', 'treemanifest'):
2481 2509 requirements.add('treemanifest')
2482 2510 # experimental config: format.sparse-revlog
2483 2511 if ui.configbool('format', 'sparse-revlog'):
2484 2512 requirements.add(SPARSEREVLOG_REQUIREMENT)
2485 2513
2486 2514 revlogv2 = ui.config('experimental', 'revlogv2')
2487 2515 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2488 2516 requirements.remove('revlogv1')
2489 2517 # generaldelta is implied by revlogv2.
2490 2518 requirements.discard('generaldelta')
2491 2519 requirements.add(REVLOGV2_REQUIREMENT)
2492 2520 # experimental config: format.internal-phase
2493 2521 if ui.configbool('format', 'internal-phase'):
2494 2522 requirements.add('internal-phase')
2495 2523
2496 2524 if createopts.get('narrowfiles'):
2497 2525 requirements.add(repository.NARROW_REQUIREMENT)
2498 2526
2499 2527 return requirements
2500 2528
2501 2529 def filterknowncreateopts(ui, createopts):
2502 2530 """Filters a dict of repo creation options against options that are known.
2503 2531
2504 2532 Receives a dict of repo creation options and returns a dict of those
2505 2533 options that we don't know how to handle.
2506 2534
2507 2535 This function is called as part of repository creation. If the
2508 2536 returned dict contains any items, repository creation will not
2509 2537 be allowed, as it means there was a request to create a repository
2510 2538 with options not recognized by loaded code.
2511 2539
2512 2540 Extensions can wrap this function to filter out creation options
2513 2541 they know how to handle.
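
    A minimal sketch (the 'shallow' option is illustrative)::

        def filterknowncreateopts(orig, ui, createopts):
            unknown = orig(ui, createopts)
            unknown.pop('shallow', None)  # handled by this extension
            return unknown
        extensions.wrapfunction(localrepo, 'filterknowncreateopts',
                                filterknowncreateopts)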
2514 2542 """
2515 2543 known = {'narrowfiles'}
2516 2544
2517 2545 return {k: v for k, v in createopts.items() if k not in known}
2518 2546
2519 2547 def createrepository(ui, path, createopts=None):
2520 2548 """Create a new repository in a vfs.
2521 2549
2522 2550 ``path`` path to the new repo's working directory.
2523 2551 ``createopts`` options for the new repository.
2524 2552 """
2525 2553 createopts = createopts or {}
2526 2554
2527 2555 unknownopts = filterknowncreateopts(ui, createopts)
2528 2556
2529 2557 if not isinstance(unknownopts, dict):
2530 2558 raise error.ProgrammingError('filterknowncreateopts() did not return '
2531 2559 'a dict')
2532 2560
2533 2561 if unknownopts:
2534 2562 raise error.Abort(_('unable to create repository because of unknown '
2535 2563 'creation option: %s') %
2536 2564 ', '.join(sorted(unknownopts)),
2537 2565 hint=_('is a required extension not loaded?'))
2538 2566
2539 2567 requirements = newreporequirements(ui, createopts=createopts)
2540 2568
2541 2569 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
2542 2570 if not wdirvfs.exists():
2543 2571 wdirvfs.makedirs()
2544 2572
2545 2573 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
2546 2574 if hgvfs.exists():
2547 2575 raise error.RepoError(_('repository %s already exists') % path)
2548 2576
2549 2577 hgvfs.makedir(notindexed=True)
2550 2578
2551 2579 if b'store' in requirements:
2552 2580 hgvfs.mkdir(b'store')
2553 2581
2554 2582 # We create an invalid changelog outside the store so very old
2555 2583 # Mercurial versions (which didn't know about the requirements
2556 2584 # file) encounter an error on reading the changelog. This
2557 2585 # effectively locks out old clients and prevents them from
2558 2586 # mucking with a repo in an unknown format.
2559 2587 #
2560 2588 # The revlog header has version 2, which won't be recognized by
2561 2589 # such old clients.
2562 2590 hgvfs.append(b'00changelog.i',
2563 2591 b'\0\0\0\2 dummy changelog to prevent using the old repo '
2564 2592 b'layout')
2565 2593
2566 2594 scmutil.writerequires(hgvfs, requirements)
2567 2595
2568 2596 def poisonrepository(repo):
2569 2597 """Poison a repository instance so it can no longer be used."""
2570 2598 # Perform any cleanup on the instance.
2571 2599 repo.close()
2572 2600
2573 2601 # Our strategy is to replace the type of the object with one that
2574 2602 # has all attribute lookups result in error.
2575 2603 #
2576 2604 # But we have to allow the close() method because some constructors
2577 2605 # of repos call close() on repo references.
2578 2606 class poisonedrepository(object):
2579 2607 def __getattribute__(self, item):
2580 2608 if item == r'close':
2581 2609 return object.__getattribute__(self, item)
2582 2610
2583 2611 raise error.ProgrammingError('repo instances should not be used '
2584 2612 'after unshare')
2585 2613
2586 2614 def close(self):
2587 2615 pass
2588 2616
2589 2617 # We may have a repoview, which intercepts __setattr__. So be sure
2590 2618 # we operate at the lowest level possible.
2591 2619 object.__setattr__(repo, r'__class__', poisonedrepository)