localrepo: move some vfs initialization out of __init__...
Gregory Szorc
r39724:2f9cdb5b default
@@ -1,2549 +1,2581 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 )
24 24 from . import (
25 25 bookmarks,
26 26 branchmap,
27 27 bundle2,
28 28 changegroup,
29 29 changelog,
30 30 color,
31 31 context,
32 32 dirstate,
33 33 dirstateguard,
34 34 discovery,
35 35 encoding,
36 36 error,
37 37 exchange,
38 38 extensions,
39 39 filelog,
40 40 hook,
41 41 lock as lockmod,
42 42 manifest,
43 43 match as matchmod,
44 44 merge as mergemod,
45 45 mergeutil,
46 46 namespaces,
47 47 narrowspec,
48 48 obsolete,
49 49 pathutil,
50 50 phases,
51 51 pushkey,
52 52 pycompat,
53 53 repository,
54 54 repoview,
55 55 revset,
56 56 revsetlang,
57 57 scmutil,
58 58 sparse,
59 59 store,
60 60 subrepoutil,
61 61 tags as tagsmod,
62 62 transaction,
63 63 txnutil,
64 64 util,
65 65 vfs as vfsmod,
66 66 )
67 67 from .utils import (
68 68 interfaceutil,
69 69 procutil,
70 70 stringutil,
71 71 )
72 72
73 73 from .revlogutils import (
74 74 constants as revlogconst,
75 75 )
76 76
77 77 release = lockmod.release
78 78 urlerr = util.urlerr
79 79 urlreq = util.urlreq
80 80
81 81 # set of (path, vfs-location) tuples. vfs-location is:
82 82 # - 'plain' for vfs relative paths
83 83 # - '' for svfs relative paths
84 84 _cachedfiles = set()
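# For illustration (entries are registered by the cache decorators
# below, not here): ('bookmarks', 'plain') for .hg/bookmarks, or
# ('phaseroots', '') for .hg/store/phaseroots.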
85 85
86 86 class _basefilecache(scmutil.filecache):
87 87 """All filecache usage on repo are done for logic that should be unfiltered
88 88 """
89 89 def __get__(self, repo, type=None):
90 90 if repo is None:
91 91 return self
92 92 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
93 93 def __set__(self, repo, value):
94 94 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
95 95 def __delete__(self, repo):
96 96 return super(_basefilecache, self).__delete__(repo.unfiltered())
97 97
98 98 class repofilecache(_basefilecache):
99 99 """filecache for files in .hg but outside of .hg/store"""
100 100 def __init__(self, *paths):
101 101 super(repofilecache, self).__init__(*paths)
102 102 for path in paths:
103 103 _cachedfiles.add((path, 'plain'))
104 104
105 105 def join(self, obj, fname):
106 106 return obj.vfs.join(fname)
107 107
108 108 class storecache(_basefilecache):
109 109 """filecache for files in the store"""
110 110 def __init__(self, *paths):
111 111 super(storecache, self).__init__(*paths)
112 112 for path in paths:
113 113 _cachedfiles.add((path, ''))
114 114
115 115 def join(self, obj, fname):
116 116 return obj.sjoin(fname)
117 117
118 118 def isfilecached(repo, name):
119 119 """check if a repo has already cached "name" filecache-ed property
120 120
121 121 This returns (cachedobj-or-None, iscached) tuple.
122 122 """
123 123 cacheentry = repo.unfiltered()._filecache.get(name, None)
124 124 if not cacheentry:
125 125 return None, False
126 126 return cacheentry.obj, True
127 127
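# Usage sketch (hypothetical caller; 'changelog' is a storecache-ed
# property declared further down):
#   obj, cached = isfilecached(repo, 'changelog')
#   if cached:
#       pass  # reuse obj without re-reading from disk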
128 128 class unfilteredpropertycache(util.propertycache):
129 129 """propertycache that apply to unfiltered repo only"""
130 130
131 131 def __get__(self, repo, type=None):
132 132 unfi = repo.unfiltered()
133 133 if unfi is repo:
134 134 return super(unfilteredpropertycache, self).__get__(unfi)
135 135 return getattr(unfi, self.name)
136 136
137 137 class filteredpropertycache(util.propertycache):
138 138 """propertycache that must take filtering in account"""
139 139
140 140 def cachevalue(self, obj, value):
141 141 object.__setattr__(obj, self.name, value)
142 142
143 143
144 144 def hasunfilteredcache(repo, name):
145 145 """check if a repo has an unfilteredpropertycache value for <name>"""
146 146 return name in vars(repo.unfiltered())
147 147
148 148 def unfilteredmethod(orig):
149 149 """decorate method that always need to be run on unfiltered version"""
150 150 def wrapper(repo, *args, **kwargs):
151 151 return orig(repo.unfiltered(), *args, **kwargs)
152 152 return wrapper
153 153
154 154 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
155 155 'unbundle'}
156 156 legacycaps = moderncaps.union({'changegroupsubset'})
157 157
158 158 @interfaceutil.implementer(repository.ipeercommandexecutor)
159 159 class localcommandexecutor(object):
160 160 def __init__(self, peer):
161 161 self._peer = peer
162 162 self._sent = False
163 163 self._closed = False
164 164
165 165 def __enter__(self):
166 166 return self
167 167
168 168 def __exit__(self, exctype, excvalue, exctb):
169 169 self.close()
170 170
171 171 def callcommand(self, command, args):
172 172 if self._sent:
173 173 raise error.ProgrammingError('callcommand() cannot be used after '
174 174 'sendcommands()')
175 175
176 176 if self._closed:
177 177 raise error.ProgrammingError('callcommand() cannot be used after '
178 178 'close()')
179 179
180 180 # We don't need to support anything fancy. Just call the named
181 181 # method on the peer and return a resolved future.
182 182 fn = getattr(self._peer, pycompat.sysstr(command))
183 183
184 184 f = pycompat.futures.Future()
185 185
186 186 try:
187 187 result = fn(**pycompat.strkwargs(args))
188 188 except Exception:
189 189 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
190 190 else:
191 191 f.set_result(result)
192 192
193 193 return f
194 194
195 195 def sendcommands(self):
196 196 self._sent = True
197 197
198 198 def close(self):
199 199 self._closed = True
200 200
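# Usage sketch for the executor API (assumes an existing peer object;
# mirrors the generic peer command-executor pattern):
#   with peer.commandexecutor() as e:
#       f = e.callcommand(b'lookup', {b'key': b'tip'})
#   node = f.result()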
201 201 @interfaceutil.implementer(repository.ipeercommands)
202 202 class localpeer(repository.peer):
203 203 '''peer for a local repo; reflects only the most recent API'''
204 204
205 205 def __init__(self, repo, caps=None):
206 206 super(localpeer, self).__init__()
207 207
208 208 if caps is None:
209 209 caps = moderncaps.copy()
210 210 self._repo = repo.filtered('served')
211 211 self.ui = repo.ui
212 212 self._caps = repo._restrictcapabilities(caps)
213 213
214 214 # Begin of _basepeer interface.
215 215
216 216 def url(self):
217 217 return self._repo.url()
218 218
219 219 def local(self):
220 220 return self._repo
221 221
222 222 def peer(self):
223 223 return self
224 224
225 225 def canpush(self):
226 226 return True
227 227
228 228 def close(self):
229 229 self._repo.close()
230 230
231 231 # End of _basepeer interface.
232 232
233 233 # Begin of _basewirecommands interface.
234 234
235 235 def branchmap(self):
236 236 return self._repo.branchmap()
237 237
238 238 def capabilities(self):
239 239 return self._caps
240 240
241 241 def clonebundles(self):
242 242 return self._repo.tryread('clonebundles.manifest')
243 243
244 244 def debugwireargs(self, one, two, three=None, four=None, five=None):
245 245 """Used to test argument passing over the wire"""
246 246 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
247 247 pycompat.bytestr(four),
248 248 pycompat.bytestr(five))
249 249
250 250 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
251 251 **kwargs):
252 252 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
253 253 common=common, bundlecaps=bundlecaps,
254 254 **kwargs)[1]
255 255 cb = util.chunkbuffer(chunks)
256 256
257 257 if exchange.bundle2requested(bundlecaps):
258 258 # When requesting a bundle2, getbundle returns a stream to keep the
259 259 # wire-level function happy. We need to build a proper object
260 260 # from it in the local peer.
261 261 return bundle2.getunbundler(self.ui, cb)
262 262 else:
263 263 return changegroup.getunbundler('01', cb, None)
264 264
265 265 def heads(self):
266 266 return self._repo.heads()
267 267
268 268 def known(self, nodes):
269 269 return self._repo.known(nodes)
270 270
271 271 def listkeys(self, namespace):
272 272 return self._repo.listkeys(namespace)
273 273
274 274 def lookup(self, key):
275 275 return self._repo.lookup(key)
276 276
277 277 def pushkey(self, namespace, key, old, new):
278 278 return self._repo.pushkey(namespace, key, old, new)
279 279
280 280 def stream_out(self):
281 281 raise error.Abort(_('cannot perform stream clone against local '
282 282 'peer'))
283 283
284 284 def unbundle(self, bundle, heads, url):
285 285 """apply a bundle on a repo
286 286
287 287 This function handles the repo locking itself."""
288 288 try:
289 289 try:
290 290 bundle = exchange.readbundle(self.ui, bundle, None)
291 291 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
292 292 if util.safehasattr(ret, 'getchunks'):
293 293 # This is a bundle20 object, turn it into an unbundler.
294 294 # This little dance should be dropped eventually when the
295 295 # API is finally improved.
296 296 stream = util.chunkbuffer(ret.getchunks())
297 297 ret = bundle2.getunbundler(self.ui, stream)
298 298 return ret
299 299 except Exception as exc:
300 300 # If the exception contains output salvaged from a bundle2
301 301 # reply, we need to make sure it is printed before continuing
302 302 # to fail. So we build a bundle2 with such output and consume
303 303 # it directly.
304 304 #
305 305 # This is not very elegant but allows a "simple" solution for
306 306 # issue4594
307 307 output = getattr(exc, '_bundle2salvagedoutput', ())
308 308 if output:
309 309 bundler = bundle2.bundle20(self._repo.ui)
310 310 for out in output:
311 311 bundler.addpart(out)
312 312 stream = util.chunkbuffer(bundler.getchunks())
313 313 b = bundle2.getunbundler(self.ui, stream)
314 314 bundle2.processbundle(self._repo, b)
315 315 raise
316 316 except error.PushRaced as exc:
317 317 raise error.ResponseError(_('push failed:'),
318 318 stringutil.forcebytestr(exc))
319 319
320 320 # End of _basewirecommands interface.
321 321
322 322 # Begin of peer interface.
323 323
324 324 def commandexecutor(self):
325 325 return localcommandexecutor(self)
326 326
327 327 # End of peer interface.
328 328
329 329 @interfaceutil.implementer(repository.ipeerlegacycommands)
330 330 class locallegacypeer(localpeer):
331 331 '''peer extension which implements legacy methods too; used for tests with
332 332 restricted capabilities'''
333 333
334 334 def __init__(self, repo):
335 335 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
336 336
337 337 # Begin of baselegacywirecommands interface.
338 338
339 339 def between(self, pairs):
340 340 return self._repo.between(pairs)
341 341
342 342 def branches(self, nodes):
343 343 return self._repo.branches(nodes)
344 344
345 345 def changegroup(self, nodes, source):
346 346 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
347 347 missingheads=self._repo.heads())
348 348 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
349 349
350 350 def changegroupsubset(self, bases, heads, source):
351 351 outgoing = discovery.outgoing(self._repo, missingroots=bases,
352 352 missingheads=heads)
353 353 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
354 354
355 355 # End of baselegacywirecommands interface.
356 356
357 357 # Increment the sub-version when the revlog v2 format changes to lock out old
358 358 # clients.
359 359 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
360 360
361 361 # A repository with the sparserevlog feature will have delta chains that
362 362 # can spread over a larger span. Sparse reading cuts these large spans into
363 363 # pieces, so that each piece isn't too big.
364 364 # Without the sparserevlog capability, reading from the repository could use
365 365 # huge amounts of memory, because the whole span would be read at once,
366 366 # including all the intermediate revisions that aren't pertinent for the chain.
367 367 # This is why once a repository has enabled sparse-read, it becomes required.
368 368 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
369 369
370 370 # Functions receiving (ui, features) that extensions can register to impact
371 371 # the ability to load repositories with custom requirements. Only
372 372 # functions defined in loaded extensions are called.
373 373 #
374 374 # The function receives a set of requirement strings that the repository
375 375 # is capable of opening. Functions will typically add elements to the
376 376 # set to reflect that the extension knows how to handle those requirements.
377 377 featuresetupfuncs = set()
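# Registration sketch for an extension (names are hypothetical):
#   from mercurial import localrepo
#
#   def featuresetup(ui, features):
#       features.add('exp-my-feature')  # declare support for a requirement
#
#   def uisetup(ui):
#       localrepo.featuresetupfuncs.add(featuresetup)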
378 378
379 379 def makelocalrepository(ui, path, intents=None):
380 380 """Create a local repository object.
381 381
382 382 Given arguments needed to construct a local repository, this function
383 383 derives a type suitable for representing that repository and returns an
384 384 instance of it.
385 385
386 386 The returned object conforms to the ``repository.completelocalrepository``
387 387 interface.
388 388 """
389 return localrepository(ui, path, intents=intents)
389 # Working directory VFS rooted at repository root.
390 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
391
392 # Main VFS for .hg/ directory.
393 hgpath = wdirvfs.join(b'.hg')
394 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
395
396 return localrepository(
397 ui, path,
398 wdirvfs=wdirvfs,
399 hgvfs=hgvfs,
400 intents=intents)
390 401
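# Caller sketch (path is hypothetical): most code reaches this factory
# through hg.repository(), but direct use looks like:
#   from mercurial import ui as uimod, localrepo
#   repo = localrepo.makelocalrepository(uimod.ui.load(), b'/path/to/repo')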
391 402 @interfaceutil.implementer(repository.completelocalrepository)
392 403 class localrepository(object):
393 404
394 405 # obsolete experimental requirements:
395 406 # - manifestv2: An experimental new manifest format that allowed
396 407 # for stem compression of long paths. Experiment ended up not
397 408 # being successful (repository sizes went up due to worse delta
398 409 # chains), and the code was deleted in 4.6.
399 410 supportedformats = {
400 411 'revlogv1',
401 412 'generaldelta',
402 413 'treemanifest',
403 414 REVLOGV2_REQUIREMENT,
404 415 SPARSEREVLOG_REQUIREMENT,
405 416 }
406 417 _basesupported = supportedformats | {
407 418 'store',
408 419 'fncache',
409 420 'shared',
410 421 'relshared',
411 422 'dotencode',
412 423 'exp-sparse',
413 424 'internal-phase'
414 425 }
415 426 openerreqs = {
416 427 'revlogv1',
417 428 'generaldelta',
418 429 'treemanifest',
419 430 }
420 431
421 432 # list of prefixes for files which can be written without 'wlock'
422 433 # Extensions should extend this list when needed
423 434 _wlockfreeprefix = {
424 435 # We might consider requiring 'wlock' for the next
425 436 # two, but pretty much all the existing code assumes
426 437 # wlock is not needed so we keep them excluded for
427 438 # now.
428 439 'hgrc',
429 440 'requires',
430 441 # XXX cache is a complicated business; someone
431 442 # should investigate this in depth at some point
432 443 'cache/',
433 444 # XXX shouldn't dirstate be covered by the wlock?
434 445 'dirstate',
435 446 # XXX bisect was still a bit too messy at the time
436 447 # this changeset was introduced. Someone should fix
437 448 # the remaining bit and drop this line
438 449 'bisect.state',
439 450 }
440 451
441 def __init__(self, baseui, path, intents=None):
452 def __init__(self, baseui, origroot, wdirvfs, hgvfs, intents=None):
442 453 """Create a new local repository instance.
443 454
444 Most callers should use ``hg.repository()`` or ``localrepo.instance()``
445 for obtaining a new repository object.
455 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
456 or ``localrepo.makelocalrepository()`` for obtaining a new repository
457 object.
458
459 Arguments:
460
461 baseui
462 ``ui.ui`` instance to use. A copy will be made (since new config
463 options may be loaded into it).
464
465 origroot
466 ``bytes`` path to working directory root of this repository.
467
468 wdirvfs
469 ``vfs.vfs`` rooted at the working directory.
470
471 hgvfs
472 ``vfs.vfs`` rooted at .hg/
473
474 intents
475 ``set`` of system strings indicating what this repo will be used
476 for.
446 477 """
478 self.baseui = baseui
479 self.ui = baseui.copy()
480 self.ui.copy = baseui.copy # prevent copying repo configuration
481
482 self.origroot = origroot
483 # vfs rooted at working directory.
484 self.wvfs = wdirvfs
485 self.root = wdirvfs.base
486 # vfs rooted at .hg/. Used to access most non-store paths.
487 self.vfs = hgvfs
488 self.path = hgvfs.base
447 489
448 490 self.requirements = set()
449 491 self.filtername = None
450 # wvfs: rooted at the repository root, used to access the working copy
451 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
452 # vfs: rooted at .hg, used to access repo files outside of .hg/store
453 self.vfs = None
454 492 # svfs: usually rooted at .hg/store, used to access repository history
455 493 # If this is a shared repository, this vfs may point to another
456 494 # repository's .hg/store directory.
457 495 self.svfs = None
458 self.root = self.wvfs.base
459 self.path = self.wvfs.join(".hg")
460 self.origroot = path
461 self.baseui = baseui
462 self.ui = baseui.copy()
463 self.ui.copy = baseui.copy # prevent copying repo configuration
464 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
496
465 497 if (self.ui.configbool('devel', 'all-warnings') or
466 498 self.ui.configbool('devel', 'check-locks')):
467 499 self.vfs.audit = self._getvfsward(self.vfs.audit)
468 500 # A list of callbacks to shape the phase if no data were found.
469 501 # Callbacks are in the form: func(repo, roots) --> processed root.
470 502 # This list is to be filled by extensions during repo setup
471 503 self._phasedefaults = []
472 504 try:
473 505 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
474 506 self._loadextensions()
475 507 except IOError:
476 508 pass
477 509
478 510 if featuresetupfuncs:
479 511 self.supported = set(self._basesupported) # use private copy
480 512 extmods = set(m.__name__ for n, m
481 513 in extensions.extensions(self.ui))
482 514 for setupfunc in featuresetupfuncs:
483 515 if setupfunc.__module__ in extmods:
484 516 setupfunc(self.ui, self.supported)
485 517 else:
486 518 self.supported = self._basesupported
487 519 color.setup(self.ui)
488 520
489 521 # Add compression engines.
490 522 for name in util.compengines:
491 523 engine = util.compengines[name]
492 524 if engine.revlogheader():
493 525 self.supported.add('exp-compression-%s' % name)
494 526
495 527 if not self.vfs.isdir():
496 528 try:
497 529 self.vfs.stat()
498 530 except OSError as inst:
499 531 if inst.errno != errno.ENOENT:
500 532 raise
501 raise error.RepoError(_("repository %s not found") % path)
533 raise error.RepoError(_("repository %s not found") % origroot)
502 534 else:
503 535 try:
504 536 self.requirements = scmutil.readrequires(
505 537 self.vfs, self.supported)
506 538 except IOError as inst:
507 539 if inst.errno != errno.ENOENT:
508 540 raise
509 541
510 542 cachepath = self.vfs.join('cache')
511 543 self.sharedpath = self.path
512 544 try:
513 545 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
514 546 if 'relshared' in self.requirements:
515 547 sharedpath = self.vfs.join(sharedpath)
516 548 vfs = vfsmod.vfs(sharedpath, realpath=True)
517 549 cachepath = vfs.join('cache')
518 550 s = vfs.base
519 551 if not vfs.exists():
520 552 raise error.RepoError(
521 553 _('.hg/sharedpath points to nonexistent directory %s') % s)
522 554 self.sharedpath = s
523 555 except IOError as inst:
524 556 if inst.errno != errno.ENOENT:
525 557 raise
526 558
527 559 if 'exp-sparse' in self.requirements and not sparse.enabled:
528 560 raise error.RepoError(_('repository is using sparse feature but '
529 561 'sparse is not enabled; enable the '
530 562 '"sparse" extensions to access'))
531 563
532 564 self.store = store.store(
533 565 self.requirements, self.sharedpath,
534 566 lambda base: vfsmod.vfs(base, cacheaudited=True))
535 567 self.spath = self.store.path
536 568 self.svfs = self.store.vfs
537 569 self.sjoin = self.store.join
538 570 self.vfs.createmode = self.store.createmode
539 571 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
540 572 self.cachevfs.createmode = self.store.createmode
541 573 if (self.ui.configbool('devel', 'all-warnings') or
542 574 self.ui.configbool('devel', 'check-locks')):
543 575 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
544 576 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
545 577 else: # standard vfs
546 578 self.svfs.audit = self._getsvfsward(self.svfs.audit)
547 579 self._applyopenerreqs()
548 580
549 581 self._dirstatevalidatewarned = False
550 582
551 583 self._branchcaches = {}
552 584 self._revbranchcache = None
553 585 self._filterpats = {}
554 586 self._datafilters = {}
555 587 self._transref = self._lockref = self._wlockref = None
556 588
557 589 # A cache for various files under .hg/ that tracks file changes
558 590 # (used by the filecache decorator)
559 591 #
560 592 # Maps a property name to its util.filecacheentry
561 593 self._filecache = {}
562 594
563 595 # hold sets of revisions to be filtered
564 596 # should be cleared when something might have changed the filter value:
565 597 # - new changesets,
566 598 # - phase change,
567 599 # - new obsolescence marker,
568 600 # - working directory parent change,
569 601 # - bookmark changes
570 602 self.filteredrevcache = {}
571 603
572 604 # post-dirstate-status hooks
573 605 self._postdsstatus = []
574 606
575 607 # generic mapping between names and nodes
576 608 self.names = namespaces.namespaces()
577 609
578 610 # Key to signature value.
579 611 self._sparsesignaturecache = {}
580 612 # Signature to cached matcher instance.
581 613 self._sparsematchercache = {}
582 614
583 615 def _getvfsward(self, origfunc):
584 616 """build a ward for self.vfs"""
585 617 rref = weakref.ref(self)
586 618 def checkvfs(path, mode=None):
587 619 ret = origfunc(path, mode=mode)
588 620 repo = rref()
589 621 if (repo is None
590 622 or not util.safehasattr(repo, '_wlockref')
591 623 or not util.safehasattr(repo, '_lockref')):
592 624 return
593 625 if mode in (None, 'r', 'rb'):
594 626 return
595 627 if path.startswith(repo.path):
596 628 # truncate name relative to the repository (.hg)
597 629 path = path[len(repo.path) + 1:]
598 630 if path.startswith('cache/'):
599 631 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
600 632 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
601 633 if path.startswith('journal.'):
602 634 # journal is covered by 'lock'
603 635 if repo._currentlock(repo._lockref) is None:
604 636 repo.ui.develwarn('write with no lock: "%s"' % path,
605 637 stacklevel=2, config='check-locks')
606 638 elif repo._currentlock(repo._wlockref) is None:
607 639 # rest of vfs files are covered by 'wlock'
608 640 #
609 641 # exclude special files
610 642 for prefix in self._wlockfreeprefix:
611 643 if path.startswith(prefix):
612 644 return
613 645 repo.ui.develwarn('write with no wlock: "%s"' % path,
614 646 stacklevel=2, config='check-locks')
615 647 return ret
616 648 return checkvfs
617 649
618 650 def _getsvfsward(self, origfunc):
619 651 """build a ward for self.svfs"""
620 652 rref = weakref.ref(self)
621 653 def checksvfs(path, mode=None):
622 654 ret = origfunc(path, mode=mode)
623 655 repo = rref()
624 656 if repo is None or not util.safehasattr(repo, '_lockref'):
625 657 return
626 658 if mode in (None, 'r', 'rb'):
627 659 return
628 660 if path.startswith(repo.sharedpath):
629 661 # truncate name relative to the repository (.hg)
630 662 path = path[len(repo.sharedpath) + 1:]
631 663 if repo._currentlock(repo._lockref) is None:
632 664 repo.ui.develwarn('write with no lock: "%s"' % path,
633 665 stacklevel=3)
634 666 return ret
635 667 return checksvfs
636 668
637 669 def close(self):
638 670 self._writecaches()
639 671
640 672 def _loadextensions(self):
641 673 extensions.loadall(self.ui)
642 674
643 675 def _writecaches(self):
644 676 if self._revbranchcache:
645 677 self._revbranchcache.write()
646 678
647 679 def _restrictcapabilities(self, caps):
648 680 if self.ui.configbool('experimental', 'bundle2-advertise'):
649 681 caps = set(caps)
650 682 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
651 683 role='client'))
652 684 caps.add('bundle2=' + urlreq.quote(capsblob))
653 685 return caps
654 686
655 687 def _applyopenerreqs(self):
656 688 self.svfs.options = dict((r, 1) for r in self.requirements
657 689 if r in self.openerreqs)
658 690 # experimental config: format.chunkcachesize
659 691 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
660 692 if chunkcachesize is not None:
661 693 self.svfs.options['chunkcachesize'] = chunkcachesize
662 694 # experimental config: format.manifestcachesize
663 695 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
664 696 if manifestcachesize is not None:
665 697 self.svfs.options['manifestcachesize'] = manifestcachesize
666 698 deltabothparents = self.ui.configbool('storage',
667 699 'revlog.optimize-delta-parent-choice')
668 700 self.svfs.options['deltabothparents'] = deltabothparents
669 701 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
670 702 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
671 703 if 0 <= chainspan:
672 704 self.svfs.options['maxdeltachainspan'] = chainspan
673 705 mmapindexthreshold = self.ui.configbytes('experimental',
674 706 'mmapindexthreshold')
675 707 if mmapindexthreshold is not None:
676 708 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
677 709 withsparseread = self.ui.configbool('experimental', 'sparse-read')
678 710 srdensitythres = float(self.ui.config('experimental',
679 711 'sparse-read.density-threshold'))
680 712 srmingapsize = self.ui.configbytes('experimental',
681 713 'sparse-read.min-gap-size')
682 714 self.svfs.options['with-sparse-read'] = withsparseread
683 715 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
684 716 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
685 717 sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
686 718 self.svfs.options['sparse-revlog'] = sparserevlog
687 719 if sparserevlog:
688 720 self.svfs.options['generaldelta'] = True
689 721 maxchainlen = None
690 722 if sparserevlog:
691 723 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
692 724 # experimental config: format.maxchainlen
693 725 maxchainlen = self.ui.configint('format', 'maxchainlen', maxchainlen)
694 726 if maxchainlen is not None:
695 727 self.svfs.options['maxchainlen'] = maxchainlen
696 728
697 729 for r in self.requirements:
698 730 if r.startswith('exp-compression-'):
699 731 self.svfs.options['compengine'] = r[len('exp-compression-'):]
700 732
701 733 # TODO move "revlogv2" to openerreqs once finalized.
702 734 if REVLOGV2_REQUIREMENT in self.requirements:
703 735 self.svfs.options['revlogv2'] = True
704 736
705 737 def _writerequirements(self):
706 738 scmutil.writerequires(self.vfs, self.requirements)
707 739
708 740 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
709 741 # self -> auditor -> self._checknested -> self
710 742
711 743 @property
712 744 def auditor(self):
713 745 # This is only used by context.workingctx.match in order to
714 746 # detect files in subrepos.
715 747 return pathutil.pathauditor(self.root, callback=self._checknested)
716 748
717 749 @property
718 750 def nofsauditor(self):
719 751 # This is only used by context.basectx.match in order to detect
720 752 # files in subrepos.
721 753 return pathutil.pathauditor(self.root, callback=self._checknested,
722 754 realfs=False, cached=True)
723 755
724 756 def _checknested(self, path):
725 757 """Determine if path is a legal nested repository."""
726 758 if not path.startswith(self.root):
727 759 return False
728 760 subpath = path[len(self.root) + 1:]
729 761 normsubpath = util.pconvert(subpath)
730 762
731 763 # XXX: Checking against the current working copy is wrong in
732 764 # the sense that it can reject things like
733 765 #
734 766 # $ hg cat -r 10 sub/x.txt
735 767 #
736 768 # if sub/ is no longer a subrepository in the working copy
737 769 # parent revision.
738 770 #
739 771 # However, it can of course also allow things that would have
740 772 # been rejected before, such as the above cat command if sub/
741 773 # is a subrepository now, but was a normal directory before.
742 774 # The old path auditor would have rejected by mistake since it
743 775 # panics when it sees sub/.hg/.
744 776 #
745 777 # All in all, checking against the working copy seems sensible
746 778 # since we want to prevent access to nested repositories on
747 779 # the filesystem *now*.
748 780 ctx = self[None]
749 781 parts = util.splitpath(subpath)
750 782 while parts:
751 783 prefix = '/'.join(parts)
752 784 if prefix in ctx.substate:
753 785 if prefix == normsubpath:
754 786 return True
755 787 else:
756 788 sub = ctx.sub(prefix)
757 789 return sub.checknested(subpath[len(prefix) + 1:])
758 790 else:
759 791 parts.pop()
760 792 return False
761 793
762 794 def peer(self):
763 795 return localpeer(self) # not cached to avoid reference cycle
764 796
765 797 def unfiltered(self):
766 798 """Return unfiltered version of the repository
767 799
768 800 Intended to be overwritten by filtered repo."""
769 801 return self
770 802
771 803 def filtered(self, name, visibilityexceptions=None):
772 804 """Return a filtered version of a repository"""
773 805 cls = repoview.newtype(self.unfiltered().__class__)
774 806 return cls(self, name, visibilityexceptions)
775 807
776 808 @repofilecache('bookmarks', 'bookmarks.current')
777 809 def _bookmarks(self):
778 810 return bookmarks.bmstore(self)
779 811
780 812 @property
781 813 def _activebookmark(self):
782 814 return self._bookmarks.active
783 815
784 816 # _phasesets depend on changelog. what we need is to call
785 817 # _phasecache.invalidate() if '00changelog.i' was changed, but it
786 818 # can't be easily expressed in filecache mechanism.
787 819 @storecache('phaseroots', '00changelog.i')
788 820 def _phasecache(self):
789 821 return phases.phasecache(self, self._phasedefaults)
790 822
791 823 @storecache('obsstore')
792 824 def obsstore(self):
793 825 return obsolete.makestore(self.ui, self)
794 826
795 827 @storecache('00changelog.i')
796 828 def changelog(self):
797 829 return changelog.changelog(self.svfs,
798 830 trypending=txnutil.mayhavepending(self.root))
799 831
800 832 def _constructmanifest(self):
801 833 # This is a temporary function while we migrate from manifest to
802 834 # manifestlog. It allows bundlerepo and unionrepo to intercept the
803 835 # manifest creation.
804 836 return manifest.manifestrevlog(self.svfs)
805 837
806 838 @storecache('00manifest.i')
807 839 def manifestlog(self):
808 840 return manifest.manifestlog(self.svfs, self)
809 841
810 842 @repofilecache('dirstate')
811 843 def dirstate(self):
812 844 return self._makedirstate()
813 845
814 846 def _makedirstate(self):
815 847 """Extension point for wrapping the dirstate per-repo."""
816 848 sparsematchfn = lambda: sparse.matcher(self)
817 849
818 850 return dirstate.dirstate(self.vfs, self.ui, self.root,
819 851 self._dirstatevalidate, sparsematchfn)
820 852
821 853 def _dirstatevalidate(self, node):
822 854 try:
823 855 self.changelog.rev(node)
824 856 return node
825 857 except error.LookupError:
826 858 if not self._dirstatevalidatewarned:
827 859 self._dirstatevalidatewarned = True
828 860 self.ui.warn(_("warning: ignoring unknown"
829 861 " working parent %s!\n") % short(node))
830 862 return nullid
831 863
832 864 @storecache(narrowspec.FILENAME)
833 865 def narrowpats(self):
834 866 """matcher patterns for this repository's narrowspec
835 867
836 868 A tuple of (includes, excludes).
837 869 """
838 870 source = self
839 871 if self.shared():
840 872 from . import hg
841 873 source = hg.sharedreposource(self)
842 874 return narrowspec.load(source)
843 875
844 876 @storecache(narrowspec.FILENAME)
845 877 def _narrowmatch(self):
846 878 if repository.NARROW_REQUIREMENT not in self.requirements:
847 879 return matchmod.always(self.root, '')
848 880 include, exclude = self.narrowpats
849 881 return narrowspec.match(self.root, include=include, exclude=exclude)
850 882
851 883 # TODO(martinvonz): make this property-like instead?
852 884 def narrowmatch(self):
853 885 return self._narrowmatch
854 886
855 887 def setnarrowpats(self, newincludes, newexcludes):
856 888 narrowspec.save(self, newincludes, newexcludes)
857 889 self.invalidate(clearfilecache=True)
858 890
859 891 def __getitem__(self, changeid):
860 892 if changeid is None:
861 893 return context.workingctx(self)
862 894 if isinstance(changeid, context.basectx):
863 895 return changeid
864 896 if isinstance(changeid, slice):
865 897 # wdirrev isn't contiguous so the slice shouldn't include it
866 898 return [context.changectx(self, i)
867 899 for i in pycompat.xrange(*changeid.indices(len(self)))
868 900 if i not in self.changelog.filteredrevs]
869 901 try:
870 902 return context.changectx(self, changeid)
871 903 except error.WdirUnsupported:
872 904 return context.workingctx(self)
873 905
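# Lookup sketch (assumes an existing repo object):
#   repo[0]     -> changectx for revision 0
#   repo[None]  -> workingctx for the working directory
#   repo[0:3]   -> [changectx, ...], skipping filtered revisions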
874 906 def __contains__(self, changeid):
875 907 """True if the given changeid exists
876 908
877 909 error.AmbiguousPrefixLookupError is raised if an ambiguous node is
878 910 specified.
879 911 """
880 912 try:
881 913 self[changeid]
882 914 return True
883 915 except error.RepoLookupError:
884 916 return False
885 917
886 918 def __nonzero__(self):
887 919 return True
888 920
889 921 __bool__ = __nonzero__
890 922
891 923 def __len__(self):
892 924 # no need to pay the cost of repoview.changelog
893 925 unfi = self.unfiltered()
894 926 return len(unfi.changelog)
895 927
896 928 def __iter__(self):
897 929 return iter(self.changelog)
898 930
899 931 def revs(self, expr, *args):
900 932 '''Find revisions matching a revset.
901 933
902 934 The revset is specified as a string ``expr`` that may contain
903 935 %-formatting to escape certain types. See ``revsetlang.formatspec``.
904 936
905 937 Revset aliases from the configuration are not expanded. To expand
906 938 user aliases, consider calling ``scmutil.revrange()`` or
907 939 ``repo.anyrevs([expr], user=True)``.
908 940
909 941 Returns a revset.abstractsmartset, which is a list-like interface
910 942 that contains integer revisions.
911 943 '''
912 944 expr = revsetlang.formatspec(expr, *args)
913 945 m = revset.match(None, expr)
914 946 return m(self)
915 947
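# Usage sketch (escaping per revsetlang.formatspec; names hypothetical):
#   repo.revs('branch(%s)', branchname)    # %s escapes a string
#   repo.revs('%ld and merge()', revlist)  # %ld escapes a list of ints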
916 948 def set(self, expr, *args):
917 949 '''Find revisions matching a revset and emit changectx instances.
918 950
919 951 This is a convenience wrapper around ``revs()`` that iterates the
920 952 result and is a generator of changectx instances.
921 953
922 954 Revset aliases from the configuration are not expanded. To expand
923 955 user aliases, consider calling ``scmutil.revrange()``.
924 956 '''
925 957 for r in self.revs(expr, *args):
926 958 yield self[r]
927 959
928 960 def anyrevs(self, specs, user=False, localalias=None):
929 961 '''Find revisions matching one of the given revsets.
930 962
931 963 Revset aliases from the configuration are not expanded by default. To
932 964 expand user aliases, specify ``user=True``. To provide some local
933 965 definitions overriding user aliases, set ``localalias`` to
934 966 ``{name: definitionstring}``.
935 967 '''
936 968 if user:
937 969 m = revset.matchany(self.ui, specs,
938 970 lookup=revset.lookupfn(self),
939 971 localalias=localalias)
940 972 else:
941 973 m = revset.matchany(None, specs, localalias=localalias)
942 974 return m(self)
943 975
944 976 def url(self):
945 977 return 'file:' + self.root
946 978
947 979 def hook(self, name, throw=False, **args):
948 980 """Call a hook, passing this repo instance.
949 981
950 982 This is a convenience method to aid invoking hooks. Extensions likely
951 983 won't call this unless they have registered a custom hook or are
952 984 replacing code that is expected to call a hook.
953 985 """
954 986 return hook.hook(self.ui, self, name, throw, **args)
955 987
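# Invocation sketch (hook name and arguments are illustrative):
#   repo.hook('pretxncommit', throw=True, node=hex(node), parent1=p1)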
956 988 @filteredpropertycache
957 989 def _tagscache(self):
958 990 '''Returns a tagscache object that contains various tags-related
959 991 caches.'''
960 992
961 993 # This simplifies its cache management by having one decorated
962 994 # function (this one) and the rest simply fetch things from it.
963 995 class tagscache(object):
964 996 def __init__(self):
965 997 # These two define the set of tags for this repository. tags
966 998 # maps tag name to node; tagtypes maps tag name to 'global' or
967 999 # 'local'. (Global tags are defined by .hgtags across all
968 1000 # heads, and local tags are defined in .hg/localtags.)
969 1001 # They constitute the in-memory cache of tags.
970 1002 self.tags = self.tagtypes = None
971 1003
972 1004 self.nodetagscache = self.tagslist = None
973 1005
974 1006 cache = tagscache()
975 1007 cache.tags, cache.tagtypes = self._findtags()
976 1008
977 1009 return cache
978 1010
979 1011 def tags(self):
980 1012 '''return a mapping of tag to node'''
981 1013 t = {}
982 1014 if self.changelog.filteredrevs:
983 1015 tags, tt = self._findtags()
984 1016 else:
985 1017 tags = self._tagscache.tags
986 1018 for k, v in tags.iteritems():
987 1019 try:
988 1020 # ignore tags to unknown nodes
989 1021 self.changelog.rev(v)
990 1022 t[k] = v
991 1023 except (error.LookupError, ValueError):
992 1024 pass
993 1025 return t
994 1026
995 1027 def _findtags(self):
996 1028 '''Do the hard work of finding tags. Return a pair of dicts
997 1029 (tags, tagtypes) where tags maps tag name to node, and tagtypes
998 1030 maps tag name to a string like \'global\' or \'local\'.
999 1031 Subclasses or extensions are free to add their own tags, but
1000 1032 should be aware that the returned dicts will be retained for the
1001 1033 duration of the localrepo object.'''
1002 1034
1003 1035 # XXX what tagtype should subclasses/extensions use? Currently
1004 1036 # mq and bookmarks add tags, but do not set the tagtype at all.
1005 1037 # Should each extension invent its own tag type? Should there
1006 1038 # be one tagtype for all such "virtual" tags? Or is the status
1007 1039 # quo fine?
1008 1040
1009 1041
1010 1042 # map tag name to (node, hist)
1011 1043 alltags = tagsmod.findglobaltags(self.ui, self)
1012 1044 # map tag name to tag type
1013 1045 tagtypes = dict((tag, 'global') for tag in alltags)
1014 1046
1015 1047 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1016 1048
1017 1049 # Build the return dicts. Have to re-encode tag names because
1018 1050 # the tags module always uses UTF-8 (in order not to lose info
1019 1051 # writing to the cache), but the rest of Mercurial wants them in
1020 1052 # local encoding.
1021 1053 tags = {}
1022 1054 for (name, (node, hist)) in alltags.iteritems():
1023 1055 if node != nullid:
1024 1056 tags[encoding.tolocal(name)] = node
1025 1057 tags['tip'] = self.changelog.tip()
1026 1058 tagtypes = dict([(encoding.tolocal(name), value)
1027 1059 for (name, value) in tagtypes.iteritems()])
1028 1060 return (tags, tagtypes)
1029 1061
1030 1062 def tagtype(self, tagname):
1031 1063 '''
1032 1064 return the type of the given tag. result can be:
1033 1065
1034 1066 'local' : a local tag
1035 1067 'global' : a global tag
1036 1068 None : tag does not exist
1037 1069 '''
1038 1070
1039 1071 return self._tagscache.tagtypes.get(tagname)
1040 1072
1041 1073 def tagslist(self):
1042 1074 '''return a list of tags ordered by revision'''
1043 1075 if not self._tagscache.tagslist:
1044 1076 l = []
1045 1077 for t, n in self.tags().iteritems():
1046 1078 l.append((self.changelog.rev(n), t, n))
1047 1079 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1048 1080
1049 1081 return self._tagscache.tagslist
1050 1082
1051 1083 def nodetags(self, node):
1052 1084 '''return the tags associated with a node'''
1053 1085 if not self._tagscache.nodetagscache:
1054 1086 nodetagscache = {}
1055 1087 for t, n in self._tagscache.tags.iteritems():
1056 1088 nodetagscache.setdefault(n, []).append(t)
1057 1089 for tags in nodetagscache.itervalues():
1058 1090 tags.sort()
1059 1091 self._tagscache.nodetagscache = nodetagscache
1060 1092 return self._tagscache.nodetagscache.get(node, [])
1061 1093
1062 1094 def nodebookmarks(self, node):
1063 1095 """return the list of bookmarks pointing to the specified node"""
1064 1096 return self._bookmarks.names(node)
1065 1097
1066 1098 def branchmap(self):
1067 1099 '''returns a dictionary {branch: [branchheads]} with branchheads
1068 1100 ordered by increasing revision number'''
1069 1101 branchmap.updatecache(self)
1070 1102 return self._branchcaches[self.filtername]
1071 1103
1072 1104 @unfilteredmethod
1073 1105 def revbranchcache(self):
1074 1106 if not self._revbranchcache:
1075 1107 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1076 1108 return self._revbranchcache
1077 1109
1078 1110 def branchtip(self, branch, ignoremissing=False):
1079 1111 '''return the tip node for a given branch
1080 1112
1081 1113 If ignoremissing is True, then this method will not raise an error.
1082 1114 This is helpful for callers that only expect None for a missing branch
1083 1115 (e.g. namespace).
1084 1116
1085 1117 '''
1086 1118 try:
1087 1119 return self.branchmap().branchtip(branch)
1088 1120 except KeyError:
1089 1121 if not ignoremissing:
1090 1122 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1091 1123 else:
1092 1124 pass
1093 1125
1094 1126 def lookup(self, key):
1095 1127 return scmutil.revsymbol(self, key).node()
1096 1128
1097 1129 def lookupbranch(self, key):
1098 1130 if key in self.branchmap():
1099 1131 return key
1100 1132
1101 1133 return scmutil.revsymbol(self, key).branch()
1102 1134
1103 1135 def known(self, nodes):
1104 1136 cl = self.changelog
1105 1137 nm = cl.nodemap
1106 1138 filtered = cl.filteredrevs
1107 1139 result = []
1108 1140 for n in nodes:
1109 1141 r = nm.get(n)
1110 1142 resp = not (r is None or r in filtered)
1111 1143 result.append(resp)
1112 1144 return result
1113 1145
1114 1146 def local(self):
1115 1147 return self
1116 1148
1117 1149 def publishing(self):
1118 1150 # it's safe (and desirable) to trust the publish flag unconditionally
1119 1151 # so that we don't finalize changes shared between users via ssh or nfs
1120 1152 return self.ui.configbool('phases', 'publish', untrusted=True)
1121 1153
1122 1154 def cancopy(self):
1123 1155 # so statichttprepo's override of local() works
1124 1156 if not self.local():
1125 1157 return False
1126 1158 if not self.publishing():
1127 1159 return True
1128 1160 # if publishing we can't copy if there is filtered content
1129 1161 return not self.filtered('visible').changelog.filteredrevs
1130 1162
1131 1163 def shared(self):
1132 1164 '''the type of shared repository (None if not shared)'''
1133 1165 if self.sharedpath != self.path:
1134 1166 return 'store'
1135 1167 return None
1136 1168
1137 1169 def wjoin(self, f, *insidef):
1138 1170 return self.vfs.reljoin(self.root, f, *insidef)
1139 1171
1140 1172 def file(self, f):
1141 1173 if f[0] == '/':
1142 1174 f = f[1:]
1143 1175 return filelog.filelog(self.svfs, f)
1144 1176
1145 1177 def setparents(self, p1, p2=nullid):
1146 1178 with self.dirstate.parentchange():
1147 1179 copies = self.dirstate.setparents(p1, p2)
1148 1180 pctx = self[p1]
1149 1181 if copies:
1150 1182 # Adjust copy records; the dirstate cannot do it, as it
1151 1183 # requires access to the parents' manifests. Preserve them
1152 1184 # only for entries added to first parent.
1153 1185 for f in copies:
1154 1186 if f not in pctx and copies[f] in pctx:
1155 1187 self.dirstate.copy(copies[f], f)
1156 1188 if p2 == nullid:
1157 1189 for f, s in sorted(self.dirstate.copies().items()):
1158 1190 if f not in pctx and s not in pctx:
1159 1191 self.dirstate.copy(None, f)
1160 1192
1161 1193 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1162 1194 """changeid can be a changeset revision, node, or tag.
1163 1195 fileid can be a file revision or node."""
1164 1196 return context.filectx(self, path, changeid, fileid,
1165 1197 changectx=changectx)
1166 1198
1167 1199 def getcwd(self):
1168 1200 return self.dirstate.getcwd()
1169 1201
1170 1202 def pathto(self, f, cwd=None):
1171 1203 return self.dirstate.pathto(f, cwd)
1172 1204
1173 1205 def _loadfilter(self, filter):
1174 1206 if filter not in self._filterpats:
1175 1207 l = []
1176 1208 for pat, cmd in self.ui.configitems(filter):
1177 1209 if cmd == '!':
1178 1210 continue
1179 1211 mf = matchmod.match(self.root, '', [pat])
1180 1212 fn = None
1181 1213 params = cmd
1182 1214 for name, filterfn in self._datafilters.iteritems():
1183 1215 if cmd.startswith(name):
1184 1216 fn = filterfn
1185 1217 params = cmd[len(name):].lstrip()
1186 1218 break
1187 1219 if not fn:
1188 1220 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1189 1221 # Wrap old filters not supporting keyword arguments
1190 1222 if not pycompat.getargspec(fn)[2]:
1191 1223 oldfn = fn
1192 1224 fn = lambda s, c, **kwargs: oldfn(s, c)
1193 1225 l.append((mf, fn, params))
1194 1226 self._filterpats[filter] = l
1195 1227 return self._filterpats[filter]
1196 1228
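# Config sketch driving this machinery (pattern and command are
# illustrative):
#   [encode]
#   *.txt = tempfile: unix2dos -n INFILE OUTFILE
# A command starting with a name registered via adddatafilter() uses
# that filter; anything else goes through procutil.filter(), which
# understands 'tempfile:' and 'pipe:' and defaults to a shell pipe.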
1197 1229 def _filter(self, filterpats, filename, data):
1198 1230 for mf, fn, cmd in filterpats:
1199 1231 if mf(filename):
1200 1232 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1201 1233 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1202 1234 break
1203 1235
1204 1236 return data
1205 1237
1206 1238 @unfilteredpropertycache
1207 1239 def _encodefilterpats(self):
1208 1240 return self._loadfilter('encode')
1209 1241
1210 1242 @unfilteredpropertycache
1211 1243 def _decodefilterpats(self):
1212 1244 return self._loadfilter('decode')
1213 1245
1214 1246 def adddatafilter(self, name, filter):
1215 1247 self._datafilters[name] = filter
1216 1248
1217 1249 def wread(self, filename):
1218 1250 if self.wvfs.islink(filename):
1219 1251 data = self.wvfs.readlink(filename)
1220 1252 else:
1221 1253 data = self.wvfs.read(filename)
1222 1254 return self._filter(self._encodefilterpats, filename, data)
1223 1255
1224 1256 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1225 1257 """write ``data`` into ``filename`` in the working directory
1226 1258
1227 1259 This returns the length of the written (maybe decoded) data.
1228 1260 """
1229 1261 data = self._filter(self._decodefilterpats, filename, data)
1230 1262 if 'l' in flags:
1231 1263 self.wvfs.symlink(data, filename)
1232 1264 else:
1233 1265 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1234 1266 **kwargs)
1235 1267 if 'x' in flags:
1236 1268 self.wvfs.setflags(filename, False, True)
1237 1269 else:
1238 1270 self.wvfs.setflags(filename, False, False)
1239 1271 return len(data)
1240 1272
1241 1273 def wwritedata(self, filename, data):
1242 1274 return self._filter(self._decodefilterpats, filename, data)
1243 1275
1244 1276 def currenttransaction(self):
1245 1277 """return the current transaction or None if non exists"""
1246 1278 if self._transref:
1247 1279 tr = self._transref()
1248 1280 else:
1249 1281 tr = None
1250 1282
1251 1283 if tr and tr.running():
1252 1284 return tr
1253 1285 return None
1254 1286
1255 1287 def transaction(self, desc, report=None):
1256 1288 if (self.ui.configbool('devel', 'all-warnings')
1257 1289 or self.ui.configbool('devel', 'check-locks')):
1258 1290 if self._currentlock(self._lockref) is None:
1259 1291 raise error.ProgrammingError('transaction requires locking')
1260 1292 tr = self.currenttransaction()
1261 1293 if tr is not None:
1262 1294 return tr.nest(name=desc)
1263 1295
1264 1296 # abort here if the journal already exists
1265 1297 if self.svfs.exists("journal"):
1266 1298 raise error.RepoError(
1267 1299 _("abandoned transaction found"),
1268 1300 hint=_("run 'hg recover' to clean up transaction"))
1269 1301
1270 1302 idbase = "%.40f#%f" % (random.random(), time.time())
1271 1303 ha = hex(hashlib.sha1(idbase).digest())
1272 1304 txnid = 'TXN:' + ha
1273 1305 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1274 1306
1275 1307 self._writejournal(desc)
1276 1308 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1277 1309 if report:
1278 1310 rp = report
1279 1311 else:
1280 1312 rp = self.ui.warn
1281 1313 vfsmap = {'plain': self.vfs} # root of .hg/
1282 1314 # we must avoid cyclic reference between repo and transaction.
1283 1315 reporef = weakref.ref(self)
1284 1316 # Code to track tag movement
1285 1317 #
1286 1318 # Since tags are all handled as file content, it is actually quite hard
1287 1319 # to track these movements from a code perspective, so we fall back
1288 1320 # to tracking at the repository level. One could envision tracking
1289 1321 # changes to the '.hgtags' file through changegroup application, but
1290 1322 # that fails to cope with cases where a transaction exposes new heads
1291 1323 # without a changegroup being involved (eg: phase movement).
1292 1324 #
1293 1325 # For now, we gate the feature behind a flag since this likely comes
1294 1326 # with performance impacts. The current code runs more often than needed
1295 1327 # and does not use caches as much as it could. The current focus is on
1296 1328 # the behavior of the feature so we disable it by default. The flag
1297 1329 # will be removed when we are happy with the performance impact.
1298 1330 #
1299 1331 # Once this feature is no longer experimental move the following
1300 1332 # documentation to the appropriate help section:
1301 1333 #
1302 1334 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1303 1335 # tags (new or changed or deleted tags). In addition the details of
1304 1336 # these changes are made available in a file at:
1305 1337 # ``REPOROOT/.hg/changes/tags.changes``.
1306 1338 # Make sure you check for HG_TAG_MOVED before reading that file as it
1307 1339 # might exist from a previous transaction even if no tags were touched
1308 1340 # in this one. Changes are recorded in a line-based format::
1309 1341 #
1310 1342 # <action> <hex-node> <tag-name>\n
1311 1343 #
1312 1344 # Actions are defined as follows:
1313 1345 # "-R": tag is removed,
1314 1346 # "+A": tag is added,
1315 1347 # "-M": tag is moved (old value),
1316 1348 # "+M": tag is moved (new value),
1317 1349 tracktags = lambda x: None
1318 1350 # experimental config: experimental.hook-track-tags
1319 1351 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1320 1352 if desc != 'strip' and shouldtracktags:
1321 1353 oldheads = self.changelog.headrevs()
1322 1354 def tracktags(tr2):
1323 1355 repo = reporef()
1324 1356 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1325 1357 newheads = repo.changelog.headrevs()
1326 1358 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1327 1359 # notes: we compare lists here.
1328 1360 # As we do it only once, building a set would not be cheaper
1329 1361 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1330 1362 if changes:
1331 1363 tr2.hookargs['tag_moved'] = '1'
1332 1364 with repo.vfs('changes/tags.changes', 'w',
1333 1365 atomictemp=True) as changesfile:
1334 1366 # note: we do not register the file with the transaction
1335 1367 # because we need it to still exist when the transaction
1336 1368 # is closed (for txnclose hooks)
1337 1369 tagsmod.writediff(changesfile, changes)
1338 1370 def validate(tr2):
1339 1371 """will run pre-closing hooks"""
1340 1372 # XXX the transaction API is a bit lacking here so we take a hacky
1341 1373 # path for now
1342 1374 #
1343 1375 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1344 1376 # dict is copied before these run. In addition we need the data
1345 1377 # available to in-memory hooks too.
1346 1378 #
1347 1379 # Moreover, we also need to make sure this runs before txnclose
1348 1380 # hooks and there is no "pending" mechanism that would execute
1349 1381 # logic only if hooks are about to run.
1350 1382 #
1351 1383 # Fixing this limitation of the transaction is also needed to track
1352 1384 # other families of changes (bookmarks, phases, obsolescence).
1353 1385 #
1354 1386 # This will have to be fixed before we remove the experimental
1355 1387 # gating.
1356 1388 tracktags(tr2)
1357 1389 repo = reporef()
1358 1390 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1359 1391 scmutil.enforcesinglehead(repo, tr2, desc)
1360 1392 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1361 1393 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1362 1394 args = tr.hookargs.copy()
1363 1395 args.update(bookmarks.preparehookargs(name, old, new))
1364 1396 repo.hook('pretxnclose-bookmark', throw=True,
1365 1397 txnname=desc,
1366 1398 **pycompat.strkwargs(args))
1367 1399 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1368 1400 cl = repo.unfiltered().changelog
1369 1401 for rev, (old, new) in tr.changes['phases'].items():
1370 1402 args = tr.hookargs.copy()
1371 1403 node = hex(cl.node(rev))
1372 1404 args.update(phases.preparehookargs(node, old, new))
1373 1405 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1374 1406 **pycompat.strkwargs(args))
1375 1407
1376 1408 repo.hook('pretxnclose', throw=True,
1377 1409 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1378 1410 def releasefn(tr, success):
1379 1411 repo = reporef()
1380 1412 if success:
1381 1413 # this should be explicitly invoked here, because
1382 1414 # in-memory changes aren't written out when closing the
1383 1415 # transaction if tr.addfilegenerator (via
1384 1416 # dirstate.write or so) wasn't invoked while the
1385 1417 # transaction was running
1386 1418 repo.dirstate.write(None)
1387 1419 else:
1388 1420 # discard all changes (including ones already written
1389 1421 # out) in this transaction
1390 1422 narrowspec.restorebackup(self, 'journal.narrowspec')
1391 1423 repo.dirstate.restorebackup(None, 'journal.dirstate')
1392 1424
1393 1425 repo.invalidate(clearfilecache=True)
1394 1426
1395 1427 tr = transaction.transaction(rp, self.svfs, vfsmap,
1396 1428 "journal",
1397 1429 "undo",
1398 1430 aftertrans(renames),
1399 1431 self.store.createmode,
1400 1432 validator=validate,
1401 1433 releasefn=releasefn,
1402 1434 checkambigfiles=_cachedfiles,
1403 1435 name=desc)
1404 1436 tr.changes['origrepolen'] = len(self)
1405 1437 tr.changes['obsmarkers'] = set()
1406 1438 tr.changes['phases'] = {}
1407 1439 tr.changes['bookmarks'] = {}
1408 1440
1409 1441 tr.hookargs['txnid'] = txnid
1410 1442 # note: writing the fncache only during finalize means that the file is
1411 1443 # outdated when running hooks. As fncache is used for streaming clone,
1412 1444 # this is not expected to break anything that happens during the hooks.
1413 1445 tr.addfinalize('flush-fncache', self.store.write)
1414 1446 def txnclosehook(tr2):
1415 1447 """To be run if transaction is successful, will schedule a hook run
1416 1448 """
1417 1449 # Don't reference tr2 in hook() so we don't hold a reference.
1418 1450 # This reduces memory consumption when there are multiple
1419 1451 # transactions per lock. This can likely go away if issue5045
1420 1452 # fixes the function accumulation.
1421 1453 hookargs = tr2.hookargs
1422 1454
1423 1455 def hookfunc():
1424 1456 repo = reporef()
1425 1457 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1426 1458 bmchanges = sorted(tr.changes['bookmarks'].items())
1427 1459 for name, (old, new) in bmchanges:
1428 1460 args = tr.hookargs.copy()
1429 1461 args.update(bookmarks.preparehookargs(name, old, new))
1430 1462 repo.hook('txnclose-bookmark', throw=False,
1431 1463 txnname=desc, **pycompat.strkwargs(args))
1432 1464
1433 1465 if hook.hashook(repo.ui, 'txnclose-phase'):
1434 1466 cl = repo.unfiltered().changelog
1435 1467 phasemv = sorted(tr.changes['phases'].items())
1436 1468 for rev, (old, new) in phasemv:
1437 1469 args = tr.hookargs.copy()
1438 1470 node = hex(cl.node(rev))
1439 1471 args.update(phases.preparehookargs(node, old, new))
1440 1472 repo.hook('txnclose-phase', throw=False, txnname=desc,
1441 1473 **pycompat.strkwargs(args))
1442 1474
1443 1475 repo.hook('txnclose', throw=False, txnname=desc,
1444 1476 **pycompat.strkwargs(hookargs))
1445 1477 reporef()._afterlock(hookfunc)
1446 1478 tr.addfinalize('txnclose-hook', txnclosehook)
1447 1479 # Include a leading "-" to make it happen before the transaction summary
1448 1480 # reports registered via scmutil.registersummarycallback() whose names
1449 1481 # are 00-txnreport etc. That way, the caches will be warm when the
1450 1482 # callbacks run.
1451 1483 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1452 1484 def txnaborthook(tr2):
1453 1485 """To be run if transaction is aborted
1454 1486 """
1455 1487 reporef().hook('txnabort', throw=False, txnname=desc,
1456 1488 **pycompat.strkwargs(tr2.hookargs))
1457 1489 tr.addabort('txnabort-hook', txnaborthook)
1458 1490 # avoid eager cache invalidation. in-memory data should be identical
1459 1491 # to stored data if transaction has no error.
1460 1492 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1461 1493 self._transref = weakref.ref(tr)
1462 1494 scmutil.registersummarycallback(self, tr, desc)
1463 1495 return tr
1464 1496
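    # Illustrative usage sketch (not part of the original module; the
    # transaction description 'my-operation' is made up): callers are
    # expected to hold the locks and drive the transaction like this:
    #
    #   with repo.wlock(), repo.lock():
    #       tr = repo.transaction('my-operation')
    #       try:
    #           ...  # mutate the store; writes are journaled by tr
    #           tr.close()
    #       finally:
    #           tr.release()  # rolls back unless close() succeeded
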
1465 1497 def _journalfiles(self):
1466 1498 return ((self.svfs, 'journal'),
1467 1499 (self.vfs, 'journal.dirstate'),
1468 1500 (self.vfs, 'journal.branch'),
1469 1501 (self.vfs, 'journal.desc'),
1470 1502 (self.vfs, 'journal.bookmarks'),
1471 1503 (self.svfs, 'journal.phaseroots'))
1472 1504
1473 1505 def undofiles(self):
1474 1506 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1475 1507
1476 1508 @unfilteredmethod
1477 1509 def _writejournal(self, desc):
1478 1510 self.dirstate.savebackup(None, 'journal.dirstate')
1479 1511 narrowspec.savebackup(self, 'journal.narrowspec')
1480 1512 self.vfs.write("journal.branch",
1481 1513 encoding.fromlocal(self.dirstate.branch()))
1482 1514 self.vfs.write("journal.desc",
1483 1515 "%d\n%s\n" % (len(self), desc))
1484 1516 self.vfs.write("journal.bookmarks",
1485 1517 self.vfs.tryread("bookmarks"))
1486 1518 self.svfs.write("journal.phaseroots",
1487 1519 self.svfs.tryread("phaseroots"))
1488 1520
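    # For orientation (illustrative, not in the original source): after the
    # writes above, 'journal.desc' contains the current changelog length and
    # the transaction description, e.g. for a 42-revision repository:
    #
    #   42
    #   commit
    #
    # _rollback() below parses the renamed 'undo.desc' with splitlines().
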
1489 1521 def recover(self):
1490 1522 with self.lock():
1491 1523 if self.svfs.exists("journal"):
1492 1524 self.ui.status(_("rolling back interrupted transaction\n"))
1493 1525 vfsmap = {'': self.svfs,
1494 1526 'plain': self.vfs,}
1495 1527 transaction.rollback(self.svfs, vfsmap, "journal",
1496 1528 self.ui.warn,
1497 1529 checkambigfiles=_cachedfiles)
1498 1530 self.invalidate()
1499 1531 return True
1500 1532 else:
1501 1533 self.ui.warn(_("no interrupted transaction available\n"))
1502 1534 return False
1503 1535
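    # Note (editorial): this is what the 'hg recover' command ends up
    # invoking; the boolean indicates whether an interrupted transaction
    # was actually rolled back.
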
1504 1536 def rollback(self, dryrun=False, force=False):
1505 1537 wlock = lock = dsguard = None
1506 1538 try:
1507 1539 wlock = self.wlock()
1508 1540 lock = self.lock()
1509 1541 if self.svfs.exists("undo"):
1510 1542 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1511 1543
1512 1544 return self._rollback(dryrun, force, dsguard)
1513 1545 else:
1514 1546 self.ui.warn(_("no rollback information available\n"))
1515 1547 return 1
1516 1548 finally:
1517 1549 release(dsguard, lock, wlock)
1518 1550
1519 1551 @unfilteredmethod # Until we get smarter cache management
1520 1552 def _rollback(self, dryrun, force, dsguard):
1521 1553 ui = self.ui
1522 1554 try:
1523 1555 args = self.vfs.read('undo.desc').splitlines()
1524 1556 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1525 1557 if len(args) >= 3:
1526 1558 detail = args[2]
1527 1559 oldtip = oldlen - 1
1528 1560
1529 1561 if detail and ui.verbose:
1530 1562 msg = (_('repository tip rolled back to revision %d'
1531 1563 ' (undo %s: %s)\n')
1532 1564 % (oldtip, desc, detail))
1533 1565 else:
1534 1566 msg = (_('repository tip rolled back to revision %d'
1535 1567 ' (undo %s)\n')
1536 1568 % (oldtip, desc))
1537 1569 except IOError:
1538 1570 msg = _('rolling back unknown transaction\n')
1539 1571 desc = None
1540 1572
1541 1573 if not force and self['.'] != self['tip'] and desc == 'commit':
1542 1574 raise error.Abort(
1543 1575 _('rollback of last commit while not checked out '
1544 1576 'may lose data'), hint=_('use -f to force'))
1545 1577
1546 1578 ui.status(msg)
1547 1579 if dryrun:
1548 1580 return 0
1549 1581
1550 1582 parents = self.dirstate.parents()
1551 1583 self.destroying()
1552 1584 vfsmap = {'plain': self.vfs, '': self.svfs}
1553 1585 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1554 1586 checkambigfiles=_cachedfiles)
1555 1587 if self.vfs.exists('undo.bookmarks'):
1556 1588 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1557 1589 if self.svfs.exists('undo.phaseroots'):
1558 1590 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1559 1591 self.invalidate()
1560 1592
1561 1593 parentgone = (parents[0] not in self.changelog.nodemap or
1562 1594 parents[1] not in self.changelog.nodemap)
1563 1595 if parentgone:
1564 1596 # prevent dirstateguard from overwriting already restored one
1565 1597 dsguard.close()
1566 1598
1567 1599 narrowspec.restorebackup(self, 'undo.narrowspec')
1568 1600 self.dirstate.restorebackup(None, 'undo.dirstate')
1569 1601 try:
1570 1602 branch = self.vfs.read('undo.branch')
1571 1603 self.dirstate.setbranch(encoding.tolocal(branch))
1572 1604 except IOError:
1573 1605 ui.warn(_('named branch could not be reset: '
1574 1606 'current branch is still \'%s\'\n')
1575 1607 % self.dirstate.branch())
1576 1608
1577 1609 parents = tuple([p.rev() for p in self[None].parents()])
1578 1610 if len(parents) > 1:
1579 1611 ui.status(_('working directory now based on '
1580 1612 'revisions %d and %d\n') % parents)
1581 1613 else:
1582 1614 ui.status(_('working directory now based on '
1583 1615 'revision %d\n') % parents)
1584 1616 mergemod.mergestate.clean(self, self['.'].node())
1585 1617
1586 1618 # TODO: if we know which new heads may result from this rollback, pass
1587 1619 # them to destroy(), which will prevent the branchhead cache from being
1588 1620 # invalidated.
1589 1621 self.destroyed()
1590 1622 return 0
1591 1623
1592 1624 def _buildcacheupdater(self, newtransaction):
1593 1625 """called during transaction to build the callback updating cache
1594 1626
1595 1627 Lives on the repository to help extension who might want to augment
1596 1628 this logic. For this purpose, the created transaction is passed to the
1597 1629 method.
1598 1630 """
1599 1631 # we must avoid cyclic reference between repo and transaction.
1600 1632 reporef = weakref.ref(self)
1601 1633 def updater(tr):
1602 1634 repo = reporef()
1603 1635 repo.updatecaches(tr)
1604 1636 return updater
1605 1637
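    # Hypothetical extension sketch (assumed names, not shipped code): the
    # docstring above invites wrapping, e.g. via extensions.wrapfunction:
    #
    #   from mercurial import extensions, localrepo
    #
    #   def wrapupdater(orig, repo, newtransaction):
    #       updater = orig(repo, newtransaction)
    #       def extendedupdater(tr):
    #           updater(tr)
    #           ...  # warm extension-specific caches here
    #       return extendedupdater
    #
    #   def uisetup(ui):
    #       extensions.wrapfunction(localrepo.localrepository,
    #                               '_buildcacheupdater', wrapupdater)
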
1606 1638 @unfilteredmethod
1607 1639 def updatecaches(self, tr=None, full=False):
1608 1640 """warm appropriate caches
1609 1641
1610 1642         If this function is called after a transaction closed, the transaction
1611 1643 will be available in the 'tr' argument. This can be used to selectively
1612 1644 update caches relevant to the changes in that transaction.
1613 1645
1614 1646         If 'full' is set, make sure all caches the function knows about have
1615 1647         up-to-date data, even the ones usually loaded more lazily.
1616 1648 """
1617 1649 if tr is not None and tr.hookargs.get('source') == 'strip':
1618 1650 # During strip, many caches are invalid but
1619 1651 # later call to `destroyed` will refresh them.
1620 1652 return
1621 1653
1622 1654 if tr is None or tr.changes['origrepolen'] < len(self):
1623 1655 # updating the unfiltered branchmap should refresh all the others,
1624 1656 self.ui.debug('updating the branch cache\n')
1625 1657 branchmap.updatecache(self.filtered('served'))
1626 1658
1627 1659 if full:
1628 1660 rbc = self.revbranchcache()
1629 1661 for r in self.changelog:
1630 1662 rbc.branchinfo(r)
1631 1663 rbc.write()
1632 1664
1633 1665 # ensure the working copy parents are in the manifestfulltextcache
1634 1666 for ctx in self['.'].parents():
1635 1667 ctx.manifest() # accessing the manifest is enough
1636 1668
1637 1669 def invalidatecaches(self):
1638 1670
1639 1671 if '_tagscache' in vars(self):
1640 1672 # can't use delattr on proxy
1641 1673 del self.__dict__['_tagscache']
1642 1674
1643 1675 self.unfiltered()._branchcaches.clear()
1644 1676 self.invalidatevolatilesets()
1645 1677 self._sparsesignaturecache.clear()
1646 1678
1647 1679 def invalidatevolatilesets(self):
1648 1680 self.filteredrevcache.clear()
1649 1681 obsolete.clearobscaches(self)
1650 1682
1651 1683 def invalidatedirstate(self):
1652 1684 '''Invalidates the dirstate, causing the next call to dirstate
1653 1685 to check if it was modified since the last time it was read,
1654 1686 rereading it if it has.
1655 1687
1656 1688         This is different from dirstate.invalidate() in that it doesn't
1657 1689         always reread the dirstate. Use dirstate.invalidate() if you want to
1658 1690 explicitly read the dirstate again (i.e. restoring it to a previous
1659 1691 known good state).'''
1660 1692 if hasunfilteredcache(self, 'dirstate'):
1661 1693 for k in self.dirstate._filecache:
1662 1694 try:
1663 1695 delattr(self.dirstate, k)
1664 1696 except AttributeError:
1665 1697 pass
1666 1698 delattr(self.unfiltered(), 'dirstate')
1667 1699
1668 1700 def invalidate(self, clearfilecache=False):
1669 1701 '''Invalidates both store and non-store parts other than dirstate
1670 1702
1671 1703 If a transaction is running, invalidation of store is omitted,
1672 1704 because discarding in-memory changes might cause inconsistency
1673 1705 (e.g. incomplete fncache causes unintentional failure, but
1674 1706 redundant one doesn't).
1675 1707 '''
1676 1708 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1677 1709 for k in list(self._filecache.keys()):
1678 1710 # dirstate is invalidated separately in invalidatedirstate()
1679 1711 if k == 'dirstate':
1680 1712 continue
1681 1713 if (k == 'changelog' and
1682 1714 self.currenttransaction() and
1683 1715 self.changelog._delayed):
1684 1716 # The changelog object may store unwritten revisions. We don't
1685 1717 # want to lose them.
1686 1718 # TODO: Solve the problem instead of working around it.
1687 1719 continue
1688 1720
1689 1721 if clearfilecache:
1690 1722 del self._filecache[k]
1691 1723 try:
1692 1724 delattr(unfiltered, k)
1693 1725 except AttributeError:
1694 1726 pass
1695 1727 self.invalidatecaches()
1696 1728 if not self.currenttransaction():
1697 1729 # TODO: Changing contents of store outside transaction
1698 1730 # causes inconsistency. We should make in-memory store
1699 1731 # changes detectable, and abort if changed.
1700 1732 self.store.invalidatecaches()
1701 1733
1702 1734 def invalidateall(self):
1703 1735 '''Fully invalidates both store and non-store parts, causing the
1704 1736 subsequent operation to reread any outside changes.'''
1705 1737 # extension should hook this to invalidate its caches
1706 1738 self.invalidate()
1707 1739 self.invalidatedirstate()
1708 1740
1709 1741 @unfilteredmethod
1710 1742 def _refreshfilecachestats(self, tr):
1711 1743 """Reload stats of cached files so that they are flagged as valid"""
1712 1744 for k, ce in self._filecache.items():
1713 1745 k = pycompat.sysstr(k)
1714 1746 if k == r'dirstate' or k not in self.__dict__:
1715 1747 continue
1716 1748 ce.refresh()
1717 1749
1718 1750 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1719 1751 inheritchecker=None, parentenvvar=None):
1720 1752 parentlock = None
1721 1753 # the contents of parentenvvar are used by the underlying lock to
1722 1754 # determine whether it can be inherited
1723 1755 if parentenvvar is not None:
1724 1756 parentlock = encoding.environ.get(parentenvvar)
1725 1757
1726 1758 timeout = 0
1727 1759 warntimeout = 0
1728 1760 if wait:
1729 1761 timeout = self.ui.configint("ui", "timeout")
1730 1762 warntimeout = self.ui.configint("ui", "timeout.warn")
1731 1763 # internal config: ui.signal-safe-lock
1732 1764 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
1733 1765
1734 1766 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1735 1767 releasefn=releasefn,
1736 1768 acquirefn=acquirefn, desc=desc,
1737 1769 inheritchecker=inheritchecker,
1738 1770 parentlock=parentlock,
1739 1771 signalsafe=signalsafe)
1740 1772 return l
1741 1773
1742 1774 def _afterlock(self, callback):
1743 1775 """add a callback to be run when the repository is fully unlocked
1744 1776
1745 1777 The callback will be executed when the outermost lock is released
1746 1778 (with wlock being higher level than 'lock')."""
1747 1779 for ref in (self._wlockref, self._lockref):
1748 1780 l = ref and ref()
1749 1781 if l and l.held:
1750 1782 l.postrelease.append(callback)
1751 1783 break
1752 1784         else: # no lock has been found.
1753 1785 callback()
1754 1786
1755 1787 def lock(self, wait=True):
1756 1788 '''Lock the repository store (.hg/store) and return a weak reference
1757 1789 to the lock. Use this before modifying the store (e.g. committing or
1758 1790         stripping). If you are opening a transaction, get a lock as well.
1759 1791 
1760 1792         If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1761 1793         'wlock' first to avoid a dead-lock hazard.'''
1762 1794 l = self._currentlock(self._lockref)
1763 1795 if l is not None:
1764 1796 l.lock()
1765 1797 return l
1766 1798
1767 1799 l = self._lock(self.svfs, "lock", wait, None,
1768 1800 self.invalidate, _('repository %s') % self.origroot)
1769 1801 self._lockref = weakref.ref(l)
1770 1802 return l
1771 1803
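    # Usage sketch (illustrative): honor the documented ordering by taking
    # 'wlock' before 'lock'; both returned objects can be used as context
    # managers.
    #
    #   with repo.wlock():
    #       with repo.lock():
    #           ...  # safe to touch both .hg and .hg/store here
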
1772 1804 def _wlockchecktransaction(self):
1773 1805 if self.currenttransaction() is not None:
1774 1806 raise error.LockInheritanceContractViolation(
1775 1807 'wlock cannot be inherited in the middle of a transaction')
1776 1808
1777 1809 def wlock(self, wait=True):
1778 1810 '''Lock the non-store parts of the repository (everything under
1779 1811 .hg except .hg/store) and return a weak reference to the lock.
1780 1812
1781 1813 Use this before modifying files in .hg.
1782 1814
1783 1815         If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1784 1816 'wlock' first to avoid a dead-lock hazard.'''
1785 1817 l = self._wlockref and self._wlockref()
1786 1818 if l is not None and l.held:
1787 1819 l.lock()
1788 1820 return l
1789 1821
1790 1822         # We do not need to check for non-waiting lock acquisition. Such
1791 1823         # acquisition would not cause a dead-lock, as it would just fail.
1792 1824 if wait and (self.ui.configbool('devel', 'all-warnings')
1793 1825 or self.ui.configbool('devel', 'check-locks')):
1794 1826 if self._currentlock(self._lockref) is not None:
1795 1827 self.ui.develwarn('"wlock" acquired after "lock"')
1796 1828
1797 1829 def unlock():
1798 1830 if self.dirstate.pendingparentchange():
1799 1831 self.dirstate.invalidate()
1800 1832 else:
1801 1833 self.dirstate.write(None)
1802 1834
1803 1835 self._filecache['dirstate'].refresh()
1804 1836
1805 1837 l = self._lock(self.vfs, "wlock", wait, unlock,
1806 1838 self.invalidatedirstate, _('working directory of %s') %
1807 1839 self.origroot,
1808 1840 inheritchecker=self._wlockchecktransaction,
1809 1841 parentenvvar='HG_WLOCK_LOCKER')
1810 1842 self._wlockref = weakref.ref(l)
1811 1843 return l
1812 1844
1813 1845 def _currentlock(self, lockref):
1814 1846 """Returns the lock if it's held, or None if it's not."""
1815 1847 if lockref is None:
1816 1848 return None
1817 1849 l = lockref()
1818 1850 if l is None or not l.held:
1819 1851 return None
1820 1852 return l
1821 1853
1822 1854 def currentwlock(self):
1823 1855 """Returns the wlock if it's held, or None if it's not."""
1824 1856 return self._currentlock(self._wlockref)
1825 1857
1826 1858 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1827 1859 """
1828 1860 commit an individual file as part of a larger transaction
1829 1861 """
1830 1862
1831 1863 fname = fctx.path()
1832 1864 fparent1 = manifest1.get(fname, nullid)
1833 1865 fparent2 = manifest2.get(fname, nullid)
1834 1866 if isinstance(fctx, context.filectx):
1835 1867 node = fctx.filenode()
1836 1868 if node in [fparent1, fparent2]:
1837 1869 self.ui.debug('reusing %s filelog entry\n' % fname)
1838 1870 if manifest1.flags(fname) != fctx.flags():
1839 1871 changelist.append(fname)
1840 1872 return node
1841 1873
1842 1874 flog = self.file(fname)
1843 1875 meta = {}
1844 1876 copy = fctx.renamed()
1845 1877 if copy and copy[0] != fname:
1846 1878 # Mark the new revision of this file as a copy of another
1847 1879 # file. This copy data will effectively act as a parent
1848 1880 # of this new revision. If this is a merge, the first
1849 1881 # parent will be the nullid (meaning "look up the copy data")
1850 1882 # and the second one will be the other parent. For example:
1851 1883 #
1852 1884 # 0 --- 1 --- 3 rev1 changes file foo
1853 1885 # \ / rev2 renames foo to bar and changes it
1854 1886 # \- 2 -/ rev3 should have bar with all changes and
1855 1887 # should record that bar descends from
1856 1888 # bar in rev2 and foo in rev1
1857 1889 #
1858 1890 # this allows this merge to succeed:
1859 1891 #
1860 1892 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1861 1893 # \ / merging rev3 and rev4 should use bar@rev2
1862 1894 # \- 2 --- 4 as the merge base
1863 1895 #
1864 1896
1865 1897 cfname = copy[0]
1866 1898 crev = manifest1.get(cfname)
1867 1899 newfparent = fparent2
1868 1900
1869 1901 if manifest2: # branch merge
1870 1902 if fparent2 == nullid or crev is None: # copied on remote side
1871 1903 if cfname in manifest2:
1872 1904 crev = manifest2[cfname]
1873 1905 newfparent = fparent1
1874 1906
1875 1907 # Here, we used to search backwards through history to try to find
1876 1908 # where the file copy came from if the source of a copy was not in
1877 1909 # the parent directory. However, this doesn't actually make sense to
1878 1910 # do (what does a copy from something not in your working copy even
1879 1911 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1880 1912 # the user that copy information was dropped, so if they didn't
1881 1913 # expect this outcome it can be fixed, but this is the correct
1882 1914 # behavior in this circumstance.
1883 1915
1884 1916 if crev:
1885 1917 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1886 1918 meta["copy"] = cfname
1887 1919 meta["copyrev"] = hex(crev)
1888 1920 fparent1, fparent2 = nullid, newfparent
1889 1921 else:
1890 1922 self.ui.warn(_("warning: can't find ancestor for '%s' "
1891 1923 "copied from '%s'!\n") % (fname, cfname))
1892 1924
1893 1925 elif fparent1 == nullid:
1894 1926 fparent1, fparent2 = fparent2, nullid
1895 1927 elif fparent2 != nullid:
1896 1928 # is one parent an ancestor of the other?
1897 1929 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1898 1930 if fparent1 in fparentancestors:
1899 1931 fparent1, fparent2 = fparent2, nullid
1900 1932 elif fparent2 in fparentancestors:
1901 1933 fparent2 = nullid
1902 1934
1903 1935 # is the file changed?
1904 1936 text = fctx.data()
1905 1937 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1906 1938 changelist.append(fname)
1907 1939 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1908 1940 # are just the flags changed during merge?
1909 1941 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1910 1942 changelist.append(fname)
1911 1943
1912 1944 return fparent1
1913 1945
1914 1946 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1915 1947 """check for commit arguments that aren't committable"""
1916 1948 if match.isexact() or match.prefix():
1917 1949 matched = set(status.modified + status.added + status.removed)
1918 1950
1919 1951 for f in match.files():
1920 1952 f = self.dirstate.normalize(f)
1921 1953 if f == '.' or f in matched or f in wctx.substate:
1922 1954 continue
1923 1955 if f in status.deleted:
1924 1956 fail(f, _('file not found!'))
1925 1957 if f in vdirs: # visited directory
1926 1958 d = f + '/'
1927 1959 for mf in matched:
1928 1960 if mf.startswith(d):
1929 1961 break
1930 1962 else:
1931 1963 fail(f, _("no match under directory!"))
1932 1964 elif f not in self.dirstate:
1933 1965 fail(f, _("file not tracked!"))
1934 1966
1935 1967 @unfilteredmethod
1936 1968 def commit(self, text="", user=None, date=None, match=None, force=False,
1937 1969 editor=False, extra=None):
1938 1970 """Add a new revision to current repository.
1939 1971
1940 1972 Revision information is gathered from the working directory,
1941 1973 match can be used to filter the committed files. If editor is
1942 1974 supplied, it is called to get a commit message.
1943 1975 """
1944 1976 if extra is None:
1945 1977 extra = {}
1946 1978
1947 1979 def fail(f, msg):
1948 1980 raise error.Abort('%s: %s' % (f, msg))
1949 1981
1950 1982 if not match:
1951 1983 match = matchmod.always(self.root, '')
1952 1984
1953 1985 if not force:
1954 1986 vdirs = []
1955 1987 match.explicitdir = vdirs.append
1956 1988 match.bad = fail
1957 1989
1958 1990 wlock = lock = tr = None
1959 1991 try:
1960 1992 wlock = self.wlock()
1961 1993 lock = self.lock() # for recent changelog (see issue4368)
1962 1994
1963 1995 wctx = self[None]
1964 1996 merge = len(wctx.parents()) > 1
1965 1997
1966 1998 if not force and merge and not match.always():
1967 1999 raise error.Abort(_('cannot partially commit a merge '
1968 2000 '(do not specify files or patterns)'))
1969 2001
1970 2002 status = self.status(match=match, clean=force)
1971 2003 if force:
1972 2004 status.modified.extend(status.clean) # mq may commit clean files
1973 2005
1974 2006 # check subrepos
1975 2007 subs, commitsubs, newstate = subrepoutil.precommit(
1976 2008 self.ui, wctx, status, match, force=force)
1977 2009
1978 2010 # make sure all explicit patterns are matched
1979 2011 if not force:
1980 2012 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1981 2013
1982 2014 cctx = context.workingcommitctx(self, status,
1983 2015 text, user, date, extra)
1984 2016
1985 2017 # internal config: ui.allowemptycommit
1986 2018 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1987 2019 or extra.get('close') or merge or cctx.files()
1988 2020 or self.ui.configbool('ui', 'allowemptycommit'))
1989 2021 if not allowemptycommit:
1990 2022 return None
1991 2023
1992 2024 if merge and cctx.deleted():
1993 2025 raise error.Abort(_("cannot commit merge with missing files"))
1994 2026
1995 2027 ms = mergemod.mergestate.read(self)
1996 2028 mergeutil.checkunresolved(ms)
1997 2029
1998 2030 if editor:
1999 2031 cctx._text = editor(self, cctx, subs)
2000 2032 edited = (text != cctx._text)
2001 2033
2002 2034 # Save commit message in case this transaction gets rolled back
2003 2035 # (e.g. by a pretxncommit hook). Leave the content alone on
2004 2036 # the assumption that the user will use the same editor again.
2005 2037 msgfn = self.savecommitmessage(cctx._text)
2006 2038
2007 2039 # commit subs and write new state
2008 2040 if subs:
2009 2041 for s in sorted(commitsubs):
2010 2042 sub = wctx.sub(s)
2011 2043 self.ui.status(_('committing subrepository %s\n') %
2012 2044 subrepoutil.subrelpath(sub))
2013 2045 sr = sub.commit(cctx._text, user, date)
2014 2046 newstate[s] = (newstate[s][0], sr)
2015 2047 subrepoutil.writestate(self, newstate)
2016 2048
2017 2049 p1, p2 = self.dirstate.parents()
2018 2050 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2019 2051 try:
2020 2052 self.hook("precommit", throw=True, parent1=hookp1,
2021 2053 parent2=hookp2)
2022 2054 tr = self.transaction('commit')
2023 2055 ret = self.commitctx(cctx, True)
2024 2056 except: # re-raises
2025 2057 if edited:
2026 2058 self.ui.write(
2027 2059 _('note: commit message saved in %s\n') % msgfn)
2028 2060 raise
2029 2061 # update bookmarks, dirstate and mergestate
2030 2062 bookmarks.update(self, [p1, p2], ret)
2031 2063 cctx.markcommitted(ret)
2032 2064 ms.reset()
2033 2065 tr.close()
2034 2066
2035 2067 finally:
2036 2068 lockmod.release(tr, lock, wlock)
2037 2069
2038 2070 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2039 2071 # hack for command that use a temporary commit (eg: histedit)
2040 2072 # temporary commit got stripped before hook release
2041 2073 if self.changelog.hasnode(ret):
2042 2074 self.hook("commit", node=node, parent1=parent1,
2043 2075 parent2=parent2)
2044 2076 self._afterlock(commithook)
2045 2077 return ret
2046 2078
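    # Minimal usage sketch (illustrative; message and user are made up):
    #
    #   node = repo.commit(text='fix frobnicator', user='alice@example.com')
    #   if node is None:
    #       ...  # nothing to commit and empty commits were not allowed
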
2047 2079 @unfilteredmethod
2048 2080 def commitctx(self, ctx, error=False):
2049 2081 """Add a new revision to current repository.
2050 2082 Revision information is passed via the context argument.
2051 2083
2052 2084 ctx.files() should list all files involved in this commit, i.e.
2053 2085 modified/added/removed files. On merge, it may be wider than the
2054 2086 ctx.files() to be committed, since any file nodes derived directly
2055 2087 from p1 or p2 are excluded from the committed ctx.files().
2056 2088 """
2057 2089
2058 2090 tr = None
2059 2091 p1, p2 = ctx.p1(), ctx.p2()
2060 2092 user = ctx.user()
2061 2093
2062 2094 lock = self.lock()
2063 2095 try:
2064 2096 tr = self.transaction("commit")
2065 2097 trp = weakref.proxy(tr)
2066 2098
2067 2099 if ctx.manifestnode():
2068 2100 # reuse an existing manifest revision
2069 2101 self.ui.debug('reusing known manifest\n')
2070 2102 mn = ctx.manifestnode()
2071 2103 files = ctx.files()
2072 2104 elif ctx.files():
2073 2105 m1ctx = p1.manifestctx()
2074 2106 m2ctx = p2.manifestctx()
2075 2107 mctx = m1ctx.copy()
2076 2108
2077 2109 m = mctx.read()
2078 2110 m1 = m1ctx.read()
2079 2111 m2 = m2ctx.read()
2080 2112
2081 2113 # check in files
2082 2114 added = []
2083 2115 changed = []
2084 2116 removed = list(ctx.removed())
2085 2117 linkrev = len(self)
2086 2118 self.ui.note(_("committing files:\n"))
2087 2119 for f in sorted(ctx.modified() + ctx.added()):
2088 2120 self.ui.note(f + "\n")
2089 2121 try:
2090 2122 fctx = ctx[f]
2091 2123 if fctx is None:
2092 2124 removed.append(f)
2093 2125 else:
2094 2126 added.append(f)
2095 2127 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2096 2128 trp, changed)
2097 2129 m.setflag(f, fctx.flags())
2098 2130 except OSError as inst:
2099 2131 self.ui.warn(_("trouble committing %s!\n") % f)
2100 2132 raise
2101 2133 except IOError as inst:
2102 2134 errcode = getattr(inst, 'errno', errno.ENOENT)
2103 2135 if error or errcode and errcode != errno.ENOENT:
2104 2136 self.ui.warn(_("trouble committing %s!\n") % f)
2105 2137 raise
2106 2138
2107 2139 # update manifest
2108 2140 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2109 2141 drop = [f for f in removed if f in m]
2110 2142 for f in drop:
2111 2143 del m[f]
2112 2144 files = changed + removed
2113 2145 md = None
2114 2146 if not files:
2115 2147 # if no "files" actually changed in terms of the changelog,
2116 2148 # try hard to detect unmodified manifest entry so that the
2117 2149 # exact same commit can be reproduced later on convert.
2118 2150 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2119 2151 if not files and md:
2120 2152 self.ui.debug('not reusing manifest (no file change in '
2121 2153 'changelog, but manifest differs)\n')
2122 2154 if files or md:
2123 2155 self.ui.note(_("committing manifest\n"))
2124 2156 # we're using narrowmatch here since it's already applied at
2125 2157 # other stages (such as dirstate.walk), so we're already
2126 2158 # ignoring things outside of narrowspec in most cases. The
2127 2159 # one case where we might have files outside the narrowspec
2128 2160 # at this point is merges, and we already error out in the
2129 2161 # case where the merge has files outside of the narrowspec,
2130 2162 # so this is safe.
2131 2163 mn = mctx.write(trp, linkrev,
2132 2164 p1.manifestnode(), p2.manifestnode(),
2133 2165 added, drop, match=self.narrowmatch())
2134 2166 else:
2135 2167                 self.ui.debug('reusing manifest from p1 (listed files '
2136 2168 'actually unchanged)\n')
2137 2169 mn = p1.manifestnode()
2138 2170 else:
2139 2171 self.ui.debug('reusing manifest from p1 (no file change)\n')
2140 2172 mn = p1.manifestnode()
2141 2173 files = []
2142 2174
2143 2175 # update changelog
2144 2176 self.ui.note(_("committing changelog\n"))
2145 2177 self.changelog.delayupdate(tr)
2146 2178 n = self.changelog.add(mn, files, ctx.description(),
2147 2179 trp, p1.node(), p2.node(),
2148 2180 user, ctx.date(), ctx.extra().copy())
2149 2181 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2150 2182 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2151 2183 parent2=xp2)
2152 2184             # set the new commit in its proper phase
2153 2185 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2154 2186 if targetphase:
2155 2187                 # retracting the boundary does not alter parent changesets.
2156 2188                 # if a parent has a higher phase, the resulting phase will
2157 2189                 # be compliant anyway
2158 2190 #
2159 2191 # if minimal phase was 0 we don't need to retract anything
2160 2192 phases.registernew(self, tr, targetphase, [n])
2161 2193 tr.close()
2162 2194 return n
2163 2195 finally:
2164 2196 if tr:
2165 2197 tr.release()
2166 2198 lock.release()
2167 2199
2168 2200 @unfilteredmethod
2169 2201 def destroying(self):
2170 2202 '''Inform the repository that nodes are about to be destroyed.
2171 2203 Intended for use by strip and rollback, so there's a common
2172 2204 place for anything that has to be done before destroying history.
2173 2205
2174 2206 This is mostly useful for saving state that is in memory and waiting
2175 2207 to be flushed when the current lock is released. Because a call to
2176 2208 destroyed is imminent, the repo will be invalidated causing those
2177 2209 changes to stay in memory (waiting for the next unlock), or vanish
2178 2210 completely.
2179 2211 '''
2180 2212 # When using the same lock to commit and strip, the phasecache is left
2181 2213 # dirty after committing. Then when we strip, the repo is invalidated,
2182 2214 # causing those changes to disappear.
2183 2215 if '_phasecache' in vars(self):
2184 2216 self._phasecache.write()
2185 2217
2186 2218 @unfilteredmethod
2187 2219 def destroyed(self):
2188 2220 '''Inform the repository that nodes have been destroyed.
2189 2221 Intended for use by strip and rollback, so there's a common
2190 2222 place for anything that has to be done after destroying history.
2191 2223 '''
2192 2224 # When one tries to:
2193 2225 # 1) destroy nodes thus calling this method (e.g. strip)
2194 2226 # 2) use phasecache somewhere (e.g. commit)
2195 2227 #
2196 2228 # then 2) will fail because the phasecache contains nodes that were
2197 2229 # removed. We can either remove phasecache from the filecache,
2198 2230 # causing it to reload next time it is accessed, or simply filter
2199 2231 # the removed nodes now and write the updated cache.
2200 2232 self._phasecache.filterunknown(self)
2201 2233 self._phasecache.write()
2202 2234
2203 2235 # refresh all repository caches
2204 2236 self.updatecaches()
2205 2237
2206 2238 # Ensure the persistent tag cache is updated. Doing it now
2207 2239 # means that the tag cache only has to worry about destroyed
2208 2240 # heads immediately after a strip/rollback. That in turn
2209 2241 # guarantees that "cachetip == currenttip" (comparing both rev
2210 2242 # and node) always means no nodes have been added or destroyed.
2211 2243
2212 2244 # XXX this is suboptimal when qrefresh'ing: we strip the current
2213 2245 # head, refresh the tag cache, then immediately add a new head.
2214 2246 # But I think doing it this way is necessary for the "instant
2215 2247 # tag cache retrieval" case to work.
2216 2248 self.invalidate()
2217 2249
2218 2250 def status(self, node1='.', node2=None, match=None,
2219 2251 ignored=False, clean=False, unknown=False,
2220 2252 listsubrepos=False):
2221 2253 '''a convenience method that calls node1.status(node2)'''
2222 2254 return self[node1].status(node2, match, ignored, clean, unknown,
2223 2255 listsubrepos)
2224 2256
2225 2257 def addpostdsstatus(self, ps):
2226 2258 """Add a callback to run within the wlock, at the point at which status
2227 2259 fixups happen.
2228 2260
2229 2261 On status completion, callback(wctx, status) will be called with the
2230 2262 wlock held, unless the dirstate has changed from underneath or the wlock
2231 2263 couldn't be grabbed.
2232 2264
2233 2265 Callbacks should not capture and use a cached copy of the dirstate --
2234 2266 it might change in the meanwhile. Instead, they should access the
2235 2267 dirstate via wctx.repo().dirstate.
2236 2268
2237 2269 This list is emptied out after each status run -- extensions should
2238 2270         make sure they add to this list each time dirstate.status is called.
2239 2271 Extensions should also make sure they don't call this for statuses
2240 2272 that don't involve the dirstate.
2241 2273 """
2242 2274
2243 2275 # The list is located here for uniqueness reasons -- it is actually
2244 2276 # managed by the workingctx, but that isn't unique per-repo.
2245 2277 self._postdsstatus.append(ps)
2246 2278
2247 2279 def postdsstatus(self):
2248 2280 """Used by workingctx to get the list of post-dirstate-status hooks."""
2249 2281 return self._postdsstatus
2250 2282
2251 2283 def clearpostdsstatus(self):
2252 2284 """Used by workingctx to clear post-dirstate-status hooks."""
2253 2285 del self._postdsstatus[:]
2254 2286
2255 2287 def heads(self, start=None):
2256 2288 if start is None:
2257 2289 cl = self.changelog
2258 2290 headrevs = reversed(cl.headrevs())
2259 2291 return [cl.node(rev) for rev in headrevs]
2260 2292
2261 2293 heads = self.changelog.heads(start)
2262 2294 # sort the output in rev descending order
2263 2295 return sorted(heads, key=self.changelog.rev, reverse=True)
2264 2296
2265 2297 def branchheads(self, branch=None, start=None, closed=False):
2266 2298 '''return a (possibly filtered) list of heads for the given branch
2267 2299
2268 2300 Heads are returned in topological order, from newest to oldest.
2269 2301 If branch is None, use the dirstate branch.
2270 2302 If start is not None, return only heads reachable from start.
2271 2303 If closed is True, return heads that are marked as closed as well.
2272 2304 '''
2273 2305 if branch is None:
2274 2306 branch = self[None].branch()
2275 2307 branches = self.branchmap()
2276 2308 if branch not in branches:
2277 2309 return []
2278 2310 # the cache returns heads ordered lowest to highest
2279 2311 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2280 2312 if start is not None:
2281 2313 # filter out the heads that cannot be reached from startrev
2282 2314 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2283 2315 bheads = [h for h in bheads if h in fbheads]
2284 2316 return bheads
2285 2317
2286 2318 def branches(self, nodes):
2287 2319 if not nodes:
2288 2320 nodes = [self.changelog.tip()]
2289 2321 b = []
2290 2322 for n in nodes:
2291 2323 t = n
2292 2324 while True:
2293 2325 p = self.changelog.parents(n)
2294 2326 if p[1] != nullid or p[0] == nullid:
2295 2327 b.append((t, n, p[0], p[1]))
2296 2328 break
2297 2329 n = p[0]
2298 2330 return b
2299 2331
2300 2332 def between(self, pairs):
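        # For each (top, bottom) pair, walk first parents from top towards
        # bottom, recording the nodes seen at exponentially growing
        # distances (1, 2, 4, 8, ...); this sampled chain is what the
        # legacy 'between' wire protocol command serves.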
2301 2333 r = []
2302 2334
2303 2335 for top, bottom in pairs:
2304 2336 n, l, i = top, [], 0
2305 2337 f = 1
2306 2338
2307 2339 while n != bottom and n != nullid:
2308 2340 p = self.changelog.parents(n)[0]
2309 2341 if i == f:
2310 2342 l.append(n)
2311 2343 f = f * 2
2312 2344 n = p
2313 2345 i += 1
2314 2346
2315 2347 r.append(l)
2316 2348
2317 2349 return r
2318 2350
2319 2351 def checkpush(self, pushop):
2320 2352 """Extensions can override this function if additional checks have
2321 2353 to be performed before pushing, or call it if they override push
2322 2354 command.
2323 2355 """
2324 2356
2325 2357 @unfilteredpropertycache
2326 2358 def prepushoutgoinghooks(self):
2327 2359         """Return a util.hooks instance whose hooks are called with a pushop
2328 2360         (carrying repo, remote and outgoing) before changesets are pushed.
2329 2361 """
2330 2362 return util.hooks()
2331 2363
2332 2364 def pushkey(self, namespace, key, old, new):
2333 2365 try:
2334 2366 tr = self.currenttransaction()
2335 2367 hookargs = {}
2336 2368 if tr is not None:
2337 2369 hookargs.update(tr.hookargs)
2338 2370 hookargs = pycompat.strkwargs(hookargs)
2339 2371 hookargs[r'namespace'] = namespace
2340 2372 hookargs[r'key'] = key
2341 2373 hookargs[r'old'] = old
2342 2374 hookargs[r'new'] = new
2343 2375 self.hook('prepushkey', throw=True, **hookargs)
2344 2376 except error.HookAbort as exc:
2345 2377 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2346 2378 if exc.hint:
2347 2379 self.ui.write_err(_("(%s)\n") % exc.hint)
2348 2380 return False
2349 2381 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2350 2382 ret = pushkey.push(self, namespace, key, old, new)
2351 2383 def runhook():
2352 2384 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2353 2385 ret=ret)
2354 2386 self._afterlock(runhook)
2355 2387 return ret
2356 2388
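    # Usage sketch (illustrative; 'mybook' is a made-up bookmark name and
    # an empty 'old' value stands for "no previous value"):
    #
    #   ok = repo.pushkey('bookmarks', 'mybook', '', hex(newnode))
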
2357 2389 def listkeys(self, namespace):
2358 2390 self.hook('prelistkeys', throw=True, namespace=namespace)
2359 2391 self.ui.debug('listing keys for "%s"\n' % namespace)
2360 2392 values = pushkey.list(self, namespace)
2361 2393 self.hook('listkeys', namespace=namespace, values=values)
2362 2394 return values
2363 2395
2364 2396 def debugwireargs(self, one, two, three=None, four=None, five=None):
2365 2397 '''used to test argument passing over the wire'''
2366 2398 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2367 2399 pycompat.bytestr(four),
2368 2400 pycompat.bytestr(five))
2369 2401
2370 2402 def savecommitmessage(self, text):
2371 2403 fp = self.vfs('last-message.txt', 'wb')
2372 2404 try:
2373 2405 fp.write(text)
2374 2406 finally:
2375 2407 fp.close()
2376 2408 return self.pathto(fp.name[len(self.root) + 1:])
2377 2409
2378 2410 # used to avoid circular references so destructors work
2379 2411 def aftertrans(files):
2380 2412 renamefiles = [tuple(t) for t in files]
2381 2413 def a():
2382 2414 for vfs, src, dest in renamefiles:
2383 2415             # if src and dest refer to the same file, vfs.rename is a no-op,
2384 2416 # leaving both src and dest on disk. delete dest to make sure
2385 2417 # the rename couldn't be such a no-op.
2386 2418 vfs.tryunlink(dest)
2387 2419 try:
2388 2420 vfs.rename(src, dest)
2389 2421 except OSError: # journal file does not yet exist
2390 2422 pass
2391 2423 return a
2392 2424
2393 2425 def undoname(fn):
2394 2426 base, name = os.path.split(fn)
2395 2427 assert name.startswith('journal')
2396 2428 return os.path.join(base, name.replace('journal', 'undo', 1))
2397 2429
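# For orientation: undoname() maps each journal file to its undo
# counterpart, e.g. 'journal' -> 'undo' and
# 'journal.dirstate' -> 'undo.dirstate'.
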
2398 2430 def instance(ui, path, create, intents=None, createopts=None):
2399 2431 localpath = util.urllocalpath(path)
2400 2432 if create:
2401 2433 createrepository(ui, localpath, createopts=createopts)
2402 2434
2403 2435 return makelocalrepository(ui, localpath, intents=intents)
2404 2436
2405 2437 def islocal(path):
2406 2438 return True
2407 2439
2408 2440 def newreporequirements(ui, createopts=None):
2409 2441 """Determine the set of requirements for a new local repository.
2410 2442
2411 2443 Extensions can wrap this function to specify custom requirements for
2412 2444 new repositories.
2413 2445 """
2414 2446 createopts = createopts or {}
2415 2447
2416 2448 requirements = {'revlogv1'}
2417 2449 if ui.configbool('format', 'usestore'):
2418 2450 requirements.add('store')
2419 2451 if ui.configbool('format', 'usefncache'):
2420 2452 requirements.add('fncache')
2421 2453 if ui.configbool('format', 'dotencode'):
2422 2454 requirements.add('dotencode')
2423 2455
2424 2456 compengine = ui.config('experimental', 'format.compression')
2425 2457 if compengine not in util.compengines:
2426 2458 raise error.Abort(_('compression engine %s defined by '
2427 2459 'experimental.format.compression not available') %
2428 2460 compengine,
2429 2461 hint=_('run "hg debuginstall" to list available '
2430 2462 'compression engines'))
2431 2463
2432 2464 # zlib is the historical default and doesn't need an explicit requirement.
2433 2465 if compengine != 'zlib':
2434 2466 requirements.add('exp-compression-%s' % compengine)
2435 2467
2436 2468 if scmutil.gdinitconfig(ui):
2437 2469 requirements.add('generaldelta')
2438 2470 if ui.configbool('experimental', 'treemanifest'):
2439 2471 requirements.add('treemanifest')
2440 2472 # experimental config: format.sparse-revlog
2441 2473 if ui.configbool('format', 'sparse-revlog'):
2442 2474 requirements.add(SPARSEREVLOG_REQUIREMENT)
2443 2475
2444 2476 revlogv2 = ui.config('experimental', 'revlogv2')
2445 2477 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2446 2478 requirements.remove('revlogv1')
2447 2479 # generaldelta is implied by revlogv2.
2448 2480 requirements.discard('generaldelta')
2449 2481 requirements.add(REVLOGV2_REQUIREMENT)
2450 2482 # experimental config: format.internal-phase
2451 2483 if ui.configbool('format', 'internal-phase'):
2452 2484 requirements.add('internal-phase')
2453 2485
2454 2486 if createopts.get('narrowfiles'):
2455 2487 requirements.add(repository.NARROW_REQUIREMENT)
2456 2488
2457 2489 return requirements
2458 2490
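# For orientation (illustrative; the exact set depends on configuration):
# with stock settings this typically yields
#
#   {'revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta'}
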
2459 2491 def filterknowncreateopts(ui, createopts):
2460 2492 """Filters a dict of repo creation options against options that are known.
2461 2493
2462 2494 Receives a dict of repo creation options and returns a dict of those
2463 2495 options that we don't know how to handle.
2464 2496
2465 2497 This function is called as part of repository creation. If the
2466 2498 returned dict contains any items, repository creation will not
2467 2499 be allowed, as it means there was a request to create a repository
2468 2500 with options not recognized by loaded code.
2469 2501
2470 2502 Extensions can wrap this function to filter out creation options
2471 2503 they know how to handle.
2472 2504 """
2473 2505 known = {'narrowfiles'}
2474 2506
2475 2507 return {k: v for k, v in createopts.items() if k not in known}
2476 2508
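# Illustrative example ('shallow' stands in for any unrecognized option):
#
#   filterknowncreateopts(ui, {'narrowfiles': True, 'shallow': True})
#   -> {'shallow': True}
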
2477 2509 def createrepository(ui, path, createopts=None):
2478 2510 """Create a new repository in a vfs.
2479 2511
2480 2512 ``path`` path to the new repo's working directory.
2481 2513 ``createopts`` options for the new repository.
2482 2514 """
2483 2515 createopts = createopts or {}
2484 2516
2485 2517 unknownopts = filterknowncreateopts(ui, createopts)
2486 2518
2487 2519 if not isinstance(unknownopts, dict):
2488 2520 raise error.ProgrammingError('filterknowncreateopts() did not return '
2489 2521 'a dict')
2490 2522
2491 2523 if unknownopts:
2492 2524 raise error.Abort(_('unable to create repository because of unknown '
2493 2525 'creation option: %s') %
2494 2526                           ', '.join(sorted(unknownopts)),
2495 2527 hint=_('is a required extension not loaded?'))
2496 2528
2497 2529 requirements = newreporequirements(ui, createopts=createopts)
2498 2530
2499 2531 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
2500 2532 if not wdirvfs.exists():
2501 2533 wdirvfs.makedirs()
2502 2534
2503 2535 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
2504 2536 if hgvfs.exists():
2505 2537 raise error.RepoError(_('repository %s already exists') % path)
2506 2538
2507 2539 hgvfs.makedir(notindexed=True)
2508 2540
2509 2541 if b'store' in requirements:
2510 2542 hgvfs.mkdir(b'store')
2511 2543
2512 2544 # We create an invalid changelog outside the store so very old
2513 2545 # Mercurial versions (which didn't know about the requirements
2514 2546 # file) encounter an error on reading the changelog. This
2515 2547 # effectively locks out old clients and prevents them from
2516 2548 # mucking with a repo in an unknown format.
2517 2549 #
2518 2550 # The revlog header has version 2, which won't be recognized by
2519 2551 # such old clients.
2520 2552 hgvfs.append(b'00changelog.i',
2521 2553 b'\0\0\0\2 dummy changelog to prevent using the old repo '
2522 2554 b'layout')
2523 2555
2524 2556 scmutil.writerequires(hgvfs, requirements)
2525 2557
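# Usage sketch (illustrative; the path is made up). instance() above
# performs exactly this two-step dance when create=True:
#
#   createrepository(ui, '/tmp/newrepo')
#   repo = makelocalrepository(ui, '/tmp/newrepo')
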
2526 2558 def poisonrepository(repo):
2527 2559 """Poison a repository instance so it can no longer be used."""
2528 2560 # Perform any cleanup on the instance.
2529 2561 repo.close()
2530 2562
2531 2563 # Our strategy is to replace the type of the object with one that
2532 2564 # has all attribute lookups result in error.
2533 2565 #
2534 2566 # But we have to allow the close() method because some constructors
2535 2567 # of repos call close() on repo references.
2536 2568 class poisonedrepository(object):
2537 2569 def __getattribute__(self, item):
2538 2570 if item == r'close':
2539 2571 return object.__getattribute__(self, item)
2540 2572
2541 2573 raise error.ProgrammingError('repo instances should not be used '
2542 2574 'after unshare')
2543 2575
2544 2576 def close(self):
2545 2577 pass
2546 2578
2547 2579 # We may have a repoview, which intercepts __setattr__. So be sure
2548 2580 # we operate at the lowest level possible.
2549 2581 object.__setattr__(repo, r'__class__', poisonedrepository)
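
# Sketch of the resulting behavior (illustrative):
#
#   poisonrepository(repo)
#   repo.close()      # still permitted
#   repo.changelog    # raises error.ProgrammingError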