localrepo: check for .hg/ directory in makelocalrepository()...
Gregory Szorc -
r39727:2f067e36 default
@@ -1,2589 +1,2591
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 )
24 24 from . import (
25 25 bookmarks,
26 26 branchmap,
27 27 bundle2,
28 28 changegroup,
29 29 changelog,
30 30 color,
31 31 context,
32 32 dirstate,
33 33 dirstateguard,
34 34 discovery,
35 35 encoding,
36 36 error,
37 37 exchange,
38 38 extensions,
39 39 filelog,
40 40 hook,
41 41 lock as lockmod,
42 42 manifest,
43 43 match as matchmod,
44 44 merge as mergemod,
45 45 mergeutil,
46 46 namespaces,
47 47 narrowspec,
48 48 obsolete,
49 49 pathutil,
50 50 phases,
51 51 pushkey,
52 52 pycompat,
53 53 repository,
54 54 repoview,
55 55 revset,
56 56 revsetlang,
57 57 scmutil,
58 58 sparse,
59 59 store,
60 60 subrepoutil,
61 61 tags as tagsmod,
62 62 transaction,
63 63 txnutil,
64 64 util,
65 65 vfs as vfsmod,
66 66 )
67 67 from .utils import (
68 68 interfaceutil,
69 69 procutil,
70 70 stringutil,
71 71 )
72 72
73 73 from .revlogutils import (
74 74 constants as revlogconst,
75 75 )
76 76
77 77 release = lockmod.release
78 78 urlerr = util.urlerr
79 79 urlreq = util.urlreq
80 80
81 81 # set of (path, vfs-location) tuples. vfs-location is:
82 82 # - 'plain' for vfs relative paths
83 83 # - '' for svfs relative paths
84 84 _cachedfiles = set()
85 85
86 86 class _basefilecache(scmutil.filecache):
87 87 """All filecache usage on a repo is done for logic that should be unfiltered
88 88 """
89 89 def __get__(self, repo, type=None):
90 90 if repo is None:
91 91 return self
92 92 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
93 93 def __set__(self, repo, value):
94 94 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
95 95 def __delete__(self, repo):
96 96 return super(_basefilecache, self).__delete__(repo.unfiltered())
97 97
98 98 class repofilecache(_basefilecache):
99 99 """filecache for files in .hg but outside of .hg/store"""
100 100 def __init__(self, *paths):
101 101 super(repofilecache, self).__init__(*paths)
102 102 for path in paths:
103 103 _cachedfiles.add((path, 'plain'))
104 104
105 105 def join(self, obj, fname):
106 106 return obj.vfs.join(fname)
107 107
108 108 class storecache(_basefilecache):
109 109 """filecache for files in the store"""
110 110 def __init__(self, *paths):
111 111 super(storecache, self).__init__(*paths)
112 112 for path in paths:
113 113 _cachedfiles.add((path, ''))
114 114
115 115 def join(self, obj, fname):
116 116 return obj.sjoin(fname)
117 117
118 118 def isfilecached(repo, name):
119 119 """check if a repo has already cached the "name" filecache-ed property
120 120
121 121 This returns (cachedobj-or-None, iscached) tuple.
122 122 """
123 123 cacheentry = repo.unfiltered()._filecache.get(name, None)
124 124 if not cacheentry:
125 125 return None, False
126 126 return cacheentry.obj, True
127 127
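
For illustration, a minimal sketch (assuming ``repo`` is an already-constructed repository object) of probing a filecache-ed property without forcing it to be computed:

    # 'dirstate' is one of the filecache-ed properties defined later
    # in this module
    obj, cached = isfilecached(repo, 'dirstate')
    if cached:
        # reuse the already-loaded object without touching disk
        dirstate = obj
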
128 128 class unfilteredpropertycache(util.propertycache):
129 129 """propertycache that applies to the unfiltered repo only"""
130 130
131 131 def __get__(self, repo, type=None):
132 132 unfi = repo.unfiltered()
133 133 if unfi is repo:
134 134 return super(unfilteredpropertycache, self).__get__(unfi)
135 135 return getattr(unfi, self.name)
136 136
137 137 class filteredpropertycache(util.propertycache):
138 138 """propertycache that must take filtering into account"""
139 139
140 140 def cachevalue(self, obj, value):
141 141 object.__setattr__(obj, self.name, value)
142 142
143 143
144 144 def hasunfilteredcache(repo, name):
145 145 """check if a repo has an unfilteredpropertycache value for <name>"""
146 146 return name in vars(repo.unfiltered())
147 147
148 148 def unfilteredmethod(orig):
149 149 """decorate a method that always needs to be run on the unfiltered version"""
150 150 def wrapper(repo, *args, **kwargs):
151 151 return orig(repo.unfiltered(), *args, **kwargs)
152 152 return wrapper
153 153
154 154 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
155 155 'unbundle'}
156 156 legacycaps = moderncaps.union({'changegroupsubset'})
157 157
158 158 @interfaceutil.implementer(repository.ipeercommandexecutor)
159 159 class localcommandexecutor(object):
160 160 def __init__(self, peer):
161 161 self._peer = peer
162 162 self._sent = False
163 163 self._closed = False
164 164
165 165 def __enter__(self):
166 166 return self
167 167
168 168 def __exit__(self, exctype, excvalue, exctb):
169 169 self.close()
170 170
171 171 def callcommand(self, command, args):
172 172 if self._sent:
173 173 raise error.ProgrammingError('callcommand() cannot be used after '
174 174 'sendcommands()')
175 175
176 176 if self._closed:
177 177 raise error.ProgrammingError('callcommand() cannot be used after '
178 178 'close()')
179 179
180 180 # We don't need to support anything fancy. Just call the named
181 181 # method on the peer and return a resolved future.
182 182 fn = getattr(self._peer, pycompat.sysstr(command))
183 183
184 184 f = pycompat.futures.Future()
185 185
186 186 try:
187 187 result = fn(**pycompat.strkwargs(args))
188 188 except Exception:
189 189 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
190 190 else:
191 191 f.set_result(result)
192 192
193 193 return f
194 194
195 195 def sendcommands(self):
196 196 self._sent = True
197 197
198 198 def close(self):
199 199 self._closed = True
200 200
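
A usage sketch (assuming ``peer`` is a ``localpeer`` instance): commands are requested through the executor, and for local peers the returned future is already resolved:

    with peer.commandexecutor() as e:
        f = e.callcommand(b'lookup', {b'key': b'tip'})
        e.sendcommands()
        # resolved immediately for local peers; raises if the command
        # itself raised
        node = f.result()
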
201 201 @interfaceutil.implementer(repository.ipeercommands)
202 202 class localpeer(repository.peer):
203 203 '''peer for a local repo; reflects only the most recent API'''
204 204
205 205 def __init__(self, repo, caps=None):
206 206 super(localpeer, self).__init__()
207 207
208 208 if caps is None:
209 209 caps = moderncaps.copy()
210 210 self._repo = repo.filtered('served')
211 211 self.ui = repo.ui
212 212 self._caps = repo._restrictcapabilities(caps)
213 213
214 214 # Begin of _basepeer interface.
215 215
216 216 def url(self):
217 217 return self._repo.url()
218 218
219 219 def local(self):
220 220 return self._repo
221 221
222 222 def peer(self):
223 223 return self
224 224
225 225 def canpush(self):
226 226 return True
227 227
228 228 def close(self):
229 229 self._repo.close()
230 230
231 231 # End of _basepeer interface.
232 232
233 233 # Begin of _basewirecommands interface.
234 234
235 235 def branchmap(self):
236 236 return self._repo.branchmap()
237 237
238 238 def capabilities(self):
239 239 return self._caps
240 240
241 241 def clonebundles(self):
242 242 return self._repo.tryread('clonebundles.manifest')
243 243
244 244 def debugwireargs(self, one, two, three=None, four=None, five=None):
245 245 """Used to test argument passing over the wire"""
246 246 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
247 247 pycompat.bytestr(four),
248 248 pycompat.bytestr(five))
249 249
250 250 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
251 251 **kwargs):
252 252 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
253 253 common=common, bundlecaps=bundlecaps,
254 254 **kwargs)[1]
255 255 cb = util.chunkbuffer(chunks)
256 256
257 257 if exchange.bundle2requested(bundlecaps):
258 258 # When requesting a bundle2, getbundle returns a stream to make the
259 259 # wire-level function happier. We need to build a proper object
260 260 # from it in the local peer.
261 261 return bundle2.getunbundler(self.ui, cb)
262 262 else:
263 263 return changegroup.getunbundler('01', cb, None)
264 264
265 265 def heads(self):
266 266 return self._repo.heads()
267 267
268 268 def known(self, nodes):
269 269 return self._repo.known(nodes)
270 270
271 271 def listkeys(self, namespace):
272 272 return self._repo.listkeys(namespace)
273 273
274 274 def lookup(self, key):
275 275 return self._repo.lookup(key)
276 276
277 277 def pushkey(self, namespace, key, old, new):
278 278 return self._repo.pushkey(namespace, key, old, new)
279 279
280 280 def stream_out(self):
281 281 raise error.Abort(_('cannot perform stream clone against local '
282 282 'peer'))
283 283
284 284 def unbundle(self, bundle, heads, url):
285 285 """apply a bundle on a repo
286 286
287 287 This function handles the repo locking itself."""
288 288 try:
289 289 try:
290 290 bundle = exchange.readbundle(self.ui, bundle, None)
291 291 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
292 292 if util.safehasattr(ret, 'getchunks'):
293 293 # This is a bundle20 object, turn it into an unbundler.
294 294 # This little dance should be dropped eventually when the
295 295 # API is finally improved.
296 296 stream = util.chunkbuffer(ret.getchunks())
297 297 ret = bundle2.getunbundler(self.ui, stream)
298 298 return ret
299 299 except Exception as exc:
300 300 # If the exception contains output salvaged from a bundle2
301 301 # reply, we need to make sure it is printed before continuing
302 302 # to fail. So we build a bundle2 with such output and consume
303 303 # it directly.
304 304 #
305 305 # This is not very elegant but allows a "simple" solution for
306 306 # issue4594
307 307 output = getattr(exc, '_bundle2salvagedoutput', ())
308 308 if output:
309 309 bundler = bundle2.bundle20(self._repo.ui)
310 310 for out in output:
311 311 bundler.addpart(out)
312 312 stream = util.chunkbuffer(bundler.getchunks())
313 313 b = bundle2.getunbundler(self.ui, stream)
314 314 bundle2.processbundle(self._repo, b)
315 315 raise
316 316 except error.PushRaced as exc:
317 317 raise error.ResponseError(_('push failed:'),
318 318 stringutil.forcebytestr(exc))
319 319
320 320 # End of _basewirecommands interface.
321 321
322 322 # Begin of peer interface.
323 323
324 324 def commandexecutor(self):
325 325 return localcommandexecutor(self)
326 326
327 327 # End of peer interface.
328 328
329 329 @interfaceutil.implementer(repository.ipeerlegacycommands)
330 330 class locallegacypeer(localpeer):
331 331 '''peer extension which implements legacy methods too; used for tests with
332 332 restricted capabilities'''
333 333
334 334 def __init__(self, repo):
335 335 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
336 336
337 337 # Begin of baselegacywirecommands interface.
338 338
339 339 def between(self, pairs):
340 340 return self._repo.between(pairs)
341 341
342 342 def branches(self, nodes):
343 343 return self._repo.branches(nodes)
344 344
345 345 def changegroup(self, nodes, source):
346 346 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
347 347 missingheads=self._repo.heads())
348 348 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
349 349
350 350 def changegroupsubset(self, bases, heads, source):
351 351 outgoing = discovery.outgoing(self._repo, missingroots=bases,
352 352 missingheads=heads)
353 353 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
354 354
355 355 # End of baselegacywirecommands interface.
356 356
357 357 # Increment the sub-version when the revlog v2 format changes to lock out old
358 358 # clients.
359 359 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
360 360
361 361 # A repository with the sparserevlog feature will have delta chains that
362 362 # can spread over a larger span. Sparse reading cuts these large spans into
363 363 # pieces, so that each piece isn't too big.
364 364 # Without the sparserevlog capability, reading from the repository could use
365 365 # huge amounts of memory, because the whole span would be read at once,
366 366 # including all the intermediate revisions that aren't pertinent for the chain.
367 367 # This is why once a repository has enabled sparse-read, it becomes required.
368 368 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
369 369
370 370 # Functions receiving (ui, features) that extensions can register to impact
371 371 # the ability to load repositories with custom requirements. Only
372 372 # functions defined in loaded extensions are called.
373 373 #
374 374 # The function receives a set of requirement strings that the repository
375 375 # is capable of opening. Functions will typically add elements to the
376 376 # set to reflect that the extension knows how to handle those requirements.
377 377 featuresetupfuncs = set()
378 378
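A sketch of how an extension might register one of these functions (``exp-myfeature`` is a hypothetical requirement name; ``localrepo`` is this module, imported from ``mercurial``):

    def featuresetup(ui, features):
        # advertise support for an extra repository requirement
        features.add(b'exp-myfeature')

    def uisetup(ui):
        localrepo.featuresetupfuncs.add(featuresetup)
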
379 379 def makelocalrepository(baseui, path, intents=None):
380 380 """Create a local repository object.
381 381
382 382 Given arguments needed to construct a local repository, this function
383 383 derives a type suitable for representing that repository and returns an
384 384 instance of it.
385 385
386 386 The returned object conforms to the ``repository.completelocalrepository``
387 387 interface.
388 388 """
389 389 ui = baseui.copy()
390 390 # Prevent copying repo configuration.
391 391 ui.copy = baseui.copy
392 392
393 393 # Working directory VFS rooted at repository root.
394 394 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
395 395
396 396 # Main VFS for .hg/ directory.
397 397 hgpath = wdirvfs.join(b'.hg')
398 398 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
399 399
400 # The .hg/ path should exist and should be a directory. All other
401 # cases are errors.
402 if not hgvfs.isdir():
403 try:
404 hgvfs.stat()
405 except OSError as e:
406 if e.errno != errno.ENOENT:
407 raise
408
409 raise error.RepoError(_(b'repository %s not found') % path)
410
400 411 # The .hg/hgrc file may load extensions or contain config options
401 412 # that influence repository construction. Attempt to load it and
402 413 # process any new extensions that it may have pulled in.
403 414 try:
404 415 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
405 416 except IOError:
406 417 pass
407 418 else:
408 419 extensions.loadall(ui)
409 420
410 421 return localrepository(
411 422 baseui=baseui,
412 423 ui=ui,
413 424 origroot=path,
414 425 wdirvfs=wdirvfs,
415 426 hgvfs=hgvfs,
416 427 intents=intents)
417 428
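A caller-side sketch (the path is a placeholder) showing the error raised by the new directory check above:

    from mercurial import error, localrepo, ui as uimod

    try:
        repo = localrepo.makelocalrepository(uimod.ui.load(),
                                             b'/path/to/repo')
    except error.RepoError:
        # raised when /path/to/repo/.hg is missing or not a directory
        pass
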
418 429 @interfaceutil.implementer(repository.completelocalrepository)
419 430 class localrepository(object):
420 431
421 432 # obsolete experimental requirements:
422 433 # - manifestv2: An experimental new manifest format that allowed
423 434 # for stem compression of long paths. Experiment ended up not
424 435 # being successful (repository sizes went up due to worse delta
425 436 # chains), and the code was deleted in 4.6.
426 437 supportedformats = {
427 438 'revlogv1',
428 439 'generaldelta',
429 440 'treemanifest',
430 441 REVLOGV2_REQUIREMENT,
431 442 SPARSEREVLOG_REQUIREMENT,
432 443 }
433 444 _basesupported = supportedformats | {
434 445 'store',
435 446 'fncache',
436 447 'shared',
437 448 'relshared',
438 449 'dotencode',
439 450 'exp-sparse',
440 451 'internal-phase'
441 452 }
442 453 openerreqs = {
443 454 'revlogv1',
444 455 'generaldelta',
445 456 'treemanifest',
446 457 }
447 458
448 459 # list of prefixes for files which can be written without 'wlock'
449 460 # Extensions should extend this list when needed
450 461 _wlockfreeprefix = {
451 462 # We might consider requiring 'wlock' for the next
452 463 # two, but pretty much all the existing code assumes
453 464 # wlock is not needed so we keep them excluded for
454 465 # now.
455 466 'hgrc',
456 467 'requires',
457 468 # XXX cache is a complicated business; someone
458 469 # should investigate this in depth at some point
459 470 'cache/',
460 471 # XXX shouldn't be dirstate covered by the wlock?
461 472 'dirstate',
462 473 # XXX bisect was still a bit too messy at the time
463 474 # this changeset was introduced. Someone should fix
464 475 # the remaining bit and drop this line
465 476 'bisect.state',
466 477 }
467 478
468 479 def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, intents=None):
469 480 """Create a new local repository instance.
470 481
471 482 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
472 483 or ``localrepo.makelocalrepository()`` for obtaining a new repository
473 484 object.
474 485
475 486 Arguments:
476 487
477 488 baseui
478 489 ``ui.ui`` instance that ``ui`` argument was based off of.
479 490
480 491 ui
481 492 ``ui.ui`` instance for use by the repository.
482 493
483 494 origroot
484 495 ``bytes`` path to working directory root of this repository.
485 496
486 497 wdirvfs
487 498 ``vfs.vfs`` rooted at the working directory.
488 499
489 500 hgvfs
490 501 ``vfs.vfs`` rooted at .hg/
491 502
492 503 intents
493 504 ``set`` of system strings indicating what this repo will be used
494 505 for.
495 506 """
496 507 self.baseui = baseui
497 508 self.ui = ui
498 509 self.origroot = origroot
499 510 # vfs rooted at working directory.
500 511 self.wvfs = wdirvfs
501 512 self.root = wdirvfs.base
502 513 # vfs rooted at .hg/. Used to access most non-store paths.
503 514 self.vfs = hgvfs
504 515 self.path = hgvfs.base
505 516
506 self.requirements = set()
507 517 self.filtername = None
508 518 # svfs: usually rooted at .hg/store, used to access repository history
509 519 # If this is a shared repository, this vfs may point to another
510 520 # repository's .hg/store directory.
511 521 self.svfs = None
512 522
513 523 if (self.ui.configbool('devel', 'all-warnings') or
514 524 self.ui.configbool('devel', 'check-locks')):
515 525 self.vfs.audit = self._getvfsward(self.vfs.audit)
516 526 # A list of callbacks to shape the phase if no data were found.
517 527 # Callbacks are in the form: func(repo, roots) --> processed root.
518 528 # This list is to be filled by extensions during repo setup
519 529 self._phasedefaults = []
520 530
521 531 if featuresetupfuncs:
522 532 self.supported = set(self._basesupported) # use private copy
523 533 extmods = set(m.__name__ for n, m
524 534 in extensions.extensions(self.ui))
525 535 for setupfunc in featuresetupfuncs:
526 536 if setupfunc.__module__ in extmods:
527 537 setupfunc(self.ui, self.supported)
528 538 else:
529 539 self.supported = self._basesupported
530 540 color.setup(self.ui)
531 541
532 542 # Add compression engines.
533 543 for name in util.compengines:
534 544 engine = util.compengines[name]
535 545 if engine.revlogheader():
536 546 self.supported.add('exp-compression-%s' % name)
537 547
538 if not self.vfs.isdir():
539 548 try:
540 self.vfs.stat()
541 except OSError as inst:
542 if inst.errno != errno.ENOENT:
543 raise
544 raise error.RepoError(_("repository %s not found") % origroot)
545 else:
546 try:
547 self.requirements = scmutil.readrequires(
548 self.vfs, self.supported)
549 self.requirements = scmutil.readrequires(self.vfs, self.supported)
549 550 except IOError as inst:
550 551 if inst.errno != errno.ENOENT:
551 552 raise
553 self.requirements = set()
552 554
553 555 cachepath = self.vfs.join('cache')
554 556 self.sharedpath = self.path
555 557 try:
556 558 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
557 559 if 'relshared' in self.requirements:
558 560 sharedpath = self.vfs.join(sharedpath)
559 561 vfs = vfsmod.vfs(sharedpath, realpath=True)
560 562 cachepath = vfs.join('cache')
561 563 s = vfs.base
562 564 if not vfs.exists():
563 565 raise error.RepoError(
564 566 _('.hg/sharedpath points to nonexistent directory %s') % s)
565 567 self.sharedpath = s
566 568 except IOError as inst:
567 569 if inst.errno != errno.ENOENT:
568 570 raise
569 571
570 572 if 'exp-sparse' in self.requirements and not sparse.enabled:
571 573 raise error.RepoError(_('repository is using sparse feature but '
572 574 'sparse is not enabled; enable the '
573 575 '"sparse" extension to access'))
574 576
575 577 self.store = store.store(
576 578 self.requirements, self.sharedpath,
577 579 lambda base: vfsmod.vfs(base, cacheaudited=True))
578 580 self.spath = self.store.path
579 581 self.svfs = self.store.vfs
580 582 self.sjoin = self.store.join
581 583 self.vfs.createmode = self.store.createmode
582 584 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
583 585 self.cachevfs.createmode = self.store.createmode
584 586 if (self.ui.configbool('devel', 'all-warnings') or
585 587 self.ui.configbool('devel', 'check-locks')):
586 588 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
587 589 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
588 590 else: # standard vfs
589 591 self.svfs.audit = self._getsvfsward(self.svfs.audit)
590 592 self._applyopenerreqs()
591 593
592 594 self._dirstatevalidatewarned = False
593 595
594 596 self._branchcaches = {}
595 597 self._revbranchcache = None
596 598 self._filterpats = {}
597 599 self._datafilters = {}
598 600 self._transref = self._lockref = self._wlockref = None
599 601
600 602 # A cache for various files under .hg/ that tracks file changes,
601 603 # (used by the filecache decorator)
602 604 #
603 605 # Maps a property name to its util.filecacheentry
604 606 self._filecache = {}
605 607
606 608 # hold sets of revision to be filtered
607 609 # should be cleared when something might have changed the filter value:
608 610 # - new changesets,
609 611 # - phase change,
610 612 # - new obsolescence marker,
611 613 # - working directory parent change,
612 614 # - bookmark changes
613 615 self.filteredrevcache = {}
614 616
615 617 # post-dirstate-status hooks
616 618 self._postdsstatus = []
617 619
618 620 # generic mapping between names and nodes
619 621 self.names = namespaces.namespaces()
620 622
621 623 # Key to signature value.
622 624 self._sparsesignaturecache = {}
623 625 # Signature to cached matcher instance.
624 626 self._sparsematchercache = {}
625 627
626 628 def _getvfsward(self, origfunc):
627 629 """build a ward for self.vfs"""
628 630 rref = weakref.ref(self)
629 631 def checkvfs(path, mode=None):
630 632 ret = origfunc(path, mode=mode)
631 633 repo = rref()
632 634 if (repo is None
633 635 or not util.safehasattr(repo, '_wlockref')
634 636 or not util.safehasattr(repo, '_lockref')):
635 637 return
636 638 if mode in (None, 'r', 'rb'):
637 639 return
638 640 if path.startswith(repo.path):
639 641 # truncate name relative to the repository (.hg)
640 642 path = path[len(repo.path) + 1:]
641 643 if path.startswith('cache/'):
642 644 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
643 645 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
644 646 if path.startswith('journal.'):
645 647 # journal is covered by 'lock'
646 648 if repo._currentlock(repo._lockref) is None:
647 649 repo.ui.develwarn('write with no lock: "%s"' % path,
648 650 stacklevel=2, config='check-locks')
649 651 elif repo._currentlock(repo._wlockref) is None:
650 652 # rest of vfs files are covered by 'wlock'
651 653 #
652 654 # exclude special files
653 655 for prefix in self._wlockfreeprefix:
654 656 if path.startswith(prefix):
655 657 return
656 658 repo.ui.develwarn('write with no wlock: "%s"' % path,
657 659 stacklevel=2, config='check-locks')
658 660 return ret
659 661 return checkvfs
660 662
661 663 def _getsvfsward(self, origfunc):
662 664 """build a ward for self.svfs"""
663 665 rref = weakref.ref(self)
664 666 def checksvfs(path, mode=None):
665 667 ret = origfunc(path, mode=mode)
666 668 repo = rref()
667 669 if repo is None or not util.safehasattr(repo, '_lockref'):
668 670 return
669 671 if mode in (None, 'r', 'rb'):
670 672 return
671 673 if path.startswith(repo.sharedpath):
672 674 # truncate name relative to the repository (.hg)
673 675 path = path[len(repo.sharedpath) + 1:]
674 676 if repo._currentlock(repo._lockref) is None:
675 677 repo.ui.develwarn('write with no lock: "%s"' % path,
676 678 stacklevel=3)
677 679 return ret
678 680 return checksvfs
679 681
680 682 def close(self):
681 683 self._writecaches()
682 684
683 685 def _writecaches(self):
684 686 if self._revbranchcache:
685 687 self._revbranchcache.write()
686 688
687 689 def _restrictcapabilities(self, caps):
688 690 if self.ui.configbool('experimental', 'bundle2-advertise'):
689 691 caps = set(caps)
690 692 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
691 693 role='client'))
692 694 caps.add('bundle2=' + urlreq.quote(capsblob))
693 695 return caps
694 696
695 697 def _applyopenerreqs(self):
696 698 self.svfs.options = dict((r, 1) for r in self.requirements
697 699 if r in self.openerreqs)
698 700 # experimental config: format.chunkcachesize
699 701 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
700 702 if chunkcachesize is not None:
701 703 self.svfs.options['chunkcachesize'] = chunkcachesize
702 704 # experimental config: format.manifestcachesize
703 705 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
704 706 if manifestcachesize is not None:
705 707 self.svfs.options['manifestcachesize'] = manifestcachesize
706 708 deltabothparents = self.ui.configbool('storage',
707 709 'revlog.optimize-delta-parent-choice')
708 710 self.svfs.options['deltabothparents'] = deltabothparents
709 711 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
710 712 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
711 713 if 0 <= chainspan:
712 714 self.svfs.options['maxdeltachainspan'] = chainspan
713 715 mmapindexthreshold = self.ui.configbytes('experimental',
714 716 'mmapindexthreshold')
715 717 if mmapindexthreshold is not None:
716 718 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
717 719 withsparseread = self.ui.configbool('experimental', 'sparse-read')
718 720 srdensitythres = float(self.ui.config('experimental',
719 721 'sparse-read.density-threshold'))
720 722 srmingapsize = self.ui.configbytes('experimental',
721 723 'sparse-read.min-gap-size')
722 724 self.svfs.options['with-sparse-read'] = withsparseread
723 725 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
724 726 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
725 727 sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
726 728 self.svfs.options['sparse-revlog'] = sparserevlog
727 729 if sparserevlog:
728 730 self.svfs.options['generaldelta'] = True
729 731 maxchainlen = None
730 732 if sparserevlog:
731 733 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
732 734 # experimental config: format.maxchainlen
733 735 maxchainlen = self.ui.configint('format', 'maxchainlen', maxchainlen)
734 736 if maxchainlen is not None:
735 737 self.svfs.options['maxchainlen'] = maxchainlen
736 738
737 739 for r in self.requirements:
738 740 if r.startswith('exp-compression-'):
739 741 self.svfs.options['compengine'] = r[len('exp-compression-'):]
740 742
741 743 # TODO move "revlogv2" to openerreqs once finalized.
742 744 if REVLOGV2_REQUIREMENT in self.requirements:
743 745 self.svfs.options['revlogv2'] = True
744 746
745 747 def _writerequirements(self):
746 748 scmutil.writerequires(self.vfs, self.requirements)
747 749
748 750 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
749 751 # self -> auditor -> self._checknested -> self
750 752
751 753 @property
752 754 def auditor(self):
753 755 # This is only used by context.workingctx.match in order to
754 756 # detect files in subrepos.
755 757 return pathutil.pathauditor(self.root, callback=self._checknested)
756 758
757 759 @property
758 760 def nofsauditor(self):
759 761 # This is only used by context.basectx.match in order to detect
760 762 # files in subrepos.
761 763 return pathutil.pathauditor(self.root, callback=self._checknested,
762 764 realfs=False, cached=True)
763 765
764 766 def _checknested(self, path):
765 767 """Determine if path is a legal nested repository."""
766 768 if not path.startswith(self.root):
767 769 return False
768 770 subpath = path[len(self.root) + 1:]
769 771 normsubpath = util.pconvert(subpath)
770 772
771 773 # XXX: Checking against the current working copy is wrong in
772 774 # the sense that it can reject things like
773 775 #
774 776 # $ hg cat -r 10 sub/x.txt
775 777 #
776 778 # if sub/ is no longer a subrepository in the working copy
777 779 # parent revision.
778 780 #
779 781 # However, it can of course also allow things that would have
780 782 # been rejected before, such as the above cat command if sub/
781 783 # is a subrepository now, but was a normal directory before.
782 784 # The old path auditor would have rejected by mistake since it
783 785 # panics when it sees sub/.hg/.
784 786 #
785 787 # All in all, checking against the working copy seems sensible
786 788 # since we want to prevent access to nested repositories on
787 789 # the filesystem *now*.
788 790 ctx = self[None]
789 791 parts = util.splitpath(subpath)
790 792 while parts:
791 793 prefix = '/'.join(parts)
792 794 if prefix in ctx.substate:
793 795 if prefix == normsubpath:
794 796 return True
795 797 else:
796 798 sub = ctx.sub(prefix)
797 799 return sub.checknested(subpath[len(prefix) + 1:])
798 800 else:
799 801 parts.pop()
800 802 return False
801 803
802 804 def peer(self):
803 805 return localpeer(self) # not cached to avoid reference cycle
804 806
805 807 def unfiltered(self):
806 808 """Return unfiltered version of the repository
807 809
808 810 Intended to be overwritten by filtered repo."""
809 811 return self
810 812
811 813 def filtered(self, name, visibilityexceptions=None):
812 814 """Return a filtered version of a repository"""
813 815 cls = repoview.newtype(self.unfiltered().__class__)
814 816 return cls(self, name, visibilityexceptions)
815 817
816 818 @repofilecache('bookmarks', 'bookmarks.current')
817 819 def _bookmarks(self):
818 820 return bookmarks.bmstore(self)
819 821
820 822 @property
821 823 def _activebookmark(self):
822 824 return self._bookmarks.active
823 825
824 826 # _phasesets depend on changelog. What we need is to call
825 827 # _phasecache.invalidate() if '00changelog.i' was changed, but it
826 828 # can't be easily expressed in the filecache mechanism.
827 829 @storecache('phaseroots', '00changelog.i')
828 830 def _phasecache(self):
829 831 return phases.phasecache(self, self._phasedefaults)
830 832
831 833 @storecache('obsstore')
832 834 def obsstore(self):
833 835 return obsolete.makestore(self.ui, self)
834 836
835 837 @storecache('00changelog.i')
836 838 def changelog(self):
837 839 return changelog.changelog(self.svfs,
838 840 trypending=txnutil.mayhavepending(self.root))
839 841
840 842 def _constructmanifest(self):
841 843 # This is a temporary function while we migrate from manifest to
842 844 # manifestlog. It allows bundlerepo and unionrepo to intercept the
843 845 # manifest creation.
844 846 return manifest.manifestrevlog(self.svfs)
845 847
846 848 @storecache('00manifest.i')
847 849 def manifestlog(self):
848 850 return manifest.manifestlog(self.svfs, self)
849 851
850 852 @repofilecache('dirstate')
851 853 def dirstate(self):
852 854 return self._makedirstate()
853 855
854 856 def _makedirstate(self):
855 857 """Extension point for wrapping the dirstate per-repo."""
856 858 sparsematchfn = lambda: sparse.matcher(self)
857 859
858 860 return dirstate.dirstate(self.vfs, self.ui, self.root,
859 861 self._dirstatevalidate, sparsematchfn)
860 862
861 863 def _dirstatevalidate(self, node):
862 864 try:
863 865 self.changelog.rev(node)
864 866 return node
865 867 except error.LookupError:
866 868 if not self._dirstatevalidatewarned:
867 869 self._dirstatevalidatewarned = True
868 870 self.ui.warn(_("warning: ignoring unknown"
869 871 " working parent %s!\n") % short(node))
870 872 return nullid
871 873
872 874 @storecache(narrowspec.FILENAME)
873 875 def narrowpats(self):
874 876 """matcher patterns for this repository's narrowspec
875 877
876 878 A tuple of (includes, excludes).
877 879 """
878 880 source = self
879 881 if self.shared():
880 882 from . import hg
881 883 source = hg.sharedreposource(self)
882 884 return narrowspec.load(source)
883 885
884 886 @storecache(narrowspec.FILENAME)
885 887 def _narrowmatch(self):
886 888 if repository.NARROW_REQUIREMENT not in self.requirements:
887 889 return matchmod.always(self.root, '')
888 890 include, exclude = self.narrowpats
889 891 return narrowspec.match(self.root, include=include, exclude=exclude)
890 892
891 893 # TODO(martinvonz): make this property-like instead?
892 894 def narrowmatch(self):
893 895 return self._narrowmatch
894 896
895 897 def setnarrowpats(self, newincludes, newexcludes):
896 898 narrowspec.save(self, newincludes, newexcludes)
897 899 self.invalidate(clearfilecache=True)
898 900
899 901 def __getitem__(self, changeid):
900 902 if changeid is None:
901 903 return context.workingctx(self)
902 904 if isinstance(changeid, context.basectx):
903 905 return changeid
904 906 if isinstance(changeid, slice):
905 907 # wdirrev isn't contiguous so the slice shouldn't include it
906 908 return [context.changectx(self, i)
907 909 for i in pycompat.xrange(*changeid.indices(len(self)))
908 910 if i not in self.changelog.filteredrevs]
909 911 try:
910 912 return context.changectx(self, changeid)
911 913 except error.WdirUnsupported:
912 914 return context.workingctx(self)
913 915
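In short, a few of the supported forms (assuming ``repo`` is an existing repository):

    wctx = repo[None]     # workingctx for the working directory
    ctx = repo[0]         # changectx for revision 0
    ctxs = repo[0:3]      # list of changectx; filtered revisions skipped
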
914 916 def __contains__(self, changeid):
915 917 """True if the given changeid exists
916 918
917 919 error.AmbiguousPrefixLookupError is raised if an ambiguous node
918 920 is specified.
919 921 """
920 922 try:
921 923 self[changeid]
922 924 return True
923 925 except error.RepoLookupError:
924 926 return False
925 927
926 928 def __nonzero__(self):
927 929 return True
928 930
929 931 __bool__ = __nonzero__
930 932
931 933 def __len__(self):
932 934 # no need to pay the cost of repoview.changelog
933 935 unfi = self.unfiltered()
934 936 return len(unfi.changelog)
935 937
936 938 def __iter__(self):
937 939 return iter(self.changelog)
938 940
939 941 def revs(self, expr, *args):
940 942 '''Find revisions matching a revset.
941 943
942 944 The revset is specified as a string ``expr`` that may contain
943 945 %-formatting to escape certain types. See ``revsetlang.formatspec``.
944 946
945 947 Revset aliases from the configuration are not expanded. To expand
946 948 user aliases, consider calling ``scmutil.revrange()`` or
947 949 ``repo.anyrevs([expr], user=True)``.
948 950
949 951 Returns a revset.abstractsmartset, which is a list-like interface
950 952 that contains integer revisions.
951 953 '''
952 954 expr = revsetlang.formatspec(expr, *args)
953 955 m = revset.match(None, expr)
954 956 return m(self)
955 957
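For example (a sketch), the %-formatting escapes user-supplied values; ``%d`` interpolates an integer revision:

    for rev in repo.revs(b'ancestors(%d) and not public()', 42):
        ctx = repo[rev]
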
956 958 def set(self, expr, *args):
957 959 '''Find revisions matching a revset and emit changectx instances.
958 960
959 961 This is a convenience wrapper around ``revs()`` that iterates the
960 962 result and is a generator of changectx instances.
961 963
962 964 Revset aliases from the configuration are not expanded. To expand
963 965 user aliases, consider calling ``scmutil.revrange()``.
964 966 '''
965 967 for r in self.revs(expr, *args):
966 968 yield self[r]
967 969
968 970 def anyrevs(self, specs, user=False, localalias=None):
969 971 '''Find revisions matching one of the given revsets.
970 972
971 973 Revset aliases from the configuration are not expanded by default. To
972 974 expand user aliases, specify ``user=True``. To provide some local
973 975 definitions overriding user aliases, set ``localalias`` to
974 976 ``{name: definitionstring}``.
975 977 '''
976 978 if user:
977 979 m = revset.matchany(self.ui, specs,
978 980 lookup=revset.lookupfn(self),
979 981 localalias=localalias)
980 982 else:
981 983 m = revset.matchany(None, specs, localalias=localalias)
982 984 return m(self)
983 985
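A usage sketch, where ``mine`` is a hypothetical local alias overriding any user-defined one:

    revs = repo.anyrevs([b'draft() and mine()'],
                        user=True,
                        localalias={b'mine': b'user("alice")'})
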
984 986 def url(self):
985 987 return 'file:' + self.root
986 988
987 989 def hook(self, name, throw=False, **args):
988 990 """Call a hook, passing this repo instance.
989 991
990 992 This is a convenience method to aid invoking hooks. Extensions likely
991 993 won't call this unless they have registered a custom hook or are
992 994 replacing code that is expected to call a hook.
993 995 """
994 996 return hook.hook(self.ui, self, name, throw, **args)
995 997
996 998 @filteredpropertycache
997 999 def _tagscache(self):
998 1000 '''Returns a tagscache object that contains various tags-related
999 1001 caches.'''
1000 1002
1001 1003 # This simplifies its cache management by having one decorated
1002 1004 # function (this one) and the rest simply fetch things from it.
1003 1005 class tagscache(object):
1004 1006 def __init__(self):
1005 1007 # These two define the set of tags for this repository. tags
1006 1008 # maps tag name to node; tagtypes maps tag name to 'global' or
1007 1009 # 'local'. (Global tags are defined by .hgtags across all
1008 1010 # heads, and local tags are defined in .hg/localtags.)
1009 1011 # They constitute the in-memory cache of tags.
1010 1012 self.tags = self.tagtypes = None
1011 1013
1012 1014 self.nodetagscache = self.tagslist = None
1013 1015
1014 1016 cache = tagscache()
1015 1017 cache.tags, cache.tagtypes = self._findtags()
1016 1018
1017 1019 return cache
1018 1020
1019 1021 def tags(self):
1020 1022 '''return a mapping of tag to node'''
1021 1023 t = {}
1022 1024 if self.changelog.filteredrevs:
1023 1025 tags, tt = self._findtags()
1024 1026 else:
1025 1027 tags = self._tagscache.tags
1026 1028 for k, v in tags.iteritems():
1027 1029 try:
1028 1030 # ignore tags to unknown nodes
1029 1031 self.changelog.rev(v)
1030 1032 t[k] = v
1031 1033 except (error.LookupError, ValueError):
1032 1034 pass
1033 1035 return t
1034 1036
1035 1037 def _findtags(self):
1036 1038 '''Do the hard work of finding tags. Return a pair of dicts
1037 1039 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1038 1040 maps tag name to a string like \'global\' or \'local\'.
1039 1041 Subclasses or extensions are free to add their own tags, but
1040 1042 should be aware that the returned dicts will be retained for the
1041 1043 duration of the localrepo object.'''
1042 1044
1043 1045 # XXX what tagtype should subclasses/extensions use? Currently
1044 1046 # mq and bookmarks add tags, but do not set the tagtype at all.
1045 1047 # Should each extension invent its own tag type? Should there
1046 1048 # be one tagtype for all such "virtual" tags? Or is the status
1047 1049 # quo fine?
1048 1050
1049 1051
1050 1052 # map tag name to (node, hist)
1051 1053 alltags = tagsmod.findglobaltags(self.ui, self)
1052 1054 # map tag name to tag type
1053 1055 tagtypes = dict((tag, 'global') for tag in alltags)
1054 1056
1055 1057 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1056 1058
1057 1059 # Build the return dicts. Have to re-encode tag names because
1058 1060 # the tags module always uses UTF-8 (in order not to lose info
1059 1061 # writing to the cache), but the rest of Mercurial wants them in
1060 1062 # local encoding.
1061 1063 tags = {}
1062 1064 for (name, (node, hist)) in alltags.iteritems():
1063 1065 if node != nullid:
1064 1066 tags[encoding.tolocal(name)] = node
1065 1067 tags['tip'] = self.changelog.tip()
1066 1068 tagtypes = dict([(encoding.tolocal(name), value)
1067 1069 for (name, value) in tagtypes.iteritems()])
1068 1070 return (tags, tagtypes)
1069 1071
1070 1072 def tagtype(self, tagname):
1071 1073 '''
1072 1074 return the type of the given tag. result can be:
1073 1075
1074 1076 'local' : a local tag
1075 1077 'global' : a global tag
1076 1078 None : tag does not exist
1077 1079 '''
1078 1080
1079 1081 return self._tagscache.tagtypes.get(tagname)
1080 1082
1081 1083 def tagslist(self):
1082 1084 '''return a list of tags ordered by revision'''
1083 1085 if not self._tagscache.tagslist:
1084 1086 l = []
1085 1087 for t, n in self.tags().iteritems():
1086 1088 l.append((self.changelog.rev(n), t, n))
1087 1089 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1088 1090
1089 1091 return self._tagscache.tagslist
1090 1092
1091 1093 def nodetags(self, node):
1092 1094 '''return the tags associated with a node'''
1093 1095 if not self._tagscache.nodetagscache:
1094 1096 nodetagscache = {}
1095 1097 for t, n in self._tagscache.tags.iteritems():
1096 1098 nodetagscache.setdefault(n, []).append(t)
1097 1099 for tags in nodetagscache.itervalues():
1098 1100 tags.sort()
1099 1101 self._tagscache.nodetagscache = nodetagscache
1100 1102 return self._tagscache.nodetagscache.get(node, [])
1101 1103
1102 1104 def nodebookmarks(self, node):
1103 1105 """return the list of bookmarks pointing to the specified node"""
1104 1106 return self._bookmarks.names(node)
1105 1107
1106 1108 def branchmap(self):
1107 1109 '''returns a dictionary {branch: [branchheads]} with branchheads
1108 1110 ordered by increasing revision number'''
1109 1111 branchmap.updatecache(self)
1110 1112 return self._branchcaches[self.filtername]
1111 1113
1112 1114 @unfilteredmethod
1113 1115 def revbranchcache(self):
1114 1116 if not self._revbranchcache:
1115 1117 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1116 1118 return self._revbranchcache
1117 1119
1118 1120 def branchtip(self, branch, ignoremissing=False):
1119 1121 '''return the tip node for a given branch
1120 1122
1121 1123 If ignoremissing is True, then this method will not raise an error.
1122 1124 This is helpful for callers that only expect None for a missing branch
1123 1125 (e.g. namespace).
1124 1126
1125 1127 '''
1126 1128 try:
1127 1129 return self.branchmap().branchtip(branch)
1128 1130 except KeyError:
1129 1131 if not ignoremissing:
1130 1132 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1131 1133 else:
1132 1134 pass
1133 1135
1134 1136 def lookup(self, key):
1135 1137 return scmutil.revsymbol(self, key).node()
1136 1138
1137 1139 def lookupbranch(self, key):
1138 1140 if key in self.branchmap():
1139 1141 return key
1140 1142
1141 1143 return scmutil.revsymbol(self, key).branch()
1142 1144
1143 1145 def known(self, nodes):
1144 1146 cl = self.changelog
1145 1147 nm = cl.nodemap
1146 1148 filtered = cl.filteredrevs
1147 1149 result = []
1148 1150 for n in nodes:
1149 1151 r = nm.get(n)
1150 1152 resp = not (r is None or r in filtered)
1151 1153 result.append(resp)
1152 1154 return result
1153 1155
1154 1156 def local(self):
1155 1157 return self
1156 1158
1157 1159 def publishing(self):
1158 1160 # it's safe (and desirable) to trust the publish flag unconditionally
1159 1161 # so that we don't finalize changes shared between users via ssh or nfs
1160 1162 return self.ui.configbool('phases', 'publish', untrusted=True)
1161 1163
1162 1164 def cancopy(self):
1163 1165 # so statichttprepo's override of local() works
1164 1166 if not self.local():
1165 1167 return False
1166 1168 if not self.publishing():
1167 1169 return True
1168 1170 # if publishing we can't copy if there is filtered content
1169 1171 return not self.filtered('visible').changelog.filteredrevs
1170 1172
1171 1173 def shared(self):
1172 1174 '''the type of shared repository (None if not shared)'''
1173 1175 if self.sharedpath != self.path:
1174 1176 return 'store'
1175 1177 return None
1176 1178
1177 1179 def wjoin(self, f, *insidef):
1178 1180 return self.vfs.reljoin(self.root, f, *insidef)
1179 1181
1180 1182 def file(self, f):
1181 1183 if f[0] == '/':
1182 1184 f = f[1:]
1183 1185 return filelog.filelog(self.svfs, f)
1184 1186
1185 1187 def setparents(self, p1, p2=nullid):
1186 1188 with self.dirstate.parentchange():
1187 1189 copies = self.dirstate.setparents(p1, p2)
1188 1190 pctx = self[p1]
1189 1191 if copies:
1190 1192 # Adjust copy records, the dirstate cannot do it, it
1191 1193 # requires access to the parents' manifests. Preserve them
1192 1194 # only for entries added to first parent.
1193 1195 for f in copies:
1194 1196 if f not in pctx and copies[f] in pctx:
1195 1197 self.dirstate.copy(copies[f], f)
1196 1198 if p2 == nullid:
1197 1199 for f, s in sorted(self.dirstate.copies().items()):
1198 1200 if f not in pctx and s not in pctx:
1199 1201 self.dirstate.copy(None, f)
1200 1202
1201 1203 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1202 1204 """changeid can be a changeset revision, node, or tag.
1203 1205 fileid can be a file revision or node."""
1204 1206 return context.filectx(self, path, changeid, fileid,
1205 1207 changectx=changectx)
1206 1208
1207 1209 def getcwd(self):
1208 1210 return self.dirstate.getcwd()
1209 1211
1210 1212 def pathto(self, f, cwd=None):
1211 1213 return self.dirstate.pathto(f, cwd)
1212 1214
1213 1215 def _loadfilter(self, filter):
1214 1216 if filter not in self._filterpats:
1215 1217 l = []
1216 1218 for pat, cmd in self.ui.configitems(filter):
1217 1219 if cmd == '!':
1218 1220 continue
1219 1221 mf = matchmod.match(self.root, '', [pat])
1220 1222 fn = None
1221 1223 params = cmd
1222 1224 for name, filterfn in self._datafilters.iteritems():
1223 1225 if cmd.startswith(name):
1224 1226 fn = filterfn
1225 1227 params = cmd[len(name):].lstrip()
1226 1228 break
1227 1229 if not fn:
1228 1230 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1229 1231 # Wrap old filters not supporting keyword arguments
1230 1232 if not pycompat.getargspec(fn)[2]:
1231 1233 oldfn = fn
1232 1234 fn = lambda s, c, **kwargs: oldfn(s, c)
1233 1235 l.append((mf, fn, params))
1234 1236 self._filterpats[filter] = l
1235 1237 return self._filterpats[filter]
1236 1238
1237 1239 def _filter(self, filterpats, filename, data):
1238 1240 for mf, fn, cmd in filterpats:
1239 1241 if mf(filename):
1240 1242 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1241 1243 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1242 1244 break
1243 1245
1244 1246 return data
1245 1247
1246 1248 @unfilteredpropertycache
1247 1249 def _encodefilterpats(self):
1248 1250 return self._loadfilter('encode')
1249 1251
1250 1252 @unfilteredpropertycache
1251 1253 def _decodefilterpats(self):
1252 1254 return self._loadfilter('decode')
1253 1255
1254 1256 def adddatafilter(self, name, filter):
1255 1257 self._datafilters[name] = filter
1256 1258
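A sketch of registering a custom data filter (the ``myfilter:`` name and its behavior are hypothetical); filters are matched against command prefixes configured in the ``[encode]``/``[decode]`` sections:

    def mydatafilter(s, cmd, **kwargs):
        # receives the file data and the configured command string
        return s.replace(b'\r\n', b'\n')

    repo.adddatafilter(b'myfilter:', mydatafilter)
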
1257 1259 def wread(self, filename):
1258 1260 if self.wvfs.islink(filename):
1259 1261 data = self.wvfs.readlink(filename)
1260 1262 else:
1261 1263 data = self.wvfs.read(filename)
1262 1264 return self._filter(self._encodefilterpats, filename, data)
1263 1265
1264 1266 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1265 1267 """write ``data`` into ``filename`` in the working directory
1266 1268
1267 1269 This returns the length of the written (maybe decoded) data.
1268 1270 """
1269 1271 data = self._filter(self._decodefilterpats, filename, data)
1270 1272 if 'l' in flags:
1271 1273 self.wvfs.symlink(data, filename)
1272 1274 else:
1273 1275 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1274 1276 **kwargs)
1275 1277 if 'x' in flags:
1276 1278 self.wvfs.setflags(filename, False, True)
1277 1279 else:
1278 1280 self.wvfs.setflags(filename, False, False)
1279 1281 return len(data)
1280 1282
1281 1283 def wwritedata(self, filename, data):
1282 1284 return self._filter(self._decodefilterpats, filename, data)
1283 1285
1284 1286 def currenttransaction(self):
1285 1287 """return the current transaction or None if none exists"""
1286 1288 if self._transref:
1287 1289 tr = self._transref()
1288 1290 else:
1289 1291 tr = None
1290 1292
1291 1293 if tr and tr.running():
1292 1294 return tr
1293 1295 return None
1294 1296
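Callers typically open a transaction under lock and let the context manager close it; ``transaction()`` below nests automatically when one is already running (a sketch; the description string is arbitrary):

    with repo.wlock(), repo.lock():
        with repo.transaction(b'my-operation') as tr:
            # write data here; tr.close() runs on normal exit
            pass
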
1295 1297 def transaction(self, desc, report=None):
1296 1298 if (self.ui.configbool('devel', 'all-warnings')
1297 1299 or self.ui.configbool('devel', 'check-locks')):
1298 1300 if self._currentlock(self._lockref) is None:
1299 1301 raise error.ProgrammingError('transaction requires locking')
1300 1302 tr = self.currenttransaction()
1301 1303 if tr is not None:
1302 1304 return tr.nest(name=desc)
1303 1305
1304 1306 # abort here if the journal already exists
1305 1307 if self.svfs.exists("journal"):
1306 1308 raise error.RepoError(
1307 1309 _("abandoned transaction found"),
1308 1310 hint=_("run 'hg recover' to clean up transaction"))
1309 1311
1310 1312 idbase = "%.40f#%f" % (random.random(), time.time())
1311 1313 ha = hex(hashlib.sha1(idbase).digest())
1312 1314 txnid = 'TXN:' + ha
1313 1315 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1314 1316
1315 1317 self._writejournal(desc)
1316 1318 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1317 1319 if report:
1318 1320 rp = report
1319 1321 else:
1320 1322 rp = self.ui.warn
1321 1323 vfsmap = {'plain': self.vfs} # root of .hg/
1322 1324 # we must avoid cyclic reference between repo and transaction.
1323 1325 reporef = weakref.ref(self)
1324 1326 # Code to track tag movement
1325 1327 #
1326 1328 # Since tags are all handled as file content, it is actually quite hard
1327 1329 # to track these movements from a code perspective. So we fall back to
1328 1330 # tracking at the repository level. One could envision tracking changes
1329 1331 # to the '.hgtags' file through changegroup apply but that fails to
1330 1332 # cope with cases where a transaction exposes new heads without a
1331 1333 # changegroup being involved (eg: phase movement).
1332 1334 #
1333 1335 # For now, we gate the feature behind a flag since this likely comes
1334 1336 # with performance impacts. The current code runs more often than needed
1335 1337 # and does not use caches as much as it could. The current focus is on
1336 1338 # the behavior of the feature so we disable it by default. The flag
1337 1339 # will be removed when we are happy with the performance impact.
1338 1340 #
1339 1341 # Once this feature is no longer experimental move the following
1340 1342 # documentation to the appropriate help section:
1341 1343 #
1342 1344 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1343 1345 # tags (new or changed or deleted tags). In addition the details of
1344 1346 # these changes are made available in a file at:
1345 1347 # ``REPOROOT/.hg/changes/tags.changes``.
1346 1348 # Make sure you check for HG_TAG_MOVED before reading that file as it
1347 1349 # might exist from a previous transaction even if no tags were touched
1348 1350 # in this one. Changes are recorded in a line-based format::
1349 1351 #
1350 1352 # <action> <hex-node> <tag-name>\n
1351 1353 #
1352 1354 # Actions are defined as follows:
1353 1355 # "-R": tag is removed,
1354 1356 # "+A": tag is added,
1355 1357 # "-M": tag is moved (old value),
1356 1358 # "+M": tag is moved (new value),
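
A hypothetical reader for that format might look like this (a sketch, not part of the real API):

    def parsetagschanges(data):
        # each line is: <action> <hex-node> <tag-name>
        entries = []
        for line in data.splitlines():
            action, hexnode, name = line.split(b' ', 2)
            entries.append((action, hexnode, name))
        return entries
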
1357 1359 tracktags = lambda x: None
1358 1360 # experimental config: experimental.hook-track-tags
1359 1361 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1360 1362 if desc != 'strip' and shouldtracktags:
1361 1363 oldheads = self.changelog.headrevs()
1362 1364 def tracktags(tr2):
1363 1365 repo = reporef()
1364 1366 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1365 1367 newheads = repo.changelog.headrevs()
1366 1368 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1367 1369 # notes: we compare lists here.
1368 1370 # As we do it only once, building a set would not be cheaper
1369 1371 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1370 1372 if changes:
1371 1373 tr2.hookargs['tag_moved'] = '1'
1372 1374 with repo.vfs('changes/tags.changes', 'w',
1373 1375 atomictemp=True) as changesfile:
1374 1376 # note: we do not register the file with the transaction
1375 1377 # because we need it to still exist when the transaction
1376 1378 # is closed (for txnclose hooks)
1377 1379 tagsmod.writediff(changesfile, changes)
1378 1380 def validate(tr2):
1379 1381 """will run pre-closing hooks"""
1380 1382 # XXX the transaction API is a bit lacking here so we take a hacky
1381 1383 # path for now
1382 1384 #
1383 1385 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1384 1386 # dict is copied before these run. In addition we need the data
1385 1387 # available to in-memory hooks too.
1386 1388 #
1387 1389 # Moreover, we also need to make sure this runs before txnclose
1388 1390 # hooks and there is no "pending" mechanism that would execute
1389 1391 # logic only if hooks are about to run.
1390 1392 #
1391 1393 # Fixing this limitation of the transaction is also needed to track
1392 1394 # other families of changes (bookmarks, phases, obsolescence).
1393 1395 #
1394 1396 # This will have to be fixed before we remove the experimental
1395 1397 # gating.
1396 1398 tracktags(tr2)
1397 1399 repo = reporef()
1398 1400 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1399 1401 scmutil.enforcesinglehead(repo, tr2, desc)
1400 1402 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1401 1403 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1402 1404 args = tr.hookargs.copy()
1403 1405 args.update(bookmarks.preparehookargs(name, old, new))
1404 1406 repo.hook('pretxnclose-bookmark', throw=True,
1405 1407 txnname=desc,
1406 1408 **pycompat.strkwargs(args))
1407 1409 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1408 1410 cl = repo.unfiltered().changelog
1409 1411 for rev, (old, new) in tr.changes['phases'].items():
1410 1412 args = tr.hookargs.copy()
1411 1413 node = hex(cl.node(rev))
1412 1414 args.update(phases.preparehookargs(node, old, new))
1413 1415 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1414 1416 **pycompat.strkwargs(args))
1415 1417
1416 1418 repo.hook('pretxnclose', throw=True,
1417 1419 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1418 1420 def releasefn(tr, success):
1419 1421 repo = reporef()
1420 1422 if success:
1421 1423 # this should be explicitly invoked here, because
1422 1424 # in-memory changes aren't written out when closing the
1423 1425 # transaction, if tr.addfilegenerator (via
1424 1426 # dirstate.write or so) isn't invoked while the
1425 1427 # transaction is running
1426 1428 repo.dirstate.write(None)
1427 1429 else:
1428 1430 # discard all changes (including ones already written
1429 1431 # out) in this transaction
1430 1432 narrowspec.restorebackup(self, 'journal.narrowspec')
1431 1433 repo.dirstate.restorebackup(None, 'journal.dirstate')
1432 1434
1433 1435 repo.invalidate(clearfilecache=True)
1434 1436
1435 1437 tr = transaction.transaction(rp, self.svfs, vfsmap,
1436 1438 "journal",
1437 1439 "undo",
1438 1440 aftertrans(renames),
1439 1441 self.store.createmode,
1440 1442 validator=validate,
1441 1443 releasefn=releasefn,
1442 1444 checkambigfiles=_cachedfiles,
1443 1445 name=desc)
1444 1446 tr.changes['origrepolen'] = len(self)
1445 1447 tr.changes['obsmarkers'] = set()
1446 1448 tr.changes['phases'] = {}
1447 1449 tr.changes['bookmarks'] = {}
1448 1450
1449 1451 tr.hookargs['txnid'] = txnid
1450 1452 # note: writing the fncache only during finalize means that the file is
1451 1453 # outdated when running hooks. As fncache is used for streaming clone,
1452 1454 # this is not expected to break anything that happens during the hooks.
1453 1455 tr.addfinalize('flush-fncache', self.store.write)
1454 1456 def txnclosehook(tr2):
1455 1457 """To be run if the transaction is successful; will schedule a hook run
1456 1458 """
1457 1459 # Don't reference tr2 in hook() so we don't hold a reference.
1458 1460 # This reduces memory consumption when there are multiple
1459 1461 # transactions per lock. This can likely go away if issue5045
1460 1462 # fixes the function accumulation.
1461 1463 hookargs = tr2.hookargs
1462 1464
1463 1465 def hookfunc():
1464 1466 repo = reporef()
1465 1467 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1466 1468 bmchanges = sorted(tr.changes['bookmarks'].items())
1467 1469 for name, (old, new) in bmchanges:
1468 1470 args = tr.hookargs.copy()
1469 1471 args.update(bookmarks.preparehookargs(name, old, new))
1470 1472 repo.hook('txnclose-bookmark', throw=False,
1471 1473 txnname=desc, **pycompat.strkwargs(args))
1472 1474
1473 1475 if hook.hashook(repo.ui, 'txnclose-phase'):
1474 1476 cl = repo.unfiltered().changelog
1475 1477 phasemv = sorted(tr.changes['phases'].items())
1476 1478 for rev, (old, new) in phasemv:
1477 1479 args = tr.hookargs.copy()
1478 1480 node = hex(cl.node(rev))
1479 1481 args.update(phases.preparehookargs(node, old, new))
1480 1482 repo.hook('txnclose-phase', throw=False, txnname=desc,
1481 1483 **pycompat.strkwargs(args))
1482 1484
1483 1485 repo.hook('txnclose', throw=False, txnname=desc,
1484 1486 **pycompat.strkwargs(hookargs))
1485 1487 reporef()._afterlock(hookfunc)
1486 1488 tr.addfinalize('txnclose-hook', txnclosehook)
1487 1489 # Include a leading "-" to make it happen before the transaction summary
1488 1490 # reports registered via scmutil.registersummarycallback() whose names
1489 1491 # are 00-txnreport etc. That way, the caches will be warm when the
1490 1492 # callbacks run.
1491 1493 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1492 1494 def txnaborthook(tr2):
1493 1495 """To be run if the transaction is aborted
1494 1496 """
1495 1497 reporef().hook('txnabort', throw=False, txnname=desc,
1496 1498 **pycompat.strkwargs(tr2.hookargs))
1497 1499 tr.addabort('txnabort-hook', txnaborthook)
1498 1500 # avoid eager cache invalidation. in-memory data should be identical
1499 1501 # to stored data if transaction has no error.
1500 1502 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1501 1503 self._transref = weakref.ref(tr)
1502 1504 scmutil.registersummarycallback(self, tr, desc)
1503 1505 return tr
1504 1506
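    # Illustrative sketch, not part of the original module: callers are
    # expected to hold the store lock around a transaction, and transaction
    # objects support the context manager protocol, e.g.
    #
    #     with repo.lock():
    #         with repo.transaction('my-operation') as tr:  # name is made up
    #             ...  # mutate the store; the scope closes tr on normal exit
    #
    # The transaction name is what hooks such as pretxnclose/txnclose receive
    # as txnname.
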
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

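    # For illustration: undofiles() maps each journal file to its undo
    # counterpart via undoname() below, e.g. 'journal.dirstate' becomes
    # 'undo.dirstate' and 'journal' becomes 'undo'.
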
    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

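    # A hedged sketch for extension authors (helper names are hypothetical):
    # an extension could wrap _buildcacheupdater to chain extra cache warming
    # onto the returned callback:
    #
    #     def wrapcacheupdater(orig, repo, newtransaction):
    #         updater = orig(repo, newtransaction)
    #         def wrapped(tr):
    #             updater(tr)
    #             warmextensioncache(repo, tr)  # hypothetical helper
    #         return wrapped
    #     extensions.wrapfunction(localrepo.localrepository,
    #                             '_buildcacheupdater', wrapcacheupdater)
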
    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # updating the unfiltered branchmap should refresh all the others.
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

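    # For illustration: the 'hg debugupdatecaches' debug command drives this
    # method with full=True to warm every cache this function knows about.
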
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes an unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

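    # For illustration, pushkey() below relies on this mechanism to defer
    # its 'pushkey' hook until the outermost lock is released:
    #
    #     def runhook():
    #         self.hook('pushkey', namespace=namespace, key=key, ...)
    #     self._afterlock(runhook)
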
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such an
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

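    # Illustrative lock-ordering sketch (mirrors commit() below): take wlock
    # before lock, and open the transaction with both held:
    #
    #     with repo.wlock():
    #         with repo.lock():
    #             with repo.transaction('some-change') as tr:  # name made up
    #                 ...
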
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

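    # For illustration: when copy/rename information is recorded above, the
    # new filelog revision carries it as metadata rather than as a real first
    # parent, e.g. meta = {'copy': 'foo', 'copyrev': hex(crev)} together with
    # fparent1 == nullid.
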
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

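    # Illustrative usage (values are made up): a minimal programmatic commit
    # of all outstanding working directory changes:
    #
    #     node = repo.commit(text='fix a bug',
    #                        user='alice <alice@example.org>')
    #     # 'node' is the new changeset node, or None if there was nothing
    #     # to commit
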
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug('not reusing manifest (no file change in '
                                  'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop, match=self.narrowmatch())
                else:
                    self.ui.debug('reusing manifest from p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the phase boundary does not alter the parent
                # changesets; if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

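    # For illustration: between() walks first parents from 'top' and samples
    # nodes at exponentially growing distances (1, 2, 4, 8, ...). On a linear
    # history with top at rev 10 and bottom at rev 0, it would report the
    # nodes at revs 9, 8, 6 and 2.
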
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose callbacks are invoked with a
        pushop (carrying repo, remote, and outgoing) before changesets are
        pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

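    # For illustration: built-in pushkey namespaces include 'bookmarks' and
    # 'phases', so repo.listkeys('bookmarks') returns a dict mapping bookmark
    # names to hex changeset ids.
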
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

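    # For illustration: savecommitmessage() writes the text to
    # .hg/last-message.txt and returns that path relative to the repository
    # root, which is how commit() above reports a saved message on failure.
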
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)

def islocal(path):
    return True

def newreporequirements(ui, createopts=None):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    createopts = createopts or {}

    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    # experimental config: format.sparse-revlog
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool('format', 'internal-phase'):
        requirements.add('internal-phase')

    if createopts.get('narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    return requirements

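# For illustration: with a stock configuration this typically yields
# {'revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta'}; the exact
# set depends on the ui configuration and any loaded extensions.
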
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {'narrowfiles'}

    return {k: v for k, v in createopts.items() if k not in known}

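# For illustration: filterknowncreateopts(ui, {'narrowfiles': True,
# 'bogusopt': True}) returns {'bogusopt': True} ('bogusopt' is a made-up
# name), which makes createrepository() below abort.
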
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.
    """
    createopts = createopts or {}

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    hgvfs.makedir(notindexed=True)

    if b'store' in requirements:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(b'00changelog.i',
                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
                     b'layout')

    scmutil.writerequires(hgvfs, requirements)

def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == r'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError('repo instances should not be used '
                                         'after unshare')

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)