localrepo: use urllocalpath() for path to create repo too...
Martin von Zweigbergk
r39627:76b58f24 default
@@ -1,2504 +1,2505 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 )
24 24 from . import (
25 25 bookmarks,
26 26 branchmap,
27 27 bundle2,
28 28 changegroup,
29 29 changelog,
30 30 color,
31 31 context,
32 32 dirstate,
33 33 dirstateguard,
34 34 discovery,
35 35 encoding,
36 36 error,
37 37 exchange,
38 38 extensions,
39 39 filelog,
40 40 hook,
41 41 lock as lockmod,
42 42 manifest,
43 43 match as matchmod,
44 44 merge as mergemod,
45 45 mergeutil,
46 46 namespaces,
47 47 narrowspec,
48 48 obsolete,
49 49 pathutil,
50 50 phases,
51 51 pushkey,
52 52 pycompat,
53 53 repository,
54 54 repoview,
55 55 revset,
56 56 revsetlang,
57 57 scmutil,
58 58 sparse,
59 59 store,
60 60 subrepoutil,
61 61 tags as tagsmod,
62 62 transaction,
63 63 txnutil,
64 64 util,
65 65 vfs as vfsmod,
66 66 )
67 67 from .utils import (
68 68 interfaceutil,
69 69 procutil,
70 70 stringutil,
71 71 )
72 72
73 73 from .revlogutils import (
74 74 constants as revlogconst,
75 75 )
76 76
77 77 release = lockmod.release
78 78 urlerr = util.urlerr
79 79 urlreq = util.urlreq
80 80
81 81 # set of (path, vfs-location) tuples. vfs-location is:
82 82 # - 'plain' for vfs relative paths
83 83 # - '' for svfs relative paths
84 84 _cachedfiles = set()
85 85
86 86 class _basefilecache(scmutil.filecache):
87 87 """All filecache usage on repo are done for logic that should be unfiltered
88 88 """
89 89 def __get__(self, repo, type=None):
90 90 if repo is None:
91 91 return self
92 92 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
93 93 def __set__(self, repo, value):
94 94 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
95 95 def __delete__(self, repo):
96 96 return super(_basefilecache, self).__delete__(repo.unfiltered())
97 97
98 98 class repofilecache(_basefilecache):
99 99 """filecache for files in .hg but outside of .hg/store"""
100 100 def __init__(self, *paths):
101 101 super(repofilecache, self).__init__(*paths)
102 102 for path in paths:
103 103 _cachedfiles.add((path, 'plain'))
104 104
105 105 def join(self, obj, fname):
106 106 return obj.vfs.join(fname)
107 107
108 108 class storecache(_basefilecache):
109 109 """filecache for files in the store"""
110 110 def __init__(self, *paths):
111 111 super(storecache, self).__init__(*paths)
112 112 for path in paths:
113 113 _cachedfiles.add((path, ''))
114 114
115 115 def join(self, obj, fname):
116 116 return obj.sjoin(fname)
117 117
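# Illustrative sketch (not part of this module): these decorators turn
# methods into file-backed cached properties. Real uses appear further
# down (e.g. '_bookmarks', '_phasecache'); the names below are
# hypothetical.
#
#     @repofilecache('myext-state')      # tracks .hg/myext-state
#     def _myextstate(self):
#         return self.vfs.tryread('myext-state')
#
#     @storecache('00mydata.i')          # tracks .hg/store/00mydata.i
#     def _mydata(self):
#         return self.svfs.tryread('00mydata.i')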
118 118 def isfilecached(repo, name):
119 119 """check if a repo has already cached "name" filecache-ed property
120 120
121 121 This returns (cachedobj-or-None, iscached) tuple.
122 122 """
123 123 cacheentry = repo.unfiltered()._filecache.get(name, None)
124 124 if not cacheentry:
125 125 return None, False
126 126 return cacheentry.obj, True
127 127
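# Usage sketch (hypothetical): probe a filecache-ed property without
# forcing it to be populated:
#
#     ds, cached = isfilecached(repo, 'dirstate')
#     if cached:
#         # safe to inspect ds without triggering a read from disk
#         pass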
128 128 class unfilteredpropertycache(util.propertycache):
129 129 """propertycache that apply to unfiltered repo only"""
130 130
131 131 def __get__(self, repo, type=None):
132 132 unfi = repo.unfiltered()
133 133 if unfi is repo:
134 134 return super(unfilteredpropertycache, self).__get__(unfi)
135 135 return getattr(unfi, self.name)
136 136
137 137 class filteredpropertycache(util.propertycache):
138 138 """propertycache that must take filtering in account"""
139 139
140 140 def cachevalue(self, obj, value):
141 141 object.__setattr__(obj, self.name, value)
142 142
143 143
144 144 def hasunfilteredcache(repo, name):
145 145 """check if a repo has an unfilteredpropertycache value for <name>"""
146 146 return name in vars(repo.unfiltered())
147 147
148 148 def unfilteredmethod(orig):
149 149 """decorate method that always need to be run on unfiltered version"""
150 150 def wrapper(repo, *args, **kwargs):
151 151 return orig(repo.unfiltered(), *args, **kwargs)
152 152 return wrapper
153 153
154 154 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
155 155 'unbundle'}
156 156 legacycaps = moderncaps.union({'changegroupsubset'})
157 157
158 158 @interfaceutil.implementer(repository.ipeercommandexecutor)
159 159 class localcommandexecutor(object):
160 160 def __init__(self, peer):
161 161 self._peer = peer
162 162 self._sent = False
163 163 self._closed = False
164 164
165 165 def __enter__(self):
166 166 return self
167 167
168 168 def __exit__(self, exctype, excvalue, exctb):
169 169 self.close()
170 170
171 171 def callcommand(self, command, args):
172 172 if self._sent:
173 173 raise error.ProgrammingError('callcommand() cannot be used after '
174 174 'sendcommands()')
175 175
176 176 if self._closed:
177 177 raise error.ProgrammingError('callcommand() cannot be used after '
178 178 'close()')
179 179
180 180 # We don't need to support anything fancy. Just call the named
181 181 # method on the peer and return a resolved future.
182 182 fn = getattr(self._peer, pycompat.sysstr(command))
183 183
184 184 f = pycompat.futures.Future()
185 185
186 186 try:
187 187 result = fn(**pycompat.strkwargs(args))
188 188 except Exception:
189 189 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
190 190 else:
191 191 f.set_result(result)
192 192
193 193 return f
194 194
195 195 def sendcommands(self):
196 196 self._sent = True
197 197
198 198 def close(self):
199 199 self._closed = True
200 200
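# Usage sketch (names hypothetical, byte/str handling elided): the
# executor is obtained through the peer interface and each callcommand()
# returns an already-resolved future, since the peer is local:
#
#     with peer.commandexecutor() as e:
#         f = e.callcommand('lookup', {'key': 'tip'})
#     node = f.result()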
201 201 @interfaceutil.implementer(repository.ipeercommands)
202 202 class localpeer(repository.peer):
203 203 '''peer for a local repo; reflects only the most recent API'''
204 204
205 205 def __init__(self, repo, caps=None):
206 206 super(localpeer, self).__init__()
207 207
208 208 if caps is None:
209 209 caps = moderncaps.copy()
210 210 self._repo = repo.filtered('served')
211 211 self.ui = repo.ui
212 212 self._caps = repo._restrictcapabilities(caps)
213 213
214 214 # Begin of _basepeer interface.
215 215
216 216 def url(self):
217 217 return self._repo.url()
218 218
219 219 def local(self):
220 220 return self._repo
221 221
222 222 def peer(self):
223 223 return self
224 224
225 225 def canpush(self):
226 226 return True
227 227
228 228 def close(self):
229 229 self._repo.close()
230 230
231 231 # End of _basepeer interface.
232 232
233 233 # Begin of _basewirecommands interface.
234 234
235 235 def branchmap(self):
236 236 return self._repo.branchmap()
237 237
238 238 def capabilities(self):
239 239 return self._caps
240 240
241 241 def clonebundles(self):
242 242 return self._repo.tryread('clonebundles.manifest')
243 243
244 244 def debugwireargs(self, one, two, three=None, four=None, five=None):
245 245 """Used to test argument passing over the wire"""
246 246 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
247 247 pycompat.bytestr(four),
248 248 pycompat.bytestr(five))
249 249
250 250 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
251 251 **kwargs):
252 252 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
253 253 common=common, bundlecaps=bundlecaps,
254 254 **kwargs)[1]
255 255 cb = util.chunkbuffer(chunks)
256 256
257 257 if exchange.bundle2requested(bundlecaps):
258 258 # When requesting a bundle2, getbundle returns a stream to make the
259 259 # wire level function happier. We need to build a proper object
260 260 # from it in local peer.
261 261 return bundle2.getunbundler(self.ui, cb)
262 262 else:
263 263 return changegroup.getunbundler('01', cb, None)
264 264
265 265 def heads(self):
266 266 return self._repo.heads()
267 267
268 268 def known(self, nodes):
269 269 return self._repo.known(nodes)
270 270
271 271 def listkeys(self, namespace):
272 272 return self._repo.listkeys(namespace)
273 273
274 274 def lookup(self, key):
275 275 return self._repo.lookup(key)
276 276
277 277 def pushkey(self, namespace, key, old, new):
278 278 return self._repo.pushkey(namespace, key, old, new)
279 279
280 280 def stream_out(self):
281 281 raise error.Abort(_('cannot perform stream clone against local '
282 282 'peer'))
283 283
284 284 def unbundle(self, bundle, heads, url):
285 285 """apply a bundle on a repo
286 286
287 287 This function handles the repo locking itself."""
288 288 try:
289 289 try:
290 290 bundle = exchange.readbundle(self.ui, bundle, None)
291 291 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
292 292 if util.safehasattr(ret, 'getchunks'):
293 293 # This is a bundle20 object, turn it into an unbundler.
294 294 # This little dance should be dropped eventually when the
295 295 # API is finally improved.
296 296 stream = util.chunkbuffer(ret.getchunks())
297 297 ret = bundle2.getunbundler(self.ui, stream)
298 298 return ret
299 299 except Exception as exc:
300 300 # If the exception contains output salvaged from a bundle2
301 301 # reply, we need to make sure it is printed before continuing
302 302 # to fail. So we build a bundle2 with such output and consume
303 303 # it directly.
304 304 #
305 305 # This is not very elegant but allows a "simple" solution for
306 306 # issue4594
307 307 output = getattr(exc, '_bundle2salvagedoutput', ())
308 308 if output:
309 309 bundler = bundle2.bundle20(self._repo.ui)
310 310 for out in output:
311 311 bundler.addpart(out)
312 312 stream = util.chunkbuffer(bundler.getchunks())
313 313 b = bundle2.getunbundler(self.ui, stream)
314 314 bundle2.processbundle(self._repo, b)
315 315 raise
316 316 except error.PushRaced as exc:
317 317 raise error.ResponseError(_('push failed:'),
318 318 stringutil.forcebytestr(exc))
319 319
320 320 # End of _basewirecommands interface.
321 321
322 322 # Begin of peer interface.
323 323
324 324 def commandexecutor(self):
325 325 return localcommandexecutor(self)
326 326
327 327 # End of peer interface.
328 328
329 329 @interfaceutil.implementer(repository.ipeerlegacycommands)
330 330 class locallegacypeer(localpeer):
331 331 '''peer extension which implements legacy methods too; used for tests with
332 332 restricted capabilities'''
333 333
334 334 def __init__(self, repo):
335 335 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
336 336
337 337 # Begin of baselegacywirecommands interface.
338 338
339 339 def between(self, pairs):
340 340 return self._repo.between(pairs)
341 341
342 342 def branches(self, nodes):
343 343 return self._repo.branches(nodes)
344 344
345 345 def changegroup(self, nodes, source):
346 346 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
347 347 missingheads=self._repo.heads())
348 348 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
349 349
350 350 def changegroupsubset(self, bases, heads, source):
351 351 outgoing = discovery.outgoing(self._repo, missingroots=bases,
352 352 missingheads=heads)
353 353 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
354 354
355 355 # End of baselegacywirecommands interface.
356 356
357 357 # Increment the sub-version when the revlog v2 format changes to lock out old
358 358 # clients.
359 359 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
360 360
361 361 # A repository with the sparserevlog feature will have delta chains that
362 362 # can spread over a larger span. Sparse reading cuts these large spans into
363 363 # pieces, so that each piece isn't too big.
364 364 # Without the sparserevlog capability, reading from the repository could use
365 365 # huge amounts of memory, because the whole span would be read at once,
366 366 # including all the intermediate revisions that aren't pertinent for the chain.
367 367 # This is why once a repository has enabled sparse-read, it becomes required.
368 368 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
369 369
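# Illustrative configuration (assuming the corresponding 'format' config
# option in this Mercurial version): new repositories acquire this
# requirement when sparse-revlog is enabled, e.g.:
#
#     [format]
#     sparse-revlog = yes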
370 370 # Functions receiving (ui, features) that extensions can register to impact
371 371 # the ability to load repositories with custom requirements. Only
372 372 # functions defined in loaded extensions are called.
373 373 #
374 374 # The function receives a set of requirement strings that the repository
375 375 # is capable of opening. Functions will typically add elements to the
376 376 # set to reflect that the extension knows how to handle those requirements.
377 377 featuresetupfuncs = set()
378 378
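# Registration sketch (function names hypothetical): an extension
# typically registers its feature function from uisetup():
#
#     def featuresetup(ui, features):
#         features.add('exp-myfeature')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)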
379 379 @interfaceutil.implementer(repository.completelocalrepository)
380 380 class localrepository(object):
381 381
382 382 # obsolete experimental requirements:
383 383 # - manifestv2: An experimental new manifest format that allowed
384 384 # for stem compression of long paths. Experiment ended up not
385 385 # being successful (repository sizes went up due to worse delta
386 386 # chains), and the code was deleted in 4.6.
387 387 supportedformats = {
388 388 'revlogv1',
389 389 'generaldelta',
390 390 'treemanifest',
391 391 REVLOGV2_REQUIREMENT,
392 392 SPARSEREVLOG_REQUIREMENT,
393 393 }
394 394 _basesupported = supportedformats | {
395 395 'store',
396 396 'fncache',
397 397 'shared',
398 398 'relshared',
399 399 'dotencode',
400 400 'exp-sparse',
401 401 'internal-phase'
402 402 }
403 403 openerreqs = {
404 404 'revlogv1',
405 405 'generaldelta',
406 406 'treemanifest',
407 407 }
408 408
409 409 # list of prefixes for files which can be written without 'wlock'
410 410 # Extensions should extend this list when needed
411 411 _wlockfreeprefix = {
412 412 # We might consider requiring 'wlock' for the next
413 413 # two, but pretty much all the existing code assumes
414 414 # wlock is not needed so we keep them excluded for
415 415 # now.
416 416 'hgrc',
417 417 'requires',
418 418 # XXX cache is a complicated business; someone
419 419 # should investigate this in depth at some point
420 420 'cache/',
421 421 # XXX shouldn't dirstate be covered by the wlock?
422 422 'dirstate',
423 423 # XXX bisect was still a bit too messy at the time
424 424 # this changeset was introduced. Someone should fix
425 425 # the remaining bit and drop this line
426 426 'bisect.state',
427 427 }
428 428
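    # Extension sketch (prefix hypothetical): per the comment above, an
    # extension that maintains its own lock-free file under .hg/ would
    # extend this class attribute:
    #
    #     localrepo.localrepository._wlockfreeprefix.add('myext-journal')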
429 429 def __init__(self, baseui, path, intents=None):
430 430 """Create a new local repository instance.
431 431
432 432 Most callers should use ``hg.repository()`` or ``localrepo.instance()``
433 433 for obtaining a new repository object.
434 434 """
435 435
436 436 self.requirements = set()
437 437 self.filtername = None
438 438 # wvfs: rooted at the repository root, used to access the working copy
439 439 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
440 440 # vfs: rooted at .hg, used to access repo files outside of .hg/store
441 441 self.vfs = None
442 442 # svfs: usually rooted at .hg/store, used to access repository history
443 443 # If this is a shared repository, this vfs may point to another
444 444 # repository's .hg/store directory.
445 445 self.svfs = None
446 446 self.root = self.wvfs.base
447 447 self.path = self.wvfs.join(".hg")
448 448 self.origroot = path
449 449 self.baseui = baseui
450 450 self.ui = baseui.copy()
451 451 self.ui.copy = baseui.copy # prevent copying repo configuration
452 452 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
453 453 if (self.ui.configbool('devel', 'all-warnings') or
454 454 self.ui.configbool('devel', 'check-locks')):
455 455 self.vfs.audit = self._getvfsward(self.vfs.audit)
456 456 # A list of callbacks to shape the phase if no data were found.
457 457 # Callbacks are in the form: func(repo, roots) --> processed root.
458 458 # This list is to be filled by extensions during repo setup.
459 459 self._phasedefaults = []
460 460 try:
461 461 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
462 462 self._loadextensions()
463 463 except IOError:
464 464 pass
465 465
466 466 if featuresetupfuncs:
467 467 self.supported = set(self._basesupported) # use private copy
468 468 extmods = set(m.__name__ for n, m
469 469 in extensions.extensions(self.ui))
470 470 for setupfunc in featuresetupfuncs:
471 471 if setupfunc.__module__ in extmods:
472 472 setupfunc(self.ui, self.supported)
473 473 else:
474 474 self.supported = self._basesupported
475 475 color.setup(self.ui)
476 476
477 477 # Add compression engines.
478 478 for name in util.compengines:
479 479 engine = util.compengines[name]
480 480 if engine.revlogheader():
481 481 self.supported.add('exp-compression-%s' % name)
482 482
483 483 if not self.vfs.isdir():
484 484 try:
485 485 self.vfs.stat()
486 486 except OSError as inst:
487 487 if inst.errno != errno.ENOENT:
488 488 raise
489 489 raise error.RepoError(_("repository %s not found") % path)
490 490 else:
491 491 try:
492 492 self.requirements = scmutil.readrequires(
493 493 self.vfs, self.supported)
494 494 except IOError as inst:
495 495 if inst.errno != errno.ENOENT:
496 496 raise
497 497
498 498 cachepath = self.vfs.join('cache')
499 499 self.sharedpath = self.path
500 500 try:
501 501 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
502 502 if 'relshared' in self.requirements:
503 503 sharedpath = self.vfs.join(sharedpath)
504 504 vfs = vfsmod.vfs(sharedpath, realpath=True)
505 505 cachepath = vfs.join('cache')
506 506 s = vfs.base
507 507 if not vfs.exists():
508 508 raise error.RepoError(
509 509 _('.hg/sharedpath points to nonexistent directory %s') % s)
510 510 self.sharedpath = s
511 511 except IOError as inst:
512 512 if inst.errno != errno.ENOENT:
513 513 raise
514 514
515 515 if 'exp-sparse' in self.requirements and not sparse.enabled:
516 516 raise error.RepoError(_('repository is using sparse feature but '
517 517 'sparse is not enabled; enable the '
518 518 '"sparse" extensions to access'))
519 519
520 520 self.store = store.store(
521 521 self.requirements, self.sharedpath,
522 522 lambda base: vfsmod.vfs(base, cacheaudited=True))
523 523 self.spath = self.store.path
524 524 self.svfs = self.store.vfs
525 525 self.sjoin = self.store.join
526 526 self.vfs.createmode = self.store.createmode
527 527 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
528 528 self.cachevfs.createmode = self.store.createmode
529 529 if (self.ui.configbool('devel', 'all-warnings') or
530 530 self.ui.configbool('devel', 'check-locks')):
531 531 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
532 532 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
533 533 else: # standard vfs
534 534 self.svfs.audit = self._getsvfsward(self.svfs.audit)
535 535 self._applyopenerreqs()
536 536
537 537 self._dirstatevalidatewarned = False
538 538
539 539 self._branchcaches = {}
540 540 self._revbranchcache = None
541 541 self._filterpats = {}
542 542 self._datafilters = {}
543 543 self._transref = self._lockref = self._wlockref = None
544 544
545 545 # A cache for various files under .hg/ that tracks file changes,
546 546 # (used by the filecache decorator)
547 547 #
548 548 # Maps a property name to its util.filecacheentry
549 549 self._filecache = {}
550 550
551 551 # hold sets of revisions to be filtered
552 552 # should be cleared when something might have changed the filter value:
553 553 # - new changesets,
554 554 # - phase change,
555 555 # - new obsolescence marker,
556 556 # - working directory parent change,
557 557 # - bookmark changes
558 558 self.filteredrevcache = {}
559 559
560 560 # post-dirstate-status hooks
561 561 self._postdsstatus = []
562 562
563 563 # generic mapping between names and nodes
564 564 self.names = namespaces.namespaces()
565 565
566 566 # Key to signature value.
567 567 self._sparsesignaturecache = {}
568 568 # Signature to cached matcher instance.
569 569 self._sparsematchercache = {}
570 570
571 571 def _getvfsward(self, origfunc):
572 572 """build a ward for self.vfs"""
573 573 rref = weakref.ref(self)
574 574 def checkvfs(path, mode=None):
575 575 ret = origfunc(path, mode=mode)
576 576 repo = rref()
577 577 if (repo is None
578 578 or not util.safehasattr(repo, '_wlockref')
579 579 or not util.safehasattr(repo, '_lockref')):
580 580 return
581 581 if mode in (None, 'r', 'rb'):
582 582 return
583 583 if path.startswith(repo.path):
584 584 # truncate name relative to the repository (.hg)
585 585 path = path[len(repo.path) + 1:]
586 586 if path.startswith('cache/'):
587 587 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
588 588 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
589 589 if path.startswith('journal.'):
590 590 # journal is covered by 'lock'
591 591 if repo._currentlock(repo._lockref) is None:
592 592 repo.ui.develwarn('write with no lock: "%s"' % path,
593 593 stacklevel=2, config='check-locks')
594 594 elif repo._currentlock(repo._wlockref) is None:
595 595 # rest of vfs files are covered by 'wlock'
596 596 #
597 597 # exclude special files
598 598 for prefix in self._wlockfreeprefix:
599 599 if path.startswith(prefix):
600 600 return
601 601 repo.ui.develwarn('write with no wlock: "%s"' % path,
602 602 stacklevel=2, config='check-locks')
603 603 return ret
604 604 return checkvfs
605 605
606 606 def _getsvfsward(self, origfunc):
607 607 """build a ward for self.svfs"""
608 608 rref = weakref.ref(self)
609 609 def checksvfs(path, mode=None):
610 610 ret = origfunc(path, mode=mode)
611 611 repo = rref()
612 612 if repo is None or not util.safehasattr(repo, '_lockref'):
613 613 return
614 614 if mode in (None, 'r', 'rb'):
615 615 return
616 616 if path.startswith(repo.sharedpath):
617 617 # truncate name relative to the repository (.hg)
618 618 path = path[len(repo.sharedpath) + 1:]
619 619 if repo._currentlock(repo._lockref) is None:
620 620 repo.ui.develwarn('write with no lock: "%s"' % path,
621 621 stacklevel=3)
622 622 return ret
623 623 return checksvfs
624 624
625 625 def close(self):
626 626 self._writecaches()
627 627
628 628 def _loadextensions(self):
629 629 extensions.loadall(self.ui)
630 630
631 631 def _writecaches(self):
632 632 if self._revbranchcache:
633 633 self._revbranchcache.write()
634 634
635 635 def _restrictcapabilities(self, caps):
636 636 if self.ui.configbool('experimental', 'bundle2-advertise'):
637 637 caps = set(caps)
638 638 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
639 639 role='client'))
640 640 caps.add('bundle2=' + urlreq.quote(capsblob))
641 641 return caps
642 642
643 643 def _applyopenerreqs(self):
644 644 self.svfs.options = dict((r, 1) for r in self.requirements
645 645 if r in self.openerreqs)
646 646 # experimental config: format.chunkcachesize
647 647 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
648 648 if chunkcachesize is not None:
649 649 self.svfs.options['chunkcachesize'] = chunkcachesize
650 650 # experimental config: format.manifestcachesize
651 651 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
652 652 if manifestcachesize is not None:
653 653 self.svfs.options['manifestcachesize'] = manifestcachesize
654 654 deltabothparents = self.ui.configbool('storage',
655 655 'revlog.optimize-delta-parent-choice')
656 656 self.svfs.options['deltabothparents'] = deltabothparents
657 657 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
658 658 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
659 659 if 0 <= chainspan:
660 660 self.svfs.options['maxdeltachainspan'] = chainspan
661 661 mmapindexthreshold = self.ui.configbytes('experimental',
662 662 'mmapindexthreshold')
663 663 if mmapindexthreshold is not None:
664 664 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
665 665 withsparseread = self.ui.configbool('experimental', 'sparse-read')
666 666 srdensitythres = float(self.ui.config('experimental',
667 667 'sparse-read.density-threshold'))
668 668 srmingapsize = self.ui.configbytes('experimental',
669 669 'sparse-read.min-gap-size')
670 670 self.svfs.options['with-sparse-read'] = withsparseread
671 671 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
672 672 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
673 673 sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
674 674 self.svfs.options['sparse-revlog'] = sparserevlog
675 675 if sparserevlog:
676 676 self.svfs.options['generaldelta'] = True
677 677 maxchainlen = None
678 678 if sparserevlog:
679 679 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
680 680 # experimental config: format.maxchainlen
681 681 maxchainlen = self.ui.configint('format', 'maxchainlen', maxchainlen)
682 682 if maxchainlen is not None:
683 683 self.svfs.options['maxchainlen'] = maxchainlen
684 684
685 685 for r in self.requirements:
686 686 if r.startswith('exp-compression-'):
687 687 self.svfs.options['compengine'] = r[len('exp-compression-'):]
688 688
689 689 # TODO move "revlogv2" to openerreqs once finalized.
690 690 if REVLOGV2_REQUIREMENT in self.requirements:
691 691 self.svfs.options['revlogv2'] = True
692 692
693 693 def _writerequirements(self):
694 694 scmutil.writerequires(self.vfs, self.requirements)
695 695
696 696 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
697 697 # self -> auditor -> self._checknested -> self
698 698
699 699 @property
700 700 def auditor(self):
701 701 # This is only used by context.workingctx.match in order to
702 702 # detect files in subrepos.
703 703 return pathutil.pathauditor(self.root, callback=self._checknested)
704 704
705 705 @property
706 706 def nofsauditor(self):
707 707 # This is only used by context.basectx.match in order to detect
708 708 # files in subrepos.
709 709 return pathutil.pathauditor(self.root, callback=self._checknested,
710 710 realfs=False, cached=True)
711 711
712 712 def _checknested(self, path):
713 713 """Determine if path is a legal nested repository."""
714 714 if not path.startswith(self.root):
715 715 return False
716 716 subpath = path[len(self.root) + 1:]
717 717 normsubpath = util.pconvert(subpath)
718 718
719 719 # XXX: Checking against the current working copy is wrong in
720 720 # the sense that it can reject things like
721 721 #
722 722 # $ hg cat -r 10 sub/x.txt
723 723 #
724 724 # if sub/ is no longer a subrepository in the working copy
725 725 # parent revision.
726 726 #
727 727 # However, it can of course also allow things that would have
728 728 # been rejected before, such as the above cat command if sub/
729 729 # is a subrepository now, but was a normal directory before.
730 730 # The old path auditor would have rejected by mistake since it
731 731 # panics when it sees sub/.hg/.
732 732 #
733 733 # All in all, checking against the working copy seems sensible
734 734 # since we want to prevent access to nested repositories on
735 735 # the filesystem *now*.
736 736 ctx = self[None]
737 737 parts = util.splitpath(subpath)
738 738 while parts:
739 739 prefix = '/'.join(parts)
740 740 if prefix in ctx.substate:
741 741 if prefix == normsubpath:
742 742 return True
743 743 else:
744 744 sub = ctx.sub(prefix)
745 745 return sub.checknested(subpath[len(prefix) + 1:])
746 746 else:
747 747 parts.pop()
748 748 return False
749 749
750 750 def peer(self):
751 751 return localpeer(self) # not cached to avoid reference cycle
752 752
753 753 def unfiltered(self):
754 754 """Return unfiltered version of the repository
755 755
756 756 Intended to be overwritten by filtered repo."""
757 757 return self
758 758
759 759 def filtered(self, name, visibilityexceptions=None):
760 760 """Return a filtered version of a repository"""
761 761 cls = repoview.newtype(self.unfiltered().__class__)
762 762 return cls(self, name, visibilityexceptions)
763 763
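    # Usage sketch: filter names come from repoview; this module itself
    # uses 'served' (localpeer.__init__) and 'visible' (cancopy):
    #
    #     served = repo.filtered('served')
    #     visible = repo.filtered('visible')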
764 764 @repofilecache('bookmarks', 'bookmarks.current')
765 765 def _bookmarks(self):
766 766 return bookmarks.bmstore(self)
767 767
768 768 @property
769 769 def _activebookmark(self):
770 770 return self._bookmarks.active
771 771
772 772 # _phasesets depend on changelog. what we need is to call
773 773 # _phasecache.invalidate() if '00changelog.i' was changed, but it
774 774 # can't be easily expressed in filecache mechanism.
775 775 @storecache('phaseroots', '00changelog.i')
776 776 def _phasecache(self):
777 777 return phases.phasecache(self, self._phasedefaults)
778 778
779 779 @storecache('obsstore')
780 780 def obsstore(self):
781 781 return obsolete.makestore(self.ui, self)
782 782
783 783 @storecache('00changelog.i')
784 784 def changelog(self):
785 785 return changelog.changelog(self.svfs,
786 786 trypending=txnutil.mayhavepending(self.root))
787 787
788 788 def _constructmanifest(self):
789 789 # This is a temporary function while we migrate from manifest to
790 790 # manifestlog. It allows bundlerepo and unionrepo to intercept the
791 791 # manifest creation.
792 792 return manifest.manifestrevlog(self.svfs)
793 793
794 794 @storecache('00manifest.i')
795 795 def manifestlog(self):
796 796 return manifest.manifestlog(self.svfs, self)
797 797
798 798 @repofilecache('dirstate')
799 799 def dirstate(self):
800 800 return self._makedirstate()
801 801
802 802 def _makedirstate(self):
803 803 """Extension point for wrapping the dirstate per-repo."""
804 804 sparsematchfn = lambda: sparse.matcher(self)
805 805
806 806 return dirstate.dirstate(self.vfs, self.ui, self.root,
807 807 self._dirstatevalidate, sparsematchfn)
808 808
809 809 def _dirstatevalidate(self, node):
810 810 try:
811 811 self.changelog.rev(node)
812 812 return node
813 813 except error.LookupError:
814 814 if not self._dirstatevalidatewarned:
815 815 self._dirstatevalidatewarned = True
816 816 self.ui.warn(_("warning: ignoring unknown"
817 817 " working parent %s!\n") % short(node))
818 818 return nullid
819 819
820 820 @storecache(narrowspec.FILENAME)
821 821 def narrowpats(self):
822 822 """matcher patterns for this repository's narrowspec
823 823
824 824 A tuple of (includes, excludes).
825 825 """
826 826 source = self
827 827 if self.shared():
828 828 from . import hg
829 829 source = hg.sharedreposource(self)
830 830 return narrowspec.load(source)
831 831
832 832 @storecache(narrowspec.FILENAME)
833 833 def _narrowmatch(self):
834 834 if repository.NARROW_REQUIREMENT not in self.requirements:
835 835 return matchmod.always(self.root, '')
836 836 include, exclude = self.narrowpats
837 837 return narrowspec.match(self.root, include=include, exclude=exclude)
838 838
839 839 # TODO(martinvonz): make this property-like instead?
840 840 def narrowmatch(self):
841 841 return self._narrowmatch
842 842
843 843 def setnarrowpats(self, newincludes, newexcludes):
844 844 narrowspec.save(self, newincludes, newexcludes)
845 845 self.invalidate(clearfilecache=True)
846 846
847 847 def __getitem__(self, changeid):
848 848 if changeid is None:
849 849 return context.workingctx(self)
850 850 if isinstance(changeid, context.basectx):
851 851 return changeid
852 852 if isinstance(changeid, slice):
853 853 # wdirrev isn't contiguous so the slice shouldn't include it
854 854 return [context.changectx(self, i)
855 855 for i in pycompat.xrange(*changeid.indices(len(self)))
856 856 if i not in self.changelog.filteredrevs]
857 857 try:
858 858 return context.changectx(self, changeid)
859 859 except error.WdirUnsupported:
860 860 return context.workingctx(self)
861 861
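    # Lookup sketch (revisions hypothetical): __getitem__ accepts several
    # changeid forms:
    #
    #     repo[None]     # working directory context
    #     repo['tip']    # via context.changectx
    #     repo[0:3]      # list of changectxs, skipping filtered revs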
862 862 def __contains__(self, changeid):
863 863 """True if the given changeid exists
864 864
865 865 error.AmbiguousPrefixLookupError is raised if an ambiguous node
866 866 is specified.
867 867 """
868 868 try:
869 869 self[changeid]
870 870 return True
871 871 except error.RepoLookupError:
872 872 return False
873 873
874 874 def __nonzero__(self):
875 875 return True
876 876
877 877 __bool__ = __nonzero__
878 878
879 879 def __len__(self):
880 880 # no need to pay the cost of repoview.changelog
881 881 unfi = self.unfiltered()
882 882 return len(unfi.changelog)
883 883
884 884 def __iter__(self):
885 885 return iter(self.changelog)
886 886
887 887 def revs(self, expr, *args):
888 888 '''Find revisions matching a revset.
889 889
890 890 The revset is specified as a string ``expr`` that may contain
891 891 %-formatting to escape certain types. See ``revsetlang.formatspec``.
892 892
893 893 Revset aliases from the configuration are not expanded. To expand
894 894 user aliases, consider calling ``scmutil.revrange()`` or
895 895 ``repo.anyrevs([expr], user=True)``.
896 896
897 897 Returns a revset.abstractsmartset, which is a list-like interface
898 898 that contains integer revisions.
899 899 '''
900 900 expr = revsetlang.formatspec(expr, *args)
901 901 m = revset.match(None, expr)
902 902 return m(self)
903 903
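    # Query sketch (arguments hypothetical): %-formatting escapes values
    # per revsetlang.formatspec, e.g. %d for a revision number and %s for
    # a string:
    #
    #     repo.revs('heads(%d::)', rev)
    #     repo.revs('branch(%s)', branchname)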
904 904 def set(self, expr, *args):
905 905 '''Find revisions matching a revset and emit changectx instances.
906 906
907 907 This is a convenience wrapper around ``revs()`` that iterates the
908 908 result and is a generator of changectx instances.
909 909
910 910 Revset aliases from the configuration are not expanded. To expand
911 911 user aliases, consider calling ``scmutil.revrange()``.
912 912 '''
913 913 for r in self.revs(expr, *args):
914 914 yield self[r]
915 915
916 916 def anyrevs(self, specs, user=False, localalias=None):
917 917 '''Find revisions matching one of the given revsets.
918 918
919 919 Revset aliases from the configuration are not expanded by default. To
920 920 expand user aliases, specify ``user=True``. To provide some local
921 921 definitions overriding user aliases, set ``localalias`` to
922 922 ``{name: definitionstring}``.
923 923 '''
924 924 if user:
925 925 m = revset.matchany(self.ui, specs,
926 926 lookup=revset.lookupfn(self),
927 927 localalias=localalias)
928 928 else:
929 929 m = revset.matchany(None, specs, localalias=localalias)
930 930 return m(self)
931 931
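    # Alias sketch (alias name hypothetical): localalias overrides a user
    # alias for a single query:
    #
    #     repo.anyrevs(['mybase::.'], user=True,
    #                  localalias={'mybase': 'tip~3'})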
932 932 def url(self):
933 933 return 'file:' + self.root
934 934
935 935 def hook(self, name, throw=False, **args):
936 936 """Call a hook, passing this repo instance.
937 937
938 938 This is a convenience method to aid invoking hooks. Extensions likely
939 939 won't call this unless they have registered a custom hook or are
940 940 replacing code that is expected to call a hook.
941 941 """
942 942 return hook.hook(self.ui, self, name, throw, **args)
943 943
944 944 @filteredpropertycache
945 945 def _tagscache(self):
946 946 '''Returns a tagscache object that contains various tags-related
947 947 caches.'''
948 948
949 949 # This simplifies its cache management by having one decorated
950 950 # function (this one) and the rest simply fetch things from it.
951 951 class tagscache(object):
952 952 def __init__(self):
953 953 # These two define the set of tags for this repository. tags
954 954 # maps tag name to node; tagtypes maps tag name to 'global' or
955 955 # 'local'. (Global tags are defined by .hgtags across all
956 956 # heads, and local tags are defined in .hg/localtags.)
957 957 # They constitute the in-memory cache of tags.
958 958 self.tags = self.tagtypes = None
959 959
960 960 self.nodetagscache = self.tagslist = None
961 961
962 962 cache = tagscache()
963 963 cache.tags, cache.tagtypes = self._findtags()
964 964
965 965 return cache
966 966
967 967 def tags(self):
968 968 '''return a mapping of tag to node'''
969 969 t = {}
970 970 if self.changelog.filteredrevs:
971 971 tags, tt = self._findtags()
972 972 else:
973 973 tags = self._tagscache.tags
974 974 for k, v in tags.iteritems():
975 975 try:
976 976 # ignore tags to unknown nodes
977 977 self.changelog.rev(v)
978 978 t[k] = v
979 979 except (error.LookupError, ValueError):
980 980 pass
981 981 return t
982 982
983 983 def _findtags(self):
984 984 '''Do the hard work of finding tags. Return a pair of dicts
985 985 (tags, tagtypes) where tags maps tag name to node, and tagtypes
986 986 maps tag name to a string like \'global\' or \'local\'.
987 987 Subclasses or extensions are free to add their own tags, but
988 988 should be aware that the returned dicts will be retained for the
989 989 duration of the localrepo object.'''
990 990
991 991 # XXX what tagtype should subclasses/extensions use? Currently
992 992 # mq and bookmarks add tags, but do not set the tagtype at all.
993 993 # Should each extension invent its own tag type? Should there
994 994 # be one tagtype for all such "virtual" tags? Or is the status
995 995 # quo fine?
996 996
997 997
998 998 # map tag name to (node, hist)
999 999 alltags = tagsmod.findglobaltags(self.ui, self)
1000 1000 # map tag name to tag type
1001 1001 tagtypes = dict((tag, 'global') for tag in alltags)
1002 1002
1003 1003 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1004 1004
1005 1005 # Build the return dicts. Have to re-encode tag names because
1006 1006 # the tags module always uses UTF-8 (in order not to lose info
1007 1007 # writing to the cache), but the rest of Mercurial wants them in
1008 1008 # local encoding.
1009 1009 tags = {}
1010 1010 for (name, (node, hist)) in alltags.iteritems():
1011 1011 if node != nullid:
1012 1012 tags[encoding.tolocal(name)] = node
1013 1013 tags['tip'] = self.changelog.tip()
1014 1014 tagtypes = dict([(encoding.tolocal(name), value)
1015 1015 for (name, value) in tagtypes.iteritems()])
1016 1016 return (tags, tagtypes)
1017 1017
1018 1018 def tagtype(self, tagname):
1019 1019 '''
1020 1020 return the type of the given tag. result can be:
1021 1021
1022 1022 'local' : a local tag
1023 1023 'global' : a global tag
1024 1024 None : tag does not exist
1025 1025 '''
1026 1026
1027 1027 return self._tagscache.tagtypes.get(tagname)
1028 1028
1029 1029 def tagslist(self):
1030 1030 '''return a list of tags ordered by revision'''
1031 1031 if not self._tagscache.tagslist:
1032 1032 l = []
1033 1033 for t, n in self.tags().iteritems():
1034 1034 l.append((self.changelog.rev(n), t, n))
1035 1035 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1036 1036
1037 1037 return self._tagscache.tagslist
1038 1038
1039 1039 def nodetags(self, node):
1040 1040 '''return the tags associated with a node'''
1041 1041 if not self._tagscache.nodetagscache:
1042 1042 nodetagscache = {}
1043 1043 for t, n in self._tagscache.tags.iteritems():
1044 1044 nodetagscache.setdefault(n, []).append(t)
1045 1045 for tags in nodetagscache.itervalues():
1046 1046 tags.sort()
1047 1047 self._tagscache.nodetagscache = nodetagscache
1048 1048 return self._tagscache.nodetagscache.get(node, [])
1049 1049
1050 1050 def nodebookmarks(self, node):
1051 1051 """return the list of bookmarks pointing to the specified node"""
1052 1052 return self._bookmarks.names(node)
1053 1053
1054 1054 def branchmap(self):
1055 1055 '''returns a dictionary {branch: [branchheads]} with branchheads
1056 1056 ordered by increasing revision number'''
1057 1057 branchmap.updatecache(self)
1058 1058 return self._branchcaches[self.filtername]
1059 1059
1060 1060 @unfilteredmethod
1061 1061 def revbranchcache(self):
1062 1062 if not self._revbranchcache:
1063 1063 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1064 1064 return self._revbranchcache
1065 1065
1066 1066 def branchtip(self, branch, ignoremissing=False):
1067 1067 '''return the tip node for a given branch
1068 1068
1069 1069 If ignoremissing is True, then this method will not raise an error.
1070 1070 This is helpful for callers that only expect None for a missing branch
1071 1071 (e.g. namespace).
1072 1072
1073 1073 '''
1074 1074 try:
1075 1075 return self.branchmap().branchtip(branch)
1076 1076 except KeyError:
1077 1077 if not ignoremissing:
1078 1078 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1079 1079 else:
1080 1080 pass
1081 1081
1082 1082 def lookup(self, key):
1083 1083 return scmutil.revsymbol(self, key).node()
1084 1084
1085 1085 def lookupbranch(self, key):
1086 1086 if key in self.branchmap():
1087 1087 return key
1088 1088
1089 1089 return scmutil.revsymbol(self, key).branch()
1090 1090
1091 1091 def known(self, nodes):
1092 1092 cl = self.changelog
1093 1093 nm = cl.nodemap
1094 1094 filtered = cl.filteredrevs
1095 1095 result = []
1096 1096 for n in nodes:
1097 1097 r = nm.get(n)
1098 1098 resp = not (r is None or r in filtered)
1099 1099 result.append(resp)
1100 1100 return result
1101 1101
1102 1102 def local(self):
1103 1103 return self
1104 1104
1105 1105 def publishing(self):
1106 1106 # it's safe (and desirable) to trust the publish flag unconditionally
1107 1107 # so that we don't finalize changes shared between users via ssh or nfs
1108 1108 return self.ui.configbool('phases', 'publish', untrusted=True)
1109 1109
1110 1110 def cancopy(self):
1111 1111 # so statichttprepo's override of local() works
1112 1112 if not self.local():
1113 1113 return False
1114 1114 if not self.publishing():
1115 1115 return True
1116 1116 # if publishing we can't copy if there is filtered content
1117 1117 return not self.filtered('visible').changelog.filteredrevs
1118 1118
1119 1119 def shared(self):
1120 1120 '''the type of shared repository (None if not shared)'''
1121 1121 if self.sharedpath != self.path:
1122 1122 return 'store'
1123 1123 return None
1124 1124
1125 1125 def wjoin(self, f, *insidef):
1126 1126 return self.vfs.reljoin(self.root, f, *insidef)
1127 1127
1128 1128 def file(self, f):
1129 1129 if f[0] == '/':
1130 1130 f = f[1:]
1131 1131 return filelog.filelog(self.svfs, f)
1132 1132
1133 1133 def setparents(self, p1, p2=nullid):
1134 1134 with self.dirstate.parentchange():
1135 1135 copies = self.dirstate.setparents(p1, p2)
1136 1136 pctx = self[p1]
1137 1137 if copies:
1138 1138 # Adjust copy records; the dirstate cannot do it, as it
1139 1139 # requires access to the parents' manifests. Preserve them
1140 1140 # only for entries added to the first parent.
1141 1141 for f in copies:
1142 1142 if f not in pctx and copies[f] in pctx:
1143 1143 self.dirstate.copy(copies[f], f)
1144 1144 if p2 == nullid:
1145 1145 for f, s in sorted(self.dirstate.copies().items()):
1146 1146 if f not in pctx and s not in pctx:
1147 1147 self.dirstate.copy(None, f)
1148 1148
1149 1149 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1150 1150 """changeid can be a changeset revision, node, or tag.
1151 1151 fileid can be a file revision or node."""
1152 1152 return context.filectx(self, path, changeid, fileid,
1153 1153 changectx=changectx)
1154 1154
1155 1155 def getcwd(self):
1156 1156 return self.dirstate.getcwd()
1157 1157
1158 1158 def pathto(self, f, cwd=None):
1159 1159 return self.dirstate.pathto(f, cwd)
1160 1160
1161 1161 def _loadfilter(self, filter):
1162 1162 if filter not in self._filterpats:
1163 1163 l = []
1164 1164 for pat, cmd in self.ui.configitems(filter):
1165 1165 if cmd == '!':
1166 1166 continue
1167 1167 mf = matchmod.match(self.root, '', [pat])
1168 1168 fn = None
1169 1169 params = cmd
1170 1170 for name, filterfn in self._datafilters.iteritems():
1171 1171 if cmd.startswith(name):
1172 1172 fn = filterfn
1173 1173 params = cmd[len(name):].lstrip()
1174 1174 break
1175 1175 if not fn:
1176 1176 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1177 1177 # Wrap old filters not supporting keyword arguments
1178 1178 if not pycompat.getargspec(fn)[2]:
1179 1179 oldfn = fn
1180 1180 fn = lambda s, c, **kwargs: oldfn(s, c)
1181 1181 l.append((mf, fn, params))
1182 1182 self._filterpats[filter] = l
1183 1183 return self._filterpats[filter]
1184 1184
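    # Configuration sketch (illustrative, from the hgrc encode/decode
    # mechanism): patterns map to filter commands, '!' disables an entry,
    # and commands without a registered data filter fall through to
    # procutil.filter():
    #
    #     [encode]
    #     *.gz = pipe: gunzip
    #     [decode]
    #     *.gz = gzip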
1185 1185 def _filter(self, filterpats, filename, data):
1186 1186 for mf, fn, cmd in filterpats:
1187 1187 if mf(filename):
1188 1188 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1189 1189 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1190 1190 break
1191 1191
1192 1192 return data
1193 1193
1194 1194 @unfilteredpropertycache
1195 1195 def _encodefilterpats(self):
1196 1196 return self._loadfilter('encode')
1197 1197
1198 1198 @unfilteredpropertycache
1199 1199 def _decodefilterpats(self):
1200 1200 return self._loadfilter('decode')
1201 1201
1202 1202 def adddatafilter(self, name, filter):
1203 1203 self._datafilters[name] = filter
1204 1204
1205 1205 def wread(self, filename):
1206 1206 if self.wvfs.islink(filename):
1207 1207 data = self.wvfs.readlink(filename)
1208 1208 else:
1209 1209 data = self.wvfs.read(filename)
1210 1210 return self._filter(self._encodefilterpats, filename, data)
1211 1211
1212 1212 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1213 1213 """write ``data`` into ``filename`` in the working directory
1214 1214
1215 1215 This returns the length of the written (maybe decoded) data.
1216 1216 """
1217 1217 data = self._filter(self._decodefilterpats, filename, data)
1218 1218 if 'l' in flags:
1219 1219 self.wvfs.symlink(data, filename)
1220 1220 else:
1221 1221 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1222 1222 **kwargs)
1223 1223 if 'x' in flags:
1224 1224 self.wvfs.setflags(filename, False, True)
1225 1225 else:
1226 1226 self.wvfs.setflags(filename, False, False)
1227 1227 return len(data)
1228 1228
1229 1229 def wwritedata(self, filename, data):
1230 1230 return self._filter(self._decodefilterpats, filename, data)
1231 1231
1232 1232 def currenttransaction(self):
1233 1233 """return the current transaction or None if non exists"""
1234 1234 if self._transref:
1235 1235 tr = self._transref()
1236 1236 else:
1237 1237 tr = None
1238 1238
1239 1239 if tr and tr.running():
1240 1240 return tr
1241 1241 return None
1242 1242
1243 1243 def transaction(self, desc, report=None):
1244 1244 if (self.ui.configbool('devel', 'all-warnings')
1245 1245 or self.ui.configbool('devel', 'check-locks')):
1246 1246 if self._currentlock(self._lockref) is None:
1247 1247 raise error.ProgrammingError('transaction requires locking')
1248 1248 tr = self.currenttransaction()
1249 1249 if tr is not None:
1250 1250 return tr.nest(name=desc)
1251 1251
1252 1252 # abort here if the journal already exists
1253 1253 if self.svfs.exists("journal"):
1254 1254 raise error.RepoError(
1255 1255 _("abandoned transaction found"),
1256 1256 hint=_("run 'hg recover' to clean up transaction"))
1257 1257
1258 1258 idbase = "%.40f#%f" % (random.random(), time.time())
1259 1259 ha = hex(hashlib.sha1(idbase).digest())
1260 1260 txnid = 'TXN:' + ha
1261 1261 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1262 1262
1263 1263 self._writejournal(desc)
1264 1264 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1265 1265 if report:
1266 1266 rp = report
1267 1267 else:
1268 1268 rp = self.ui.warn
1269 1269 vfsmap = {'plain': self.vfs} # root of .hg/
1270 1270 # we must avoid a cyclic reference between repo and transaction.
1271 1271 reporef = weakref.ref(self)
1272 1272 # Code to track tag movement
1273 1273 #
1274 1274 # Since tags are all handled as file content, it is actually quite hard
1275 1275 # to track these movements from a code perspective. So we fall back to
1276 1276 # tracking at the repository level. One could envision tracking changes
1277 1277 # to the '.hgtags' file through changegroup apply but that fails to
1278 1278 # cope with cases where a transaction exposes new heads without a changegroup
1279 1279 # being involved (eg: phase movement).
1280 1280 #
1281 1281 # For now, we gate the feature behind a flag since this likely comes
1282 1282 # with performance impacts. The current code runs more often than needed
1283 1283 # and does not use caches as much as it could. The current focus is on
1284 1284 # the behavior of the feature so we disable it by default. The flag
1285 1285 # will be removed when we are happy with the performance impact.
1286 1286 #
1287 1287 # Once this feature is no longer experimental, move the following
1288 1288 # documentation to the appropriate help section:
1289 1289 #
1290 1290 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1291 1291 # tags (new or changed or deleted tags). In addition the details of
1292 1292 # these changes are made available in a file at:
1293 1293 # ``REPOROOT/.hg/changes/tags.changes``.
1294 1294 # Make sure you check for HG_TAG_MOVED before reading that file as it
1295 1295 # might exist from a previous transaction even if no tags were touched
1296 1296 # in this one. Changes are recorded in a line-based format::
1297 1297 #
1298 1298 # <action> <hex-node> <tag-name>\n
1299 1299 #
1300 1300 # Actions are defined as follows:
1301 1301 # "-R": tag is removed,
1302 1302 # "+A": tag is added,
1303 1303 # "-M": tag is moved (old value),
1304 1304 # "+M": tag is moved (new value),
1305 1305 tracktags = lambda x: None
1306 1306 # experimental config: experimental.hook-track-tags
1307 1307 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1308 1308 if desc != 'strip' and shouldtracktags:
1309 1309 oldheads = self.changelog.headrevs()
1310 1310 def tracktags(tr2):
1311 1311 repo = reporef()
1312 1312 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1313 1313 newheads = repo.changelog.headrevs()
1314 1314 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1315 1315 # notes: we compare lists here.
1316 1316 # As we do it only once, building a set would not be cheaper
1317 1317 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1318 1318 if changes:
1319 1319 tr2.hookargs['tag_moved'] = '1'
1320 1320 with repo.vfs('changes/tags.changes', 'w',
1321 1321 atomictemp=True) as changesfile:
1322 1322 # note: we do not register the file to the transaction
1323 1323 # because we need it to still exist when the transaction
1324 1324 # is closed (for txnclose hooks)
1325 1325 tagsmod.writediff(changesfile, changes)
1326 1326 def validate(tr2):
1327 1327 """will run pre-closing hooks"""
1328 1328 # XXX the transaction API is a bit lacking here so we take a hacky
1329 1329 # path for now
1330 1330 #
1331 1331 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1332 1332 # dict is copied before these run. In addition, we need the data
1333 1333 # available to in-memory hooks too.
1334 1334 #
1335 1335 # Moreover, we also need to make sure this runs before txnclose
1336 1336 # hooks and there is no "pending" mechanism that would execute
1337 1337 # logic only if hooks are about to run.
1338 1338 #
1339 1339 # Fixing this limitation of the transaction is also needed to track
1340 1340 # other families of changes (bookmarks, phases, obsolescence).
1341 1341 #
1342 1342 # This will have to be fixed before we remove the experimental
1343 1343 # gating.
1344 1344 tracktags(tr2)
1345 1345 repo = reporef()
1346 1346 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1347 1347 scmutil.enforcesinglehead(repo, tr2, desc)
1348 1348 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1349 1349 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1350 1350 args = tr.hookargs.copy()
1351 1351 args.update(bookmarks.preparehookargs(name, old, new))
1352 1352 repo.hook('pretxnclose-bookmark', throw=True,
1353 1353 txnname=desc,
1354 1354 **pycompat.strkwargs(args))
1355 1355 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1356 1356 cl = repo.unfiltered().changelog
1357 1357 for rev, (old, new) in tr.changes['phases'].items():
1358 1358 args = tr.hookargs.copy()
1359 1359 node = hex(cl.node(rev))
1360 1360 args.update(phases.preparehookargs(node, old, new))
1361 1361 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1362 1362 **pycompat.strkwargs(args))
1363 1363
1364 1364 repo.hook('pretxnclose', throw=True,
1365 1365 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1366 1366 def releasefn(tr, success):
1367 1367 repo = reporef()
1368 1368 if success:
1369 1369 # this should be explicitly invoked here, because
1370 1370 # in-memory changes aren't written out when closing
1371 1371 # the transaction, if tr.addfilegenerator (via
1372 1372 # dirstate.write or so) isn't invoked while the
1373 1373 # transaction is running
1374 1374 repo.dirstate.write(None)
1375 1375 else:
1376 1376 # discard all changes (including ones already written
1377 1377 # out) in this transaction
1378 1378 narrowspec.restorebackup(self, 'journal.narrowspec')
1379 1379 repo.dirstate.restorebackup(None, 'journal.dirstate')
1380 1380
1381 1381 repo.invalidate(clearfilecache=True)
1382 1382
1383 1383 tr = transaction.transaction(rp, self.svfs, vfsmap,
1384 1384 "journal",
1385 1385 "undo",
1386 1386 aftertrans(renames),
1387 1387 self.store.createmode,
1388 1388 validator=validate,
1389 1389 releasefn=releasefn,
1390 1390 checkambigfiles=_cachedfiles,
1391 1391 name=desc)
1392 1392 tr.changes['origrepolen'] = len(self)
1393 1393 tr.changes['obsmarkers'] = set()
1394 1394 tr.changes['phases'] = {}
1395 1395 tr.changes['bookmarks'] = {}
1396 1396
1397 1397 tr.hookargs['txnid'] = txnid
1398 1398 # note: writing the fncache only during finalize means that the file is
1399 1399 # outdated when running hooks. As fncache is used for streaming clone,
1400 1400 # this is not expected to break anything that happens during the hooks.
1401 1401 tr.addfinalize('flush-fncache', self.store.write)
1402 1402 def txnclosehook(tr2):
1403 1403 """To be run if transaction is successful, will schedule a hook run
1404 1404 """
1405 1405 # Don't reference tr2 in hook() so we don't hold a reference.
1406 1406 # This reduces memory consumption when there are multiple
1407 1407 # transactions per lock. This can likely go away if issue5045
1408 1408 # fixes the function accumulation.
1409 1409 hookargs = tr2.hookargs
1410 1410
1411 1411 def hookfunc():
1412 1412 repo = reporef()
1413 1413 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1414 1414 bmchanges = sorted(tr.changes['bookmarks'].items())
1415 1415 for name, (old, new) in bmchanges:
1416 1416 args = tr.hookargs.copy()
1417 1417 args.update(bookmarks.preparehookargs(name, old, new))
1418 1418 repo.hook('txnclose-bookmark', throw=False,
1419 1419 txnname=desc, **pycompat.strkwargs(args))
1420 1420
1421 1421 if hook.hashook(repo.ui, 'txnclose-phase'):
1422 1422 cl = repo.unfiltered().changelog
1423 1423 phasemv = sorted(tr.changes['phases'].items())
1424 1424 for rev, (old, new) in phasemv:
1425 1425 args = tr.hookargs.copy()
1426 1426 node = hex(cl.node(rev))
1427 1427 args.update(phases.preparehookargs(node, old, new))
1428 1428 repo.hook('txnclose-phase', throw=False, txnname=desc,
1429 1429 **pycompat.strkwargs(args))
1430 1430
1431 1431 repo.hook('txnclose', throw=False, txnname=desc,
1432 1432 **pycompat.strkwargs(hookargs))
1433 1433 reporef()._afterlock(hookfunc)
1434 1434 tr.addfinalize('txnclose-hook', txnclosehook)
1435 1435 # Include a leading "-" to make it happen before the transaction summary
1436 1436 # reports registered via scmutil.registersummarycallback() whose names
1437 1437 # are 00-txnreport etc. That way, the caches will be warm when the
1438 1438 # callbacks run.
1439 1439 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1440 1440 def txnaborthook(tr2):
1441 1441 """To be run if transaction is aborted
1442 1442 """
1443 1443 reporef().hook('txnabort', throw=False, txnname=desc,
1444 1444 **pycompat.strkwargs(tr2.hookargs))
1445 1445 tr.addabort('txnabort-hook', txnaborthook)
1446 1446 # avoid eager cache invalidation. in-memory data should be identical
1447 1447 # to stored data if transaction has no error.
1448 1448 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1449 1449 self._transref = weakref.ref(tr)
1450 1450 scmutil.registersummarycallback(self, tr, desc)
1451 1451 return tr
1452 1452
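    # Usage sketch (description string hypothetical): per the devel check
    # at the top of transaction(), callers hold the store lock first:
    #
    #     with repo.lock():
    #         with repo.transaction('my-operation') as tr:
    #             ...  # writes registered with tr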
1453 1453 def _journalfiles(self):
1454 1454 return ((self.svfs, 'journal'),
1455 1455 (self.vfs, 'journal.dirstate'),
1456 1456 (self.vfs, 'journal.branch'),
1457 1457 (self.vfs, 'journal.desc'),
1458 1458 (self.vfs, 'journal.bookmarks'),
1459 1459 (self.svfs, 'journal.phaseroots'))
1460 1460
1461 1461 def undofiles(self):
1462 1462 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1463 1463
1464 1464 @unfilteredmethod
1465 1465 def _writejournal(self, desc):
1466 1466 self.dirstate.savebackup(None, 'journal.dirstate')
1467 1467 narrowspec.savebackup(self, 'journal.narrowspec')
1468 1468 self.vfs.write("journal.branch",
1469 1469 encoding.fromlocal(self.dirstate.branch()))
1470 1470 self.vfs.write("journal.desc",
1471 1471 "%d\n%s\n" % (len(self), desc))
1472 1472 self.vfs.write("journal.bookmarks",
1473 1473 self.vfs.tryread("bookmarks"))
1474 1474 self.svfs.write("journal.phaseroots",
1475 1475 self.svfs.tryread("phaseroots"))
1476 1476
1477 1477 def recover(self):
1478 1478 with self.lock():
1479 1479 if self.svfs.exists("journal"):
1480 1480 self.ui.status(_("rolling back interrupted transaction\n"))
1481 1481 vfsmap = {'': self.svfs,
1482 1482 'plain': self.vfs,}
1483 1483 transaction.rollback(self.svfs, vfsmap, "journal",
1484 1484 self.ui.warn,
1485 1485 checkambigfiles=_cachedfiles)
1486 1486 self.invalidate()
1487 1487 return True
1488 1488 else:
1489 1489 self.ui.warn(_("no interrupted transaction available\n"))
1490 1490 return False
1491 1491
1492 1492 def rollback(self, dryrun=False, force=False):
1493 1493 wlock = lock = dsguard = None
1494 1494 try:
1495 1495 wlock = self.wlock()
1496 1496 lock = self.lock()
1497 1497 if self.svfs.exists("undo"):
1498 1498 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1499 1499
1500 1500 return self._rollback(dryrun, force, dsguard)
1501 1501 else:
1502 1502 self.ui.warn(_("no rollback information available\n"))
1503 1503 return 1
1504 1504 finally:
1505 1505 release(dsguard, lock, wlock)
1506 1506
1507 1507 @unfilteredmethod # Until we get smarter cache management
1508 1508 def _rollback(self, dryrun, force, dsguard):
1509 1509 ui = self.ui
1510 1510 try:
1511 1511 args = self.vfs.read('undo.desc').splitlines()
1512 1512 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1513 1513 if len(args) >= 3:
1514 1514 detail = args[2]
1515 1515 oldtip = oldlen - 1
1516 1516
1517 1517 if detail and ui.verbose:
1518 1518 msg = (_('repository tip rolled back to revision %d'
1519 1519 ' (undo %s: %s)\n')
1520 1520 % (oldtip, desc, detail))
1521 1521 else:
1522 1522 msg = (_('repository tip rolled back to revision %d'
1523 1523 ' (undo %s)\n')
1524 1524 % (oldtip, desc))
1525 1525 except IOError:
1526 1526 msg = _('rolling back unknown transaction\n')
1527 1527 desc = None
1528 1528
1529 1529 if not force and self['.'] != self['tip'] and desc == 'commit':
1530 1530 raise error.Abort(
1531 1531 _('rollback of last commit while not checked out '
1532 1532 'may lose data'), hint=_('use -f to force'))
1533 1533
1534 1534 ui.status(msg)
1535 1535 if dryrun:
1536 1536 return 0
1537 1537
1538 1538 parents = self.dirstate.parents()
1539 1539 self.destroying()
1540 1540 vfsmap = {'plain': self.vfs, '': self.svfs}
1541 1541 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1542 1542 checkambigfiles=_cachedfiles)
1543 1543 if self.vfs.exists('undo.bookmarks'):
1544 1544 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1545 1545 if self.svfs.exists('undo.phaseroots'):
1546 1546 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1547 1547 self.invalidate()
1548 1548
1549 1549 parentgone = (parents[0] not in self.changelog.nodemap or
1550 1550 parents[1] not in self.changelog.nodemap)
1551 1551 if parentgone:
1552 1552 # prevent dirstateguard from overwriting already restored one
1553 1553 dsguard.close()
1554 1554
1555 1555 narrowspec.restorebackup(self, 'undo.narrowspec')
1556 1556 self.dirstate.restorebackup(None, 'undo.dirstate')
1557 1557 try:
1558 1558 branch = self.vfs.read('undo.branch')
1559 1559 self.dirstate.setbranch(encoding.tolocal(branch))
1560 1560 except IOError:
1561 1561 ui.warn(_('named branch could not be reset: '
1562 1562 'current branch is still \'%s\'\n')
1563 1563 % self.dirstate.branch())
1564 1564
1565 1565 parents = tuple([p.rev() for p in self[None].parents()])
1566 1566 if len(parents) > 1:
1567 1567 ui.status(_('working directory now based on '
1568 1568 'revisions %d and %d\n') % parents)
1569 1569 else:
1570 1570 ui.status(_('working directory now based on '
1571 1571 'revision %d\n') % parents)
1572 1572 mergemod.mergestate.clean(self, self['.'].node())
1573 1573
1574 1574 # TODO: if we know which new heads may result from this rollback, pass
1575 1575 # them to destroy(), which will prevent the branchhead cache from being
1576 1576 # invalidated.
1577 1577 self.destroyed()
1578 1578 return 0
1579 1579
1580 1580 def _buildcacheupdater(self, newtransaction):
1581 1581 """called during transaction to build the callback updating cache
1582 1582
1583 1583         Lives on the repository to help extensions that might want to augment
1584 1584         this logic. For this purpose, the created transaction is passed to the
1585 1585         method.
1586 1586 """
1587 1587 # we must avoid cyclic reference between repo and transaction.
1588 1588 reporef = weakref.ref(self)
1589 1589 def updater(tr):
1590 1590 repo = reporef()
1591 1591 repo.updatecaches(tr)
1592 1592 return updater
1593 1593
1594 1594 @unfilteredmethod
1595 1595 def updatecaches(self, tr=None, full=False):
1596 1596 """warm appropriate caches
1597 1597
1598 1598         If this function is called after a transaction is closed, the
1599 1599         transaction will be available in the 'tr' argument. This can be used to
1600 1600         selectively update caches relevant to the changes in that transaction.
1601 1601
1602 1602         If 'full' is set, make sure all caches the function knows about have
1603 1603         up-to-date data, even the ones usually loaded more lazily.
1604 1604 """
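        # Illustrative (editor's sketch): a caller wanting every cache warm up
        # front would be expected to invoke
        #
        #     repo.updatecaches(full=True)
        #
        # while transaction close passes the finished transaction as 'tr'
        # through the '-warm-cache' callback registered above.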
1605 1605 if tr is not None and tr.hookargs.get('source') == 'strip':
1606 1606 # During strip, many caches are invalid but
1607 1607 # later call to `destroyed` will refresh them.
1608 1608 return
1609 1609
1610 1610 if tr is None or tr.changes['origrepolen'] < len(self):
1611 1611             # updating the unfiltered branchmap should refresh all the others.
1612 1612 self.ui.debug('updating the branch cache\n')
1613 1613 branchmap.updatecache(self.filtered('served'))
1614 1614
1615 1615 if full:
1616 1616 rbc = self.revbranchcache()
1617 1617 for r in self.changelog:
1618 1618 rbc.branchinfo(r)
1619 1619 rbc.write()
1620 1620
1621 1621 # ensure the working copy parents are in the manifestfulltextcache
1622 1622 for ctx in self['.'].parents():
1623 1623 ctx.manifest() # accessing the manifest is enough
1624 1624
1625 1625 def invalidatecaches(self):
1626 1626
1627 1627 if '_tagscache' in vars(self):
1628 1628 # can't use delattr on proxy
1629 1629 del self.__dict__['_tagscache']
1630 1630
1631 1631 self.unfiltered()._branchcaches.clear()
1632 1632 self.invalidatevolatilesets()
1633 1633 self._sparsesignaturecache.clear()
1634 1634
1635 1635 def invalidatevolatilesets(self):
1636 1636 self.filteredrevcache.clear()
1637 1637 obsolete.clearobscaches(self)
1638 1638
1639 1639 def invalidatedirstate(self):
1640 1640         '''Invalidates the dirstate, causing the next call to dirstate
1641 1641         to check if it was modified since the last time it was read,
1642 1642         rereading it if it was.
1643 1643 
1644 1644         This is different from dirstate.invalidate() in that it doesn't
1645 1645         always reread the dirstate. Use dirstate.invalidate() if you want to
1646 1646         explicitly read the dirstate again (e.g. restoring it to a previously
1647 1647         known good state).'''
1648 1648 if hasunfilteredcache(self, 'dirstate'):
1649 1649 for k in self.dirstate._filecache:
1650 1650 try:
1651 1651 delattr(self.dirstate, k)
1652 1652 except AttributeError:
1653 1653 pass
1654 1654 delattr(self.unfiltered(), 'dirstate')
1655 1655
1656 1656 def invalidate(self, clearfilecache=False):
1657 1657 '''Invalidates both store and non-store parts other than dirstate
1658 1658
1659 1659         If a transaction is running, invalidation of the store is omitted,
1660 1660         because discarding in-memory changes might cause inconsistency
1661 1661         (e.g. an incomplete fncache causes unintentional failure, but a
1662 1662         redundant one doesn't).
1663 1663 '''
1664 1664 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1665 1665 for k in list(self._filecache.keys()):
1666 1666 # dirstate is invalidated separately in invalidatedirstate()
1667 1667 if k == 'dirstate':
1668 1668 continue
1669 1669 if (k == 'changelog' and
1670 1670 self.currenttransaction() and
1671 1671 self.changelog._delayed):
1672 1672 # The changelog object may store unwritten revisions. We don't
1673 1673 # want to lose them.
1674 1674 # TODO: Solve the problem instead of working around it.
1675 1675 continue
1676 1676
1677 1677 if clearfilecache:
1678 1678 del self._filecache[k]
1679 1679 try:
1680 1680 delattr(unfiltered, k)
1681 1681 except AttributeError:
1682 1682 pass
1683 1683 self.invalidatecaches()
1684 1684 if not self.currenttransaction():
1685 1685 # TODO: Changing contents of store outside transaction
1686 1686 # causes inconsistency. We should make in-memory store
1687 1687 # changes detectable, and abort if changed.
1688 1688 self.store.invalidatecaches()
1689 1689
1690 1690 def invalidateall(self):
1691 1691 '''Fully invalidates both store and non-store parts, causing the
1692 1692 subsequent operation to reread any outside changes.'''
1693 1693         # extensions should hook this to invalidate their caches
1694 1694 self.invalidate()
1695 1695 self.invalidatedirstate()
1696 1696
1697 1697 @unfilteredmethod
1698 1698 def _refreshfilecachestats(self, tr):
1699 1699 """Reload stats of cached files so that they are flagged as valid"""
1700 1700 for k, ce in self._filecache.items():
1701 1701 k = pycompat.sysstr(k)
1702 1702 if k == r'dirstate' or k not in self.__dict__:
1703 1703 continue
1704 1704 ce.refresh()
1705 1705
1706 1706 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1707 1707 inheritchecker=None, parentenvvar=None):
1708 1708 parentlock = None
1709 1709 # the contents of parentenvvar are used by the underlying lock to
1710 1710 # determine whether it can be inherited
1711 1711 if parentenvvar is not None:
1712 1712 parentlock = encoding.environ.get(parentenvvar)
1713 1713
1714 1714 timeout = 0
1715 1715 warntimeout = 0
1716 1716 if wait:
1717 1717 timeout = self.ui.configint("ui", "timeout")
1718 1718 warntimeout = self.ui.configint("ui", "timeout.warn")
1719 1719 # internal config: ui.signal-safe-lock
1720 1720 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
1721 1721
1722 1722 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1723 1723 releasefn=releasefn,
1724 1724 acquirefn=acquirefn, desc=desc,
1725 1725 inheritchecker=inheritchecker,
1726 1726 parentlock=parentlock,
1727 1727 signalsafe=signalsafe)
1728 1728 return l
1729 1729
1730 1730 def _afterlock(self, callback):
1731 1731 """add a callback to be run when the repository is fully unlocked
1732 1732
1733 1733 The callback will be executed when the outermost lock is released
1734 1734 (with wlock being higher level than 'lock')."""
1735 1735 for ref in (self._wlockref, self._lockref):
1736 1736 l = ref and ref()
1737 1737 if l and l.held:
1738 1738 l.postrelease.append(callback)
1739 1739 break
1740 1740         else: # no lock has been found.
1741 1741 callback()
1742 1742
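    # Illustrative (editor's sketch, hypothetical hook name): _afterlock()
    # defers work until the outermost lock is released, e.g.
    #
    #     def runhook():
    #         repo.hook('myhook', throw=False)
    #     repo._afterlock(runhook)   # runs immediately if no lock is held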
1743 1743 def lock(self, wait=True):
1744 1744 '''Lock the repository store (.hg/store) and return a weak reference
1745 1745 to the lock. Use this before modifying the store (e.g. committing or
1746 1746         stripping). If you are opening a transaction, get a lock as well.
1747 1747 
1748 1748         If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1749 1749         'wlock' first to avoid a dead-lock hazard.'''
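        # Illustrative lock ordering (editor's sketch): callers needing both
        # locks and a transaction are expected to nest them as
        #
        #     with repo.wlock():
        #         with repo.lock():
        #             with repo.transaction('my-operation'):
        #                 ...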
1750 1750 l = self._currentlock(self._lockref)
1751 1751 if l is not None:
1752 1752 l.lock()
1753 1753 return l
1754 1754
1755 1755 l = self._lock(self.svfs, "lock", wait, None,
1756 1756 self.invalidate, _('repository %s') % self.origroot)
1757 1757 self._lockref = weakref.ref(l)
1758 1758 return l
1759 1759
1760 1760 def _wlockchecktransaction(self):
1761 1761 if self.currenttransaction() is not None:
1762 1762 raise error.LockInheritanceContractViolation(
1763 1763 'wlock cannot be inherited in the middle of a transaction')
1764 1764
1765 1765 def wlock(self, wait=True):
1766 1766 '''Lock the non-store parts of the repository (everything under
1767 1767 .hg except .hg/store) and return a weak reference to the lock.
1768 1768
1769 1769 Use this before modifying files in .hg.
1770 1770
1771 1771         If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1772 1772 'wlock' first to avoid a dead-lock hazard.'''
1773 1773 l = self._wlockref and self._wlockref()
1774 1774 if l is not None and l.held:
1775 1775 l.lock()
1776 1776 return l
1777 1777
1778 1778         # We do not need to check for non-waiting lock acquisition. Such
1779 1779         # acquisition would not cause a dead-lock as it would just fail.
1780 1780 if wait and (self.ui.configbool('devel', 'all-warnings')
1781 1781 or self.ui.configbool('devel', 'check-locks')):
1782 1782 if self._currentlock(self._lockref) is not None:
1783 1783 self.ui.develwarn('"wlock" acquired after "lock"')
1784 1784
1785 1785 def unlock():
1786 1786 if self.dirstate.pendingparentchange():
1787 1787 self.dirstate.invalidate()
1788 1788 else:
1789 1789 self.dirstate.write(None)
1790 1790
1791 1791 self._filecache['dirstate'].refresh()
1792 1792
1793 1793 l = self._lock(self.vfs, "wlock", wait, unlock,
1794 1794 self.invalidatedirstate, _('working directory of %s') %
1795 1795 self.origroot,
1796 1796 inheritchecker=self._wlockchecktransaction,
1797 1797 parentenvvar='HG_WLOCK_LOCKER')
1798 1798 self._wlockref = weakref.ref(l)
1799 1799 return l
1800 1800
1801 1801 def _currentlock(self, lockref):
1802 1802 """Returns the lock if it's held, or None if it's not."""
1803 1803 if lockref is None:
1804 1804 return None
1805 1805 l = lockref()
1806 1806 if l is None or not l.held:
1807 1807 return None
1808 1808 return l
1809 1809
1810 1810 def currentwlock(self):
1811 1811 """Returns the wlock if it's held, or None if it's not."""
1812 1812 return self._currentlock(self._wlockref)
1813 1813
1814 1814 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1815 1815 """
1816 1816 commit an individual file as part of a larger transaction
1817 1817 """
1818 1818
1819 1819 fname = fctx.path()
1820 1820 fparent1 = manifest1.get(fname, nullid)
1821 1821 fparent2 = manifest2.get(fname, nullid)
1822 1822 if isinstance(fctx, context.filectx):
1823 1823 node = fctx.filenode()
1824 1824 if node in [fparent1, fparent2]:
1825 1825 self.ui.debug('reusing %s filelog entry\n' % fname)
1826 1826 if manifest1.flags(fname) != fctx.flags():
1827 1827 changelist.append(fname)
1828 1828 return node
1829 1829
1830 1830 flog = self.file(fname)
1831 1831 meta = {}
1832 1832 copy = fctx.renamed()
1833 1833 if copy and copy[0] != fname:
1834 1834 # Mark the new revision of this file as a copy of another
1835 1835 # file. This copy data will effectively act as a parent
1836 1836 # of this new revision. If this is a merge, the first
1837 1837 # parent will be the nullid (meaning "look up the copy data")
1838 1838 # and the second one will be the other parent. For example:
1839 1839 #
1840 1840 # 0 --- 1 --- 3 rev1 changes file foo
1841 1841 # \ / rev2 renames foo to bar and changes it
1842 1842 # \- 2 -/ rev3 should have bar with all changes and
1843 1843 # should record that bar descends from
1844 1844 # bar in rev2 and foo in rev1
1845 1845 #
1846 1846 # this allows this merge to succeed:
1847 1847 #
1848 1848 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1849 1849 # \ / merging rev3 and rev4 should use bar@rev2
1850 1850 # \- 2 --- 4 as the merge base
1851 1851 #
1852 1852
1853 1853 cfname = copy[0]
1854 1854 crev = manifest1.get(cfname)
1855 1855 newfparent = fparent2
1856 1856
1857 1857 if manifest2: # branch merge
1858 1858 if fparent2 == nullid or crev is None: # copied on remote side
1859 1859 if cfname in manifest2:
1860 1860 crev = manifest2[cfname]
1861 1861 newfparent = fparent1
1862 1862
1863 1863 # Here, we used to search backwards through history to try to find
1864 1864 # where the file copy came from if the source of a copy was not in
1865 1865 # the parent directory. However, this doesn't actually make sense to
1866 1866 # do (what does a copy from something not in your working copy even
1867 1867             # mean?) and it causes bugs (e.g. issue4476). Instead, we will warn
1868 1868 # the user that copy information was dropped, so if they didn't
1869 1869 # expect this outcome it can be fixed, but this is the correct
1870 1870 # behavior in this circumstance.
1871 1871
1872 1872 if crev:
1873 1873 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1874 1874 meta["copy"] = cfname
1875 1875 meta["copyrev"] = hex(crev)
1876 1876 fparent1, fparent2 = nullid, newfparent
1877 1877 else:
1878 1878 self.ui.warn(_("warning: can't find ancestor for '%s' "
1879 1879 "copied from '%s'!\n") % (fname, cfname))
1880 1880
1881 1881 elif fparent1 == nullid:
1882 1882 fparent1, fparent2 = fparent2, nullid
1883 1883 elif fparent2 != nullid:
1884 1884 # is one parent an ancestor of the other?
1885 1885 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1886 1886 if fparent1 in fparentancestors:
1887 1887 fparent1, fparent2 = fparent2, nullid
1888 1888 elif fparent2 in fparentancestors:
1889 1889 fparent2 = nullid
1890 1890
1891 1891 # is the file changed?
1892 1892 text = fctx.data()
1893 1893 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1894 1894 changelist.append(fname)
1895 1895 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1896 1896 # are just the flags changed during merge?
1897 1897 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1898 1898 changelist.append(fname)
1899 1899
1900 1900 return fparent1
1901 1901
1902 1902 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1903 1903 """check for commit arguments that aren't committable"""
1904 1904 if match.isexact() or match.prefix():
1905 1905 matched = set(status.modified + status.added + status.removed)
1906 1906
1907 1907 for f in match.files():
1908 1908 f = self.dirstate.normalize(f)
1909 1909 if f == '.' or f in matched or f in wctx.substate:
1910 1910 continue
1911 1911 if f in status.deleted:
1912 1912 fail(f, _('file not found!'))
1913 1913 if f in vdirs: # visited directory
1914 1914 d = f + '/'
1915 1915 for mf in matched:
1916 1916 if mf.startswith(d):
1917 1917 break
1918 1918 else:
1919 1919 fail(f, _("no match under directory!"))
1920 1920 elif f not in self.dirstate:
1921 1921 fail(f, _("file not tracked!"))
1922 1922
1923 1923 @unfilteredmethod
1924 1924 def commit(self, text="", user=None, date=None, match=None, force=False,
1925 1925 editor=False, extra=None):
1926 1926 """Add a new revision to current repository.
1927 1927
1928 1928         Revision information is gathered from the working directory;
1929 1929         match can be used to filter the committed files. If editor is
1930 1930 supplied, it is called to get a commit message.
1931 1931 """
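        # Illustrative usage (editor's sketch, hypothetical values):
        #
        #     node = repo.commit(text='fix bug', user='alice <a@example.com>')
        #
        # returns the new changeset node, or None when there is nothing to
        # commit and empty commits are not allowed.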
1932 1932 if extra is None:
1933 1933 extra = {}
1934 1934
1935 1935 def fail(f, msg):
1936 1936 raise error.Abort('%s: %s' % (f, msg))
1937 1937
1938 1938 if not match:
1939 1939 match = matchmod.always(self.root, '')
1940 1940
1941 1941 if not force:
1942 1942 vdirs = []
1943 1943 match.explicitdir = vdirs.append
1944 1944 match.bad = fail
1945 1945
1946 1946 wlock = lock = tr = None
1947 1947 try:
1948 1948 wlock = self.wlock()
1949 1949 lock = self.lock() # for recent changelog (see issue4368)
1950 1950
1951 1951 wctx = self[None]
1952 1952 merge = len(wctx.parents()) > 1
1953 1953
1954 1954 if not force and merge and not match.always():
1955 1955 raise error.Abort(_('cannot partially commit a merge '
1956 1956 '(do not specify files or patterns)'))
1957 1957
1958 1958 status = self.status(match=match, clean=force)
1959 1959 if force:
1960 1960 status.modified.extend(status.clean) # mq may commit clean files
1961 1961
1962 1962 # check subrepos
1963 1963 subs, commitsubs, newstate = subrepoutil.precommit(
1964 1964 self.ui, wctx, status, match, force=force)
1965 1965
1966 1966 # make sure all explicit patterns are matched
1967 1967 if not force:
1968 1968 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1969 1969
1970 1970 cctx = context.workingcommitctx(self, status,
1971 1971 text, user, date, extra)
1972 1972
1973 1973 # internal config: ui.allowemptycommit
1974 1974 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1975 1975 or extra.get('close') or merge or cctx.files()
1976 1976 or self.ui.configbool('ui', 'allowemptycommit'))
1977 1977 if not allowemptycommit:
1978 1978 return None
1979 1979
1980 1980 if merge and cctx.deleted():
1981 1981 raise error.Abort(_("cannot commit merge with missing files"))
1982 1982
1983 1983 ms = mergemod.mergestate.read(self)
1984 1984 mergeutil.checkunresolved(ms)
1985 1985
1986 1986 if editor:
1987 1987 cctx._text = editor(self, cctx, subs)
1988 1988 edited = (text != cctx._text)
1989 1989
1990 1990 # Save commit message in case this transaction gets rolled back
1991 1991 # (e.g. by a pretxncommit hook). Leave the content alone on
1992 1992 # the assumption that the user will use the same editor again.
1993 1993 msgfn = self.savecommitmessage(cctx._text)
1994 1994
1995 1995 # commit subs and write new state
1996 1996 if subs:
1997 1997 for s in sorted(commitsubs):
1998 1998 sub = wctx.sub(s)
1999 1999 self.ui.status(_('committing subrepository %s\n') %
2000 2000 subrepoutil.subrelpath(sub))
2001 2001 sr = sub.commit(cctx._text, user, date)
2002 2002 newstate[s] = (newstate[s][0], sr)
2003 2003 subrepoutil.writestate(self, newstate)
2004 2004
2005 2005 p1, p2 = self.dirstate.parents()
2006 2006 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2007 2007 try:
2008 2008 self.hook("precommit", throw=True, parent1=hookp1,
2009 2009 parent2=hookp2)
2010 2010 tr = self.transaction('commit')
2011 2011 ret = self.commitctx(cctx, True)
2012 2012 except: # re-raises
2013 2013 if edited:
2014 2014 self.ui.write(
2015 2015 _('note: commit message saved in %s\n') % msgfn)
2016 2016 raise
2017 2017 # update bookmarks, dirstate and mergestate
2018 2018 bookmarks.update(self, [p1, p2], ret)
2019 2019 cctx.markcommitted(ret)
2020 2020 ms.reset()
2021 2021 tr.close()
2022 2022
2023 2023 finally:
2024 2024 lockmod.release(tr, lock, wlock)
2025 2025
2026 2026 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2027 2027             # hack for commands that use a temporary commit (e.g. histedit):
2028 2028             # the temporary commit may already be stripped before the hook runs
2029 2029 if self.changelog.hasnode(ret):
2030 2030 self.hook("commit", node=node, parent1=parent1,
2031 2031 parent2=parent2)
2032 2032 self._afterlock(commithook)
2033 2033 return ret
2034 2034
2035 2035 @unfilteredmethod
2036 2036 def commitctx(self, ctx, error=False):
2037 2037 """Add a new revision to current repository.
2038 2038 Revision information is passed via the context argument.
2039 2039
2040 2040 ctx.files() should list all files involved in this commit, i.e.
2041 2041 modified/added/removed files. On merge, it may be wider than the
2042 2042 ctx.files() to be committed, since any file nodes derived directly
2043 2043 from p1 or p2 are excluded from the committed ctx.files().
2044 2044 """
2045 2045
2046 2046 tr = None
2047 2047 p1, p2 = ctx.p1(), ctx.p2()
2048 2048 user = ctx.user()
2049 2049
2050 2050 lock = self.lock()
2051 2051 try:
2052 2052 tr = self.transaction("commit")
2053 2053 trp = weakref.proxy(tr)
2054 2054
2055 2055 if ctx.manifestnode():
2056 2056 # reuse an existing manifest revision
2057 2057 self.ui.debug('reusing known manifest\n')
2058 2058 mn = ctx.manifestnode()
2059 2059 files = ctx.files()
2060 2060 elif ctx.files():
2061 2061 m1ctx = p1.manifestctx()
2062 2062 m2ctx = p2.manifestctx()
2063 2063 mctx = m1ctx.copy()
2064 2064
2065 2065 m = mctx.read()
2066 2066 m1 = m1ctx.read()
2067 2067 m2 = m2ctx.read()
2068 2068
2069 2069 # check in files
2070 2070 added = []
2071 2071 changed = []
2072 2072 removed = list(ctx.removed())
2073 2073 linkrev = len(self)
2074 2074 self.ui.note(_("committing files:\n"))
2075 2075 for f in sorted(ctx.modified() + ctx.added()):
2076 2076 self.ui.note(f + "\n")
2077 2077 try:
2078 2078 fctx = ctx[f]
2079 2079 if fctx is None:
2080 2080 removed.append(f)
2081 2081 else:
2082 2082 added.append(f)
2083 2083 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2084 2084 trp, changed)
2085 2085 m.setflag(f, fctx.flags())
2086 2086 except OSError as inst:
2087 2087 self.ui.warn(_("trouble committing %s!\n") % f)
2088 2088 raise
2089 2089 except IOError as inst:
2090 2090 errcode = getattr(inst, 'errno', errno.ENOENT)
2091 2091 if error or errcode and errcode != errno.ENOENT:
2092 2092 self.ui.warn(_("trouble committing %s!\n") % f)
2093 2093 raise
2094 2094
2095 2095 # update manifest
2096 2096 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2097 2097 drop = [f for f in removed if f in m]
2098 2098 for f in drop:
2099 2099 del m[f]
2100 2100 files = changed + removed
2101 2101 md = None
2102 2102 if not files:
2103 2103                     # if no "files" actually changed in terms of the changelog,
2104 2104                     # try hard to detect an unmodified manifest entry so that the
2105 2105                     # exact same commit can be reproduced later on convert.
2106 2106 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2107 2107 if not files and md:
2108 2108 self.ui.debug('not reusing manifest (no file change in '
2109 2109 'changelog, but manifest differs)\n')
2110 2110 if files or md:
2111 2111 self.ui.note(_("committing manifest\n"))
2112 2112 mn = mctx.write(trp, linkrev,
2113 2113 p1.manifestnode(), p2.manifestnode(),
2114 2114 added, drop)
2115 2115 else:
2116 2116                     self.ui.debug('reusing manifest from p1 (listed files '
2117 2117 'actually unchanged)\n')
2118 2118 mn = p1.manifestnode()
2119 2119 else:
2120 2120 self.ui.debug('reusing manifest from p1 (no file change)\n')
2121 2121 mn = p1.manifestnode()
2122 2122 files = []
2123 2123
2124 2124 # update changelog
2125 2125 self.ui.note(_("committing changelog\n"))
2126 2126 self.changelog.delayupdate(tr)
2127 2127 n = self.changelog.add(mn, files, ctx.description(),
2128 2128 trp, p1.node(), p2.node(),
2129 2129 user, ctx.date(), ctx.extra().copy())
2130 2130 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2131 2131 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2132 2132 parent2=xp2)
2133 2133             # set the new commit in its proper phase
2134 2134 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2135 2135 if targetphase:
2136 2136                 # retracting the boundary does not alter the parent changeset.
2137 2137                 # if a parent has a higher phase, the resulting phase will
2138 2138                 # be compliant anyway
2139 2139                 #
2140 2140                 # if the minimal phase was 0 we don't need to retract anything
2141 2141 phases.registernew(self, tr, targetphase, [n])
2142 2142 tr.close()
2143 2143 return n
2144 2144 finally:
2145 2145 if tr:
2146 2146 tr.release()
2147 2147 lock.release()
2148 2148
2149 2149 @unfilteredmethod
2150 2150 def destroying(self):
2151 2151 '''Inform the repository that nodes are about to be destroyed.
2152 2152 Intended for use by strip and rollback, so there's a common
2153 2153 place for anything that has to be done before destroying history.
2154 2154
2155 2155 This is mostly useful for saving state that is in memory and waiting
2156 2156 to be flushed when the current lock is released. Because a call to
2157 2157 destroyed is imminent, the repo will be invalidated causing those
2158 2158 changes to stay in memory (waiting for the next unlock), or vanish
2159 2159 completely.
2160 2160 '''
2161 2161 # When using the same lock to commit and strip, the phasecache is left
2162 2162 # dirty after committing. Then when we strip, the repo is invalidated,
2163 2163 # causing those changes to disappear.
2164 2164 if '_phasecache' in vars(self):
2165 2165 self._phasecache.write()
2166 2166
2167 2167 @unfilteredmethod
2168 2168 def destroyed(self):
2169 2169 '''Inform the repository that nodes have been destroyed.
2170 2170 Intended for use by strip and rollback, so there's a common
2171 2171 place for anything that has to be done after destroying history.
2172 2172 '''
2173 2173 # When one tries to:
2174 2174 # 1) destroy nodes thus calling this method (e.g. strip)
2175 2175 # 2) use phasecache somewhere (e.g. commit)
2176 2176 #
2177 2177 # then 2) will fail because the phasecache contains nodes that were
2178 2178 # removed. We can either remove phasecache from the filecache,
2179 2179 # causing it to reload next time it is accessed, or simply filter
2180 2180 # the removed nodes now and write the updated cache.
2181 2181 self._phasecache.filterunknown(self)
2182 2182 self._phasecache.write()
2183 2183
2184 2184 # refresh all repository caches
2185 2185 self.updatecaches()
2186 2186
2187 2187 # Ensure the persistent tag cache is updated. Doing it now
2188 2188 # means that the tag cache only has to worry about destroyed
2189 2189 # heads immediately after a strip/rollback. That in turn
2190 2190 # guarantees that "cachetip == currenttip" (comparing both rev
2191 2191 # and node) always means no nodes have been added or destroyed.
2192 2192
2193 2193 # XXX this is suboptimal when qrefresh'ing: we strip the current
2194 2194 # head, refresh the tag cache, then immediately add a new head.
2195 2195 # But I think doing it this way is necessary for the "instant
2196 2196 # tag cache retrieval" case to work.
2197 2197 self.invalidate()
2198 2198
2199 2199 def status(self, node1='.', node2=None, match=None,
2200 2200 ignored=False, clean=False, unknown=False,
2201 2201 listsubrepos=False):
2202 2202 '''a convenience method that calls node1.status(node2)'''
2203 2203 return self[node1].status(node2, match, ignored, clean, unknown,
2204 2204 listsubrepos)
2205 2205
2206 2206 def addpostdsstatus(self, ps):
2207 2207 """Add a callback to run within the wlock, at the point at which status
2208 2208 fixups happen.
2209 2209
2210 2210 On status completion, callback(wctx, status) will be called with the
2211 2211 wlock held, unless the dirstate has changed from underneath or the wlock
2212 2212 couldn't be grabbed.
2213 2213
2214 2214 Callbacks should not capture and use a cached copy of the dirstate --
2215 2215 it might change in the meanwhile. Instead, they should access the
2216 2216 dirstate via wctx.repo().dirstate.
2217 2217
2218 2218 This list is emptied out after each status run -- extensions should
2219 2219         make sure they add to this list each time dirstate.status is called.
2220 2220 Extensions should also make sure they don't call this for statuses
2221 2221 that don't involve the dirstate.
2222 2222 """
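        # Illustrative callback shape (editor's sketch, hypothetical names):
        #
        #     def fixup(wctx, status):
        #         wctx.repo().ui.note('%d files modified\n' % len(status.modified))
        #     repo.addpostdsstatus(fixup)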
2223 2223
2224 2224 # The list is located here for uniqueness reasons -- it is actually
2225 2225 # managed by the workingctx, but that isn't unique per-repo.
2226 2226 self._postdsstatus.append(ps)
2227 2227
2228 2228 def postdsstatus(self):
2229 2229 """Used by workingctx to get the list of post-dirstate-status hooks."""
2230 2230 return self._postdsstatus
2231 2231
2232 2232 def clearpostdsstatus(self):
2233 2233 """Used by workingctx to clear post-dirstate-status hooks."""
2234 2234 del self._postdsstatus[:]
2235 2235
2236 2236 def heads(self, start=None):
2237 2237 if start is None:
2238 2238 cl = self.changelog
2239 2239 headrevs = reversed(cl.headrevs())
2240 2240 return [cl.node(rev) for rev in headrevs]
2241 2241
2242 2242 heads = self.changelog.heads(start)
2243 2243 # sort the output in rev descending order
2244 2244 return sorted(heads, key=self.changelog.rev, reverse=True)
2245 2245
2246 2246 def branchheads(self, branch=None, start=None, closed=False):
2247 2247 '''return a (possibly filtered) list of heads for the given branch
2248 2248
2249 2249 Heads are returned in topological order, from newest to oldest.
2250 2250 If branch is None, use the dirstate branch.
2251 2251 If start is not None, return only heads reachable from start.
2252 2252 If closed is True, return heads that are marked as closed as well.
2253 2253 '''
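        # Illustrative (editor's note):
        #
        #     repo.branchheads('default')              # open heads, newest first
        #     repo.branchheads('stable', closed=True)  # also include closed heads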
2254 2254 if branch is None:
2255 2255 branch = self[None].branch()
2256 2256 branches = self.branchmap()
2257 2257 if branch not in branches:
2258 2258 return []
2259 2259 # the cache returns heads ordered lowest to highest
2260 2260 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2261 2261 if start is not None:
2262 2262 # filter out the heads that cannot be reached from startrev
2263 2263 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2264 2264 bheads = [h for h in bheads if h in fbheads]
2265 2265 return bheads
2266 2266
2267 2267 def branches(self, nodes):
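        # Editor's note: for each node, follow first parents until a merge or
        # a root is reached, then report (start, stop, p1-of-stop, p2-of-stop);
        # apparently serving the legacy 'branches' wire-protocol command.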
2268 2268 if not nodes:
2269 2269 nodes = [self.changelog.tip()]
2270 2270 b = []
2271 2271 for n in nodes:
2272 2272 t = n
2273 2273 while True:
2274 2274 p = self.changelog.parents(n)
2275 2275 if p[1] != nullid or p[0] == nullid:
2276 2276 b.append((t, n, p[0], p[1]))
2277 2277 break
2278 2278 n = p[0]
2279 2279 return b
2280 2280
2281 2281 def between(self, pairs):
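        # Editor's note: for each (top, bottom) pair, walk first parents from
        # top towards bottom, collecting the nodes seen at exponentially
        # growing distances (1, 2, 4, 8, ...); apparently used by the legacy
        # discovery protocol to sample the graph between two known points.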
2282 2282 r = []
2283 2283
2284 2284 for top, bottom in pairs:
2285 2285 n, l, i = top, [], 0
2286 2286 f = 1
2287 2287
2288 2288 while n != bottom and n != nullid:
2289 2289 p = self.changelog.parents(n)[0]
2290 2290 if i == f:
2291 2291 l.append(n)
2292 2292 f = f * 2
2293 2293 n = p
2294 2294 i += 1
2295 2295
2296 2296 r.append(l)
2297 2297
2298 2298 return r
2299 2299
2300 2300 def checkpush(self, pushop):
2301 2301 """Extensions can override this function if additional checks have
2302 2302 to be performed before pushing, or call it if they override push
2303 2303 command.
2304 2304 """
2305 2305
2306 2306 @unfilteredpropertycache
2307 2307 def prepushoutgoinghooks(self):
2308 2308         """Return a util.hooks object whose callbacks are invoked with a
2309 2309         pushop (carrying repo, remote and outgoing) before pushing changesets.
2310 2310         """
2311 2311 return util.hooks()
2312 2312
2313 2313 def pushkey(self, namespace, key, old, new):
2314 2314 try:
2315 2315 tr = self.currenttransaction()
2316 2316 hookargs = {}
2317 2317 if tr is not None:
2318 2318 hookargs.update(tr.hookargs)
2319 2319 hookargs = pycompat.strkwargs(hookargs)
2320 2320 hookargs[r'namespace'] = namespace
2321 2321 hookargs[r'key'] = key
2322 2322 hookargs[r'old'] = old
2323 2323 hookargs[r'new'] = new
2324 2324 self.hook('prepushkey', throw=True, **hookargs)
2325 2325 except error.HookAbort as exc:
2326 2326 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2327 2327 if exc.hint:
2328 2328 self.ui.write_err(_("(%s)\n") % exc.hint)
2329 2329 return False
2330 2330 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2331 2331 ret = pushkey.push(self, namespace, key, old, new)
2332 2332 def runhook():
2333 2333 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2334 2334 ret=ret)
2335 2335 self._afterlock(runhook)
2336 2336 return ret
2337 2337
2338 2338 def listkeys(self, namespace):
2339 2339 self.hook('prelistkeys', throw=True, namespace=namespace)
2340 2340 self.ui.debug('listing keys for "%s"\n' % namespace)
2341 2341 values = pushkey.list(self, namespace)
2342 2342 self.hook('listkeys', namespace=namespace, values=values)
2343 2343 return values
2344 2344
2345 2345 def debugwireargs(self, one, two, three=None, four=None, five=None):
2346 2346 '''used to test argument passing over the wire'''
2347 2347 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2348 2348 pycompat.bytestr(four),
2349 2349 pycompat.bytestr(five))
2350 2350
2351 2351 def savecommitmessage(self, text):
2352 2352 fp = self.vfs('last-message.txt', 'wb')
2353 2353 try:
2354 2354 fp.write(text)
2355 2355 finally:
2356 2356 fp.close()
2357 2357 return self.pathto(fp.name[len(self.root) + 1:])
2358 2358
2359 2359 # used to avoid circular references so destructors work
2360 2360 def aftertrans(files):
2361 2361 renamefiles = [tuple(t) for t in files]
2362 2362 def a():
2363 2363 for vfs, src, dest in renamefiles:
2364 2364             # if src and dest refer to the same file, vfs.rename is a no-op,
2365 2365             # leaving both src and dest on disk. delete dest to make sure
2366 2366             # the rename can't be such a no-op.
2367 2367 vfs.tryunlink(dest)
2368 2368 try:
2369 2369 vfs.rename(src, dest)
2370 2370 except OSError: # journal file does not yet exist
2371 2371 pass
2372 2372 return a
2373 2373
2374 2374 def undoname(fn):
2375 2375 base, name = os.path.split(fn)
2376 2376 assert name.startswith('journal')
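    # e.g. (editor's illustration) undoname('journal.dirstate') -> 'undo.dirstate'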
2377 2377 return os.path.join(base, name.replace('journal', 'undo', 1))
2378 2378
2379 2379 def instance(ui, path, create, intents=None, createopts=None):
     2380     localpath = util.urllocalpath(path)
2380 2381     if create:
2381              createrepository(ui, path, createopts=createopts)
     2382         createrepository(ui, localpath, createopts=createopts)
2382 2383 
2383          return localrepository(ui, util.urllocalpath(path), intents=intents)
     2384     return localrepository(ui, localpath, intents=intents)
2384 2385
2385 2386 def islocal(path):
2386 2387 return True
2387 2388
2388 2389 def newreporequirements(ui, createopts=None):
2389 2390 """Determine the set of requirements for a new local repository.
2390 2391
2391 2392 Extensions can wrap this function to specify custom requirements for
2392 2393 new repositories.
2393 2394 """
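    # Illustrative extension wrapper (editor's sketch; 'exp-myfeature' is a
    # hypothetical requirement):
    #
    #     def wrapped(orig, ui, createopts=None):
    #         requirements = orig(ui, createopts=createopts)
    #         requirements.add('exp-myfeature')
    #         return requirements
    #     extensions.wrapfunction(localrepo, 'newreporequirements', wrapped)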
2394 2395 createopts = createopts or {}
2395 2396
2396 2397 requirements = {'revlogv1'}
2397 2398 if ui.configbool('format', 'usestore'):
2398 2399 requirements.add('store')
2399 2400 if ui.configbool('format', 'usefncache'):
2400 2401 requirements.add('fncache')
2401 2402 if ui.configbool('format', 'dotencode'):
2402 2403 requirements.add('dotencode')
2403 2404
2404 2405 compengine = ui.config('experimental', 'format.compression')
2405 2406 if compengine not in util.compengines:
2406 2407 raise error.Abort(_('compression engine %s defined by '
2407 2408 'experimental.format.compression not available') %
2408 2409 compengine,
2409 2410 hint=_('run "hg debuginstall" to list available '
2410 2411 'compression engines'))
2411 2412
2412 2413 # zlib is the historical default and doesn't need an explicit requirement.
2413 2414 if compengine != 'zlib':
2414 2415 requirements.add('exp-compression-%s' % compengine)
2415 2416
2416 2417 if scmutil.gdinitconfig(ui):
2417 2418 requirements.add('generaldelta')
2418 2419 if ui.configbool('experimental', 'treemanifest'):
2419 2420 requirements.add('treemanifest')
2420 2421 # experimental config: format.sparse-revlog
2421 2422 if ui.configbool('format', 'sparse-revlog'):
2422 2423 requirements.add(SPARSEREVLOG_REQUIREMENT)
2423 2424
2424 2425 revlogv2 = ui.config('experimental', 'revlogv2')
2425 2426 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2426 2427 requirements.remove('revlogv1')
2427 2428 # generaldelta is implied by revlogv2.
2428 2429 requirements.discard('generaldelta')
2429 2430 requirements.add(REVLOGV2_REQUIREMENT)
2430 2431 # experimental config: format.internal-phase
2431 2432 if ui.configbool('format', 'internal-phase'):
2432 2433 requirements.add('internal-phase')
2433 2434
2434 2435 if createopts.get('narrowfiles'):
2435 2436 requirements.add(repository.NARROW_REQUIREMENT)
2436 2437
2437 2438 return requirements
2438 2439
2439 2440 def filterknowncreateopts(ui, createopts):
2440 2441 """Filters a dict of repo creation options against options that are known.
2441 2442
2442 2443 Receives a dict of repo creation options and returns a dict of those
2443 2444 options that we don't know how to handle.
2444 2445
2445 2446 This function is called as part of repository creation. If the
2446 2447 returned dict contains any items, repository creation will not
2447 2448 be allowed, as it means there was a request to create a repository
2448 2449 with options not recognized by loaded code.
2449 2450
2450 2451 Extensions can wrap this function to filter out creation options
2451 2452 they know how to handle.
2452 2453 """
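    # Illustrative extension wrapper (editor's sketch; 'myopt' is a
    # hypothetical creation option the extension handles itself):
    #
    #     def filtered(orig, ui, createopts):
    #         unknown = orig(ui, createopts)
    #         unknown.pop('myopt', None)
    #         return unknown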
2453 2454 known = {'narrowfiles'}
2454 2455
2455 2456 return {k: v for k, v in createopts.items() if k not in known}
2456 2457
2457 2458 def createrepository(ui, path, createopts=None):
2458 2459 """Create a new repository in a vfs.
2459 2460
2460 2461 ``path`` path to the new repo's working directory.
2461 2462 ``createopts`` options for the new repository.
2462 2463 """
2463 2464 createopts = createopts or {}
2464 2465
2465 2466 unknownopts = filterknowncreateopts(ui, createopts)
2466 2467
2467 2468 if not isinstance(unknownopts, dict):
2468 2469 raise error.ProgrammingError('filterknowncreateopts() did not return '
2469 2470 'a dict')
2470 2471
2471 2472 if unknownopts:
2472 2473 raise error.Abort(_('unable to create repository because of unknown '
2473 2474 'creation option: %s') %
2474 2475                           ', '.join(sorted(unknownopts)),
2475 2476 hint=_('is a required extension not loaded?'))
2476 2477
2477 2478 requirements = newreporequirements(ui, createopts=createopts)
2478 2479
2479 2480 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
2480 2481 if not wdirvfs.exists():
2481 2482 wdirvfs.makedirs()
2482 2483
2483 2484 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
2484 2485 if hgvfs.exists():
2485 2486 raise error.RepoError(_('repository %s already exists') % path)
2486 2487
2487 2488 hgvfs.makedir(notindexed=True)
2488 2489
2489 2490 if b'store' in requirements:
2490 2491 hgvfs.mkdir(b'store')
2491 2492
2492 2493 # We create an invalid changelog outside the store so very old
2493 2494 # Mercurial versions (which didn't know about the requirements
2494 2495 # file) encounter an error on reading the changelog. This
2495 2496 # effectively locks out old clients and prevents them from
2496 2497 # mucking with a repo in an unknown format.
2497 2498 #
2498 2499 # The revlog header has version 2, which won't be recognized by
2499 2500 # such old clients.
2500 2501 hgvfs.append(b'00changelog.i',
2501 2502 b'\0\0\0\2 dummy changelog to prevent using the old repo '
2502 2503 b'layout')
2503 2504
2504 2505 scmutil.writerequires(hgvfs, requirements)