localrepo: do not cache auditor/nofsauditor which would make reference cycle...
Yuya Nishihara - r39348:9198e41d default
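For context, here is a minimal, self-contained sketch of the problem this changeset fixes. The Auditor/CachingRepo/PropertyRepo classes below are hypothetical stand-ins, not Mercurial's actual pathutil.pathauditor API: caching an auditor whose callback is a bound method creates the cycle repo -> auditor -> repo._checknested -> repo, so the repo can only be reclaimed by the cyclic garbage collector; exposing the auditor as a property that is rebuilt on each access stores no such cycle.

    import gc
    import weakref

    class Auditor(object):
        """Stand-in for a path auditor: it keeps a reference to its callback."""
        def __init__(self, callback):
            self.callback = callback  # a bound method references its instance

    class CachingRepo(object):
        """Old behavior: self -> auditor -> self._checknested -> self."""
        def __init__(self):
            self.auditor = Auditor(self._checknested)
        def _checknested(self, path):
            return False

    class PropertyRepo(object):
        """New behavior: the auditor is rebuilt on access; no cycle is stored."""
        @property
        def auditor(self):
            return Auditor(self._checknested)
        def _checknested(self, path):
            return False

    repo = CachingRepo()
    ref = weakref.ref(repo)
    del repo
    print(ref() is not None)  # True: the cycle keeps the repo alive...
    gc.collect()
    print(ref() is None)      # ...until the cycle collector runs

    repo = PropertyRepo()
    ref = weakref.ref(repo)
    del repo
    print(ref() is None)      # True: freed immediately by reference counting

The trade-off is that a new auditor is constructed on every property access; per the comments in the diff below, that cost is acceptable since the auditors are only used by context.workingctx.match and context.basectx.match to detect files in subrepos.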
@@ -1,2435 +1,2443 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 )
24 24 from . import (
25 25 bookmarks,
26 26 branchmap,
27 27 bundle2,
28 28 changegroup,
29 29 changelog,
30 30 color,
31 31 context,
32 32 dirstate,
33 33 dirstateguard,
34 34 discovery,
35 35 encoding,
36 36 error,
37 37 exchange,
38 38 extensions,
39 39 filelog,
40 40 hook,
41 41 lock as lockmod,
42 42 manifest,
43 43 match as matchmod,
44 44 merge as mergemod,
45 45 mergeutil,
46 46 namespaces,
47 47 narrowspec,
48 48 obsolete,
49 49 pathutil,
50 50 phases,
51 51 pushkey,
52 52 pycompat,
53 53 repository,
54 54 repoview,
55 55 revset,
56 56 revsetlang,
57 57 scmutil,
58 58 sparse,
59 59 store,
60 60 subrepoutil,
61 61 tags as tagsmod,
62 62 transaction,
63 63 txnutil,
64 64 util,
65 65 vfs as vfsmod,
66 66 )
67 67 from .utils import (
68 68 interfaceutil,
69 69 procutil,
70 70 stringutil,
71 71 )
72 72
73 73 release = lockmod.release
74 74 urlerr = util.urlerr
75 75 urlreq = util.urlreq
76 76
77 77 # set of (path, vfs-location) tuples. vfs-location is:
78 78 # - 'plain' for vfs relative paths
79 79 # - '' for svfs relative paths
80 80 _cachedfiles = set()
81 81
82 82 class _basefilecache(scmutil.filecache):
83 83 """All filecache usage on repo is done for logic that should be unfiltered
84 84 """
85 85 def __get__(self, repo, type=None):
86 86 if repo is None:
87 87 return self
88 88 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
89 89 def __set__(self, repo, value):
90 90 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
91 91 def __delete__(self, repo):
92 92 return super(_basefilecache, self).__delete__(repo.unfiltered())
93 93
94 94 class repofilecache(_basefilecache):
95 95 """filecache for files in .hg but outside of .hg/store"""
96 96 def __init__(self, *paths):
97 97 super(repofilecache, self).__init__(*paths)
98 98 for path in paths:
99 99 _cachedfiles.add((path, 'plain'))
100 100
101 101 def join(self, obj, fname):
102 102 return obj.vfs.join(fname)
103 103
104 104 class storecache(_basefilecache):
105 105 """filecache for files in the store"""
106 106 def __init__(self, *paths):
107 107 super(storecache, self).__init__(*paths)
108 108 for path in paths:
109 109 _cachedfiles.add((path, ''))
110 110
111 111 def join(self, obj, fname):
112 112 return obj.sjoin(fname)
113 113
114 114 def isfilecached(repo, name):
115 115 """check if a repo has already cached the "name" filecache-ed property
116 116
117 117 This returns a (cachedobj-or-None, iscached) tuple.
118 118 """
119 119 cacheentry = repo.unfiltered()._filecache.get(name, None)
120 120 if not cacheentry:
121 121 return None, False
122 122 return cacheentry.obj, True
123 123
124 124 class unfilteredpropertycache(util.propertycache):
125 125 """propertycache that applies to unfiltered repo only"""
126 126
127 127 def __get__(self, repo, type=None):
128 128 unfi = repo.unfiltered()
129 129 if unfi is repo:
130 130 return super(unfilteredpropertycache, self).__get__(unfi)
131 131 return getattr(unfi, self.name)
132 132
133 133 class filteredpropertycache(util.propertycache):
134 134 """propertycache that must take filtering into account"""
135 135
136 136 def cachevalue(self, obj, value):
137 137 object.__setattr__(obj, self.name, value)
138 138
139 139
140 140 def hasunfilteredcache(repo, name):
141 141 """check if a repo has an unfilteredpropertycache value for <name>"""
142 142 return name in vars(repo.unfiltered())
143 143
144 144 def unfilteredmethod(orig):
145 145 """decorate a method that always needs to be run on the unfiltered version"""
146 146 def wrapper(repo, *args, **kwargs):
147 147 return orig(repo.unfiltered(), *args, **kwargs)
148 148 return wrapper
149 149
150 150 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
151 151 'unbundle'}
152 152 legacycaps = moderncaps.union({'changegroupsubset'})
153 153
154 154 @interfaceutil.implementer(repository.ipeercommandexecutor)
155 155 class localcommandexecutor(object):
156 156 def __init__(self, peer):
157 157 self._peer = peer
158 158 self._sent = False
159 159 self._closed = False
160 160
161 161 def __enter__(self):
162 162 return self
163 163
164 164 def __exit__(self, exctype, excvalue, exctb):
165 165 self.close()
166 166
167 167 def callcommand(self, command, args):
168 168 if self._sent:
169 169 raise error.ProgrammingError('callcommand() cannot be used after '
170 170 'sendcommands()')
171 171
172 172 if self._closed:
173 173 raise error.ProgrammingError('callcommand() cannot be used after '
174 174 'close()')
175 175
176 176 # We don't need to support anything fancy. Just call the named
177 177 # method on the peer and return a resolved future.
178 178 fn = getattr(self._peer, pycompat.sysstr(command))
179 179
180 180 f = pycompat.futures.Future()
181 181
182 182 try:
183 183 result = fn(**pycompat.strkwargs(args))
184 184 except Exception:
185 185 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
186 186 else:
187 187 f.set_result(result)
188 188
189 189 return f
190 190
191 191 def sendcommands(self):
192 192 self._sent = True
193 193
194 194 def close(self):
195 195 self._closed = True
196 196
197 197 @interfaceutil.implementer(repository.ipeercommands)
198 198 class localpeer(repository.peer):
199 199 '''peer for a local repo; reflects only the most recent API'''
200 200
201 201 def __init__(self, repo, caps=None):
202 202 super(localpeer, self).__init__()
203 203
204 204 if caps is None:
205 205 caps = moderncaps.copy()
206 206 self._repo = repo.filtered('served')
207 207 self.ui = repo.ui
208 208 self._caps = repo._restrictcapabilities(caps)
209 209
210 210 # Begin of _basepeer interface.
211 211
212 212 def url(self):
213 213 return self._repo.url()
214 214
215 215 def local(self):
216 216 return self._repo
217 217
218 218 def peer(self):
219 219 return self
220 220
221 221 def canpush(self):
222 222 return True
223 223
224 224 def close(self):
225 225 self._repo.close()
226 226
227 227 # End of _basepeer interface.
228 228
229 229 # Begin of _basewirecommands interface.
230 230
231 231 def branchmap(self):
232 232 return self._repo.branchmap()
233 233
234 234 def capabilities(self):
235 235 return self._caps
236 236
237 237 def clonebundles(self):
238 238 return self._repo.tryread('clonebundles.manifest')
239 239
240 240 def debugwireargs(self, one, two, three=None, four=None, five=None):
241 241 """Used to test argument passing over the wire"""
242 242 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
243 243 pycompat.bytestr(four),
244 244 pycompat.bytestr(five))
245 245
246 246 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
247 247 **kwargs):
248 248 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
249 249 common=common, bundlecaps=bundlecaps,
250 250 **kwargs)[1]
251 251 cb = util.chunkbuffer(chunks)
252 252
253 253 if exchange.bundle2requested(bundlecaps):
254 254 # When requesting a bundle2, getbundle returns a stream to make the
255 255 # wire level function happier. We need to build a proper object
256 256 # from it in local peer.
257 257 return bundle2.getunbundler(self.ui, cb)
258 258 else:
259 259 return changegroup.getunbundler('01', cb, None)
260 260
261 261 def heads(self):
262 262 return self._repo.heads()
263 263
264 264 def known(self, nodes):
265 265 return self._repo.known(nodes)
266 266
267 267 def listkeys(self, namespace):
268 268 return self._repo.listkeys(namespace)
269 269
270 270 def lookup(self, key):
271 271 return self._repo.lookup(key)
272 272
273 273 def pushkey(self, namespace, key, old, new):
274 274 return self._repo.pushkey(namespace, key, old, new)
275 275
276 276 def stream_out(self):
277 277 raise error.Abort(_('cannot perform stream clone against local '
278 278 'peer'))
279 279
280 280 def unbundle(self, bundle, heads, url):
281 281 """apply a bundle on a repo
282 282
283 283 This function handles the repo locking itself."""
284 284 try:
285 285 try:
286 286 bundle = exchange.readbundle(self.ui, bundle, None)
287 287 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
288 288 if util.safehasattr(ret, 'getchunks'):
289 289 # This is a bundle20 object, turn it into an unbundler.
290 290 # This little dance should be dropped eventually when the
291 291 # API is finally improved.
292 292 stream = util.chunkbuffer(ret.getchunks())
293 293 ret = bundle2.getunbundler(self.ui, stream)
294 294 return ret
295 295 except Exception as exc:
296 296 # If the exception contains output salvaged from a bundle2
297 297 # reply, we need to make sure it is printed before continuing
298 298 # to fail. So we build a bundle2 with such output and consume
299 299 # it directly.
300 300 #
301 301 # This is not very elegant but allows a "simple" solution for
302 302 # issue4594
303 303 output = getattr(exc, '_bundle2salvagedoutput', ())
304 304 if output:
305 305 bundler = bundle2.bundle20(self._repo.ui)
306 306 for out in output:
307 307 bundler.addpart(out)
308 308 stream = util.chunkbuffer(bundler.getchunks())
309 309 b = bundle2.getunbundler(self.ui, stream)
310 310 bundle2.processbundle(self._repo, b)
311 311 raise
312 312 except error.PushRaced as exc:
313 313 raise error.ResponseError(_('push failed:'),
314 314 stringutil.forcebytestr(exc))
315 315
316 316 # End of _basewirecommands interface.
317 317
318 318 # Begin of peer interface.
319 319
320 320 def commandexecutor(self):
321 321 return localcommandexecutor(self)
322 322
323 323 # End of peer interface.
324 324
325 325 @interfaceutil.implementer(repository.ipeerlegacycommands)
326 326 class locallegacypeer(localpeer):
327 327 '''peer extension which implements legacy methods too; used for tests with
328 328 restricted capabilities'''
329 329
330 330 def __init__(self, repo):
331 331 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
332 332
333 333 # Begin of baselegacywirecommands interface.
334 334
335 335 def between(self, pairs):
336 336 return self._repo.between(pairs)
337 337
338 338 def branches(self, nodes):
339 339 return self._repo.branches(nodes)
340 340
341 341 def changegroup(self, nodes, source):
342 342 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
343 343 missingheads=self._repo.heads())
344 344 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
345 345
346 346 def changegroupsubset(self, bases, heads, source):
347 347 outgoing = discovery.outgoing(self._repo, missingroots=bases,
348 348 missingheads=heads)
349 349 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
350 350
351 351 # End of baselegacywirecommands interface.
352 352
353 353 # Increment the sub-version when the revlog v2 format changes to lock out old
354 354 # clients.
355 355 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
356 356
357 357 # A repository with the sparserevlog feature will have delta chains that
358 358 # can spread over a larger span. Sparse reading cuts these large spans into
359 359 # pieces, so that each piece isn't too big.
360 360 # Without the sparserevlog capability, reading from the repository could use
361 361 # huge amounts of memory, because the whole span would be read at once,
362 362 # including all the intermediate revisions that aren't pertinent for the chain.
363 363 # This is why once a repository has enabled sparse-read, it becomes required.
364 364 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
365 365
366 366 # Functions receiving (ui, features) that extensions can register to impact
367 367 # the ability to load repositories with custom requirements. Only
368 368 # functions defined in loaded extensions are called.
369 369 #
370 370 # The function receives a set of requirement strings that the repository
371 371 # is capable of opening. Functions will typically add elements to the
372 372 # set to reflect that the extension knows how to handle those requirements.
373 373 featuresetupfuncs = set()
374 374
375 375 @interfaceutil.implementer(repository.completelocalrepository)
376 376 class localrepository(object):
377 377
378 378 # obsolete experimental requirements:
379 379 # - manifestv2: An experimental new manifest format that allowed
380 380 # for stem compression of long paths. The experiment ended up not
381 381 # being successful (repository sizes went up due to worse delta
382 382 # chains), and the code was deleted in 4.6.
383 383 supportedformats = {
384 384 'revlogv1',
385 385 'generaldelta',
386 386 'treemanifest',
387 387 REVLOGV2_REQUIREMENT,
388 388 SPARSEREVLOG_REQUIREMENT,
389 389 }
390 390 _basesupported = supportedformats | {
391 391 'store',
392 392 'fncache',
393 393 'shared',
394 394 'relshared',
395 395 'dotencode',
396 396 'exp-sparse',
397 397 'internal-phase'
398 398 }
399 399 openerreqs = {
400 400 'revlogv1',
401 401 'generaldelta',
402 402 'treemanifest',
403 403 }
404 404
405 405 # list of prefixes for files which can be written without 'wlock'
406 406 # Extensions should extend this list when needed
407 407 _wlockfreeprefix = {
408 408 # We might consider requiring 'wlock' for the next
409 409 # two, but pretty much all the existing code assumes
410 410 # wlock is not needed, so we keep them excluded for
411 411 # now.
412 412 'hgrc',
413 413 'requires',
414 414 # XXX cache is a complicated business; someone
415 415 # should investigate this in depth at some point
416 416 'cache/',
417 417 # XXX shouldn't dirstate be covered by the wlock?
418 418 'dirstate',
419 419 # XXX bisect was still a bit too messy at the time
420 420 # this changeset was introduced. Someone should fix
421 421 # the remaining bit and drop this line
422 422 'bisect.state',
423 423 }
424 424
425 425 def __init__(self, baseui, path, create=False, intents=None):
426 426 self.requirements = set()
427 427 self.filtername = None
428 428 # wvfs: rooted at the repository root, used to access the working copy
429 429 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
430 430 # vfs: rooted at .hg, used to access repo files outside of .hg/store
431 431 self.vfs = None
432 432 # svfs: usually rooted at .hg/store, used to access repository history
433 433 # If this is a shared repository, this vfs may point to another
434 434 # repository's .hg/store directory.
435 435 self.svfs = None
436 436 self.root = self.wvfs.base
437 437 self.path = self.wvfs.join(".hg")
438 438 self.origroot = path
439 # This is only used by context.workingctx.match in order to
440 # detect files in subrepos.
441 self.auditor = pathutil.pathauditor(
442 self.root, callback=self._checknested)
443 # This is only used by context.basectx.match in order to detect
444 # files in subrepos.
445 self.nofsauditor = pathutil.pathauditor(
446 self.root, callback=self._checknested, realfs=False, cached=True)
447 439 self.baseui = baseui
448 440 self.ui = baseui.copy()
449 441 self.ui.copy = baseui.copy # prevent copying repo configuration
450 442 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
451 443 if (self.ui.configbool('devel', 'all-warnings') or
452 444 self.ui.configbool('devel', 'check-locks')):
453 445 self.vfs.audit = self._getvfsward(self.vfs.audit)
454 446 # A list of callbacks to shape the phase if no data were found.
455 447 # Callbacks are in the form: func(repo, roots) --> processed root.
456 448 # This list is to be filled by extensions during repo setup
457 449 self._phasedefaults = []
458 450 try:
459 451 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
460 452 self._loadextensions()
461 453 except IOError:
462 454 pass
463 455
464 456 if featuresetupfuncs:
465 457 self.supported = set(self._basesupported) # use private copy
466 458 extmods = set(m.__name__ for n, m
467 459 in extensions.extensions(self.ui))
468 460 for setupfunc in featuresetupfuncs:
469 461 if setupfunc.__module__ in extmods:
470 462 setupfunc(self.ui, self.supported)
471 463 else:
472 464 self.supported = self._basesupported
473 465 color.setup(self.ui)
474 466
475 467 # Add compression engines.
476 468 for name in util.compengines:
477 469 engine = util.compengines[name]
478 470 if engine.revlogheader():
479 471 self.supported.add('exp-compression-%s' % name)
480 472
481 473 if not self.vfs.isdir():
482 474 if create:
483 475 self.requirements = newreporequirements(self)
484 476
485 477 if not self.wvfs.exists():
486 478 self.wvfs.makedirs()
487 479 self.vfs.makedir(notindexed=True)
488 480
489 481 if 'store' in self.requirements:
490 482 self.vfs.mkdir("store")
491 483
492 484 # create an invalid changelog
493 485 self.vfs.append(
494 486 "00changelog.i",
495 487 '\0\0\0\2' # represents revlogv2
496 488 ' dummy changelog to prevent using the old repo layout'
497 489 )
498 490 else:
499 491 try:
500 492 self.vfs.stat()
501 493 except OSError as inst:
502 494 if inst.errno != errno.ENOENT:
503 495 raise
504 496 raise error.RepoError(_("repository %s not found") % path)
505 497 elif create:
506 498 raise error.RepoError(_("repository %s already exists") % path)
507 499 else:
508 500 try:
509 501 self.requirements = scmutil.readrequires(
510 502 self.vfs, self.supported)
511 503 except IOError as inst:
512 504 if inst.errno != errno.ENOENT:
513 505 raise
514 506
515 507 cachepath = self.vfs.join('cache')
516 508 self.sharedpath = self.path
517 509 try:
518 510 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
519 511 if 'relshared' in self.requirements:
520 512 sharedpath = self.vfs.join(sharedpath)
521 513 vfs = vfsmod.vfs(sharedpath, realpath=True)
522 514 cachepath = vfs.join('cache')
523 515 s = vfs.base
524 516 if not vfs.exists():
525 517 raise error.RepoError(
526 518 _('.hg/sharedpath points to nonexistent directory %s') % s)
527 519 self.sharedpath = s
528 520 except IOError as inst:
529 521 if inst.errno != errno.ENOENT:
530 522 raise
531 523
532 524 if 'exp-sparse' in self.requirements and not sparse.enabled:
533 525 raise error.RepoError(_('repository is using sparse feature but '
534 526 'sparse is not enabled; enable the '
535 527 '"sparse" extensions to access'))
536 528
537 529 self.store = store.store(
538 530 self.requirements, self.sharedpath,
539 531 lambda base: vfsmod.vfs(base, cacheaudited=True))
540 532 self.spath = self.store.path
541 533 self.svfs = self.store.vfs
542 534 self.sjoin = self.store.join
543 535 self.vfs.createmode = self.store.createmode
544 536 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
545 537 self.cachevfs.createmode = self.store.createmode
546 538 if (self.ui.configbool('devel', 'all-warnings') or
547 539 self.ui.configbool('devel', 'check-locks')):
548 540 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
549 541 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
550 542 else: # standard vfs
551 543 self.svfs.audit = self._getsvfsward(self.svfs.audit)
552 544 self._applyopenerreqs()
553 545 if create:
554 546 self._writerequirements()
555 547
556 548 self._dirstatevalidatewarned = False
557 549
558 550 self._branchcaches = {}
559 551 self._revbranchcache = None
560 552 self._filterpats = {}
561 553 self._datafilters = {}
562 554 self._transref = self._lockref = self._wlockref = None
563 555
564 556 # A cache for various files under .hg/ that tracks file changes,
565 557 # (used by the filecache decorator)
566 558 #
567 559 # Maps a property name to its util.filecacheentry
568 560 self._filecache = {}
569 561
570 562 # hold sets of revision to be filtered
571 563 # should be cleared when something might have changed the filter value:
572 564 # - new changesets,
573 565 # - phase change,
574 566 # - new obsolescence marker,
575 567 # - working directory parent change,
576 568 # - bookmark changes
577 569 self.filteredrevcache = {}
578 570
579 571 # post-dirstate-status hooks
580 572 self._postdsstatus = []
581 573
582 574 # generic mapping between names and nodes
583 575 self.names = namespaces.namespaces()
584 576
585 577 # Key to signature value.
586 578 self._sparsesignaturecache = {}
587 579 # Signature to cached matcher instance.
588 580 self._sparsematchercache = {}
589 581
590 582 def _getvfsward(self, origfunc):
591 583 """build a ward for self.vfs"""
592 584 rref = weakref.ref(self)
593 585 def checkvfs(path, mode=None):
594 586 ret = origfunc(path, mode=mode)
595 587 repo = rref()
596 588 if (repo is None
597 589 or not util.safehasattr(repo, '_wlockref')
598 590 or not util.safehasattr(repo, '_lockref')):
599 591 return
600 592 if mode in (None, 'r', 'rb'):
601 593 return
602 594 if path.startswith(repo.path):
603 595 # truncate name relative to the repository (.hg)
604 596 path = path[len(repo.path) + 1:]
605 597 if path.startswith('cache/'):
606 598 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
607 599 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
608 600 if path.startswith('journal.'):
609 601 # journal is covered by 'lock'
610 602 if repo._currentlock(repo._lockref) is None:
611 603 repo.ui.develwarn('write with no lock: "%s"' % path,
612 604 stacklevel=2, config='check-locks')
613 605 elif repo._currentlock(repo._wlockref) is None:
614 606 # rest of vfs files are covered by 'wlock'
615 607 #
616 608 # exclude special files
617 609 for prefix in self._wlockfreeprefix:
618 610 if path.startswith(prefix):
619 611 return
620 612 repo.ui.develwarn('write with no wlock: "%s"' % path,
621 613 stacklevel=2, config='check-locks')
622 614 return ret
623 615 return checkvfs
624 616
625 617 def _getsvfsward(self, origfunc):
626 618 """build a ward for self.svfs"""
627 619 rref = weakref.ref(self)
628 620 def checksvfs(path, mode=None):
629 621 ret = origfunc(path, mode=mode)
630 622 repo = rref()
631 623 if repo is None or not util.safehasattr(repo, '_lockref'):
632 624 return
633 625 if mode in (None, 'r', 'rb'):
634 626 return
635 627 if path.startswith(repo.sharedpath):
636 628 # truncate name relative to the repository (.hg)
637 629 path = path[len(repo.sharedpath) + 1:]
638 630 if repo._currentlock(repo._lockref) is None:
639 631 repo.ui.develwarn('write with no lock: "%s"' % path,
640 632 stacklevel=3)
641 633 return ret
642 634 return checksvfs
643 635
644 636 def close(self):
645 637 self._writecaches()
646 638
647 639 def _loadextensions(self):
648 640 extensions.loadall(self.ui)
649 641
650 642 def _writecaches(self):
651 643 if self._revbranchcache:
652 644 self._revbranchcache.write()
653 645
654 646 def _restrictcapabilities(self, caps):
655 647 if self.ui.configbool('experimental', 'bundle2-advertise'):
656 648 caps = set(caps)
657 649 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
658 650 role='client'))
659 651 caps.add('bundle2=' + urlreq.quote(capsblob))
660 652 return caps
661 653
662 654 def _applyopenerreqs(self):
663 655 self.svfs.options = dict((r, 1) for r in self.requirements
664 656 if r in self.openerreqs)
665 657 # experimental config: format.chunkcachesize
666 658 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
667 659 if chunkcachesize is not None:
668 660 self.svfs.options['chunkcachesize'] = chunkcachesize
669 661 # experimental config: format.maxchainlen
670 662 maxchainlen = self.ui.configint('format', 'maxchainlen')
671 663 if maxchainlen is not None:
672 664 self.svfs.options['maxchainlen'] = maxchainlen
673 665 # experimental config: format.manifestcachesize
674 666 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
675 667 if manifestcachesize is not None:
676 668 self.svfs.options['manifestcachesize'] = manifestcachesize
677 669 deltabothparents = self.ui.configbool('storage',
678 670 'revlog.optimize-delta-parent-choice')
679 671 self.svfs.options['deltabothparents'] = deltabothparents
680 672 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
681 673 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
682 674 if 0 <= chainspan:
683 675 self.svfs.options['maxdeltachainspan'] = chainspan
684 676 mmapindexthreshold = self.ui.configbytes('experimental',
685 677 'mmapindexthreshold')
686 678 if mmapindexthreshold is not None:
687 679 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
688 680 withsparseread = self.ui.configbool('experimental', 'sparse-read')
689 681 srdensitythres = float(self.ui.config('experimental',
690 682 'sparse-read.density-threshold'))
691 683 srmingapsize = self.ui.configbytes('experimental',
692 684 'sparse-read.min-gap-size')
693 685 self.svfs.options['with-sparse-read'] = withsparseread
694 686 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
695 687 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
696 688 sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
697 689 self.svfs.options['sparse-revlog'] = sparserevlog
698 690 if sparserevlog:
699 691 self.svfs.options['generaldelta'] = True
700 692
701 693 for r in self.requirements:
702 694 if r.startswith('exp-compression-'):
703 695 self.svfs.options['compengine'] = r[len('exp-compression-'):]
704 696
705 697 # TODO move "revlogv2" to openerreqs once finalized.
706 698 if REVLOGV2_REQUIREMENT in self.requirements:
707 699 self.svfs.options['revlogv2'] = True
708 700
709 701 def _writerequirements(self):
710 702 scmutil.writerequires(self.vfs, self.requirements)
711 703
704 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
705 # self -> auditor -> self._checknested -> self
706
707 @property
708 def auditor(self):
709 # This is only used by context.workingctx.match in order to
710 # detect files in subrepos.
711 return pathutil.pathauditor(self.root, callback=self._checknested)
712
713 @property
714 def nofsauditor(self):
715 # This is only used by context.basectx.match in order to detect
716 # files in subrepos.
717 return pathutil.pathauditor(self.root, callback=self._checknested,
718 realfs=False, cached=True)
719
712 720 def _checknested(self, path):
713 721 """Determine if path is a legal nested repository."""
714 722 if not path.startswith(self.root):
715 723 return False
716 724 subpath = path[len(self.root) + 1:]
717 725 normsubpath = util.pconvert(subpath)
718 726
719 727 # XXX: Checking against the current working copy is wrong in
720 728 # the sense that it can reject things like
721 729 #
722 730 # $ hg cat -r 10 sub/x.txt
723 731 #
724 732 # if sub/ is no longer a subrepository in the working copy
725 733 # parent revision.
726 734 #
727 735 # However, it can of course also allow things that would have
728 736 # been rejected before, such as the above cat command if sub/
729 737 # is a subrepository now, but was a normal directory before.
730 738 # The old path auditor would have rejected by mistake since it
731 739 # panics when it sees sub/.hg/.
732 740 #
733 741 # All in all, checking against the working copy seems sensible
734 742 # since we want to prevent access to nested repositories on
735 743 # the filesystem *now*.
736 744 ctx = self[None]
737 745 parts = util.splitpath(subpath)
738 746 while parts:
739 747 prefix = '/'.join(parts)
740 748 if prefix in ctx.substate:
741 749 if prefix == normsubpath:
742 750 return True
743 751 else:
744 752 sub = ctx.sub(prefix)
745 753 return sub.checknested(subpath[len(prefix) + 1:])
746 754 else:
747 755 parts.pop()
748 756 return False
749 757
750 758 def peer(self):
751 759 return localpeer(self) # not cached to avoid reference cycle
752 760
753 761 def unfiltered(self):
754 762 """Return unfiltered version of the repository
755 763
756 764 Intended to be overwritten by filtered repo."""
757 765 return self
758 766
759 767 def filtered(self, name, visibilityexceptions=None):
760 768 """Return a filtered version of a repository"""
761 769 cls = repoview.newtype(self.unfiltered().__class__)
762 770 return cls(self, name, visibilityexceptions)
763 771
764 772 @repofilecache('bookmarks', 'bookmarks.current')
765 773 def _bookmarks(self):
766 774 return bookmarks.bmstore(self)
767 775
768 776 @property
769 777 def _activebookmark(self):
770 778 return self._bookmarks.active
771 779
772 780 # _phasesets depend on changelog. What we need is to call
773 781 # _phasecache.invalidate() if '00changelog.i' was changed, but it
774 782 # can't be easily expressed in the filecache mechanism.
775 783 @storecache('phaseroots', '00changelog.i')
776 784 def _phasecache(self):
777 785 return phases.phasecache(self, self._phasedefaults)
778 786
779 787 @storecache('obsstore')
780 788 def obsstore(self):
781 789 return obsolete.makestore(self.ui, self)
782 790
783 791 @storecache('00changelog.i')
784 792 def changelog(self):
785 793 return changelog.changelog(self.svfs,
786 794 trypending=txnutil.mayhavepending(self.root))
787 795
788 796 def _constructmanifest(self):
789 797 # This is a temporary function while we migrate from manifest to
790 798 # manifestlog. It allows bundlerepo and unionrepo to intercept the
791 799 # manifest creation.
792 800 return manifest.manifestrevlog(self.svfs)
793 801
794 802 @storecache('00manifest.i')
795 803 def manifestlog(self):
796 804 return manifest.manifestlog(self.svfs, self)
797 805
798 806 @repofilecache('dirstate')
799 807 def dirstate(self):
800 808 return self._makedirstate()
801 809
802 810 def _makedirstate(self):
803 811 """Extension point for wrapping the dirstate per-repo."""
804 812 sparsematchfn = lambda: sparse.matcher(self)
805 813
806 814 return dirstate.dirstate(self.vfs, self.ui, self.root,
807 815 self._dirstatevalidate, sparsematchfn)
808 816
809 817 def _dirstatevalidate(self, node):
810 818 try:
811 819 self.changelog.rev(node)
812 820 return node
813 821 except error.LookupError:
814 822 if not self._dirstatevalidatewarned:
815 823 self._dirstatevalidatewarned = True
816 824 self.ui.warn(_("warning: ignoring unknown"
817 825 " working parent %s!\n") % short(node))
818 826 return nullid
819 827
820 828 @storecache(narrowspec.FILENAME)
821 829 def narrowpats(self):
822 830 """matcher patterns for this repository's narrowspec
823 831
824 832 A tuple of (includes, excludes).
825 833 """
826 834 source = self
827 835 if self.shared():
828 836 from . import hg
829 837 source = hg.sharedreposource(self)
830 838 return narrowspec.load(source)
831 839
832 840 @storecache(narrowspec.FILENAME)
833 841 def _narrowmatch(self):
834 842 if repository.NARROW_REQUIREMENT not in self.requirements:
835 843 return matchmod.always(self.root, '')
836 844 include, exclude = self.narrowpats
837 845 return narrowspec.match(self.root, include=include, exclude=exclude)
838 846
839 847 # TODO(martinvonz): make this property-like instead?
840 848 def narrowmatch(self):
841 849 return self._narrowmatch
842 850
843 851 def setnarrowpats(self, newincludes, newexcludes):
844 852 target = self
845 853 if self.shared():
846 854 from . import hg
847 855 target = hg.sharedreposource(self)
848 856 narrowspec.save(target, newincludes, newexcludes)
849 857 self.invalidate(clearfilecache=True)
850 858
851 859 def __getitem__(self, changeid):
852 860 if changeid is None:
853 861 return context.workingctx(self)
854 862 if isinstance(changeid, context.basectx):
855 863 return changeid
856 864 if isinstance(changeid, slice):
857 865 # wdirrev isn't contiguous so the slice shouldn't include it
858 866 return [context.changectx(self, i)
859 867 for i in pycompat.xrange(*changeid.indices(len(self)))
860 868 if i not in self.changelog.filteredrevs]
861 869 try:
862 870 return context.changectx(self, changeid)
863 871 except error.WdirUnsupported:
864 872 return context.workingctx(self)
865 873
866 874 def __contains__(self, changeid):
867 875 """True if the given changeid exists
868 876
869 877 error.AmbiguousPrefixLookupError is raised if an ambiguous node is
870 878 specified.
871 879 """
872 880 try:
873 881 self[changeid]
874 882 return True
875 883 except error.RepoLookupError:
876 884 return False
877 885
878 886 def __nonzero__(self):
879 887 return True
880 888
881 889 __bool__ = __nonzero__
882 890
883 891 def __len__(self):
884 892 # no need to pay the cost of repoview.changelog
885 893 unfi = self.unfiltered()
886 894 return len(unfi.changelog)
887 895
888 896 def __iter__(self):
889 897 return iter(self.changelog)
890 898
891 899 def revs(self, expr, *args):
892 900 '''Find revisions matching a revset.
893 901
894 902 The revset is specified as a string ``expr`` that may contain
895 903 %-formatting to escape certain types. See ``revsetlang.formatspec``.
896 904
897 905 Revset aliases from the configuration are not expanded. To expand
898 906 user aliases, consider calling ``scmutil.revrange()`` or
899 907 ``repo.anyrevs([expr], user=True)``.
900 908
901 909 Returns a revset.abstractsmartset, which is a list-like interface
902 910 that contains integer revisions.
903 911 '''
904 912 expr = revsetlang.formatspec(expr, *args)
905 913 m = revset.match(None, expr)
906 914 return m(self)
907 915
908 916 def set(self, expr, *args):
909 917 '''Find revisions matching a revset and emit changectx instances.
910 918
911 919 This is a convenience wrapper around ``revs()`` that iterates the
912 920 result and is a generator of changectx instances.
913 921
914 922 Revset aliases from the configuration are not expanded. To expand
915 923 user aliases, consider calling ``scmutil.revrange()``.
916 924 '''
917 925 for r in self.revs(expr, *args):
918 926 yield self[r]
919 927
920 928 def anyrevs(self, specs, user=False, localalias=None):
921 929 '''Find revisions matching one of the given revsets.
922 930
923 931 Revset aliases from the configuration are not expanded by default. To
924 932 expand user aliases, specify ``user=True``. To provide some local
925 933 definitions overriding user aliases, set ``localalias`` to
926 934 ``{name: definitionstring}``.
927 935 '''
928 936 if user:
929 937 m = revset.matchany(self.ui, specs,
930 938 lookup=revset.lookupfn(self),
931 939 localalias=localalias)
932 940 else:
933 941 m = revset.matchany(None, specs, localalias=localalias)
934 942 return m(self)
935 943
936 944 def url(self):
937 945 return 'file:' + self.root
938 946
939 947 def hook(self, name, throw=False, **args):
940 948 """Call a hook, passing this repo instance.
941 949
942 950 This is a convenience method to aid invoking hooks. Extensions likely
943 951 won't call this unless they have registered a custom hook or are
944 952 replacing code that is expected to call a hook.
945 953 """
946 954 return hook.hook(self.ui, self, name, throw, **args)
947 955
948 956 @filteredpropertycache
949 957 def _tagscache(self):
950 958 '''Returns a tagscache object that contains various tags related
951 959 caches.'''
952 960
953 961 # This simplifies its cache management by having one decorated
954 962 # function (this one) and the rest simply fetch things from it.
955 963 class tagscache(object):
956 964 def __init__(self):
957 965 # These two define the set of tags for this repository. tags
958 966 # maps tag name to node; tagtypes maps tag name to 'global' or
959 967 # 'local'. (Global tags are defined by .hgtags across all
960 968 # heads, and local tags are defined in .hg/localtags.)
961 969 # They constitute the in-memory cache of tags.
962 970 self.tags = self.tagtypes = None
963 971
964 972 self.nodetagscache = self.tagslist = None
965 973
966 974 cache = tagscache()
967 975 cache.tags, cache.tagtypes = self._findtags()
968 976
969 977 return cache
970 978
971 979 def tags(self):
972 980 '''return a mapping of tag to node'''
973 981 t = {}
974 982 if self.changelog.filteredrevs:
975 983 tags, tt = self._findtags()
976 984 else:
977 985 tags = self._tagscache.tags
978 986 for k, v in tags.iteritems():
979 987 try:
980 988 # ignore tags to unknown nodes
981 989 self.changelog.rev(v)
982 990 t[k] = v
983 991 except (error.LookupError, ValueError):
984 992 pass
985 993 return t
986 994
987 995 def _findtags(self):
988 996 '''Do the hard work of finding tags. Return a pair of dicts
989 997 (tags, tagtypes) where tags maps tag name to node, and tagtypes
990 998 maps tag name to a string like \'global\' or \'local\'.
991 999 Subclasses or extensions are free to add their own tags, but
992 1000 should be aware that the returned dicts will be retained for the
993 1001 duration of the localrepo object.'''
994 1002
995 1003 # XXX what tagtype should subclasses/extensions use? Currently
996 1004 # mq and bookmarks add tags, but do not set the tagtype at all.
997 1005 # Should each extension invent its own tag type? Should there
998 1006 # be one tagtype for all such "virtual" tags? Or is the status
999 1007 # quo fine?
1000 1008
1001 1009
1002 1010 # map tag name to (node, hist)
1003 1011 alltags = tagsmod.findglobaltags(self.ui, self)
1004 1012 # map tag name to tag type
1005 1013 tagtypes = dict((tag, 'global') for tag in alltags)
1006 1014
1007 1015 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1008 1016
1009 1017 # Build the return dicts. Have to re-encode tag names because
1010 1018 # the tags module always uses UTF-8 (in order not to lose info
1011 1019 # writing to the cache), but the rest of Mercurial wants them in
1012 1020 # local encoding.
1013 1021 tags = {}
1014 1022 for (name, (node, hist)) in alltags.iteritems():
1015 1023 if node != nullid:
1016 1024 tags[encoding.tolocal(name)] = node
1017 1025 tags['tip'] = self.changelog.tip()
1018 1026 tagtypes = dict([(encoding.tolocal(name), value)
1019 1027 for (name, value) in tagtypes.iteritems()])
1020 1028 return (tags, tagtypes)
1021 1029
1022 1030 def tagtype(self, tagname):
1023 1031 '''
1024 1032 return the type of the given tag. result can be:
1025 1033
1026 1034 'local' : a local tag
1027 1035 'global' : a global tag
1028 1036 None : tag does not exist
1029 1037 '''
1030 1038
1031 1039 return self._tagscache.tagtypes.get(tagname)
1032 1040
1033 1041 def tagslist(self):
1034 1042 '''return a list of tags ordered by revision'''
1035 1043 if not self._tagscache.tagslist:
1036 1044 l = []
1037 1045 for t, n in self.tags().iteritems():
1038 1046 l.append((self.changelog.rev(n), t, n))
1039 1047 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1040 1048
1041 1049 return self._tagscache.tagslist
1042 1050
1043 1051 def nodetags(self, node):
1044 1052 '''return the tags associated with a node'''
1045 1053 if not self._tagscache.nodetagscache:
1046 1054 nodetagscache = {}
1047 1055 for t, n in self._tagscache.tags.iteritems():
1048 1056 nodetagscache.setdefault(n, []).append(t)
1049 1057 for tags in nodetagscache.itervalues():
1050 1058 tags.sort()
1051 1059 self._tagscache.nodetagscache = nodetagscache
1052 1060 return self._tagscache.nodetagscache.get(node, [])
1053 1061
1054 1062 def nodebookmarks(self, node):
1055 1063 """return the list of bookmarks pointing to the specified node"""
1056 1064 return self._bookmarks.names(node)
1057 1065
1058 1066 def branchmap(self):
1059 1067 '''returns a dictionary {branch: [branchheads]} with branchheads
1060 1068 ordered by increasing revision number'''
1061 1069 branchmap.updatecache(self)
1062 1070 return self._branchcaches[self.filtername]
1063 1071
1064 1072 @unfilteredmethod
1065 1073 def revbranchcache(self):
1066 1074 if not self._revbranchcache:
1067 1075 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1068 1076 return self._revbranchcache
1069 1077
1070 1078 def branchtip(self, branch, ignoremissing=False):
1071 1079 '''return the tip node for a given branch
1072 1080
1073 1081 If ignoremissing is True, then this method will not raise an error.
1074 1082 This is helpful for callers that only expect None for a missing branch
1075 1083 (e.g. namespace).
1076 1084
1077 1085 '''
1078 1086 try:
1079 1087 return self.branchmap().branchtip(branch)
1080 1088 except KeyError:
1081 1089 if not ignoremissing:
1082 1090 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1083 1091 else:
1084 1092 pass
1085 1093
1086 1094 def lookup(self, key):
1087 1095 return scmutil.revsymbol(self, key).node()
1088 1096
1089 1097 def lookupbranch(self, key):
1090 1098 if key in self.branchmap():
1091 1099 return key
1092 1100
1093 1101 return scmutil.revsymbol(self, key).branch()
1094 1102
1095 1103 def known(self, nodes):
1096 1104 cl = self.changelog
1097 1105 nm = cl.nodemap
1098 1106 filtered = cl.filteredrevs
1099 1107 result = []
1100 1108 for n in nodes:
1101 1109 r = nm.get(n)
1102 1110 resp = not (r is None or r in filtered)
1103 1111 result.append(resp)
1104 1112 return result
1105 1113
1106 1114 def local(self):
1107 1115 return self
1108 1116
1109 1117 def publishing(self):
1110 1118 # it's safe (and desirable) to trust the publish flag unconditionally
1111 1119 # so that we don't finalize changes shared between users via ssh or nfs
1112 1120 return self.ui.configbool('phases', 'publish', untrusted=True)
1113 1121
1114 1122 def cancopy(self):
1115 1123 # so statichttprepo's override of local() works
1116 1124 if not self.local():
1117 1125 return False
1118 1126 if not self.publishing():
1119 1127 return True
1120 1128 # if publishing we can't copy if there is filtered content
1121 1129 return not self.filtered('visible').changelog.filteredrevs
1122 1130
1123 1131 def shared(self):
1124 1132 '''the type of shared repository (None if not shared)'''
1125 1133 if self.sharedpath != self.path:
1126 1134 return 'store'
1127 1135 return None
1128 1136
1129 1137 def wjoin(self, f, *insidef):
1130 1138 return self.vfs.reljoin(self.root, f, *insidef)
1131 1139
1132 1140 def file(self, f):
1133 1141 if f[0] == '/':
1134 1142 f = f[1:]
1135 1143 return filelog.filelog(self.svfs, f)
1136 1144
1137 1145 def setparents(self, p1, p2=nullid):
1138 1146 with self.dirstate.parentchange():
1139 1147 copies = self.dirstate.setparents(p1, p2)
1140 1148 pctx = self[p1]
1141 1149 if copies:
1142 1150 # Adjust copy records; the dirstate cannot do it, as it
1143 1151 # requires access to the parents' manifests. Preserve them
1144 1152 # only for entries added to the first parent.
1145 1153 for f in copies:
1146 1154 if f not in pctx and copies[f] in pctx:
1147 1155 self.dirstate.copy(copies[f], f)
1148 1156 if p2 == nullid:
1149 1157 for f, s in sorted(self.dirstate.copies().items()):
1150 1158 if f not in pctx and s not in pctx:
1151 1159 self.dirstate.copy(None, f)
1152 1160
1153 1161 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1154 1162 """changeid can be a changeset revision, node, or tag.
1155 1163 fileid can be a file revision or node."""
1156 1164 return context.filectx(self, path, changeid, fileid,
1157 1165 changectx=changectx)
1158 1166
1159 1167 def getcwd(self):
1160 1168 return self.dirstate.getcwd()
1161 1169
1162 1170 def pathto(self, f, cwd=None):
1163 1171 return self.dirstate.pathto(f, cwd)
1164 1172
1165 1173 def _loadfilter(self, filter):
1166 1174 if filter not in self._filterpats:
1167 1175 l = []
1168 1176 for pat, cmd in self.ui.configitems(filter):
1169 1177 if cmd == '!':
1170 1178 continue
1171 1179 mf = matchmod.match(self.root, '', [pat])
1172 1180 fn = None
1173 1181 params = cmd
1174 1182 for name, filterfn in self._datafilters.iteritems():
1175 1183 if cmd.startswith(name):
1176 1184 fn = filterfn
1177 1185 params = cmd[len(name):].lstrip()
1178 1186 break
1179 1187 if not fn:
1180 1188 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1181 1189 # Wrap old filters not supporting keyword arguments
1182 1190 if not pycompat.getargspec(fn)[2]:
1183 1191 oldfn = fn
1184 1192 fn = lambda s, c, **kwargs: oldfn(s, c)
1185 1193 l.append((mf, fn, params))
1186 1194 self._filterpats[filter] = l
1187 1195 return self._filterpats[filter]
1188 1196
1189 1197 def _filter(self, filterpats, filename, data):
1190 1198 for mf, fn, cmd in filterpats:
1191 1199 if mf(filename):
1192 1200 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1193 1201 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1194 1202 break
1195 1203
1196 1204 return data
1197 1205
1198 1206 @unfilteredpropertycache
1199 1207 def _encodefilterpats(self):
1200 1208 return self._loadfilter('encode')
1201 1209
1202 1210 @unfilteredpropertycache
1203 1211 def _decodefilterpats(self):
1204 1212 return self._loadfilter('decode')
1205 1213
1206 1214 def adddatafilter(self, name, filter):
1207 1215 self._datafilters[name] = filter
1208 1216
1209 1217 def wread(self, filename):
1210 1218 if self.wvfs.islink(filename):
1211 1219 data = self.wvfs.readlink(filename)
1212 1220 else:
1213 1221 data = self.wvfs.read(filename)
1214 1222 return self._filter(self._encodefilterpats, filename, data)
1215 1223
1216 1224 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1217 1225 """write ``data`` into ``filename`` in the working directory
1218 1226
1219 1227 This returns length of written (maybe decoded) data.
1220 1228 """
1221 1229 data = self._filter(self._decodefilterpats, filename, data)
1222 1230 if 'l' in flags:
1223 1231 self.wvfs.symlink(data, filename)
1224 1232 else:
1225 1233 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1226 1234 **kwargs)
1227 1235 if 'x' in flags:
1228 1236 self.wvfs.setflags(filename, False, True)
1229 1237 else:
1230 1238 self.wvfs.setflags(filename, False, False)
1231 1239 return len(data)
1232 1240
1233 1241 def wwritedata(self, filename, data):
1234 1242 return self._filter(self._decodefilterpats, filename, data)
1235 1243
1236 1244 def currenttransaction(self):
1237 1245 """return the current transaction or None if none exists"""
1238 1246 if self._transref:
1239 1247 tr = self._transref()
1240 1248 else:
1241 1249 tr = None
1242 1250
1243 1251 if tr and tr.running():
1244 1252 return tr
1245 1253 return None
1246 1254
1247 1255 def transaction(self, desc, report=None):
1248 1256 if (self.ui.configbool('devel', 'all-warnings')
1249 1257 or self.ui.configbool('devel', 'check-locks')):
1250 1258 if self._currentlock(self._lockref) is None:
1251 1259 raise error.ProgrammingError('transaction requires locking')
1252 1260 tr = self.currenttransaction()
1253 1261 if tr is not None:
1254 1262 return tr.nest(name=desc)
1255 1263
1256 1264 # abort here if the journal already exists
1257 1265 if self.svfs.exists("journal"):
1258 1266 raise error.RepoError(
1259 1267 _("abandoned transaction found"),
1260 1268 hint=_("run 'hg recover' to clean up transaction"))
1261 1269
1262 1270 idbase = "%.40f#%f" % (random.random(), time.time())
1263 1271 ha = hex(hashlib.sha1(idbase).digest())
1264 1272 txnid = 'TXN:' + ha
1265 1273 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1266 1274
1267 1275 self._writejournal(desc)
1268 1276 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1269 1277 if report:
1270 1278 rp = report
1271 1279 else:
1272 1280 rp = self.ui.warn
1273 1281 vfsmap = {'plain': self.vfs} # root of .hg/
1274 1282 # we must avoid cyclic reference between repo and transaction.
1275 1283 reporef = weakref.ref(self)
1276 1284 # Code to track tag movement
1277 1285 #
1278 1286 # Since tags are all handled as file content, it is actually quite hard
1279 1287 # to track these movements from a code perspective. So we fall back to
1280 1288 # tracking at the repository level. One could envision tracking changes
1281 1289 # to the '.hgtags' file through changegroup apply, but that fails to
1282 1290 # cope with cases where a transaction exposes new heads without a
1283 1291 # changegroup being involved (eg: phase movement).
1284 1292 #
1285 1293 # For now, we gate the feature behind a flag since this likely comes
1286 1294 # with performance impacts. The current code runs more often than needed
1287 1295 # and does not use caches as much as it could. The current focus is on
1288 1296 # the behavior of the feature, so we disable it by default. The flag
1289 1297 # will be removed when we are happy with the performance impact.
1290 1298 #
1291 1299 # Once this feature is no longer experimental move the following
1292 1300 # documentation to the appropriate help section:
1293 1301 #
1294 1302 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1295 1303 # tags (new or changed or deleted tags). In addition the details of
1296 1304 # these changes are made available in a file at:
1297 1305 # ``REPOROOT/.hg/changes/tags.changes``.
1298 1306 # Make sure you check for HG_TAG_MOVED before reading that file as it
1299 1307 # might exist from a previous transaction even if no tags were touched
1300 1308 # in this one. Changes are recorded in a line-based format::
1301 1309 #
1302 1310 # <action> <hex-node> <tag-name>\n
1303 1311 #
1304 1312 # Actions are defined as follows:
1305 1313 # "-R": tag is removed,
1306 1314 # "+A": tag is added,
1307 1315 # "-M": tag is moved (old value),
1308 1316 # "+M": tag is moved (new value),
1309 1317 tracktags = lambda x: None
1310 1318 # experimental config: experimental.hook-track-tags
1311 1319 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1312 1320 if desc != 'strip' and shouldtracktags:
1313 1321 oldheads = self.changelog.headrevs()
1314 1322 def tracktags(tr2):
1315 1323 repo = reporef()
1316 1324 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1317 1325 newheads = repo.changelog.headrevs()
1318 1326 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1319 1327 # notes: we compare lists here.
1320 1328 # As we do it only once, building a set would not be cheaper
1321 1329 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1322 1330 if changes:
1323 1331 tr2.hookargs['tag_moved'] = '1'
1324 1332 with repo.vfs('changes/tags.changes', 'w',
1325 1333 atomictemp=True) as changesfile:
1326 1334 # note: we do not register the file with the transaction
1327 1335 # because we need it to still exist when the transaction
1328 1336 # is closed (for txnclose hooks)
1329 1337 tagsmod.writediff(changesfile, changes)
1330 1338 def validate(tr2):
1331 1339 """will run pre-closing hooks"""
1332 1340 # XXX the transaction API is a bit lacking here so we take a hacky
1333 1341 # path for now
1334 1342 #
1335 1343 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1336 1344 # dict is copied before these run. In addition we need the data
1337 1345 # available to in-memory hooks too.
1338 1346 #
1339 1347 # Moreover, we also need to make sure this runs before txnclose
1340 1348 # hooks and there is no "pending" mechanism that would execute
1341 1349 # logic only if hooks are about to run.
1342 1350 #
1343 1351 # Fixing this limitation of the transaction is also needed to track
1344 1352 # other families of changes (bookmarks, phases, obsolescence).
1345 1353 #
1346 1354 # This will have to be fixed before we remove the experimental
1347 1355 # gating.
1348 1356 tracktags(tr2)
1349 1357 repo = reporef()
1350 1358 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1351 1359 scmutil.enforcesinglehead(repo, tr2, desc)
1352 1360 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1353 1361 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1354 1362 args = tr.hookargs.copy()
1355 1363 args.update(bookmarks.preparehookargs(name, old, new))
1356 1364 repo.hook('pretxnclose-bookmark', throw=True,
1357 1365 txnname=desc,
1358 1366 **pycompat.strkwargs(args))
1359 1367 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1360 1368 cl = repo.unfiltered().changelog
1361 1369 for rev, (old, new) in tr.changes['phases'].items():
1362 1370 args = tr.hookargs.copy()
1363 1371 node = hex(cl.node(rev))
1364 1372 args.update(phases.preparehookargs(node, old, new))
1365 1373 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1366 1374 **pycompat.strkwargs(args))
1367 1375
1368 1376 repo.hook('pretxnclose', throw=True,
1369 1377 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1370 1378 def releasefn(tr, success):
1371 1379 repo = reporef()
1372 1380 if success:
1373 1381 # this should be explicitly invoked here, because
1374 1382 # in-memory changes aren't written out when closing
1375 1383 # the transaction, if tr.addfilegenerator (via
1376 1384 # dirstate.write or so) isn't invoked while the
1377 1385 # transaction is running
1378 1386 repo.dirstate.write(None)
1379 1387 else:
1380 1388 # discard all changes (including ones already written
1381 1389 # out) in this transaction
1382 1390 narrowspec.restorebackup(self, 'journal.narrowspec')
1383 1391 repo.dirstate.restorebackup(None, 'journal.dirstate')
1384 1392
1385 1393 repo.invalidate(clearfilecache=True)
1386 1394
1387 1395 tr = transaction.transaction(rp, self.svfs, vfsmap,
1388 1396 "journal",
1389 1397 "undo",
1390 1398 aftertrans(renames),
1391 1399 self.store.createmode,
1392 1400 validator=validate,
1393 1401 releasefn=releasefn,
1394 1402 checkambigfiles=_cachedfiles,
1395 1403 name=desc)
1396 1404 tr.changes['origrepolen'] = len(self)
1397 1405 tr.changes['obsmarkers'] = set()
1398 1406 tr.changes['phases'] = {}
1399 1407 tr.changes['bookmarks'] = {}
1400 1408
1401 1409 tr.hookargs['txnid'] = txnid
1402 1410 # note: writing the fncache only during finalize means that the file is
1403 1411 # outdated when running hooks. As fncache is used for streaming clone,
1404 1412 # this is not expected to break anything that happens during the hooks.
1405 1413 tr.addfinalize('flush-fncache', self.store.write)
1406 1414 def txnclosehook(tr2):
1407 1415 """To be run if transaction is successful, will schedule a hook run
1408 1416 """
1409 1417 # Don't reference tr2 in hook() so we don't hold a reference.
1410 1418 # This reduces memory consumption when there are multiple
1411 1419 # transactions per lock. This can likely go away if issue5045
1412 1420 # fixes the function accumulation.
1413 1421 hookargs = tr2.hookargs
1414 1422
1415 1423 def hookfunc():
1416 1424 repo = reporef()
1417 1425 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1418 1426 bmchanges = sorted(tr.changes['bookmarks'].items())
1419 1427 for name, (old, new) in bmchanges:
1420 1428 args = tr.hookargs.copy()
1421 1429 args.update(bookmarks.preparehookargs(name, old, new))
1422 1430 repo.hook('txnclose-bookmark', throw=False,
1423 1431 txnname=desc, **pycompat.strkwargs(args))
1424 1432
1425 1433 if hook.hashook(repo.ui, 'txnclose-phase'):
1426 1434 cl = repo.unfiltered().changelog
1427 1435 phasemv = sorted(tr.changes['phases'].items())
1428 1436 for rev, (old, new) in phasemv:
1429 1437 args = tr.hookargs.copy()
1430 1438 node = hex(cl.node(rev))
1431 1439 args.update(phases.preparehookargs(node, old, new))
1432 1440 repo.hook('txnclose-phase', throw=False, txnname=desc,
1433 1441 **pycompat.strkwargs(args))
1434 1442
1435 1443 repo.hook('txnclose', throw=False, txnname=desc,
1436 1444 **pycompat.strkwargs(hookargs))
1437 1445 reporef()._afterlock(hookfunc)
1438 1446 tr.addfinalize('txnclose-hook', txnclosehook)
1439 1447 # Include a leading "-" to make it happen before the transaction summary
1440 1448 # reports registered via scmutil.registersummarycallback() whose names
1441 1449 # are 00-txnreport etc. That way, the caches will be warm when the
1442 1450 # callbacks run.
1443 1451 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1444 1452 def txnaborthook(tr2):
1445 1453 """To be run if transaction is aborted
1446 1454 """
1447 1455 reporef().hook('txnabort', throw=False, txnname=desc,
1448 1456 **pycompat.strkwargs(tr2.hookargs))
1449 1457 tr.addabort('txnabort-hook', txnaborthook)
1450 1458 # avoid eager cache invalidation. in-memory data should be identical
1451 1459 # to stored data if transaction has no error.
1452 1460 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1453 1461 self._transref = weakref.ref(tr)
1454 1462 scmutil.registersummarycallback(self, tr, desc)
1455 1463 return tr
1456 1464
1457 1465 def _journalfiles(self):
1458 1466 return ((self.svfs, 'journal'),
1459 1467 (self.vfs, 'journal.dirstate'),
1460 1468 (self.vfs, 'journal.branch'),
1461 1469 (self.vfs, 'journal.desc'),
1462 1470 (self.vfs, 'journal.bookmarks'),
1463 1471 (self.svfs, 'journal.phaseroots'))
1464 1472
1465 1473 def undofiles(self):
1466 1474 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1467 1475
1468 1476 @unfilteredmethod
1469 1477 def _writejournal(self, desc):
1470 1478 self.dirstate.savebackup(None, 'journal.dirstate')
1471 1479 narrowspec.savebackup(self, 'journal.narrowspec')
1472 1480 self.vfs.write("journal.branch",
1473 1481 encoding.fromlocal(self.dirstate.branch()))
1474 1482 self.vfs.write("journal.desc",
1475 1483 "%d\n%s\n" % (len(self), desc))
1476 1484 self.vfs.write("journal.bookmarks",
1477 1485 self.vfs.tryread("bookmarks"))
1478 1486 self.svfs.write("journal.phaseroots",
1479 1487 self.svfs.tryread("phaseroots"))
1480 1488
1481 1489 def recover(self):
1482 1490 with self.lock():
1483 1491 if self.svfs.exists("journal"):
1484 1492 self.ui.status(_("rolling back interrupted transaction\n"))
1485 1493 vfsmap = {'': self.svfs,
1486 1494 'plain': self.vfs,}
1487 1495 transaction.rollback(self.svfs, vfsmap, "journal",
1488 1496 self.ui.warn,
1489 1497 checkambigfiles=_cachedfiles)
1490 1498 self.invalidate()
1491 1499 return True
1492 1500 else:
1493 1501 self.ui.warn(_("no interrupted transaction available\n"))
1494 1502 return False
1495 1503
1496 1504 def rollback(self, dryrun=False, force=False):
1497 1505 wlock = lock = dsguard = None
1498 1506 try:
1499 1507 wlock = self.wlock()
1500 1508 lock = self.lock()
1501 1509 if self.svfs.exists("undo"):
1502 1510 dsguard = dirstateguard.dirstateguard(self, 'rollback')
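# the guard backs up the current dirstate here; unless _rollback()
# explicitly closes it, releasing the guard restores that backup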
1503 1511
1504 1512 return self._rollback(dryrun, force, dsguard)
1505 1513 else:
1506 1514 self.ui.warn(_("no rollback information available\n"))
1507 1515 return 1
1508 1516 finally:
1509 1517 release(dsguard, lock, wlock)
1510 1518
1511 1519 @unfilteredmethod # Until we get smarter cache management
1512 1520 def _rollback(self, dryrun, force, dsguard):
1513 1521 ui = self.ui
1514 1522 try:
1515 1523 args = self.vfs.read('undo.desc').splitlines()
1516 1524 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1517 1525 if len(args) >= 3:
1518 1526 detail = args[2]
1519 1527 oldtip = oldlen - 1
1520 1528
1521 1529 if detail and ui.verbose:
1522 1530 msg = (_('repository tip rolled back to revision %d'
1523 1531 ' (undo %s: %s)\n')
1524 1532 % (oldtip, desc, detail))
1525 1533 else:
1526 1534 msg = (_('repository tip rolled back to revision %d'
1527 1535 ' (undo %s)\n')
1528 1536 % (oldtip, desc))
1529 1537 except IOError:
1530 1538 msg = _('rolling back unknown transaction\n')
1531 1539 desc = None
1532 1540
1533 1541 if not force and self['.'] != self['tip'] and desc == 'commit':
1534 1542 raise error.Abort(
1535 1543 _('rollback of last commit while not checked out '
1536 1544 'may lose data'), hint=_('use -f to force'))
1537 1545
1538 1546 ui.status(msg)
1539 1547 if dryrun:
1540 1548 return 0
1541 1549
1542 1550 parents = self.dirstate.parents()
1543 1551 self.destroying()
1544 1552 vfsmap = {'plain': self.vfs, '': self.svfs}
1545 1553 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1546 1554 checkambigfiles=_cachedfiles)
1547 1555 if self.vfs.exists('undo.bookmarks'):
1548 1556 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1549 1557 if self.svfs.exists('undo.phaseroots'):
1550 1558 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1551 1559 self.invalidate()
1552 1560
1553 1561 parentgone = (parents[0] not in self.changelog.nodemap or
1554 1562 parents[1] not in self.changelog.nodemap)
1555 1563 if parentgone:
1556 1564 # prevent dirstateguard from overwriting the already restored one
1557 1565 dsguard.close()
1558 1566
1559 1567 narrowspec.restorebackup(self, 'undo.narrowspec')
1560 1568 self.dirstate.restorebackup(None, 'undo.dirstate')
1561 1569 try:
1562 1570 branch = self.vfs.read('undo.branch')
1563 1571 self.dirstate.setbranch(encoding.tolocal(branch))
1564 1572 except IOError:
1565 1573 ui.warn(_('named branch could not be reset: '
1566 1574 'current branch is still \'%s\'\n')
1567 1575 % self.dirstate.branch())
1568 1576
1569 1577 parents = tuple([p.rev() for p in self[None].parents()])
1570 1578 if len(parents) > 1:
1571 1579 ui.status(_('working directory now based on '
1572 1580 'revisions %d and %d\n') % parents)
1573 1581 else:
1574 1582 ui.status(_('working directory now based on '
1575 1583 'revision %d\n') % parents)
1576 1584 mergemod.mergestate.clean(self, self['.'].node())
1577 1585
1578 1586 # TODO: if we know which new heads may result from this rollback, pass
1579 1587 # them to destroy(), which will prevent the branchhead cache from being
1580 1588 # invalidated.
1581 1589 self.destroyed()
1582 1590 return 0
1583 1591
1584 1592 def _buildcacheupdater(self, newtransaction):
1585 1593 """called during transaction to build the callback updating cache
1586 1594
1587 1595 Lives on the repository to help extensions that might want to augment
1588 1596 this logic. For this purpose, the created transaction is passed to the
1589 1597 method.
1590 1598 """
1591 1599 # we must avoid cyclic reference between repo and transaction.
1592 1600 reporef = weakref.ref(self)
1593 1601 def updater(tr):
1594 1602 repo = reporef()
1595 1603 repo.updatecaches(tr)
1596 1604 return updater
1597 1605
1598 1606 @unfilteredmethod
1599 1607 def updatecaches(self, tr=None, full=False):
1600 1608 """warm appropriate caches
1601 1609
1602 1610 If this function is called after a transaction has closed, the
1603 1611 transaction will be available in the 'tr' argument. This can be used
1604 1612 to selectively update caches relevant to the changes in that transaction.
1605 1613
1606 1614 If 'full' is set, make sure all caches the function knows about have
1607 1615 up-to-date data, even the ones usually loaded more lazily.
1608 1616 """
1609 1617 if tr is not None and tr.hookargs.get('source') == 'strip':
1610 1618 # During strip, many caches are invalid but
1611 1619 # later call to `destroyed` will refresh them.
1612 1620 return
1613 1621
1614 1622 if tr is None or tr.changes['origrepolen'] < len(self):
1615 1623 # updating the unfiltered branchmap should refresh all the others.
1616 1624 self.ui.debug('updating the branch cache\n')
1617 1625 branchmap.updatecache(self.filtered('served'))
1618 1626
1619 1627 if full:
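# iterating over every revision fully populates the rev-branch
# cache, which would otherwise be filled lazily on access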
1620 1628 rbc = self.revbranchcache()
1621 1629 for r in self.changelog:
1622 1630 rbc.branchinfo(r)
1623 1631 rbc.write()
1624 1632
1625 1633 # ensure the working copy parents are in the manifestfulltextcache
1626 1634 for ctx in self['.'].parents():
1627 1635 ctx.manifest() # accessing the manifest is enough
1628 1636
1629 1637 def invalidatecaches(self):
1630 1638
1631 1639 if '_tagscache' in vars(self):
1632 1640 # can't use delattr on proxy
1633 1641 del self.__dict__['_tagscache']
1634 1642
1635 1643 self.unfiltered()._branchcaches.clear()
1636 1644 self.invalidatevolatilesets()
1637 1645 self._sparsesignaturecache.clear()
1638 1646
1639 1647 def invalidatevolatilesets(self):
1640 1648 self.filteredrevcache.clear()
1641 1649 obsolete.clearobscaches(self)
1642 1650
1643 1651 def invalidatedirstate(self):
1644 1652 '''Invalidates the dirstate, causing the next call to dirstate
1645 1653 to check if it was modified since the last time it was read,
1646 1654 rereading it if it has.
1647 1655
1648 1656 This is different from dirstate.invalidate() in that it doesn't always
1649 1657 reread the dirstate. Use dirstate.invalidate() if you want to
1650 1658 explicitly read the dirstate again (i.e. restoring it to a previous
1651 1659 known good state).'''
1652 1660 if hasunfilteredcache(self, 'dirstate'):
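# drop every cached property of the dirstate so that the next
# access re-validates against the on-disk file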
1653 1661 for k in self.dirstate._filecache:
1654 1662 try:
1655 1663 delattr(self.dirstate, k)
1656 1664 except AttributeError:
1657 1665 pass
1658 1666 delattr(self.unfiltered(), 'dirstate')
1659 1667
1660 1668 def invalidate(self, clearfilecache=False):
1661 1669 '''Invalidates both store and non-store parts other than dirstate
1662 1670
1663 1671 If a transaction is running, invalidation of store is omitted,
1664 1672 because discarding in-memory changes might cause inconsistency
1665 1673 (e.g. an incomplete fncache causes unintentional failure, but
1666 1674 a redundant one doesn't).
1667 1675 '''
1668 1676 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1669 1677 for k in list(self._filecache.keys()):
1670 1678 # dirstate is invalidated separately in invalidatedirstate()
1671 1679 if k == 'dirstate':
1672 1680 continue
1673 1681 if (k == 'changelog' and
1674 1682 self.currenttransaction() and
1675 1683 self.changelog._delayed):
1676 1684 # The changelog object may store unwritten revisions. We don't
1677 1685 # want to lose them.
1678 1686 # TODO: Solve the problem instead of working around it.
1679 1687 continue
1680 1688
1681 1689 if clearfilecache:
1682 1690 del self._filecache[k]
1683 1691 try:
1684 1692 delattr(unfiltered, k)
1685 1693 except AttributeError:
1686 1694 pass
1687 1695 self.invalidatecaches()
1688 1696 if not self.currenttransaction():
1689 1697 # TODO: Changing contents of store outside transaction
1690 1698 # causes inconsistency. We should make in-memory store
1691 1699 # changes detectable, and abort if changed.
1692 1700 self.store.invalidatecaches()
1693 1701
1694 1702 def invalidateall(self):
1695 1703 '''Fully invalidates both store and non-store parts, causing the
1696 1704 subsequent operation to reread any outside changes.'''
1697 1705 # extension should hook this to invalidate its caches
1698 1706 self.invalidate()
1699 1707 self.invalidatedirstate()
1700 1708
1701 1709 @unfilteredmethod
1702 1710 def _refreshfilecachestats(self, tr):
1703 1711 """Reload stats of cached files so that they are flagged as valid"""
1704 1712 for k, ce in self._filecache.items():
1705 1713 k = pycompat.sysstr(k)
1706 1714 if k == r'dirstate' or k not in self.__dict__:
1707 1715 continue
1708 1716 ce.refresh()
1709 1717
1710 1718 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1711 1719 inheritchecker=None, parentenvvar=None):
1712 1720 parentlock = None
1713 1721 # the contents of parentenvvar are used by the underlying lock to
1714 1722 # determine whether it can be inherited
1715 1723 if parentenvvar is not None:
1716 1724 parentlock = encoding.environ.get(parentenvvar)
1717 1725
1718 1726 timeout = 0
1719 1727 warntimeout = 0
1720 1728 if wait:
1721 1729 timeout = self.ui.configint("ui", "timeout")
1722 1730 warntimeout = self.ui.configint("ui", "timeout.warn")
1723 1731 # internal config: ui.signal-safe-lock
1724 1732 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
1725 1733
1726 1734 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1727 1735 releasefn=releasefn,
1728 1736 acquirefn=acquirefn, desc=desc,
1729 1737 inheritchecker=inheritchecker,
1730 1738 parentlock=parentlock,
1731 1739 signalsafe=signalsafe)
1732 1740 return l
1733 1741
1734 1742 def _afterlock(self, callback):
1735 1743 """add a callback to be run when the repository is fully unlocked
1736 1744
1737 1745 The callback will be executed when the outermost lock is released
1738 1746 (with wlock being higher level than 'lock')."""
1739 1747 for ref in (self._wlockref, self._lockref):
1740 1748 l = ref and ref()
1741 1749 if l and l.held:
1742 1750 l.postrelease.append(callback)
1743 1751 break
1744 1752 else: # no lock has been found.
1745 1753 callback()
1746 1754
1747 1755 def lock(self, wait=True):
1748 1756 '''Lock the repository store (.hg/store) and return a weak reference
1749 1757 to the lock. Use this before modifying the store (e.g. committing or
1750 1758 stripping). If you are opening a transaction, get a lock as well.
1751 1759
1752 1760 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1753 1761 'wlock' first to avoid a dead-lock hazard.'''
1754 1762 l = self._currentlock(self._lockref)
1755 1763 if l is not None:
1756 1764 l.lock()
1757 1765 return l
1758 1766
1759 1767 l = self._lock(self.svfs, "lock", wait, None,
1760 1768 self.invalidate, _('repository %s') % self.origroot)
1761 1769 self._lockref = weakref.ref(l)
1762 1770 return l
1763 1771
1764 1772 def _wlockchecktransaction(self):
1765 1773 if self.currenttransaction() is not None:
1766 1774 raise error.LockInheritanceContractViolation(
1767 1775 'wlock cannot be inherited in the middle of a transaction')
1768 1776
1769 1777 def wlock(self, wait=True):
1770 1778 '''Lock the non-store parts of the repository (everything under
1771 1779 .hg except .hg/store) and return a weak reference to the lock.
1772 1780
1773 1781 Use this before modifying files in .hg.
1774 1782
1775 1783 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1776 1784 'wlock' first to avoid a dead-lock hazard.'''
1777 1785 l = self._wlockref and self._wlockref()
1778 1786 if l is not None and l.held:
1779 1787 l.lock()
1780 1788 return l
1781 1789
1782 1790 # We do not need to check for non-waiting lock acquisition. Such
1783 1791 # acquisition would not cause a dead-lock, as it would just fail.
1784 1792 if wait and (self.ui.configbool('devel', 'all-warnings')
1785 1793 or self.ui.configbool('devel', 'check-locks')):
1786 1794 if self._currentlock(self._lockref) is not None:
1787 1795 self.ui.develwarn('"wlock" acquired after "lock"')
1788 1796
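# on lock release, persist the dirstate unless a parent change is
# still pending, in which case the in-memory state is discarded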
1789 1797 def unlock():
1790 1798 if self.dirstate.pendingparentchange():
1791 1799 self.dirstate.invalidate()
1792 1800 else:
1793 1801 self.dirstate.write(None)
1794 1802
1795 1803 self._filecache['dirstate'].refresh()
1796 1804
1797 1805 l = self._lock(self.vfs, "wlock", wait, unlock,
1798 1806 self.invalidatedirstate, _('working directory of %s') %
1799 1807 self.origroot,
1800 1808 inheritchecker=self._wlockchecktransaction,
1801 1809 parentenvvar='HG_WLOCK_LOCKER')
1802 1810 self._wlockref = weakref.ref(l)
1803 1811 return l
1804 1812
1805 1813 def _currentlock(self, lockref):
1806 1814 """Returns the lock if it's held, or None if it's not."""
1807 1815 if lockref is None:
1808 1816 return None
1809 1817 l = lockref()
1810 1818 if l is None or not l.held:
1811 1819 return None
1812 1820 return l
1813 1821
1814 1822 def currentwlock(self):
1815 1823 """Returns the wlock if it's held, or None if it's not."""
1816 1824 return self._currentlock(self._wlockref)
1817 1825
1818 1826 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1819 1827 """
1820 1828 commit an individual file as part of a larger transaction
1821 1829 """
1822 1830
1823 1831 fname = fctx.path()
1824 1832 fparent1 = manifest1.get(fname, nullid)
1825 1833 fparent2 = manifest2.get(fname, nullid)
1826 1834 if isinstance(fctx, context.filectx):
1827 1835 node = fctx.filenode()
1828 1836 if node in [fparent1, fparent2]:
1829 1837 self.ui.debug('reusing %s filelog entry\n' % fname)
1830 1838 if manifest1.flags(fname) != fctx.flags():
1831 1839 changelist.append(fname)
1832 1840 return node
1833 1841
1834 1842 flog = self.file(fname)
1835 1843 meta = {}
1836 1844 copy = fctx.renamed()
1837 1845 if copy and copy[0] != fname:
1838 1846 # Mark the new revision of this file as a copy of another
1839 1847 # file. This copy data will effectively act as a parent
1840 1848 # of this new revision. If this is a merge, the first
1841 1849 # parent will be the nullid (meaning "look up the copy data")
1842 1850 # and the second one will be the other parent. For example:
1843 1851 #
1844 1852 # 0 --- 1 --- 3 rev1 changes file foo
1845 1853 # \ / rev2 renames foo to bar and changes it
1846 1854 # \- 2 -/ rev3 should have bar with all changes and
1847 1855 # should record that bar descends from
1848 1856 # bar in rev2 and foo in rev1
1849 1857 #
1850 1858 # this allows this merge to succeed:
1851 1859 #
1852 1860 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1853 1861 # \ / merging rev3 and rev4 should use bar@rev2
1854 1862 # \- 2 --- 4 as the merge base
1855 1863 #
1856 1864
1857 1865 cfname = copy[0]
1858 1866 crev = manifest1.get(cfname)
1859 1867 newfparent = fparent2
1860 1868
1861 1869 if manifest2: # branch merge
1862 1870 if fparent2 == nullid or crev is None: # copied on remote side
1863 1871 if cfname in manifest2:
1864 1872 crev = manifest2[cfname]
1865 1873 newfparent = fparent1
1866 1874
1867 1875 # Here, we used to search backwards through history to try to find
1868 1876 # where the file copy came from if the source of a copy was not in
1869 1877 # the parent directory. However, this doesn't actually make sense to
1870 1878 # do (what does a copy from something not in your working copy even
1871 1879 # mean?) and it causes bugs (e.g. issue4476). Instead, we will warn
1872 1880 # the user that copy information was dropped, so if they didn't
1873 1881 # expect this outcome it can be fixed, but this is the correct
1874 1882 # behavior in this circumstance.
1875 1883
1876 1884 if crev:
1877 1885 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1878 1886 meta["copy"] = cfname
1879 1887 meta["copyrev"] = hex(crev)
1880 1888 fparent1, fparent2 = nullid, newfparent
1881 1889 else:
1882 1890 self.ui.warn(_("warning: can't find ancestor for '%s' "
1883 1891 "copied from '%s'!\n") % (fname, cfname))
1884 1892
1885 1893 elif fparent1 == nullid:
1886 1894 fparent1, fparent2 = fparent2, nullid
1887 1895 elif fparent2 != nullid:
1888 1896 # is one parent an ancestor of the other?
1889 1897 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1890 1898 if fparent1 in fparentancestors:
1891 1899 fparent1, fparent2 = fparent2, nullid
1892 1900 elif fparent2 in fparentancestors:
1893 1901 fparent2 = nullid
1894 1902
1895 1903 # is the file changed?
1896 1904 text = fctx.data()
1897 1905 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1898 1906 changelist.append(fname)
1899 1907 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1900 1908 # are just the flags changed during merge?
1901 1909 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1902 1910 changelist.append(fname)
1903 1911
1904 1912 return fparent1
1905 1913
1906 1914 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1907 1915 """check for commit arguments that aren't committable"""
1908 1916 if match.isexact() or match.prefix():
1909 1917 matched = set(status.modified + status.added + status.removed)
1910 1918
1911 1919 for f in match.files():
1912 1920 f = self.dirstate.normalize(f)
1913 1921 if f == '.' or f in matched or f in wctx.substate:
1914 1922 continue
1915 1923 if f in status.deleted:
1916 1924 fail(f, _('file not found!'))
1917 1925 if f in vdirs: # visited directory
1918 1926 d = f + '/'
1919 1927 for mf in matched:
1920 1928 if mf.startswith(d):
1921 1929 break
1922 1930 else:
1923 1931 fail(f, _("no match under directory!"))
1924 1932 elif f not in self.dirstate:
1925 1933 fail(f, _("file not tracked!"))
1926 1934
1927 1935 @unfilteredmethod
1928 1936 def commit(self, text="", user=None, date=None, match=None, force=False,
1929 1937 editor=False, extra=None):
1930 1938 """Add a new revision to current repository.
1931 1939
1932 1940 Revision information is gathered from the working directory,
1933 1941 match can be used to filter the committed files. If editor is
1934 1942 supplied, it is called to get a commit message.
1935 1943 """
1936 1944 if extra is None:
1937 1945 extra = {}
1938 1946
1939 1947 def fail(f, msg):
1940 1948 raise error.Abort('%s: %s' % (f, msg))
1941 1949
1942 1950 if not match:
1943 1951 match = matchmod.always(self.root, '')
1944 1952
1945 1953 if not force:
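# collect explicitly matched directories (via match.explicitdir)
# so that checkcommitpatterns() can validate them later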
1946 1954 vdirs = []
1947 1955 match.explicitdir = vdirs.append
1948 1956 match.bad = fail
1949 1957
1950 1958 wlock = lock = tr = None
1951 1959 try:
1952 1960 wlock = self.wlock()
1953 1961 lock = self.lock() # for recent changelog (see issue4368)
1954 1962
1955 1963 wctx = self[None]
1956 1964 merge = len(wctx.parents()) > 1
1957 1965
1958 1966 if not force and merge and not match.always():
1959 1967 raise error.Abort(_('cannot partially commit a merge '
1960 1968 '(do not specify files or patterns)'))
1961 1969
1962 1970 status = self.status(match=match, clean=force)
1963 1971 if force:
1964 1972 status.modified.extend(status.clean) # mq may commit clean files
1965 1973
1966 1974 # check subrepos
1967 1975 subs, commitsubs, newstate = subrepoutil.precommit(
1968 1976 self.ui, wctx, status, match, force=force)
1969 1977
1970 1978 # make sure all explicit patterns are matched
1971 1979 if not force:
1972 1980 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1973 1981
1974 1982 cctx = context.workingcommitctx(self, status,
1975 1983 text, user, date, extra)
1976 1984
1977 1985 # internal config: ui.allowemptycommit
1978 1986 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1979 1987 or extra.get('close') or merge or cctx.files()
1980 1988 or self.ui.configbool('ui', 'allowemptycommit'))
1981 1989 if not allowemptycommit:
1982 1990 return None
1983 1991
1984 1992 if merge and cctx.deleted():
1985 1993 raise error.Abort(_("cannot commit merge with missing files"))
1986 1994
1987 1995 ms = mergemod.mergestate.read(self)
1988 1996 mergeutil.checkunresolved(ms)
1989 1997
1990 1998 if editor:
1991 1999 cctx._text = editor(self, cctx, subs)
1992 2000 edited = (text != cctx._text)
1993 2001
1994 2002 # Save commit message in case this transaction gets rolled back
1995 2003 # (e.g. by a pretxncommit hook). Leave the content alone on
1996 2004 # the assumption that the user will use the same editor again.
1997 2005 msgfn = self.savecommitmessage(cctx._text)
1998 2006
1999 2007 # commit subs and write new state
2000 2008 if subs:
2001 2009 for s in sorted(commitsubs):
2002 2010 sub = wctx.sub(s)
2003 2011 self.ui.status(_('committing subrepository %s\n') %
2004 2012 subrepoutil.subrelpath(sub))
2005 2013 sr = sub.commit(cctx._text, user, date)
2006 2014 newstate[s] = (newstate[s][0], sr)
2007 2015 subrepoutil.writestate(self, newstate)
2008 2016
2009 2017 p1, p2 = self.dirstate.parents()
2010 2018 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2011 2019 try:
2012 2020 self.hook("precommit", throw=True, parent1=hookp1,
2013 2021 parent2=hookp2)
2014 2022 tr = self.transaction('commit')
2015 2023 ret = self.commitctx(cctx, True)
2016 2024 except: # re-raises
2017 2025 if edited:
2018 2026 self.ui.write(
2019 2027 _('note: commit message saved in %s\n') % msgfn)
2020 2028 raise
2021 2029 # update bookmarks, dirstate and mergestate
2022 2030 bookmarks.update(self, [p1, p2], ret)
2023 2031 cctx.markcommitted(ret)
2024 2032 ms.reset()
2025 2033 tr.close()
2026 2034
2027 2035 finally:
2028 2036 lockmod.release(tr, lock, wlock)
2029 2037
2030 2038 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2031 2039 # hack for commands that use a temporary commit (e.g. histedit):
2032 2040 # the temporary commit may have been stripped before the hook runs
2033 2041 if self.changelog.hasnode(ret):
2034 2042 self.hook("commit", node=node, parent1=parent1,
2035 2043 parent2=parent2)
2036 2044 self._afterlock(commithook)
2037 2045 return ret
2038 2046
2039 2047 @unfilteredmethod
2040 2048 def commitctx(self, ctx, error=False):
2041 2049 """Add a new revision to current repository.
2042 2050 Revision information is passed via the context argument.
2043 2051
2044 2052 ctx.files() should list all files involved in this commit, i.e.
2045 2053 modified/added/removed files. On merge, it may be wider than the
2046 2054 ctx.files() to be committed, since any file nodes derived directly
2047 2055 from p1 or p2 are excluded from the committed ctx.files().
2048 2056 """
2049 2057
2050 2058 tr = None
2051 2059 p1, p2 = ctx.p1(), ctx.p2()
2052 2060 user = ctx.user()
2053 2061
2054 2062 lock = self.lock()
2055 2063 try:
2056 2064 tr = self.transaction("commit")
2057 2065 trp = weakref.proxy(tr)
2058 2066
2059 2067 if ctx.manifestnode():
2060 2068 # reuse an existing manifest revision
2061 2069 self.ui.debug('reusing known manifest\n')
2062 2070 mn = ctx.manifestnode()
2063 2071 files = ctx.files()
2064 2072 elif ctx.files():
2065 2073 m1ctx = p1.manifestctx()
2066 2074 m2ctx = p2.manifestctx()
2067 2075 mctx = m1ctx.copy()
2068 2076
2069 2077 m = mctx.read()
2070 2078 m1 = m1ctx.read()
2071 2079 m2 = m2ctx.read()
2072 2080
2073 2081 # check in files
2074 2082 added = []
2075 2083 changed = []
2076 2084 removed = list(ctx.removed())
2077 2085 linkrev = len(self)
2078 2086 self.ui.note(_("committing files:\n"))
2079 2087 for f in sorted(ctx.modified() + ctx.added()):
2080 2088 self.ui.note(f + "\n")
2081 2089 try:
2082 2090 fctx = ctx[f]
2083 2091 if fctx is None:
2084 2092 removed.append(f)
2085 2093 else:
2086 2094 added.append(f)
2087 2095 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2088 2096 trp, changed)
2089 2097 m.setflag(f, fctx.flags())
2090 2098 except OSError as inst:
2091 2099 self.ui.warn(_("trouble committing %s!\n") % f)
2092 2100 raise
2093 2101 except IOError as inst:
2094 2102 errcode = getattr(inst, 'errno', errno.ENOENT)
2095 2103 if error or errcode and errcode != errno.ENOENT:
2096 2104 self.ui.warn(_("trouble committing %s!\n") % f)
2097 2105 raise
2098 2106
2099 2107 # update manifest
2100 2108 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2101 2109 drop = [f for f in removed if f in m]
2102 2110 for f in drop:
2103 2111 del m[f]
2104 2112 files = changed + removed
2105 2113 md = None
2106 2114 if not files:
2107 2115 # if no "files" actually changed in terms of the changelog,
2108 2116 # try hard to detect an unmodified manifest entry so that the
2109 2117 # exact same commit can be reproduced later on convert.
2110 2118 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2111 2119 if not files and md:
2112 2120 self.ui.debug('not reusing manifest (no file change in '
2113 2121 'changelog, but manifest differs)\n')
2114 2122 if files or md:
2115 2123 self.ui.note(_("committing manifest\n"))
2116 2124 mn = mctx.write(trp, linkrev,
2117 2125 p1.manifestnode(), p2.manifestnode(),
2118 2126 added, drop)
2119 2127 else:
2120 2128 self.ui.debug('reusing manifest from p1 (listed files '
2121 2129 'actually unchanged)\n')
2122 2130 mn = p1.manifestnode()
2123 2131 else:
2124 2132 self.ui.debug('reusing manifest from p1 (no file change)\n')
2125 2133 mn = p1.manifestnode()
2126 2134 files = []
2127 2135
2128 2136 # update changelog
2129 2137 self.ui.note(_("committing changelog\n"))
2130 2138 self.changelog.delayupdate(tr)
2131 2139 n = self.changelog.add(mn, files, ctx.description(),
2132 2140 trp, p1.node(), p2.node(),
2133 2141 user, ctx.date(), ctx.extra().copy())
2134 2142 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2135 2143 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2136 2144 parent2=xp2)
2137 2145 # set the new commit in its proper phase
2138 2146 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2139 2147 if targetphase:
2140 2148 # retracting the boundary does not alter the parent changeset.
2141 2149 # if a parent has a higher phase, the resulting phase will
2142 2150 # be compliant anyway
2143 2151 #
2144 2152 # if minimal phase was 0 we don't need to retract anything
2145 2153 phases.registernew(self, tr, targetphase, [n])
2146 2154 tr.close()
2147 2155 return n
2148 2156 finally:
2149 2157 if tr:
2150 2158 tr.release()
2151 2159 lock.release()
2152 2160
2153 2161 @unfilteredmethod
2154 2162 def destroying(self):
2155 2163 '''Inform the repository that nodes are about to be destroyed.
2156 2164 Intended for use by strip and rollback, so there's a common
2157 2165 place for anything that has to be done before destroying history.
2158 2166
2159 2167 This is mostly useful for saving state that is in memory and waiting
2160 2168 to be flushed when the current lock is released. Because a call to
2161 2169 destroyed is imminent, the repo will be invalidated causing those
2162 2170 changes to stay in memory (waiting for the next unlock), or vanish
2163 2171 completely.
2164 2172 '''
2165 2173 # When using the same lock to commit and strip, the phasecache is left
2166 2174 # dirty after committing. Then when we strip, the repo is invalidated,
2167 2175 # causing those changes to disappear.
2168 2176 if '_phasecache' in vars(self):
2169 2177 self._phasecache.write()
2170 2178
2171 2179 @unfilteredmethod
2172 2180 def destroyed(self):
2173 2181 '''Inform the repository that nodes have been destroyed.
2174 2182 Intended for use by strip and rollback, so there's a common
2175 2183 place for anything that has to be done after destroying history.
2176 2184 '''
2177 2185 # When one tries to:
2178 2186 # 1) destroy nodes thus calling this method (e.g. strip)
2179 2187 # 2) use phasecache somewhere (e.g. commit)
2180 2188 #
2181 2189 # then 2) will fail because the phasecache contains nodes that were
2182 2190 # removed. We can either remove phasecache from the filecache,
2183 2191 # causing it to reload next time it is accessed, or simply filter
2184 2192 # the removed nodes now and write the updated cache.
2185 2193 self._phasecache.filterunknown(self)
2186 2194 self._phasecache.write()
2187 2195
2188 2196 # refresh all repository caches
2189 2197 self.updatecaches()
2190 2198
2191 2199 # Ensure the persistent tag cache is updated. Doing it now
2192 2200 # means that the tag cache only has to worry about destroyed
2193 2201 # heads immediately after a strip/rollback. That in turn
2194 2202 # guarantees that "cachetip == currenttip" (comparing both rev
2195 2203 # and node) always means no nodes have been added or destroyed.
2196 2204
2197 2205 # XXX this is suboptimal when qrefresh'ing: we strip the current
2198 2206 # head, refresh the tag cache, then immediately add a new head.
2199 2207 # But I think doing it this way is necessary for the "instant
2200 2208 # tag cache retrieval" case to work.
2201 2209 self.invalidate()
2202 2210
2203 2211 def status(self, node1='.', node2=None, match=None,
2204 2212 ignored=False, clean=False, unknown=False,
2205 2213 listsubrepos=False):
2206 2214 '''a convenience method that calls node1.status(node2)'''
2207 2215 return self[node1].status(node2, match, ignored, clean, unknown,
2208 2216 listsubrepos)
2209 2217
2210 2218 def addpostdsstatus(self, ps):
2211 2219 """Add a callback to run within the wlock, at the point at which status
2212 2220 fixups happen.
2213 2221
2214 2222 On status completion, callback(wctx, status) will be called with the
2215 2223 wlock held, unless the dirstate has changed from underneath or the wlock
2216 2224 couldn't be grabbed.
2217 2225
2218 2226 Callbacks should not capture and use a cached copy of the dirstate --
2219 2227 it might change in the meantime. Instead, they should access the
2220 2228 dirstate via wctx.repo().dirstate.
2221 2229
2222 2230 This list is emptied out after each status run -- extensions should
2223 2231 make sure they add to this list each time dirstate.status is called.
2224 2232 Extensions should also make sure they don't call this for statuses
2225 2233 that don't involve the dirstate.
2226 2234 """
2227 2235
2228 2236 # The list is located here for uniqueness reasons -- it is actually
2229 2237 # managed by the workingctx, but that isn't unique per-repo.
2230 2238 self._postdsstatus.append(ps)
2231 2239
2232 2240 def postdsstatus(self):
2233 2241 """Used by workingctx to get the list of post-dirstate-status hooks."""
2234 2242 return self._postdsstatus
2235 2243
2236 2244 def clearpostdsstatus(self):
2237 2245 """Used by workingctx to clear post-dirstate-status hooks."""
2238 2246 del self._postdsstatus[:]
2239 2247
2240 2248 def heads(self, start=None):
2241 2249 if start is None:
2242 2250 cl = self.changelog
2243 2251 headrevs = reversed(cl.headrevs())
2244 2252 return [cl.node(rev) for rev in headrevs]
2245 2253
2246 2254 heads = self.changelog.heads(start)
2247 2255 # sort the output in rev descending order
2248 2256 return sorted(heads, key=self.changelog.rev, reverse=True)
2249 2257
2250 2258 def branchheads(self, branch=None, start=None, closed=False):
2251 2259 '''return a (possibly filtered) list of heads for the given branch
2252 2260
2253 2261 Heads are returned in topological order, from newest to oldest.
2254 2262 If branch is None, use the dirstate branch.
2255 2263 If start is not None, return only heads reachable from start.
2256 2264 If closed is True, return heads that are marked as closed as well.
2257 2265 '''
2258 2266 if branch is None:
2259 2267 branch = self[None].branch()
2260 2268 branches = self.branchmap()
2261 2269 if branch not in branches:
2262 2270 return []
2263 2271 # the cache returns heads ordered lowest to highest
2264 2272 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2265 2273 if start is not None:
2266 2274 # filter out the heads that cannot be reached from startrev
2267 2275 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2268 2276 bheads = [h for h in bheads if h in fbheads]
2269 2277 return bheads
2270 2278
2271 2279 def branches(self, nodes):
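# walk first parents from each node back to the nearest merge or
# root, yielding (tip, branchpoint, p1, p2) tuples; historically
# this backed the legacy "branches" wire protocol command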
2272 2280 if not nodes:
2273 2281 nodes = [self.changelog.tip()]
2274 2282 b = []
2275 2283 for n in nodes:
2276 2284 t = n
2277 2285 while True:
2278 2286 p = self.changelog.parents(n)
2279 2287 if p[1] != nullid or p[0] == nullid:
2280 2288 b.append((t, n, p[0], p[1]))
2281 2289 break
2282 2290 n = p[0]
2283 2291 return b
2284 2292
2285 2293 def between(self, pairs):
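# for each (top, bottom) pair, walk first parents from top towards
# bottom and sample nodes at exponentially growing distances
# (1, 2, 4, ...); historically this backed the legacy "between"
# wire protocol command used during discovery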
2286 2294 r = []
2287 2295
2288 2296 for top, bottom in pairs:
2289 2297 n, l, i = top, [], 0
2290 2298 f = 1
2291 2299
2292 2300 while n != bottom and n != nullid:
2293 2301 p = self.changelog.parents(n)[0]
2294 2302 if i == f:
2295 2303 l.append(n)
2296 2304 f = f * 2
2297 2305 n = p
2298 2306 i += 1
2299 2307
2300 2308 r.append(l)
2301 2309
2302 2310 return r
2303 2311
2304 2312 def checkpush(self, pushop):
2305 2313 """Extensions can override this function if additional checks have
2306 2314 to be performed before pushing, or call it if they override push
2307 2315 command.
2308 2316 """
2309 2317
2310 2318 @unfilteredpropertycache
2311 2319 def prepushoutgoinghooks(self):
2312 2320 """Return util.hooks consists of a pushop with repo, remote, outgoing
2313 2321 methods, which are called before pushing changesets.
2314 2322 """
2315 2323 return util.hooks()
2316 2324
2317 2325 def pushkey(self, namespace, key, old, new):
2318 2326 try:
2319 2327 tr = self.currenttransaction()
2320 2328 hookargs = {}
2321 2329 if tr is not None:
2322 2330 hookargs.update(tr.hookargs)
2323 2331 hookargs = pycompat.strkwargs(hookargs)
2324 2332 hookargs[r'namespace'] = namespace
2325 2333 hookargs[r'key'] = key
2326 2334 hookargs[r'old'] = old
2327 2335 hookargs[r'new'] = new
2328 2336 self.hook('prepushkey', throw=True, **hookargs)
2329 2337 except error.HookAbort as exc:
2330 2338 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2331 2339 if exc.hint:
2332 2340 self.ui.write_err(_("(%s)\n") % exc.hint)
2333 2341 return False
2334 2342 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2335 2343 ret = pushkey.push(self, namespace, key, old, new)
2336 2344 def runhook():
2337 2345 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2338 2346 ret=ret)
2339 2347 self._afterlock(runhook)
2340 2348 return ret
2341 2349
2342 2350 def listkeys(self, namespace):
2343 2351 self.hook('prelistkeys', throw=True, namespace=namespace)
2344 2352 self.ui.debug('listing keys for "%s"\n' % namespace)
2345 2353 values = pushkey.list(self, namespace)
2346 2354 self.hook('listkeys', namespace=namespace, values=values)
2347 2355 return values
2348 2356
2349 2357 def debugwireargs(self, one, two, three=None, four=None, five=None):
2350 2358 '''used to test argument passing over the wire'''
2351 2359 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2352 2360 pycompat.bytestr(four),
2353 2361 pycompat.bytestr(five))
2354 2362
2355 2363 def savecommitmessage(self, text):
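# stash the message in .hg/last-message.txt and return a path
# suitable for display to the user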
2356 2364 fp = self.vfs('last-message.txt', 'wb')
2357 2365 try:
2358 2366 fp.write(text)
2359 2367 finally:
2360 2368 fp.close()
2361 2369 return self.pathto(fp.name[len(self.root) + 1:])
2362 2370
2363 2371 # used to avoid circular references so destructors work
2364 2372 def aftertrans(files):
2365 2373 renamefiles = [tuple(t) for t in files]
2366 2374 def a():
2367 2375 for vfs, src, dest in renamefiles:
2368 2376 # if src and dest refer to the same file, vfs.rename is a no-op,
2369 2377 # leaving both src and dest on disk. delete dest to make sure
2370 2378 # the rename cannot be such a no-op.
2371 2379 vfs.tryunlink(dest)
2372 2380 try:
2373 2381 vfs.rename(src, dest)
2374 2382 except OSError: # journal file does not yet exist
2375 2383 pass
2376 2384 return a
2377 2385
2378 2386 def undoname(fn):
2379 2387 base, name = os.path.split(fn)
2380 2388 assert name.startswith('journal')
2381 2389 return os.path.join(base, name.replace('journal', 'undo', 1))
2382 2390
2383 2391 def instance(ui, path, create, intents=None):
2384 2392 return localrepository(ui, util.urllocalpath(path), create,
2385 2393 intents=intents)
2386 2394
2387 2395 def islocal(path):
2388 2396 return True
2389 2397
2390 2398 def newreporequirements(repo):
2391 2399 """Determine the set of requirements for a new local repository.
2392 2400
2393 2401 Extensions can wrap this function to specify custom requirements for
2394 2402 new repositories.
2395 2403 """
2396 2404 ui = repo.ui
2397 2405 requirements = {'revlogv1'}
2398 2406 if ui.configbool('format', 'usestore'):
2399 2407 requirements.add('store')
2400 2408 if ui.configbool('format', 'usefncache'):
2401 2409 requirements.add('fncache')
2402 2410 if ui.configbool('format', 'dotencode'):
2403 2411 requirements.add('dotencode')
2404 2412
2405 2413 compengine = ui.config('experimental', 'format.compression')
2406 2414 if compengine not in util.compengines:
2407 2415 raise error.Abort(_('compression engine %s defined by '
2408 2416 'experimental.format.compression not available') %
2409 2417 compengine,
2410 2418 hint=_('run "hg debuginstall" to list available '
2411 2419 'compression engines'))
2412 2420
2413 2421 # zlib is the historical default and doesn't need an explicit requirement.
2414 2422 if compengine != 'zlib':
2415 2423 requirements.add('exp-compression-%s' % compengine)
2416 2424
2417 2425 if scmutil.gdinitconfig(ui):
2418 2426 requirements.add('generaldelta')
2419 2427 if ui.configbool('experimental', 'treemanifest'):
2420 2428 requirements.add('treemanifest')
2421 2429 # experimental config: format.sparse-revlog
2422 2430 if ui.configbool('format', 'sparse-revlog'):
2423 2431 requirements.add(SPARSEREVLOG_REQUIREMENT)
2424 2432
2425 2433 revlogv2 = ui.config('experimental', 'revlogv2')
2426 2434 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2427 2435 requirements.remove('revlogv1')
2428 2436 # generaldelta is implied by revlogv2.
2429 2437 requirements.discard('generaldelta')
2430 2438 requirements.add(REVLOGV2_REQUIREMENT)
2431 2439 # experimental config: format.internal-phase
2432 2440 if repo.ui.configbool('format', 'internal-phase'):
2433 2441 requirements.add('internal-phase')
2434 2442
2435 2443 return requirements