localrepo: update comments around path auditors
Augie Fackler
r35119:ff80efc8 default
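This change rewrites the comments around the two path auditors created in localrepository.__init__ (old lines 367-368 vs. new lines 367-375 in the diff below): the filesystem-aware repo.auditor is now documented as serving context.workingctx.match, and repo.nofsauditor as serving context.basectx.match, both for detecting files in subrepos. As a rough illustration, here is a minimal Python sketch of how the two auditors differ. It is not part of the change; it assumes a Mercurial checkout of roughly this vintage is importable, repo_root is a hypothetical path, and the pathauditor keyword arguments are the same ones used by the constructor calls visible in the diff.

from mercurial import error, pathutil

repo_root = '/tmp/example-repo'  # hypothetical repository root

# Filesystem-aware auditor (realfs defaults to True): it also inspects the
# on-disk state, e.g. rejecting paths that traverse a symlink or a nested
# repository's .hg directory. This mirrors repo.auditor in the diff below.
auditor = pathutil.pathauditor(repo_root)

# Filesystem-free auditor: realfs=False skips the on-disk checks, which
# matters when matching paths from history that need not exist in the
# working copy; cached=True lets it memoize audit results. This mirrors
# repo.nofsauditor in the diff below.
nofsauditor = pathutil.pathauditor(repo_root, realfs=False, cached=True)

# Both objects are callable and raise error.Abort on an illegal path:
try:
    auditor('src/module.py')  # fine: an ordinary relative path
    auditor('.hg/hgrc')       # rejected: paths inside .hg are forbidden
except error.Abort as inst:
    print(inst)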
@@ -1,2347 +1,2349 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 )
24 24 from . import (
25 25 bookmarks,
26 26 branchmap,
27 27 bundle2,
28 28 changegroup,
29 29 changelog,
30 30 color,
31 31 context,
32 32 dirstate,
33 33 dirstateguard,
34 34 discovery,
35 35 encoding,
36 36 error,
37 37 exchange,
38 38 extensions,
39 39 filelog,
40 40 hook,
41 41 lock as lockmod,
42 42 manifest,
43 43 match as matchmod,
44 44 merge as mergemod,
45 45 mergeutil,
46 46 namespaces,
47 47 obsolete,
48 48 pathutil,
49 49 peer,
50 50 phases,
51 51 pushkey,
52 52 pycompat,
53 53 repository,
54 54 repoview,
55 55 revset,
56 56 revsetlang,
57 57 scmutil,
58 58 sparse,
59 59 store,
60 60 subrepo,
61 61 tags as tagsmod,
62 62 transaction,
63 63 txnutil,
64 64 util,
65 65 vfs as vfsmod,
66 66 )
67 67
68 68 release = lockmod.release
69 69 urlerr = util.urlerr
70 70 urlreq = util.urlreq
71 71
72 72 # set of (path, vfs-location) tuples. vfs-location is:
73 73 # - 'plain' for vfs relative paths
74 74 # - '' for svfs relative paths
75 75 _cachedfiles = set()
76 76
77 77 class _basefilecache(scmutil.filecache):
78 78 """All filecache usage on repo are done for logic that should be unfiltered
79 79 """
80 80 def __get__(self, repo, type=None):
81 81 if repo is None:
82 82 return self
83 83 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
84 84 def __set__(self, repo, value):
85 85 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
86 86 def __delete__(self, repo):
87 87 return super(_basefilecache, self).__delete__(repo.unfiltered())
88 88
89 89 class repofilecache(_basefilecache):
90 90 """filecache for files in .hg but outside of .hg/store"""
91 91 def __init__(self, *paths):
92 92 super(repofilecache, self).__init__(*paths)
93 93 for path in paths:
94 94 _cachedfiles.add((path, 'plain'))
95 95
96 96 def join(self, obj, fname):
97 97 return obj.vfs.join(fname)
98 98
99 99 class storecache(_basefilecache):
100 100 """filecache for files in the store"""
101 101 def __init__(self, *paths):
102 102 super(storecache, self).__init__(*paths)
103 103 for path in paths:
104 104 _cachedfiles.add((path, ''))
105 105
106 106 def join(self, obj, fname):
107 107 return obj.sjoin(fname)
108 108
109 109 def isfilecached(repo, name):
110 110 """check if a repo has already cached "name" filecache-ed property
111 111
112 112 This returns (cachedobj-or-None, iscached) tuple.
113 113 """
114 114 cacheentry = repo.unfiltered()._filecache.get(name, None)
115 115 if not cacheentry:
116 116 return None, False
117 117 return cacheentry.obj, True
118 118
119 119 class unfilteredpropertycache(util.propertycache):
120 120 """propertycache that apply to unfiltered repo only"""
121 121
122 122 def __get__(self, repo, type=None):
123 123 unfi = repo.unfiltered()
124 124 if unfi is repo:
125 125 return super(unfilteredpropertycache, self).__get__(unfi)
126 126 return getattr(unfi, self.name)
127 127
128 128 class filteredpropertycache(util.propertycache):
129 129 """propertycache that must take filtering in account"""
130 130
131 131 def cachevalue(self, obj, value):
132 132 object.__setattr__(obj, self.name, value)
133 133
134 134
135 135 def hasunfilteredcache(repo, name):
136 136 """check if a repo has an unfilteredpropertycache value for <name>"""
137 137 return name in vars(repo.unfiltered())
138 138
139 139 def unfilteredmethod(orig):
140 140 """decorate method that always need to be run on unfiltered version"""
141 141 def wrapper(repo, *args, **kwargs):
142 142 return orig(repo.unfiltered(), *args, **kwargs)
143 143 return wrapper
144 144
145 145 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
146 146 'unbundle'}
147 147 legacycaps = moderncaps.union({'changegroupsubset'})
148 148
149 149 class localpeer(repository.peer):
150 150 '''peer for a local repo; reflects only the most recent API'''
151 151
152 152 def __init__(self, repo, caps=None):
153 153 super(localpeer, self).__init__()
154 154
155 155 if caps is None:
156 156 caps = moderncaps.copy()
157 157 self._repo = repo.filtered('served')
158 158 self._ui = repo.ui
159 159 self._caps = repo._restrictcapabilities(caps)
160 160
161 161 # Begin of _basepeer interface.
162 162
163 163 @util.propertycache
164 164 def ui(self):
165 165 return self._ui
166 166
167 167 def url(self):
168 168 return self._repo.url()
169 169
170 170 def local(self):
171 171 return self._repo
172 172
173 173 def peer(self):
174 174 return self
175 175
176 176 def canpush(self):
177 177 return True
178 178
179 179 def close(self):
180 180 self._repo.close()
181 181
182 182 # End of _basepeer interface.
183 183
184 184 # Begin of _basewirecommands interface.
185 185
186 186 def branchmap(self):
187 187 return self._repo.branchmap()
188 188
189 189 def capabilities(self):
190 190 return self._caps
191 191
192 192 def debugwireargs(self, one, two, three=None, four=None, five=None):
193 193 """Used to test argument passing over the wire"""
194 194 return "%s %s %s %s %s" % (one, two, three, four, five)
195 195
196 196 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
197 197 **kwargs):
198 198 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
199 199 common=common, bundlecaps=bundlecaps,
200 200 **kwargs)
201 201 cb = util.chunkbuffer(chunks)
202 202
203 203 if exchange.bundle2requested(bundlecaps):
204 204 # When requesting a bundle2, getbundle returns a stream to make the
205 205 # wire level function happier. We need to build a proper object
206 206 # from it in local peer.
207 207 return bundle2.getunbundler(self.ui, cb)
208 208 else:
209 209 return changegroup.getunbundler('01', cb, None)
210 210
211 211 def heads(self):
212 212 return self._repo.heads()
213 213
214 214 def known(self, nodes):
215 215 return self._repo.known(nodes)
216 216
217 217 def listkeys(self, namespace):
218 218 return self._repo.listkeys(namespace)
219 219
220 220 def lookup(self, key):
221 221 return self._repo.lookup(key)
222 222
223 223 def pushkey(self, namespace, key, old, new):
224 224 return self._repo.pushkey(namespace, key, old, new)
225 225
226 226 def stream_out(self):
227 227 raise error.Abort(_('cannot perform stream clone against local '
228 228 'peer'))
229 229
230 230 def unbundle(self, cg, heads, url):
231 231 """apply a bundle on a repo
232 232
233 233 This function handles the repo locking itself."""
234 234 try:
235 235 try:
236 236 cg = exchange.readbundle(self.ui, cg, None)
237 237 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
238 238 if util.safehasattr(ret, 'getchunks'):
239 239 # This is a bundle20 object, turn it into an unbundler.
240 240 # This little dance should be dropped eventually when the
241 241 # API is finally improved.
242 242 stream = util.chunkbuffer(ret.getchunks())
243 243 ret = bundle2.getunbundler(self.ui, stream)
244 244 return ret
245 245 except Exception as exc:
246 246 # If the exception contains output salvaged from a bundle2
247 247 # reply, we need to make sure it is printed before continuing
248 248 # to fail. So we build a bundle2 with such output and consume
249 249 # it directly.
250 250 #
251 251 # This is not very elegant but allows a "simple" solution for
252 252 # issue4594
253 253 output = getattr(exc, '_bundle2salvagedoutput', ())
254 254 if output:
255 255 bundler = bundle2.bundle20(self._repo.ui)
256 256 for out in output:
257 257 bundler.addpart(out)
258 258 stream = util.chunkbuffer(bundler.getchunks())
259 259 b = bundle2.getunbundler(self.ui, stream)
260 260 bundle2.processbundle(self._repo, b)
261 261 raise
262 262 except error.PushRaced as exc:
263 263 raise error.ResponseError(_('push failed:'), str(exc))
264 264
265 265 # End of _basewirecommands interface.
266 266
267 267 # Begin of peer interface.
268 268
269 269 def iterbatch(self):
270 270 return peer.localiterbatcher(self)
271 271
272 272 # End of peer interface.
273 273
274 274 class locallegacypeer(repository.legacypeer, localpeer):
275 275 '''peer extension which implements legacy methods too; used for tests with
276 276 restricted capabilities'''
277 277
278 278 def __init__(self, repo):
279 279 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
280 280
281 281 # Begin of baselegacywirecommands interface.
282 282
283 283 def between(self, pairs):
284 284 return self._repo.between(pairs)
285 285
286 286 def branches(self, nodes):
287 287 return self._repo.branches(nodes)
288 288
289 289 def changegroup(self, basenodes, source):
290 290 outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
291 291 missingheads=self._repo.heads())
292 292 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
293 293
294 294 def changegroupsubset(self, bases, heads, source):
295 295 outgoing = discovery.outgoing(self._repo, missingroots=bases,
296 296 missingheads=heads)
297 297 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
298 298
299 299 # End of baselegacywirecommands interface.
300 300
301 301 # Increment the sub-version when the revlog v2 format changes to lock out old
302 302 # clients.
303 303 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
304 304
305 305 class localrepository(object):
306 306
307 307 supportedformats = {
308 308 'revlogv1',
309 309 'generaldelta',
310 310 'treemanifest',
311 311 'manifestv2',
312 312 REVLOGV2_REQUIREMENT,
313 313 }
314 314 _basesupported = supportedformats | {
315 315 'store',
316 316 'fncache',
317 317 'shared',
318 318 'relshared',
319 319 'dotencode',
320 320 'exp-sparse',
321 321 }
322 322 openerreqs = {
323 323 'revlogv1',
324 324 'generaldelta',
325 325 'treemanifest',
326 326 'manifestv2',
327 327 }
328 328
329 329 # a list of (ui, featureset) functions.
330 330 # only functions defined in modules of enabled extensions are invoked
331 331 featuresetupfuncs = set()
332 332
333 333 # list of prefixes for files which can be written without 'wlock'
334 334 # Extensions should extend this list when needed
335 335 _wlockfreeprefix = {
336 336 # We might consider requiring 'wlock' for the next
337 337 # two, but pretty much all the existing code assumes
338 338 # wlock is not needed so we keep them excluded for
339 339 # now.
340 340 'hgrc',
341 341 'requires',
342 342 # XXX cache is a complicated business; someone
343 343 # should investigate this in depth at some point
344 344 'cache/',
345 345 # XXX shouldn't dirstate be covered by the wlock?
346 346 'dirstate',
347 347 # XXX bisect was still a bit too messy at the time
348 348 # this changeset was introduced. Someone should fix
349 349 # the remaining bit and drop this line
350 350 'bisect.state',
351 351 }
352 352
353 353 def __init__(self, baseui, path, create=False):
354 354 self.requirements = set()
355 355 self.filtername = None
356 356 # wvfs: rooted at the repository root, used to access the working copy
357 357 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
358 358 # vfs: rooted at .hg, used to access repo files outside of .hg/store
359 359 self.vfs = None
360 360 # svfs: usually rooted at .hg/store, used to access repository history
361 361 # If this is a shared repository, this vfs may point to another
362 362 # repository's .hg/store directory.
363 363 self.svfs = None
364 364 self.root = self.wvfs.base
365 365 self.path = self.wvfs.join(".hg")
366 366 self.origroot = path
367 # These auditor are not used by the vfs,
368 # only used when writing this comment: basectx.match
367 # This is only used by context.workingctx.match in order to
368 # detect files in subrepos.
369 369 self.auditor = pathutil.pathauditor(
370 370 self.root, callback=self._checknested)
371 # This is only used by context.basectx.match in order to detect
372 # files in subrepos.
371 373 self.nofsauditor = pathutil.pathauditor(
372 374 self.root, callback=self._checknested, realfs=False, cached=True)
373 375 self.baseui = baseui
374 376 self.ui = baseui.copy()
375 377 self.ui.copy = baseui.copy # prevent copying repo configuration
376 378 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
377 379 if (self.ui.configbool('devel', 'all-warnings') or
378 380 self.ui.configbool('devel', 'check-locks')):
379 381 self.vfs.audit = self._getvfsward(self.vfs.audit)
380 382 # A list of callbacks to shape the phase if no data were found.
381 383 # Callbacks are in the form: func(repo, roots) --> processed root.
382 384 # This list is to be filled by extensions during repo setup.
383 385 self._phasedefaults = []
384 386 try:
385 387 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
386 388 self._loadextensions()
387 389 except IOError:
388 390 pass
389 391
390 392 if self.featuresetupfuncs:
391 393 self.supported = set(self._basesupported) # use private copy
392 394 extmods = set(m.__name__ for n, m
393 395 in extensions.extensions(self.ui))
394 396 for setupfunc in self.featuresetupfuncs:
395 397 if setupfunc.__module__ in extmods:
396 398 setupfunc(self.ui, self.supported)
397 399 else:
398 400 self.supported = self._basesupported
399 401 color.setup(self.ui)
400 402
401 403 # Add compression engines.
402 404 for name in util.compengines:
403 405 engine = util.compengines[name]
404 406 if engine.revlogheader():
405 407 self.supported.add('exp-compression-%s' % name)
406 408
407 409 if not self.vfs.isdir():
408 410 if create:
409 411 self.requirements = newreporequirements(self)
410 412
411 413 if not self.wvfs.exists():
412 414 self.wvfs.makedirs()
413 415 self.vfs.makedir(notindexed=True)
414 416
415 417 if 'store' in self.requirements:
416 418 self.vfs.mkdir("store")
417 419
418 420 # create an invalid changelog
419 421 self.vfs.append(
420 422 "00changelog.i",
421 423 '\0\0\0\2' # represents revlogv2
422 424 ' dummy changelog to prevent using the old repo layout'
423 425 )
424 426 else:
425 427 raise error.RepoError(_("repository %s not found") % path)
426 428 elif create:
427 429 raise error.RepoError(_("repository %s already exists") % path)
428 430 else:
429 431 try:
430 432 self.requirements = scmutil.readrequires(
431 433 self.vfs, self.supported)
432 434 except IOError as inst:
433 435 if inst.errno != errno.ENOENT:
434 436 raise
435 437
436 438 cachepath = self.vfs.join('cache')
437 439 self.sharedpath = self.path
438 440 try:
439 441 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
440 442 if 'relshared' in self.requirements:
441 443 sharedpath = self.vfs.join(sharedpath)
442 444 vfs = vfsmod.vfs(sharedpath, realpath=True)
443 445 cachepath = vfs.join('cache')
444 446 s = vfs.base
445 447 if not vfs.exists():
446 448 raise error.RepoError(
447 449 _('.hg/sharedpath points to nonexistent directory %s') % s)
448 450 self.sharedpath = s
449 451 except IOError as inst:
450 452 if inst.errno != errno.ENOENT:
451 453 raise
452 454
453 455 if 'exp-sparse' in self.requirements and not sparse.enabled:
454 456 raise error.RepoError(_('repository is using sparse feature but '
455 457 'sparse is not enabled; enable the '
456 458 '"sparse" extensions to access'))
457 459
458 460 self.store = store.store(
459 461 self.requirements, self.sharedpath,
460 462 lambda base: vfsmod.vfs(base, cacheaudited=True))
461 463 self.spath = self.store.path
462 464 self.svfs = self.store.vfs
463 465 self.sjoin = self.store.join
464 466 self.vfs.createmode = self.store.createmode
465 467 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
466 468 self.cachevfs.createmode = self.store.createmode
467 469 if (self.ui.configbool('devel', 'all-warnings') or
468 470 self.ui.configbool('devel', 'check-locks')):
469 471 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
470 472 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
471 473 else: # standard vfs
472 474 self.svfs.audit = self._getsvfsward(self.svfs.audit)
473 475 self._applyopenerreqs()
474 476 if create:
475 477 self._writerequirements()
476 478
477 479 self._dirstatevalidatewarned = False
478 480
479 481 self._branchcaches = {}
480 482 self._revbranchcache = None
481 483 self.filterpats = {}
482 484 self._datafilters = {}
483 485 self._transref = self._lockref = self._wlockref = None
484 486
485 487 # A cache for various files under .hg/ that tracks file changes,
486 488 # (used by the filecache decorator)
487 489 #
488 490 # Maps a property name to its util.filecacheentry
489 491 self._filecache = {}
490 492
491 493 # hold sets of revisions to be filtered
492 494 # should be cleared when something might have changed the filter value:
493 495 # - new changesets,
494 496 # - phase change,
495 497 # - new obsolescence marker,
496 498 # - working directory parent change,
497 499 # - bookmark changes
498 500 self.filteredrevcache = {}
499 501
500 502 # post-dirstate-status hooks
501 503 self._postdsstatus = []
502 504
503 505 # Cache of types representing filtered repos.
504 506 self._filteredrepotypes = weakref.WeakKeyDictionary()
505 507
506 508 # generic mapping between names and nodes
507 509 self.names = namespaces.namespaces()
508 510
509 511 # Key to signature value.
510 512 self._sparsesignaturecache = {}
511 513 # Signature to cached matcher instance.
512 514 self._sparsematchercache = {}
513 515
514 516 def _getvfsward(self, origfunc):
515 517 """build a ward for self.vfs"""
516 518 rref = weakref.ref(self)
517 519 def checkvfs(path, mode=None):
518 520 ret = origfunc(path, mode=mode)
519 521 repo = rref()
520 522 if (repo is None
521 523 or not util.safehasattr(repo, '_wlockref')
522 524 or not util.safehasattr(repo, '_lockref')):
523 525 return
524 526 if mode in (None, 'r', 'rb'):
525 527 return
526 528 if path.startswith(repo.path):
527 529 # truncate name relative to the repository (.hg)
528 530 path = path[len(repo.path) + 1:]
529 531 if path.startswith('cache/'):
530 532 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
531 533 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
532 534 if path.startswith('journal.'):
533 535 # journal is covered by 'lock'
534 536 if repo._currentlock(repo._lockref) is None:
535 537 repo.ui.develwarn('write with no lock: "%s"' % path,
536 538 stacklevel=2, config='check-locks')
537 539 elif repo._currentlock(repo._wlockref) is None:
538 540 # rest of vfs files are covered by 'wlock'
539 541 #
540 542 # exclude special files
541 543 for prefix in self._wlockfreeprefix:
542 544 if path.startswith(prefix):
543 545 return
544 546 repo.ui.develwarn('write with no wlock: "%s"' % path,
545 547 stacklevel=2, config='check-locks')
546 548 return ret
547 549 return checkvfs
548 550
549 551 def _getsvfsward(self, origfunc):
550 552 """build a ward for self.svfs"""
551 553 rref = weakref.ref(self)
552 554 def checksvfs(path, mode=None):
553 555 ret = origfunc(path, mode=mode)
554 556 repo = rref()
555 557 if repo is None or not util.safehasattr(repo, '_lockref'):
556 558 return
557 559 if mode in (None, 'r', 'rb'):
558 560 return
559 561 if path.startswith(repo.sharedpath):
560 562 # truncate name relative to the repository (.hg)
561 563 path = path[len(repo.sharedpath) + 1:]
562 564 if repo._currentlock(repo._lockref) is None:
563 565 repo.ui.develwarn('write with no lock: "%s"' % path,
564 566 stacklevel=3)
565 567 return ret
566 568 return checksvfs
567 569
568 570 def close(self):
569 571 self._writecaches()
570 572
571 573 def _loadextensions(self):
572 574 extensions.loadall(self.ui)
573 575
574 576 def _writecaches(self):
575 577 if self._revbranchcache:
576 578 self._revbranchcache.write()
577 579
578 580 def _restrictcapabilities(self, caps):
579 581 if self.ui.configbool('experimental', 'bundle2-advertise'):
580 582 caps = set(caps)
581 583 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
582 584 caps.add('bundle2=' + urlreq.quote(capsblob))
583 585 return caps
584 586
585 587 def _applyopenerreqs(self):
586 588 self.svfs.options = dict((r, 1) for r in self.requirements
587 589 if r in self.openerreqs)
588 590 # experimental config: format.chunkcachesize
589 591 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
590 592 if chunkcachesize is not None:
591 593 self.svfs.options['chunkcachesize'] = chunkcachesize
592 594 # experimental config: format.maxchainlen
593 595 maxchainlen = self.ui.configint('format', 'maxchainlen')
594 596 if maxchainlen is not None:
595 597 self.svfs.options['maxchainlen'] = maxchainlen
596 598 # experimental config: format.manifestcachesize
597 599 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
598 600 if manifestcachesize is not None:
599 601 self.svfs.options['manifestcachesize'] = manifestcachesize
600 602 # experimental config: format.aggressivemergedeltas
601 603 aggressivemergedeltas = self.ui.configbool('format',
602 604 'aggressivemergedeltas')
603 605 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
604 606 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
605 607 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
606 608 if 0 <= chainspan:
607 609 self.svfs.options['maxdeltachainspan'] = chainspan
608 610 mmapindexthreshold = self.ui.configbytes('experimental',
609 611 'mmapindexthreshold')
610 612 if mmapindexthreshold is not None:
611 613 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
612 614 withsparseread = self.ui.configbool('experimental', 'sparse-read')
613 615 srdensitythres = float(self.ui.config('experimental',
614 616 'sparse-read.density-threshold'))
615 617 srmingapsize = self.ui.configbytes('experimental',
616 618 'sparse-read.min-gap-size')
617 619 self.svfs.options['with-sparse-read'] = withsparseread
618 620 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
619 621 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
620 622
621 623 for r in self.requirements:
622 624 if r.startswith('exp-compression-'):
623 625 self.svfs.options['compengine'] = r[len('exp-compression-'):]
624 626
625 627 # TODO move "revlogv2" to openerreqs once finalized.
626 628 if REVLOGV2_REQUIREMENT in self.requirements:
627 629 self.svfs.options['revlogv2'] = True
628 630
629 631 def _writerequirements(self):
630 632 scmutil.writerequires(self.vfs, self.requirements)
631 633
632 634 def _checknested(self, path):
633 635 """Determine if path is a legal nested repository."""
634 636 if not path.startswith(self.root):
635 637 return False
636 638 subpath = path[len(self.root) + 1:]
637 639 normsubpath = util.pconvert(subpath)
638 640
639 641 # XXX: Checking against the current working copy is wrong in
640 642 # the sense that it can reject things like
641 643 #
642 644 # $ hg cat -r 10 sub/x.txt
643 645 #
644 646 # if sub/ is no longer a subrepository in the working copy
645 647 # parent revision.
646 648 #
647 649 # However, it can of course also allow things that would have
648 650 # been rejected before, such as the above cat command if sub/
649 651 # is a subrepository now, but was a normal directory before.
650 652 # The old path auditor would have rejected by mistake since it
651 653 # panics when it sees sub/.hg/.
652 654 #
653 655 # All in all, checking against the working copy seems sensible
654 656 # since we want to prevent access to nested repositories on
655 657 # the filesystem *now*.
656 658 ctx = self[None]
657 659 parts = util.splitpath(subpath)
658 660 while parts:
659 661 prefix = '/'.join(parts)
660 662 if prefix in ctx.substate:
661 663 if prefix == normsubpath:
662 664 return True
663 665 else:
664 666 sub = ctx.sub(prefix)
665 667 return sub.checknested(subpath[len(prefix) + 1:])
666 668 else:
667 669 parts.pop()
668 670 return False
669 671
670 672 def peer(self):
671 673 return localpeer(self) # not cached to avoid reference cycle
672 674
673 675 def unfiltered(self):
674 676 """Return unfiltered version of the repository
675 677
676 678 Intended to be overridden by filtered repo."""
677 679 return self
678 680
679 681 def filtered(self, name):
680 682 """Return a filtered version of a repository"""
681 683 # Python <3.4 easily leaks types via __mro__. See
682 684 # https://bugs.python.org/issue17950. We cache dynamically
683 685 # created types so this method doesn't leak on every
684 686 # invocation.
685 687
686 688 key = self.unfiltered().__class__
687 689 if key not in self._filteredrepotypes:
688 690 # Build a new type with the repoview mixin and the base
689 691 # class of this repo. Give it a name containing the
690 692 # filter name to aid debugging.
691 693 bases = (repoview.repoview, key)
692 694 cls = type(r'%sfilteredrepo' % name, bases, {})
693 695 self._filteredrepotypes[key] = cls
694 696
695 697 return self._filteredrepotypes[key](self, name)
696 698
697 699 @repofilecache('bookmarks', 'bookmarks.current')
698 700 def _bookmarks(self):
699 701 return bookmarks.bmstore(self)
700 702
701 703 @property
702 704 def _activebookmark(self):
703 705 return self._bookmarks.active
704 706
705 707 # _phaserevs and _phasesets depend on changelog. What we need is to
706 708 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
707 709 # can't be easily expressed in the filecache mechanism.
708 710 @storecache('phaseroots', '00changelog.i')
709 711 def _phasecache(self):
710 712 return phases.phasecache(self, self._phasedefaults)
711 713
712 714 @storecache('obsstore')
713 715 def obsstore(self):
714 716 return obsolete.makestore(self.ui, self)
715 717
716 718 @storecache('00changelog.i')
717 719 def changelog(self):
718 720 return changelog.changelog(self.svfs,
719 721 trypending=txnutil.mayhavepending(self.root))
720 722
721 723 def _constructmanifest(self):
722 724 # This is a temporary function while we migrate from manifest to
723 725 # manifestlog. It allows bundlerepo and unionrepo to intercept the
724 726 # manifest creation.
725 727 return manifest.manifestrevlog(self.svfs)
726 728
727 729 @storecache('00manifest.i')
728 730 def manifestlog(self):
729 731 return manifest.manifestlog(self.svfs, self)
730 732
731 733 @repofilecache('dirstate')
732 734 def dirstate(self):
733 735 sparsematchfn = lambda: sparse.matcher(self)
734 736
735 737 return dirstate.dirstate(self.vfs, self.ui, self.root,
736 738 self._dirstatevalidate, sparsematchfn)
737 739
738 740 def _dirstatevalidate(self, node):
739 741 try:
740 742 self.changelog.rev(node)
741 743 return node
742 744 except error.LookupError:
743 745 if not self._dirstatevalidatewarned:
744 746 self._dirstatevalidatewarned = True
745 747 self.ui.warn(_("warning: ignoring unknown"
746 748 " working parent %s!\n") % short(node))
747 749 return nullid
748 750
749 751 def __getitem__(self, changeid):
750 752 if changeid is None:
751 753 return context.workingctx(self)
752 754 if isinstance(changeid, slice):
753 755 # wdirrev isn't contiguous so the slice shouldn't include it
754 756 return [context.changectx(self, i)
755 757 for i in xrange(*changeid.indices(len(self)))
756 758 if i not in self.changelog.filteredrevs]
757 759 try:
758 760 return context.changectx(self, changeid)
759 761 except error.WdirUnsupported:
760 762 return context.workingctx(self)
761 763
762 764 def __contains__(self, changeid):
763 765 """True if the given changeid exists
764 766
765 767 error.LookupError is raised if an ambiguous node is specified.
766 768 """
767 769 try:
768 770 self[changeid]
769 771 return True
770 772 except error.RepoLookupError:
771 773 return False
772 774
773 775 def __nonzero__(self):
774 776 return True
775 777
776 778 __bool__ = __nonzero__
777 779
778 780 def __len__(self):
779 781 return len(self.changelog)
780 782
781 783 def __iter__(self):
782 784 return iter(self.changelog)
783 785
784 786 def revs(self, expr, *args):
785 787 '''Find revisions matching a revset.
786 788
787 789 The revset is specified as a string ``expr`` that may contain
788 790 %-formatting to escape certain types. See ``revsetlang.formatspec``.
789 791
790 792 Revset aliases from the configuration are not expanded. To expand
791 793 user aliases, consider calling ``scmutil.revrange()`` or
792 794 ``repo.anyrevs([expr], user=True)``.
793 795
794 796 Returns a revset.abstractsmartset, which is a list-like interface
795 797 that contains integer revisions.
796 798 '''
797 799 expr = revsetlang.formatspec(expr, *args)
798 800 m = revset.match(None, expr)
799 801 return m(self)
800 802
801 803 def set(self, expr, *args):
802 804 '''Find revisions matching a revset and emit changectx instances.
803 805
804 806 This is a convenience wrapper around ``revs()`` that iterates the
805 807 result and is a generator of changectx instances.
806 808
807 809 Revset aliases from the configuration are not expanded. To expand
808 810 user aliases, consider calling ``scmutil.revrange()``.
809 811 '''
810 812 for r in self.revs(expr, *args):
811 813 yield self[r]
812 814
813 815 def anyrevs(self, specs, user=False, localalias=None):
814 816 '''Find revisions matching one of the given revsets.
815 817
816 818 Revset aliases from the configuration are not expanded by default. To
817 819 expand user aliases, specify ``user=True``. To provide some local
818 820 definitions overriding user aliases, set ``localalias`` to
819 821 ``{name: definitionstring}``.
820 822 '''
821 823 if user:
822 824 m = revset.matchany(self.ui, specs, repo=self,
823 825 localalias=localalias)
824 826 else:
825 827 m = revset.matchany(None, specs, localalias=localalias)
826 828 return m(self)
827 829
828 830 def url(self):
829 831 return 'file:' + self.root
830 832
831 833 def hook(self, name, throw=False, **args):
832 834 """Call a hook, passing this repo instance.
833 835
834 836 This a convenience method to aid invoking hooks. Extensions likely
835 837 won't call this unless they have registered a custom hook or are
836 838 replacing code that is expected to call a hook.
837 839 """
838 840 return hook.hook(self.ui, self, name, throw, **args)
839 841
840 842 @filteredpropertycache
841 843 def _tagscache(self):
842 844 '''Returns a tagscache object that contains various tags related
843 845 caches.'''
844 846
845 847 # This simplifies its cache management by having one decorated
846 848 # function (this one) and the rest simply fetch things from it.
847 849 class tagscache(object):
848 850 def __init__(self):
849 851 # These two define the set of tags for this repository. tags
850 852 # maps tag name to node; tagtypes maps tag name to 'global' or
851 853 # 'local'. (Global tags are defined by .hgtags across all
852 854 # heads, and local tags are defined in .hg/localtags.)
853 855 # They constitute the in-memory cache of tags.
854 856 self.tags = self.tagtypes = None
855 857
856 858 self.nodetagscache = self.tagslist = None
857 859
858 860 cache = tagscache()
859 861 cache.tags, cache.tagtypes = self._findtags()
860 862
861 863 return cache
862 864
863 865 def tags(self):
864 866 '''return a mapping of tag to node'''
865 867 t = {}
866 868 if self.changelog.filteredrevs:
867 869 tags, tt = self._findtags()
868 870 else:
869 871 tags = self._tagscache.tags
870 872 for k, v in tags.iteritems():
871 873 try:
872 874 # ignore tags to unknown nodes
873 875 self.changelog.rev(v)
874 876 t[k] = v
875 877 except (error.LookupError, ValueError):
876 878 pass
877 879 return t
878 880
879 881 def _findtags(self):
880 882 '''Do the hard work of finding tags. Return a pair of dicts
881 883 (tags, tagtypes) where tags maps tag name to node, and tagtypes
882 884 maps tag name to a string like \'global\' or \'local\'.
883 885 Subclasses or extensions are free to add their own tags, but
884 886 should be aware that the returned dicts will be retained for the
885 887 duration of the localrepo object.'''
886 888
887 889 # XXX what tagtype should subclasses/extensions use? Currently
888 890 # mq and bookmarks add tags, but do not set the tagtype at all.
889 891 # Should each extension invent its own tag type? Should there
890 892 # be one tagtype for all such "virtual" tags? Or is the status
891 893 # quo fine?
892 894
893 895
894 896 # map tag name to (node, hist)
895 897 alltags = tagsmod.findglobaltags(self.ui, self)
896 898 # map tag name to tag type
897 899 tagtypes = dict((tag, 'global') for tag in alltags)
898 900
899 901 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
900 902
901 903 # Build the return dicts. Have to re-encode tag names because
902 904 # the tags module always uses UTF-8 (in order not to lose info
903 905 # writing to the cache), but the rest of Mercurial wants them in
904 906 # local encoding.
905 907 tags = {}
906 908 for (name, (node, hist)) in alltags.iteritems():
907 909 if node != nullid:
908 910 tags[encoding.tolocal(name)] = node
909 911 tags['tip'] = self.changelog.tip()
910 912 tagtypes = dict([(encoding.tolocal(name), value)
911 913 for (name, value) in tagtypes.iteritems()])
912 914 return (tags, tagtypes)
913 915
914 916 def tagtype(self, tagname):
915 917 '''
916 918 return the type of the given tag. result can be:
917 919
918 920 'local' : a local tag
919 921 'global' : a global tag
920 922 None : tag does not exist
921 923 '''
922 924
923 925 return self._tagscache.tagtypes.get(tagname)
924 926
925 927 def tagslist(self):
926 928 '''return a list of tags ordered by revision'''
927 929 if not self._tagscache.tagslist:
928 930 l = []
929 931 for t, n in self.tags().iteritems():
930 932 l.append((self.changelog.rev(n), t, n))
931 933 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
932 934
933 935 return self._tagscache.tagslist
934 936
935 937 def nodetags(self, node):
936 938 '''return the tags associated with a node'''
937 939 if not self._tagscache.nodetagscache:
938 940 nodetagscache = {}
939 941 for t, n in self._tagscache.tags.iteritems():
940 942 nodetagscache.setdefault(n, []).append(t)
941 943 for tags in nodetagscache.itervalues():
942 944 tags.sort()
943 945 self._tagscache.nodetagscache = nodetagscache
944 946 return self._tagscache.nodetagscache.get(node, [])
945 947
946 948 def nodebookmarks(self, node):
947 949 """return the list of bookmarks pointing to the specified node"""
948 950 marks = []
949 951 for bookmark, n in self._bookmarks.iteritems():
950 952 if n == node:
951 953 marks.append(bookmark)
952 954 return sorted(marks)
953 955
954 956 def branchmap(self):
955 957 '''returns a dictionary {branch: [branchheads]} with branchheads
956 958 ordered by increasing revision number'''
957 959 branchmap.updatecache(self)
958 960 return self._branchcaches[self.filtername]
959 961
960 962 @unfilteredmethod
961 963 def revbranchcache(self):
962 964 if not self._revbranchcache:
963 965 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
964 966 return self._revbranchcache
965 967
966 968 def branchtip(self, branch, ignoremissing=False):
967 969 '''return the tip node for a given branch
968 970
969 971 If ignoremissing is True, then this method will not raise an error.
970 972 This is helpful for callers that only expect None for a missing branch
971 973 (e.g. namespace).
972 974
973 975 '''
974 976 try:
975 977 return self.branchmap().branchtip(branch)
976 978 except KeyError:
977 979 if not ignoremissing:
978 980 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
979 981 else:
980 982 pass
981 983
982 984 def lookup(self, key):
983 985 return self[key].node()
984 986
985 987 def lookupbranch(self, key, remote=None):
986 988 repo = remote or self
987 989 if key in repo.branchmap():
988 990 return key
989 991
990 992 repo = (remote and remote.local()) and remote or self
991 993 return repo[key].branch()
992 994
993 995 def known(self, nodes):
994 996 cl = self.changelog
995 997 nm = cl.nodemap
996 998 filtered = cl.filteredrevs
997 999 result = []
998 1000 for n in nodes:
999 1001 r = nm.get(n)
1000 1002 resp = not (r is None or r in filtered)
1001 1003 result.append(resp)
1002 1004 return result
1003 1005
1004 1006 def local(self):
1005 1007 return self
1006 1008
1007 1009 def publishing(self):
1008 1010 # it's safe (and desirable) to trust the publish flag unconditionally
1009 1011 # so that we don't finalize changes shared between users via ssh or nfs
1010 1012 return self.ui.configbool('phases', 'publish', untrusted=True)
1011 1013
1012 1014 def cancopy(self):
1013 1015 # so statichttprepo's override of local() works
1014 1016 if not self.local():
1015 1017 return False
1016 1018 if not self.publishing():
1017 1019 return True
1018 1020 # if publishing we can't copy if there is filtered content
1019 1021 return not self.filtered('visible').changelog.filteredrevs
1020 1022
1021 1023 def shared(self):
1022 1024 '''the type of shared repository (None if not shared)'''
1023 1025 if self.sharedpath != self.path:
1024 1026 return 'store'
1025 1027 return None
1026 1028
1027 1029 def wjoin(self, f, *insidef):
1028 1030 return self.vfs.reljoin(self.root, f, *insidef)
1029 1031
1030 1032 def file(self, f):
1031 1033 if f[0] == '/':
1032 1034 f = f[1:]
1033 1035 return filelog.filelog(self.svfs, f)
1034 1036
1035 1037 def changectx(self, changeid):
1036 1038 return self[changeid]
1037 1039
1038 1040 def setparents(self, p1, p2=nullid):
1039 1041 with self.dirstate.parentchange():
1040 1042 copies = self.dirstate.setparents(p1, p2)
1041 1043 pctx = self[p1]
1042 1044 if copies:
1043 1045 # Adjust copy records; the dirstate cannot do it, as it
1044 1046 # requires access to the parents' manifests. Preserve them
1045 1047 # only for entries added to the first parent.
1046 1048 for f in copies:
1047 1049 if f not in pctx and copies[f] in pctx:
1048 1050 self.dirstate.copy(copies[f], f)
1049 1051 if p2 == nullid:
1050 1052 for f, s in sorted(self.dirstate.copies().items()):
1051 1053 if f not in pctx and s not in pctx:
1052 1054 self.dirstate.copy(None, f)
1053 1055
1054 1056 def filectx(self, path, changeid=None, fileid=None):
1055 1057 """changeid can be a changeset revision, node, or tag.
1056 1058 fileid can be a file revision or node."""
1057 1059 return context.filectx(self, path, changeid, fileid)
1058 1060
1059 1061 def getcwd(self):
1060 1062 return self.dirstate.getcwd()
1061 1063
1062 1064 def pathto(self, f, cwd=None):
1063 1065 return self.dirstate.pathto(f, cwd)
1064 1066
1065 1067 def _loadfilter(self, filter):
1066 1068 if filter not in self.filterpats:
1067 1069 l = []
1068 1070 for pat, cmd in self.ui.configitems(filter):
1069 1071 if cmd == '!':
1070 1072 continue
1071 1073 mf = matchmod.match(self.root, '', [pat])
1072 1074 fn = None
1073 1075 params = cmd
1074 1076 for name, filterfn in self._datafilters.iteritems():
1075 1077 if cmd.startswith(name):
1076 1078 fn = filterfn
1077 1079 params = cmd[len(name):].lstrip()
1078 1080 break
1079 1081 if not fn:
1080 1082 fn = lambda s, c, **kwargs: util.filter(s, c)
1081 1083 # Wrap old filters not supporting keyword arguments
1082 1084 if not inspect.getargspec(fn)[2]:
1083 1085 oldfn = fn
1084 1086 fn = lambda s, c, **kwargs: oldfn(s, c)
1085 1087 l.append((mf, fn, params))
1086 1088 self.filterpats[filter] = l
1087 1089 return self.filterpats[filter]
1088 1090
1089 1091 def _filter(self, filterpats, filename, data):
1090 1092 for mf, fn, cmd in filterpats:
1091 1093 if mf(filename):
1092 1094 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1093 1095 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1094 1096 break
1095 1097
1096 1098 return data
1097 1099
1098 1100 @unfilteredpropertycache
1099 1101 def _encodefilterpats(self):
1100 1102 return self._loadfilter('encode')
1101 1103
1102 1104 @unfilteredpropertycache
1103 1105 def _decodefilterpats(self):
1104 1106 return self._loadfilter('decode')
1105 1107
1106 1108 def adddatafilter(self, name, filter):
1107 1109 self._datafilters[name] = filter
1108 1110
1109 1111 def wread(self, filename):
1110 1112 if self.wvfs.islink(filename):
1111 1113 data = self.wvfs.readlink(filename)
1112 1114 else:
1113 1115 data = self.wvfs.read(filename)
1114 1116 return self._filter(self._encodefilterpats, filename, data)
1115 1117
1116 1118 def wwrite(self, filename, data, flags, backgroundclose=False):
1117 1119 """write ``data`` into ``filename`` in the working directory
1118 1120
1119 1121 This returns length of written (maybe decoded) data.
1120 1122 """
1121 1123 data = self._filter(self._decodefilterpats, filename, data)
1122 1124 if 'l' in flags:
1123 1125 self.wvfs.symlink(data, filename)
1124 1126 else:
1125 1127 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
1126 1128 if 'x' in flags:
1127 1129 self.wvfs.setflags(filename, False, True)
1128 1130 return len(data)
1129 1131
1130 1132 def wwritedata(self, filename, data):
1131 1133 return self._filter(self._decodefilterpats, filename, data)
1132 1134
1133 1135 def currenttransaction(self):
1134 1136 """return the current transaction or None if non exists"""
1135 1137 if self._transref:
1136 1138 tr = self._transref()
1137 1139 else:
1138 1140 tr = None
1139 1141
1140 1142 if tr and tr.running():
1141 1143 return tr
1142 1144 return None
1143 1145
1144 1146 def transaction(self, desc, report=None):
1145 1147 if (self.ui.configbool('devel', 'all-warnings')
1146 1148 or self.ui.configbool('devel', 'check-locks')):
1147 1149 if self._currentlock(self._lockref) is None:
1148 1150 raise error.ProgrammingError('transaction requires locking')
1149 1151 tr = self.currenttransaction()
1150 1152 if tr is not None:
1151 1153 scmutil.registersummarycallback(self, tr, desc)
1152 1154 return tr.nest()
1153 1155
1154 1156 # abort here if the journal already exists
1155 1157 if self.svfs.exists("journal"):
1156 1158 raise error.RepoError(
1157 1159 _("abandoned transaction found"),
1158 1160 hint=_("run 'hg recover' to clean up transaction"))
1159 1161
1160 1162 idbase = "%.40f#%f" % (random.random(), time.time())
1161 1163 ha = hex(hashlib.sha1(idbase).digest())
1162 1164 txnid = 'TXN:' + ha
1163 1165 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1164 1166
1165 1167 self._writejournal(desc)
1166 1168 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1167 1169 if report:
1168 1170 rp = report
1169 1171 else:
1170 1172 rp = self.ui.warn
1171 1173 vfsmap = {'plain': self.vfs} # root of .hg/
1172 1174 # we must avoid cyclic reference between repo and transaction.
1173 1175 reporef = weakref.ref(self)
1174 1176 # Code to track tag movement
1175 1177 #
1176 1178 # Since tags are all handled as file content, it is actually quite hard
1177 1179 # to track these movements from a code perspective. So we fall back to
1178 1180 # tracking at the repository level. One could envision tracking changes
1179 1181 # to the '.hgtags' file through changegroup application, but that fails
1180 1182 # to cope with cases where a transaction exposes new heads without a
1181 1183 # changegroup being involved (eg: phase movement).
1182 1184 #
1183 1185 # For now, we gate the feature behind a flag since this likely comes
1184 1186 # with performance impacts. The current code runs more often than needed
1185 1187 # and does not use caches as much as it could. The current focus is on
1186 1188 # the behavior of the feature so we disable it by default. The flag
1187 1189 # will be removed when we are happy with the performance impact.
1188 1190 #
1189 1191 # Once this feature is no longer experimental move the following
1190 1192 # documentation to the appropriate help section:
1191 1193 #
1192 1194 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1193 1195 # tags (new or changed or deleted tags). In addition the details of
1194 1196 # these changes are made available in a file at:
1195 1197 # ``REPOROOT/.hg/changes/tags.changes``.
1196 1198 # Make sure you check for HG_TAG_MOVED before reading that file as it
1197 1199 # might exist from a previous transaction even if no tags were touched
1198 1200 # in this one. Changes are recorded in a line-based format::
1199 1201 #
1200 1202 # <action> <hex-node> <tag-name>\n
1201 1203 #
1202 1204 # Actions are defined as follows:
1203 1205 # "-R": tag is removed,
1204 1206 # "+A": tag is added,
1205 1207 # "-M": tag is moved (old value),
1206 1208 # "+M": tag is moved (new value),
1207 1209 tracktags = lambda x: None
1208 1210 # experimental config: experimental.hook-track-tags
1209 1211 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1210 1212 if desc != 'strip' and shouldtracktags:
1211 1213 oldheads = self.changelog.headrevs()
1212 1214 def tracktags(tr2):
1213 1215 repo = reporef()
1214 1216 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1215 1217 newheads = repo.changelog.headrevs()
1216 1218 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1217 1219 # note: we compare lists here.
1218 1220 # As we do it only once, building a set would not be cheaper.
1219 1221 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1220 1222 if changes:
1221 1223 tr2.hookargs['tag_moved'] = '1'
1222 1224 with repo.vfs('changes/tags.changes', 'w',
1223 1225 atomictemp=True) as changesfile:
1224 1226 # note: we do not register the file with the transaction
1225 1227 # because we need it to still exist when the transaction
1226 1228 # is closed (for txnclose hooks)
1227 1229 tagsmod.writediff(changesfile, changes)
1228 1230 def validate(tr2):
1229 1231 """will run pre-closing hooks"""
1230 1232 # XXX the transaction API is a bit lacking here so we take a hacky
1231 1233 # path for now
1232 1234 #
1233 1235 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1234 1236 # dict is copied before these run. In addition we need the data
1235 1237 # available to in-memory hooks too.
1236 1238 #
1237 1239 # Moreover, we also need to make sure this runs before txnclose
1238 1240 # hooks and there is no "pending" mechanism that would execute
1239 1241 # logic only if hooks are about to run.
1240 1242 #
1241 1243 # Fixing this limitation of the transaction is also needed to track
1242 1244 # other families of changes (bookmarks, phases, obsolescence).
1243 1245 #
1244 1246 # This will have to be fixed before we remove the experimental
1245 1247 # gating.
1246 1248 tracktags(tr2)
1247 1249 repo = reporef()
1248 1250 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1249 1251 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1250 1252 args = tr.hookargs.copy()
1251 1253 args.update(bookmarks.preparehookargs(name, old, new))
1252 1254 repo.hook('pretxnclose-bookmark', throw=True,
1253 1255 txnname=desc,
1254 1256 **pycompat.strkwargs(args))
1255 1257 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1256 1258 cl = repo.unfiltered().changelog
1257 1259 for rev, (old, new) in tr.changes['phases'].items():
1258 1260 args = tr.hookargs.copy()
1259 1261 node = hex(cl.node(rev))
1260 1262 args.update(phases.preparehookargs(node, old, new))
1261 1263 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1262 1264 **pycompat.strkwargs(args))
1263 1265
1264 1266 repo.hook('pretxnclose', throw=True,
1265 1267 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1266 1268 def releasefn(tr, success):
1267 1269 repo = reporef()
1268 1270 if success:
1269 1271 # this should be explicitly invoked here, because
1270 1272 # in-memory changes aren't written out at closing
1271 1273 # transaction, if tr.addfilegenerator (via
1272 1274 # dirstate.write or so) isn't invoked while
1273 1275 # transaction running
1274 1276 repo.dirstate.write(None)
1275 1277 else:
1276 1278 # discard all changes (including ones already written
1277 1279 # out) in this transaction
1278 1280 repo.dirstate.restorebackup(None, 'journal.dirstate')
1279 1281
1280 1282 repo.invalidate(clearfilecache=True)
1281 1283
1282 1284 tr = transaction.transaction(rp, self.svfs, vfsmap,
1283 1285 "journal",
1284 1286 "undo",
1285 1287 aftertrans(renames),
1286 1288 self.store.createmode,
1287 1289 validator=validate,
1288 1290 releasefn=releasefn,
1289 1291 checkambigfiles=_cachedfiles)
1290 1292 tr.changes['revs'] = set()
1291 1293 tr.changes['obsmarkers'] = set()
1292 1294 tr.changes['phases'] = {}
1293 1295 tr.changes['bookmarks'] = {}
1294 1296
1295 1297 tr.hookargs['txnid'] = txnid
1296 1298 # note: writing the fncache only during finalize means that the file is
1297 1299 # outdated when running hooks. As fncache is used for streaming clone,
1298 1300 # this is not expected to break anything that happens during the hooks.
1299 1301 tr.addfinalize('flush-fncache', self.store.write)
1300 1302 def txnclosehook(tr2):
1301 1303 """To be run if transaction is successful, will schedule a hook run
1302 1304 """
1303 1305 # Don't reference tr2 in hook() so we don't hold a reference.
1304 1306 # This reduces memory consumption when there are multiple
1305 1307 # transactions per lock. This can likely go away if issue5045
1306 1308 # fixes the function accumulation.
1307 1309 hookargs = tr2.hookargs
1308 1310
1309 1311 def hookfunc():
1310 1312 repo = reporef()
1311 1313 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1312 1314 bmchanges = sorted(tr.changes['bookmarks'].items())
1313 1315 for name, (old, new) in bmchanges:
1314 1316 args = tr.hookargs.copy()
1315 1317 args.update(bookmarks.preparehookargs(name, old, new))
1316 1318 repo.hook('txnclose-bookmark', throw=False,
1317 1319 txnname=desc, **pycompat.strkwargs(args))
1318 1320
1319 1321 if hook.hashook(repo.ui, 'txnclose-phase'):
1320 1322 cl = repo.unfiltered().changelog
1321 1323 phasemv = sorted(tr.changes['phases'].items())
1322 1324 for rev, (old, new) in phasemv:
1323 1325 args = tr.hookargs.copy()
1324 1326 node = hex(cl.node(rev))
1325 1327 args.update(phases.preparehookargs(node, old, new))
1326 1328 repo.hook('txnclose-phase', throw=False, txnname=desc,
1327 1329 **pycompat.strkwargs(args))
1328 1330
1329 1331 repo.hook('txnclose', throw=False, txnname=desc,
1330 1332 **pycompat.strkwargs(hookargs))
1331 1333 reporef()._afterlock(hookfunc)
1332 1334 tr.addfinalize('txnclose-hook', txnclosehook)
1333 1335 tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
1334 1336 def txnaborthook(tr2):
1335 1337 """To be run if transaction is aborted
1336 1338 """
1337 1339 reporef().hook('txnabort', throw=False, txnname=desc,
1338 1340 **tr2.hookargs)
1339 1341 tr.addabort('txnabort-hook', txnaborthook)
1340 1342 # avoid eager cache invalidation. in-memory data should be identical
1341 1343 # to stored data if the transaction has no error.
1342 1344 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1343 1345 self._transref = weakref.ref(tr)
1344 1346 scmutil.registersummarycallback(self, tr, desc)
1345 1347 return tr
1346 1348
1347 1349 def _journalfiles(self):
1348 1350 return ((self.svfs, 'journal'),
1349 1351 (self.vfs, 'journal.dirstate'),
1350 1352 (self.vfs, 'journal.branch'),
1351 1353 (self.vfs, 'journal.desc'),
1352 1354 (self.vfs, 'journal.bookmarks'),
1353 1355 (self.svfs, 'journal.phaseroots'))
1354 1356
1355 1357 def undofiles(self):
1356 1358 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1357 1359
1358 1360 @unfilteredmethod
1359 1361 def _writejournal(self, desc):
1360 1362 self.dirstate.savebackup(None, 'journal.dirstate')
1361 1363 self.vfs.write("journal.branch",
1362 1364 encoding.fromlocal(self.dirstate.branch()))
1363 1365 self.vfs.write("journal.desc",
1364 1366 "%d\n%s\n" % (len(self), desc))
1365 1367 self.vfs.write("journal.bookmarks",
1366 1368 self.vfs.tryread("bookmarks"))
1367 1369 self.svfs.write("journal.phaseroots",
1368 1370 self.svfs.tryread("phaseroots"))
1369 1371
1370 1372 def recover(self):
1371 1373 with self.lock():
1372 1374 if self.svfs.exists("journal"):
1373 1375 self.ui.status(_("rolling back interrupted transaction\n"))
1374 1376 vfsmap = {'': self.svfs,
1375 1377 'plain': self.vfs,}
1376 1378 transaction.rollback(self.svfs, vfsmap, "journal",
1377 1379 self.ui.warn,
1378 1380 checkambigfiles=_cachedfiles)
1379 1381 self.invalidate()
1380 1382 return True
1381 1383 else:
1382 1384 self.ui.warn(_("no interrupted transaction available\n"))
1383 1385 return False
1384 1386
1385 1387 def rollback(self, dryrun=False, force=False):
1386 1388 wlock = lock = dsguard = None
1387 1389 try:
1388 1390 wlock = self.wlock()
1389 1391 lock = self.lock()
1390 1392 if self.svfs.exists("undo"):
1391 1393 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1392 1394
1393 1395 return self._rollback(dryrun, force, dsguard)
1394 1396 else:
1395 1397 self.ui.warn(_("no rollback information available\n"))
1396 1398 return 1
1397 1399 finally:
1398 1400 release(dsguard, lock, wlock)
1399 1401
1400 1402 @unfilteredmethod # Until we get smarter cache management
1401 1403 def _rollback(self, dryrun, force, dsguard):
1402 1404 ui = self.ui
1403 1405 try:
1404 1406 args = self.vfs.read('undo.desc').splitlines()
1405 1407 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1406 1408 if len(args) >= 3:
1407 1409 detail = args[2]
1408 1410 oldtip = oldlen - 1
1409 1411
1410 1412 if detail and ui.verbose:
1411 1413 msg = (_('repository tip rolled back to revision %d'
1412 1414 ' (undo %s: %s)\n')
1413 1415 % (oldtip, desc, detail))
1414 1416 else:
1415 1417 msg = (_('repository tip rolled back to revision %d'
1416 1418 ' (undo %s)\n')
1417 1419 % (oldtip, desc))
1418 1420 except IOError:
1419 1421 msg = _('rolling back unknown transaction\n')
1420 1422 desc = None
1421 1423
1422 1424 if not force and self['.'] != self['tip'] and desc == 'commit':
1423 1425 raise error.Abort(
1424 1426 _('rollback of last commit while not checked out '
1425 1427 'may lose data'), hint=_('use -f to force'))
1426 1428
1427 1429 ui.status(msg)
1428 1430 if dryrun:
1429 1431 return 0
1430 1432
1431 1433 parents = self.dirstate.parents()
1432 1434 self.destroying()
1433 1435 vfsmap = {'plain': self.vfs, '': self.svfs}
1434 1436 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1435 1437 checkambigfiles=_cachedfiles)
1436 1438 if self.vfs.exists('undo.bookmarks'):
1437 1439 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1438 1440 if self.svfs.exists('undo.phaseroots'):
1439 1441 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1440 1442 self.invalidate()
1441 1443
1442 1444 parentgone = (parents[0] not in self.changelog.nodemap or
1443 1445 parents[1] not in self.changelog.nodemap)
1444 1446 if parentgone:
1445 1447 # prevent dirstateguard from overwriting the already restored one
1446 1448 dsguard.close()
1447 1449
1448 1450 self.dirstate.restorebackup(None, 'undo.dirstate')
1449 1451 try:
1450 1452 branch = self.vfs.read('undo.branch')
1451 1453 self.dirstate.setbranch(encoding.tolocal(branch))
1452 1454 except IOError:
1453 1455 ui.warn(_('named branch could not be reset: '
1454 1456 'current branch is still \'%s\'\n')
1455 1457 % self.dirstate.branch())
1456 1458
1457 1459 parents = tuple([p.rev() for p in self[None].parents()])
1458 1460 if len(parents) > 1:
1459 1461 ui.status(_('working directory now based on '
1460 1462 'revisions %d and %d\n') % parents)
1461 1463 else:
1462 1464 ui.status(_('working directory now based on '
1463 1465 'revision %d\n') % parents)
1464 1466 mergemod.mergestate.clean(self, self['.'].node())
1465 1467
1466 1468 # TODO: if we know which new heads may result from this rollback, pass
1467 1469 # them to destroy(), which will prevent the branchhead cache from being
1468 1470 # invalidated.
1469 1471 self.destroyed()
1470 1472 return 0
1471 1473
1472 1474 def _buildcacheupdater(self, newtransaction):
1473 1475 """called during transaction to build the callback updating cache
1474 1476
1475 1477 Lives on the repository to help extensions that might want to augment
1476 1478 this logic. For this purpose, the created transaction is passed to the
1477 1479 method.
1478 1480 """
1479 1481 # we must avoid cyclic reference between repo and transaction.
1480 1482 reporef = weakref.ref(self)
1481 1483 def updater(tr):
1482 1484 repo = reporef()
1483 1485 repo.updatecaches(tr)
1484 1486 return updater
1485 1487
1486 1488 @unfilteredmethod
1487 1489 def updatecaches(self, tr=None):
1488 1490 """warm appropriate caches
1489 1491
1490 1492 If this function is called after a transaction closed, the transaction
1491 1493 will be available in the 'tr' argument. This can be used to selectively
1492 1494 update caches relevant to the changes in that transaction.
1493 1495 """
1494 1496 if tr is not None and tr.hookargs.get('source') == 'strip':
1495 1497 # During strip, many caches are invalid but
1496 1498 # later call to `destroyed` will refresh them.
1497 1499 return
1498 1500
1499 1501 if tr is None or tr.changes['revs']:
1500 1502 # updating the unfiltered branchmap should refresh all the others,
1501 1503 self.ui.debug('updating the branch cache\n')
1502 1504 branchmap.updatecache(self.filtered('served'))
1503 1505
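    # Hedged sketch: an extension could wrap updatecaches() to warm its own
    # cache only when a transaction actually added revisions; 'mycache' and
    # the exact wrapper wiring are hypothetical:
    #
    #   from mercurial import extensions, localrepo
    #
    #   def _updatecaches(orig, self, tr=None):
    #       orig(self, tr)
    #       if tr is None or tr.changes['revs']:
    #           self.mycache.rebuild()
    #
    #   extensions.wrapfunction(localrepo.localrepository, 'updatecaches',
    #                           _updatecaches)
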
1504 1506 def invalidatecaches(self):
1505 1507
1506 1508 if '_tagscache' in vars(self):
1507 1509 # can't use delattr on proxy
1508 1510 del self.__dict__['_tagscache']
1509 1511
1510 1512 self.unfiltered()._branchcaches.clear()
1511 1513 self.invalidatevolatilesets()
1512 1514 self._sparsesignaturecache.clear()
1513 1515
1514 1516 def invalidatevolatilesets(self):
1515 1517 self.filteredrevcache.clear()
1516 1518 obsolete.clearobscaches(self)
1517 1519
1518 1520 def invalidatedirstate(self):
1519 1521 '''Invalidates the dirstate, causing the next call to dirstate
1520 1522 to check if it was modified since the last time it was read,
1521 1523 rereading it if it has.
1522 1524
1523 1525 This is different from dirstate.invalidate() in that it doesn't
1524 1526 always reread the dirstate. Use dirstate.invalidate() if you want to
1525 1527 explicitly read the dirstate again (i.e. restoring it to a previous
1526 1528 known good state).'''
1527 1529 if hasunfilteredcache(self, 'dirstate'):
1528 1530 for k in self.dirstate._filecache:
1529 1531 try:
1530 1532 delattr(self.dirstate, k)
1531 1533 except AttributeError:
1532 1534 pass
1533 1535 delattr(self.unfiltered(), 'dirstate')
1534 1536
1535 1537 def invalidate(self, clearfilecache=False):
1536 1538 '''Invalidates both store and non-store parts other than dirstate
1537 1539
1538 1540 If a transaction is running, invalidation of store is omitted,
1539 1541 because discarding in-memory changes might cause inconsistency
1540 1542 (e.g. an incomplete fncache causes unintentional failure, but
1541 1543 a redundant one doesn't).
1542 1544 '''
1543 1545 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1544 1546 for k in list(self._filecache.keys()):
1545 1547 # dirstate is invalidated separately in invalidatedirstate()
1546 1548 if k == 'dirstate':
1547 1549 continue
1548 1550 if (k == 'changelog' and
1549 1551 self.currenttransaction() and
1550 1552 self.changelog._delayed):
1551 1553 # The changelog object may store unwritten revisions. We don't
1552 1554 # want to lose them.
1553 1555 # TODO: Solve the problem instead of working around it.
1554 1556 continue
1555 1557
1556 1558 if clearfilecache:
1557 1559 del self._filecache[k]
1558 1560 try:
1559 1561 delattr(unfiltered, k)
1560 1562 except AttributeError:
1561 1563 pass
1562 1564 self.invalidatecaches()
1563 1565 if not self.currenttransaction():
1564 1566 # TODO: Changing contents of store outside transaction
1565 1567 # causes inconsistency. We should make in-memory store
1566 1568 # changes detectable, and abort if changed.
1567 1569 self.store.invalidatecaches()
1568 1570
1569 1571 def invalidateall(self):
1570 1572 '''Fully invalidates both store and non-store parts, causing the
1571 1573 subsequent operation to reread any outside changes.'''
1572 1574 # extension should hook this to invalidate its caches
1573 1575 self.invalidate()
1574 1576 self.invalidatedirstate()
1575 1577
1576 1578 @unfilteredmethod
1577 1579 def _refreshfilecachestats(self, tr):
1578 1580 """Reload stats of cached files so that they are flagged as valid"""
1579 1581 for k, ce in self._filecache.items():
1580 1582 if k == 'dirstate' or k not in self.__dict__:
1581 1583 continue
1582 1584 ce.refresh()
1583 1585
1584 1586 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1585 1587 inheritchecker=None, parentenvvar=None):
1586 1588 parentlock = None
1587 1589 # the contents of parentenvvar are used by the underlying lock to
1588 1590 # determine whether it can be inherited
1589 1591 if parentenvvar is not None:
1590 1592 parentlock = encoding.environ.get(parentenvvar)
1591 1593 try:
1592 1594 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1593 1595 acquirefn=acquirefn, desc=desc,
1594 1596 inheritchecker=inheritchecker,
1595 1597 parentlock=parentlock)
1596 1598 except error.LockHeld as inst:
1597 1599 if not wait:
1598 1600 raise
1599 1601 # show more details for new-style locks
1600 1602 if ':' in inst.locker:
1601 1603 host, pid = inst.locker.split(":", 1)
1602 1604 self.ui.warn(
1603 1605 _("waiting for lock on %s held by process %r "
1604 1606 "on host %r\n") % (desc, pid, host))
1605 1607 else:
1606 1608 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1607 1609 (desc, inst.locker))
1608 1610 # default to 600 seconds timeout
1609 1611 l = lockmod.lock(vfs, lockname,
1610 1612 int(self.ui.config("ui", "timeout")),
1611 1613 releasefn=releasefn, acquirefn=acquirefn,
1612 1614 desc=desc)
1613 1615 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1614 1616 return l
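        # Hedged usage note: the waiting path above honors the user's lock
        # timeout, e.g. an hgrc containing
        #
        #   [ui]
        #   timeout = 30
        #
        # would make the retry give up (raising LockHeld) after roughly 30
        # seconds instead of the 600-second default.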
1615 1617
1616 1618 def _afterlock(self, callback):
1617 1619 """add a callback to be run when the repository is fully unlocked
1618 1620
1619 1621 The callback will be executed when the outermost lock is released
1620 1622 (with wlock being higher level than 'lock')."""
1621 1623 for ref in (self._wlockref, self._lockref):
1622 1624 l = ref and ref()
1623 1625 if l and l.held:
1624 1626 l.postrelease.append(callback)
1625 1627 break
1626 1628 else: # no lock has been found.
1627 1629 callback()
1628 1630
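    # Hedged sketch: code that holds the lock can defer work until the
    # repository is fully unlocked (the callback body is illustrative):
    #
    #   l = repo.lock()
    #   try:
    #       ...  # mutate the store
    #       repo._afterlock(lambda: repo.ui.status('all locks released\n'))
    #   finally:
    #       l.release()
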
1629 1631 def lock(self, wait=True):
1630 1632 '''Lock the repository store (.hg/store) and return a weak reference
1631 1633 to the lock. Use this before modifying the store (e.g. committing or
1632 1634 stripping). If you are opening a transaction, get a lock as well.)
1633 1635 stripping). If you are opening a transaction, get a lock as well.
1634 1636 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1635 1637 'wlock' first to avoid a dead-lock hazard.'''
1636 1638 l = self._currentlock(self._lockref)
1637 1639 if l is not None:
1638 1640 l.lock()
1639 1641 return l
1640 1642
1641 1643 l = self._lock(self.svfs, "lock", wait, None,
1642 1644 self.invalidate, _('repository %s') % self.origroot)
1643 1645 self._lockref = weakref.ref(l)
1644 1646 return l
1645 1647
1646 1648 def _wlockchecktransaction(self):
1647 1649 if self.currenttransaction() is not None:
1648 1650 raise error.LockInheritanceContractViolation(
1649 1651 'wlock cannot be inherited in the middle of a transaction')
1650 1652
1651 1653 def wlock(self, wait=True):
1652 1654 '''Lock the non-store parts of the repository (everything under
1653 1655 .hg except .hg/store) and return a weak reference to the lock.
1654 1656
1655 1657 Use this before modifying files in .hg.
1656 1658
1657 1659 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1658 1660 'wlock' first to avoid a dead-lock hazard.'''
1659 1661 l = self._wlockref and self._wlockref()
1660 1662 if l is not None and l.held:
1661 1663 l.lock()
1662 1664 return l
1663 1665
1664 1666 # We do not need to check for non-waiting lock acquisition. Such an
1665 1667 # acquisition would not cause a dead-lock as it would just fail.
1666 1668 if wait and (self.ui.configbool('devel', 'all-warnings')
1667 1669 or self.ui.configbool('devel', 'check-locks')):
1668 1670 if self._currentlock(self._lockref) is not None:
1669 1671 self.ui.develwarn('"wlock" acquired after "lock"')
1670 1672
1671 1673 def unlock():
1672 1674 if self.dirstate.pendingparentchange():
1673 1675 self.dirstate.invalidate()
1674 1676 else:
1675 1677 self.dirstate.write(None)
1676 1678
1677 1679 self._filecache['dirstate'].refresh()
1678 1680
1679 1681 l = self._lock(self.vfs, "wlock", wait, unlock,
1680 1682 self.invalidatedirstate, _('working directory of %s') %
1681 1683 self.origroot,
1682 1684 inheritchecker=self._wlockchecktransaction,
1683 1685 parentenvvar='HG_WLOCK_LOCKER')
1684 1686 self._wlockref = weakref.ref(l)
1685 1687 return l
1686 1688
1687 1689 def _currentlock(self, lockref):
1688 1690 """Returns the lock if it's held, or None if it's not."""
1689 1691 if lockref is None:
1690 1692 return None
1691 1693 l = lockref()
1692 1694 if l is None or not l.held:
1693 1695 return None
1694 1696 return l
1695 1697
1696 1698 def currentwlock(self):
1697 1699 """Returns the wlock if it's held, or None if it's not."""
1698 1700 return self._currentlock(self._wlockref)
1699 1701
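    # Hedged sketch of the documented lock ordering: always take 'wlock'
    # before 'lock' when both are needed, and release in reverse order:
    #
    #   wlock = lock = None
    #   try:
    #       wlock = repo.wlock()   # working directory first
    #       lock = repo.lock()     # then the store
    #       ...                    # the combined operation
    #   finally:
    #       lockmod.release(lock, wlock)
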
1700 1702 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1701 1703 """
1702 1704 commit an individual file as part of a larger transaction
1703 1705 """
1704 1706
1705 1707 fname = fctx.path()
1706 1708 fparent1 = manifest1.get(fname, nullid)
1707 1709 fparent2 = manifest2.get(fname, nullid)
1708 1710 if isinstance(fctx, context.filectx):
1709 1711 node = fctx.filenode()
1710 1712 if node in [fparent1, fparent2]:
1711 1713 self.ui.debug('reusing %s filelog entry\n' % fname)
1712 1714 if manifest1.flags(fname) != fctx.flags():
1713 1715 changelist.append(fname)
1714 1716 return node
1715 1717
1716 1718 flog = self.file(fname)
1717 1719 meta = {}
1718 1720 copy = fctx.renamed()
1719 1721 if copy and copy[0] != fname:
1720 1722 # Mark the new revision of this file as a copy of another
1721 1723 # file. This copy data will effectively act as a parent
1722 1724 # of this new revision. If this is a merge, the first
1723 1725 # parent will be the nullid (meaning "look up the copy data")
1724 1726 # and the second one will be the other parent. For example:
1725 1727 #
1726 1728 # 0 --- 1 --- 3 rev1 changes file foo
1727 1729 # \ / rev2 renames foo to bar and changes it
1728 1730 # \- 2 -/ rev3 should have bar with all changes and
1729 1731 # should record that bar descends from
1730 1732 # bar in rev2 and foo in rev1
1731 1733 #
1732 1734 # this allows this merge to succeed:
1733 1735 #
1734 1736 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1735 1737 # \ / merging rev3 and rev4 should use bar@rev2
1736 1738 # \- 2 --- 4 as the merge base
1737 1739 #
1738 1740
1739 1741 cfname = copy[0]
1740 1742 crev = manifest1.get(cfname)
1741 1743 newfparent = fparent2
1742 1744
1743 1745 if manifest2: # branch merge
1744 1746 if fparent2 == nullid or crev is None: # copied on remote side
1745 1747 if cfname in manifest2:
1746 1748 crev = manifest2[cfname]
1747 1749 newfparent = fparent1
1748 1750
1749 1751 # Here, we used to search backwards through history to try to find
1750 1752 # where the file copy came from if the source of a copy was not in
1751 1753 the parent manifest. However, this doesn't actually make sense to
1752 1754 # do (what does a copy from something not in your working copy even
1753 1755 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1754 1756 # the user that copy information was dropped, so if they didn't
1755 1757 # expect this outcome it can be fixed, but this is the correct
1756 1758 # behavior in this circumstance.
1757 1759
1758 1760 if crev:
1759 1761 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1760 1762 meta["copy"] = cfname
1761 1763 meta["copyrev"] = hex(crev)
1762 1764 fparent1, fparent2 = nullid, newfparent
1763 1765 else:
1764 1766 self.ui.warn(_("warning: can't find ancestor for '%s' "
1765 1767 "copied from '%s'!\n") % (fname, cfname))
1766 1768
1767 1769 elif fparent1 == nullid:
1768 1770 fparent1, fparent2 = fparent2, nullid
1769 1771 elif fparent2 != nullid:
1770 1772 # is one parent an ancestor of the other?
1771 1773 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1772 1774 if fparent1 in fparentancestors:
1773 1775 fparent1, fparent2 = fparent2, nullid
1774 1776 elif fparent2 in fparentancestors:
1775 1777 fparent2 = nullid
1776 1778
1777 1779 # is the file changed?
1778 1780 text = fctx.data()
1779 1781 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1780 1782 changelist.append(fname)
1781 1783 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1782 1784 # are just the flags changed during merge?
1783 1785 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1784 1786 changelist.append(fname)
1785 1787
1786 1788 return fparent1
1787 1789
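    # For reference, the copy metadata recorded above is a plain dict, e.g.
    #
    #   {'copy': 'foo', 'copyrev': '<40-digit hex filelog node>'}
    #
    # flog.add() stores it with the new filelog revision so that a later
    # merge can find the copy source ('bar descends from foo' in the
    # diagram above).
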
1788 1790 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1789 1791 """check for commit arguments that aren't committable"""
1790 1792 if match.isexact() or match.prefix():
1791 1793 matched = set(status.modified + status.added + status.removed)
1792 1794
1793 1795 for f in match.files():
1794 1796 f = self.dirstate.normalize(f)
1795 1797 if f == '.' or f in matched or f in wctx.substate:
1796 1798 continue
1797 1799 if f in status.deleted:
1798 1800 fail(f, _('file not found!'))
1799 1801 if f in vdirs: # visited directory
1800 1802 d = f + '/'
1801 1803 for mf in matched:
1802 1804 if mf.startswith(d):
1803 1805 break
1804 1806 else:
1805 1807 fail(f, _("no match under directory!"))
1806 1808 elif f not in self.dirstate:
1807 1809 fail(f, _("file not tracked!"))
1808 1810
1809 1811 @unfilteredmethod
1810 1812 def commit(self, text="", user=None, date=None, match=None, force=False,
1811 1813 editor=False, extra=None):
1812 1814 """Add a new revision to current repository.
1813 1815
1814 1816 Revision information is gathered from the working directory,
1815 1817 match can be used to filter the committed files. If editor is
1816 1818 supplied, it is called to get a commit message.
1817 1819 """
1818 1820 if extra is None:
1819 1821 extra = {}
1820 1822
1821 1823 def fail(f, msg):
1822 1824 raise error.Abort('%s: %s' % (f, msg))
1823 1825
1824 1826 if not match:
1825 1827 match = matchmod.always(self.root, '')
1826 1828
1827 1829 if not force:
1828 1830 vdirs = []
1829 1831 match.explicitdir = vdirs.append
1830 1832 match.bad = fail
1831 1833
1832 1834 wlock = lock = tr = None
1833 1835 try:
1834 1836 wlock = self.wlock()
1835 1837 lock = self.lock() # for recent changelog (see issue4368)
1836 1838
1837 1839 wctx = self[None]
1838 1840 merge = len(wctx.parents()) > 1
1839 1841
1840 1842 if not force and merge and not match.always():
1841 1843 raise error.Abort(_('cannot partially commit a merge '
1842 1844 '(do not specify files or patterns)'))
1843 1845
1844 1846 status = self.status(match=match, clean=force)
1845 1847 if force:
1846 1848 status.modified.extend(status.clean) # mq may commit clean files
1847 1849
1848 1850 # check subrepos
1849 1851 subs = []
1850 1852 commitsubs = set()
1851 1853 newstate = wctx.substate.copy()
1852 1854 # only manage subrepos and .hgsubstate if .hgsub is present
1853 1855 if '.hgsub' in wctx:
1854 1856 # we'll decide whether to track this ourselves, thanks
1855 1857 for c in status.modified, status.added, status.removed:
1856 1858 if '.hgsubstate' in c:
1857 1859 c.remove('.hgsubstate')
1858 1860
1859 1861 # compare current state to last committed state
1860 1862 # build new substate based on last committed state
1861 1863 oldstate = wctx.p1().substate
1862 1864 for s in sorted(newstate.keys()):
1863 1865 if not match(s):
1864 1866 # ignore working copy, use old state if present
1865 1867 if s in oldstate:
1866 1868 newstate[s] = oldstate[s]
1867 1869 continue
1868 1870 if not force:
1869 1871 raise error.Abort(
1870 1872 _("commit with new subrepo %s excluded") % s)
1871 1873 dirtyreason = wctx.sub(s).dirtyreason(True)
1872 1874 if dirtyreason:
1873 1875 if not self.ui.configbool('ui', 'commitsubrepos'):
1874 1876 raise error.Abort(dirtyreason,
1875 1877 hint=_("use --subrepos for recursive commit"))
1876 1878 subs.append(s)
1877 1879 commitsubs.add(s)
1878 1880 else:
1879 1881 bs = wctx.sub(s).basestate()
1880 1882 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1881 1883 if oldstate.get(s, (None, None, None))[1] != bs:
1882 1884 subs.append(s)
1883 1885
1884 1886 # check for removed subrepos
1885 1887 for p in wctx.parents():
1886 1888 r = [s for s in p.substate if s not in newstate]
1887 1889 subs += [s for s in r if match(s)]
1888 1890 if subs:
1889 1891 if (not match('.hgsub') and
1890 1892 '.hgsub' in (wctx.modified() + wctx.added())):
1891 1893 raise error.Abort(
1892 1894 _("can't commit subrepos without .hgsub"))
1893 1895 status.modified.insert(0, '.hgsubstate')
1894 1896
1895 1897 elif '.hgsub' in status.removed:
1896 1898 # clean up .hgsubstate when .hgsub is removed
1897 1899 if ('.hgsubstate' in wctx and
1898 1900 '.hgsubstate' not in (status.modified + status.added +
1899 1901 status.removed)):
1900 1902 status.removed.insert(0, '.hgsubstate')
1901 1903
1902 1904 # make sure all explicit patterns are matched
1903 1905 if not force:
1904 1906 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1905 1907
1906 1908 cctx = context.workingcommitctx(self, status,
1907 1909 text, user, date, extra)
1908 1910
1909 1911 # internal config: ui.allowemptycommit
1910 1912 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1911 1913 or extra.get('close') or merge or cctx.files()
1912 1914 or self.ui.configbool('ui', 'allowemptycommit'))
1913 1915 if not allowemptycommit:
1914 1916 return None
1915 1917
1916 1918 if merge and cctx.deleted():
1917 1919 raise error.Abort(_("cannot commit merge with missing files"))
1918 1920
1919 1921 ms = mergemod.mergestate.read(self)
1920 1922 mergeutil.checkunresolved(ms)
1921 1923
1922 1924 if editor:
1923 1925 cctx._text = editor(self, cctx, subs)
1924 1926 edited = (text != cctx._text)
1925 1927
1926 1928 # Save commit message in case this transaction gets rolled back
1927 1929 # (e.g. by a pretxncommit hook). Leave the content alone on
1928 1930 # the assumption that the user will use the same editor again.
1929 1931 msgfn = self.savecommitmessage(cctx._text)
1930 1932
1931 1933 # commit subs and write new state
1932 1934 if subs:
1933 1935 for s in sorted(commitsubs):
1934 1936 sub = wctx.sub(s)
1935 1937 self.ui.status(_('committing subrepository %s\n') %
1936 1938 subrepo.subrelpath(sub))
1937 1939 sr = sub.commit(cctx._text, user, date)
1938 1940 newstate[s] = (newstate[s][0], sr)
1939 1941 subrepo.writestate(self, newstate)
1940 1942
1941 1943 p1, p2 = self.dirstate.parents()
1942 1944 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1943 1945 try:
1944 1946 self.hook("precommit", throw=True, parent1=hookp1,
1945 1947 parent2=hookp2)
1946 1948 tr = self.transaction('commit')
1947 1949 ret = self.commitctx(cctx, True)
1948 1950 except: # re-raises
1949 1951 if edited:
1950 1952 self.ui.write(
1951 1953 _('note: commit message saved in %s\n') % msgfn)
1952 1954 raise
1953 1955 # update bookmarks, dirstate and mergestate
1954 1956 bookmarks.update(self, [p1, p2], ret)
1955 1957 cctx.markcommitted(ret)
1956 1958 ms.reset()
1957 1959 tr.close()
1958 1960
1959 1961 finally:
1960 1962 lockmod.release(tr, lock, wlock)
1961 1963
1962 1964 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1963 1965 # hack for commands that use a temporary commit (e.g. histedit):
1964 1966 # the temporary commit may already be stripped before the hook runs
1965 1967 if self.changelog.hasnode(ret):
1966 1968 self.hook("commit", node=node, parent1=parent1,
1967 1969 parent2=parent2)
1968 1970 self._afterlock(commithook)
1969 1971 return ret
1970 1972
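    # Hedged usage sketch (file names illustrative): committing only a
    # subset of files by passing an exact matcher:
    #
    #   from mercurial import match as matchmod
    #
    #   m = matchmod.exact(repo.root, '', ['a.txt', 'b.txt'])
    #   node = repo.commit(text='update a and b', user='alice', match=m)
    #   if node is None:
    #       repo.ui.status('nothing changed\n')
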
1971 1973 @unfilteredmethod
1972 1974 def commitctx(self, ctx, error=False):
1973 1975 """Add a new revision to current repository.
1974 1976 Revision information is passed via the context argument.
1975 1977 """
1976 1978
1977 1979 tr = None
1978 1980 p1, p2 = ctx.p1(), ctx.p2()
1979 1981 user = ctx.user()
1980 1982
1981 1983 lock = self.lock()
1982 1984 try:
1983 1985 tr = self.transaction("commit")
1984 1986 trp = weakref.proxy(tr)
1985 1987
1986 1988 if ctx.manifestnode():
1987 1989 # reuse an existing manifest revision
1988 1990 mn = ctx.manifestnode()
1989 1991 files = ctx.files()
1990 1992 elif ctx.files():
1991 1993 m1ctx = p1.manifestctx()
1992 1994 m2ctx = p2.manifestctx()
1993 1995 mctx = m1ctx.copy()
1994 1996
1995 1997 m = mctx.read()
1996 1998 m1 = m1ctx.read()
1997 1999 m2 = m2ctx.read()
1998 2000
1999 2001 # check in files
2000 2002 added = []
2001 2003 changed = []
2002 2004 removed = list(ctx.removed())
2003 2005 linkrev = len(self)
2004 2006 self.ui.note(_("committing files:\n"))
2005 2007 for f in sorted(ctx.modified() + ctx.added()):
2006 2008 self.ui.note(f + "\n")
2007 2009 try:
2008 2010 fctx = ctx[f]
2009 2011 if fctx is None:
2010 2012 removed.append(f)
2011 2013 else:
2012 2014 added.append(f)
2013 2015 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2014 2016 trp, changed)
2015 2017 m.setflag(f, fctx.flags())
2016 2018 except OSError as inst:
2017 2019 self.ui.warn(_("trouble committing %s!\n") % f)
2018 2020 raise
2019 2021 except IOError as inst:
2020 2022 errcode = getattr(inst, 'errno', errno.ENOENT)
2021 2023 if error or errcode and errcode != errno.ENOENT:
2022 2024 self.ui.warn(_("trouble committing %s!\n") % f)
2023 2025 raise
2024 2026
2025 2027 # update manifest
2026 2028 self.ui.note(_("committing manifest\n"))
2027 2029 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2028 2030 drop = [f for f in removed if f in m]
2029 2031 for f in drop:
2030 2032 del m[f]
2031 2033 mn = mctx.write(trp, linkrev,
2032 2034 p1.manifestnode(), p2.manifestnode(),
2033 2035 added, drop)
2034 2036 files = changed + removed
2035 2037 else:
2036 2038 mn = p1.manifestnode()
2037 2039 files = []
2038 2040
2039 2041 # update changelog
2040 2042 self.ui.note(_("committing changelog\n"))
2041 2043 self.changelog.delayupdate(tr)
2042 2044 n = self.changelog.add(mn, files, ctx.description(),
2043 2045 trp, p1.node(), p2.node(),
2044 2046 user, ctx.date(), ctx.extra().copy())
2045 2047 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2046 2048 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2047 2049 parent2=xp2)
2048 2050 # set the new commit in its proper phase
2049 2051 targetphase = subrepo.newcommitphase(self.ui, ctx)
2050 2052 if targetphase:
2051 2053 # retracting the boundary does not alter parent changesets.
2052 2054 # if a parent has a higher phase, the resulting phase will
2053 2055 # be compliant anyway
2054 2056 #
2055 2057 # if minimal phase was 0 we don't need to retract anything
2056 2058 phases.registernew(self, tr, targetphase, [n])
2057 2059 tr.close()
2058 2060 return n
2059 2061 finally:
2060 2062 if tr:
2061 2063 tr.release()
2062 2064 lock.release()
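        # Note on the try/finally above: tr.close() commits the
        # transaction; if an exception skips close(), tr.release() in the
        # finally block aborts it and rolls the store back (release after a
        # successful close is a no-op).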
2063 2065
2064 2066 @unfilteredmethod
2065 2067 def destroying(self):
2066 2068 '''Inform the repository that nodes are about to be destroyed.
2067 2069 Intended for use by strip and rollback, so there's a common
2068 2070 place for anything that has to be done before destroying history.
2069 2071
2070 2072 This is mostly useful for saving state that is in memory and waiting
2071 2073 to be flushed when the current lock is released. Because a call to
2072 2074 destroyed is imminent, the repo will be invalidated causing those
2073 2075 changes to stay in memory (waiting for the next unlock), or vanish
2074 2076 completely.
2075 2077 '''
2076 2078 # When using the same lock to commit and strip, the phasecache is left
2077 2079 # dirty after committing. Then when we strip, the repo is invalidated,
2078 2080 # causing those changes to disappear.
2079 2081 if '_phasecache' in vars(self):
2080 2082 self._phasecache.write()
2081 2083
2082 2084 @unfilteredmethod
2083 2085 def destroyed(self):
2084 2086 '''Inform the repository that nodes have been destroyed.
2085 2087 Intended for use by strip and rollback, so there's a common
2086 2088 place for anything that has to be done after destroying history.
2087 2089 '''
2088 2090 # When one tries to:
2089 2091 # 1) destroy nodes thus calling this method (e.g. strip)
2090 2092 # 2) use phasecache somewhere (e.g. commit)
2091 2093 #
2092 2094 # then 2) will fail because the phasecache contains nodes that were
2093 2095 # removed. We can either remove phasecache from the filecache,
2094 2096 # causing it to reload next time it is accessed, or simply filter
2095 2097 # the removed nodes now and write the updated cache.
2096 2098 self._phasecache.filterunknown(self)
2097 2099 self._phasecache.write()
2098 2100
2099 2101 # refresh all repository caches
2100 2102 self.updatecaches()
2101 2103
2102 2104 # Ensure the persistent tag cache is updated. Doing it now
2103 2105 # means that the tag cache only has to worry about destroyed
2104 2106 # heads immediately after a strip/rollback. That in turn
2105 2107 # guarantees that "cachetip == currenttip" (comparing both rev
2106 2108 # and node) always means no nodes have been added or destroyed.
2107 2109
2108 2110 # XXX this is suboptimal when qrefresh'ing: we strip the current
2109 2111 # head, refresh the tag cache, then immediately add a new head.
2110 2112 # But I think doing it this way is necessary for the "instant
2111 2113 # tag cache retrieval" case to work.
2112 2114 self.invalidate()
2113 2115
2114 2116 def walk(self, match, node=None):
2115 2117 '''
2116 2118 walk recursively through the directory tree or a given
2117 2119 changeset, finding all files matched by the match
2118 2120 function
2119 2121 '''
2120 2122 self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
2121 2123 return self[node].walk(match)
2122 2124
2123 2125 def status(self, node1='.', node2=None, match=None,
2124 2126 ignored=False, clean=False, unknown=False,
2125 2127 listsubrepos=False):
2126 2128 '''a convenience method that calls node1.status(node2)'''
2127 2129 return self[node1].status(node2, match, ignored, clean, unknown,
2128 2130 listsubrepos)
2129 2131
2130 2132 def addpostdsstatus(self, ps):
2131 2133 """Add a callback to run within the wlock, at the point at which status
2132 2134 fixups happen.
2133 2135
2134 2136 On status completion, callback(wctx, status) will be called with the
2135 2137 wlock held, unless the dirstate has changed from underneath or the wlock
2136 2138 couldn't be grabbed.
2137 2139
2138 2140 Callbacks should not capture and use a cached copy of the dirstate --
2139 2141 it might change in the meanwhile. Instead, they should access the
2140 2142 dirstate via wctx.repo().dirstate.
2141 2143
2142 2144 This list is emptied out after each status run -- extensions should
2143 2145 make sure they add to this list each time dirstate.status is called.
2144 2146 Extensions should also make sure they don't call this for statuses
2145 2147 that don't involve the dirstate.
2146 2148 """
2147 2149
2148 2150 # The list is located here for uniqueness reasons -- it is actually
2149 2151 # managed by the workingctx, but that isn't unique per-repo.
2150 2152 self._postdsstatus.append(ps)
2151 2153
2152 2154 def postdsstatus(self):
2153 2155 """Used by workingctx to get the list of post-dirstate-status hooks."""
2154 2156 return self._postdsstatus
2155 2157
2156 2158 def clearpostdsstatus(self):
2157 2159 """Used by workingctx to clear post-dirstate-status hooks."""
2158 2160 del self._postdsstatus[:]
2159 2161
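    # Hedged sketch: registering a post-dirstate-status callback (the
    # 'fixup' name is hypothetical):
    #
    #   def fixup(wctx, status):
    #       # runs under wlock; access the dirstate via wctx.repo().dirstate
    #       wctx.repo().ui.debug('%d files modified\n' % len(status.modified))
    #
    #   repo.addpostdsstatus(fixup)
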
2160 2162 def heads(self, start=None):
2161 2163 if start is None:
2162 2164 cl = self.changelog
2163 2165 headrevs = reversed(cl.headrevs())
2164 2166 return [cl.node(rev) for rev in headrevs]
2165 2167
2166 2168 heads = self.changelog.heads(start)
2167 2169 # sort the output in rev descending order
2168 2170 return sorted(heads, key=self.changelog.rev, reverse=True)
2169 2171
2170 2172 def branchheads(self, branch=None, start=None, closed=False):
2171 2173 '''return a (possibly filtered) list of heads for the given branch
2172 2174
2173 2175 Heads are returned in topological order, from newest to oldest.
2174 2176 If branch is None, use the dirstate branch.
2175 2177 If start is not None, return only heads reachable from start.
2176 2178 If closed is True, return heads that are marked as closed as well.
2177 2179 '''
2178 2180 if branch is None:
2179 2181 branch = self[None].branch()
2180 2182 branches = self.branchmap()
2181 2183 if branch not in branches:
2182 2184 return []
2183 2185 # the cache returns heads ordered lowest to highest
2184 2186 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2185 2187 if start is not None:
2186 2188 # filter out the heads that cannot be reached from startrev
2187 2189 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2188 2190 bheads = [h for h in bheads if h in fbheads]
2189 2191 return bheads
2190 2192
2191 2193 def branches(self, nodes):
2192 2194 if not nodes:
2193 2195 nodes = [self.changelog.tip()]
2194 2196 b = []
2195 2197 for n in nodes:
2196 2198 t = n
2197 2199 while True:
2198 2200 p = self.changelog.parents(n)
2199 2201 if p[1] != nullid or p[0] == nullid:
2200 2202 b.append((t, n, p[0], p[1]))
2201 2203 break
2202 2204 n = p[0]
2203 2205 return b
2204 2206
2205 2207 def between(self, pairs):
2206 2208 r = []
2207 2209
2208 2210 for top, bottom in pairs:
2209 2211 n, l, i = top, [], 0
2210 2212 f = 1
2211 2213
2212 2214 while n != bottom and n != nullid:
2213 2215 p = self.changelog.parents(n)[0]
2214 2216 if i == f:
2215 2217 l.append(n)
2216 2218 f = f * 2
2217 2219 n = p
2218 2220 i += 1
2219 2221
2220 2222 r.append(l)
2221 2223
2222 2224 return r
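        # The loop above samples ancestors at exponentially growing
        # distances from 'top' (1, 2, 4, 8, ... steps), e.g. for a linear
        # history with top at rev 10 and bottom at rev 0 it collects
        # revisions 9, 8, 6 and 2, keeping each answer logarithmic in the
        # size of the gap.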
2223 2225
2224 2226 def checkpush(self, pushop):
2225 2227 """Extensions can override this function if additional checks have
2226 2228 to be performed before pushing, or call it if they override the push
2227 2229 command.
2228 2230 """
2229 2231
2230 2232 @unfilteredpropertycache
2231 2233 def prepushoutgoinghooks(self):
2232 2234 """Return a util.hooks object whose hooks are called with a pushop
2233 2235 (providing repo, remote and outgoing) before pushing changesets.
2234 2236 """
2235 2237 return util.hooks()
2236 2238
2237 2239 def pushkey(self, namespace, key, old, new):
2238 2240 try:
2239 2241 tr = self.currenttransaction()
2240 2242 hookargs = {}
2241 2243 if tr is not None:
2242 2244 hookargs.update(tr.hookargs)
2243 2245 hookargs['namespace'] = namespace
2244 2246 hookargs['key'] = key
2245 2247 hookargs['old'] = old
2246 2248 hookargs['new'] = new
2247 2249 self.hook('prepushkey', throw=True, **hookargs)
2248 2250 except error.HookAbort as exc:
2249 2251 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2250 2252 if exc.hint:
2251 2253 self.ui.write_err(_("(%s)\n") % exc.hint)
2252 2254 return False
2253 2255 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2254 2256 ret = pushkey.push(self, namespace, key, old, new)
2255 2257 def runhook():
2256 2258 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2257 2259 ret=ret)
2258 2260 self._afterlock(runhook)
2259 2261 return ret
2260 2262
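    # Hedged usage sketch: moving a bookmark through the generic pushkey
    # mechanism ('newnode_hex' is an illustrative hex node; '' means the
    # key did not previously exist):
    #
    #   ok = repo.pushkey('bookmarks', 'feature', '', newnode_hex)
    #   if not ok:
    #       repo.ui.warn('bookmark move rejected\n')
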
2261 2263 def listkeys(self, namespace):
2262 2264 self.hook('prelistkeys', throw=True, namespace=namespace)
2263 2265 self.ui.debug('listing keys for "%s"\n' % namespace)
2264 2266 values = pushkey.list(self, namespace)
2265 2267 self.hook('listkeys', namespace=namespace, values=values)
2266 2268 return values
2267 2269
2268 2270 def debugwireargs(self, one, two, three=None, four=None, five=None):
2269 2271 '''used to test argument passing over the wire'''
2270 2272 return "%s %s %s %s %s" % (one, two, three, four, five)
2271 2273
2272 2274 def savecommitmessage(self, text):
2273 2275 fp = self.vfs('last-message.txt', 'wb')
2274 2276 try:
2275 2277 fp.write(text)
2276 2278 finally:
2277 2279 fp.close()
2278 2280 return self.pathto(fp.name[len(self.root) + 1:])
2279 2281
2280 2282 # used to avoid circular references so destructors work
2281 2283 def aftertrans(files):
2282 2284 renamefiles = [tuple(t) for t in files]
2283 2285 def a():
2284 2286 for vfs, src, dest in renamefiles:
2285 2287 # if src and dest refer to the same file, vfs.rename is a no-op,
2286 2288 # leaving both src and dest on disk. delete dest to make sure
2287 2289 # the rename couldn't be such a no-op.
2288 2290 vfs.tryunlink(dest)
2289 2291 try:
2290 2292 vfs.rename(src, dest)
2291 2293 except OSError: # journal file does not yet exist
2292 2294 pass
2293 2295 return a
2294 2296
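# Hedged note: callers build 'files' as (vfs, src, dest) triples, e.g.
# (repo.svfs, 'journal', 'undo'), and hand aftertrans(files) to the
# transaction as a post-close callback; keeping a plain list of tuples
# rather than a repo reference is what avoids the circular reference
# mentioned above.
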
2295 2297 def undoname(fn):
2296 2298 base, name = os.path.split(fn)
2297 2299 assert name.startswith('journal')
2298 2300 return os.path.join(base, name.replace('journal', 'undo', 1))
2299 2301
2300 2302 def instance(ui, path, create):
2301 2303 return localrepository(ui, util.urllocalpath(path), create)
2302 2304
2303 2305 def islocal(path):
2304 2306 return True
2305 2307
2306 2308 def newreporequirements(repo):
2307 2309 """Determine the set of requirements for a new local repository.
2308 2310
2309 2311 Extensions can wrap this function to specify custom requirements for
2310 2312 new repositories.
2311 2313 """
2312 2314 ui = repo.ui
2313 2315 requirements = {'revlogv1'}
2314 2316 if ui.configbool('format', 'usestore'):
2315 2317 requirements.add('store')
2316 2318 if ui.configbool('format', 'usefncache'):
2317 2319 requirements.add('fncache')
2318 2320 if ui.configbool('format', 'dotencode'):
2319 2321 requirements.add('dotencode')
2320 2322
2321 2323 compengine = ui.config('experimental', 'format.compression')
2322 2324 if compengine not in util.compengines:
2323 2325 raise error.Abort(_('compression engine %s defined by '
2324 2326 'experimental.format.compression not available') %
2325 2327 compengine,
2326 2328 hint=_('run "hg debuginstall" to list available '
2327 2329 'compression engines'))
2328 2330
2329 2331 # zlib is the historical default and doesn't need an explicit requirement.
2330 2332 if compengine != 'zlib':
2331 2333 requirements.add('exp-compression-%s' % compengine)
2332 2334
2333 2335 if scmutil.gdinitconfig(ui):
2334 2336 requirements.add('generaldelta')
2335 2337 if ui.configbool('experimental', 'treemanifest'):
2336 2338 requirements.add('treemanifest')
2337 2339 if ui.configbool('experimental', 'manifestv2'):
2338 2340 requirements.add('manifestv2')
2339 2341
2340 2342 revlogv2 = ui.config('experimental', 'revlogv2')
2341 2343 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2342 2344 requirements.remove('revlogv1')
2343 2345 # generaldelta is implied by revlogv2.
2344 2346 requirements.discard('generaldelta')
2345 2347 requirements.add(REVLOGV2_REQUIREMENT)
2346 2348
2347 2349 return requirements
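
# Hedged sketch: an extension adding its own requirement for newly created
# repositories (the 'exp-myfeature' requirement name and 'myext' config
# section are hypothetical):
#
#   from mercurial import extensions, localrepo
#
#   def _newreporequirements(orig, repo):
#       reqs = orig(repo)
#       if repo.ui.configbool('myext', 'enabled'):
#           reqs.add('exp-myfeature')
#       return reqs
#
#   extensions.wrapfunction(localrepo, 'newreporequirements',
#                           _newreporequirements)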