localrepo: specify optional callback parameter to pathauditor as a keyword
Augie Fackler
r35118:ebabc4a8 default
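The change itself is mechanical: the two pathauditor constructions in localrepository.__init__ (old lines 369-371, new lines 369-372 in the hunk below) now pass the optional callback by keyword instead of positionally. A minimal before/after sketch, assuming the pathauditor(root, callback=None, realfs=True, cached=False) signature implied by the call sites in the diff:

    # before: the callback is bound by position, which breaks silently if a
    # positional parameter is ever inserted before it
    auditor = pathutil.pathauditor(repo.root, repo._checknested)

    # after: naming the optional parameter keeps the call self-documenting
    auditor = pathutil.pathauditor(repo.root, callback=repo._checknested)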
@@ -1,2346 +1,2347 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 )
24 24 from . import (
25 25 bookmarks,
26 26 branchmap,
27 27 bundle2,
28 28 changegroup,
29 29 changelog,
30 30 color,
31 31 context,
32 32 dirstate,
33 33 dirstateguard,
34 34 discovery,
35 35 encoding,
36 36 error,
37 37 exchange,
38 38 extensions,
39 39 filelog,
40 40 hook,
41 41 lock as lockmod,
42 42 manifest,
43 43 match as matchmod,
44 44 merge as mergemod,
45 45 mergeutil,
46 46 namespaces,
47 47 obsolete,
48 48 pathutil,
49 49 peer,
50 50 phases,
51 51 pushkey,
52 52 pycompat,
53 53 repository,
54 54 repoview,
55 55 revset,
56 56 revsetlang,
57 57 scmutil,
58 58 sparse,
59 59 store,
60 60 subrepo,
61 61 tags as tagsmod,
62 62 transaction,
63 63 txnutil,
64 64 util,
65 65 vfs as vfsmod,
66 66 )
67 67
68 68 release = lockmod.release
69 69 urlerr = util.urlerr
70 70 urlreq = util.urlreq
71 71
72 72 # set of (path, vfs-location) tuples. vfs-location is:
73 73 # - 'plain' for vfs relative paths
74 74 # - '' for svfs relative paths
75 75 _cachedfiles = set()
76 76
77 77 class _basefilecache(scmutil.filecache):
78 78 """All filecache usage on repo are done for logic that should be unfiltered
79 79 """
80 80 def __get__(self, repo, type=None):
81 81 if repo is None:
82 82 return self
83 83 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
84 84 def __set__(self, repo, value):
85 85 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
86 86 def __delete__(self, repo):
87 87 return super(_basefilecache, self).__delete__(repo.unfiltered())
88 88
89 89 class repofilecache(_basefilecache):
90 90 """filecache for files in .hg but outside of .hg/store"""
91 91 def __init__(self, *paths):
92 92 super(repofilecache, self).__init__(*paths)
93 93 for path in paths:
94 94 _cachedfiles.add((path, 'plain'))
95 95
96 96 def join(self, obj, fname):
97 97 return obj.vfs.join(fname)
98 98
99 99 class storecache(_basefilecache):
100 100 """filecache for files in the store"""
101 101 def __init__(self, *paths):
102 102 super(storecache, self).__init__(*paths)
103 103 for path in paths:
104 104 _cachedfiles.add((path, ''))
105 105
106 106 def join(self, obj, fname):
107 107 return obj.sjoin(fname)
108 108
109 109 def isfilecached(repo, name):
110 110 """check if a repo has already cached "name" filecache-ed property
111 111
112 112 This returns (cachedobj-or-None, iscached) tuple.
113 113 """
114 114 cacheentry = repo.unfiltered()._filecache.get(name, None)
115 115 if not cacheentry:
116 116 return None, False
117 117 return cacheentry.obj, True
118 118
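A usage sketch for isfilecached (illustrative, not part of the module): it lets callers check whether a filecache-ed property has been computed without triggering the computation themselves.

    # (None, False) until repo.changelog has been accessed at least once;
    # (<changelog instance>, True) afterwards
    obj, cached = isfilecached(repo, 'changelog')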
119 119 class unfilteredpropertycache(util.propertycache):
120 120 """propertycache that apply to unfiltered repo only"""
121 121
122 122 def __get__(self, repo, type=None):
123 123 unfi = repo.unfiltered()
124 124 if unfi is repo:
125 125 return super(unfilteredpropertycache, self).__get__(unfi)
126 126 return getattr(unfi, self.name)
127 127
128 128 class filteredpropertycache(util.propertycache):
129 129 """propertycache that must take filtering in account"""
130 130
131 131 def cachevalue(self, obj, value):
132 132 object.__setattr__(obj, self.name, value)
133 133
134 134
135 135 def hasunfilteredcache(repo, name):
136 136 """check if a repo has an unfilteredpropertycache value for <name>"""
137 137 return name in vars(repo.unfiltered())
138 138
139 139 def unfilteredmethod(orig):
140 140 """decorate method that always need to be run on unfiltered version"""
141 141 def wrapper(repo, *args, **kwargs):
142 142 return orig(repo.unfiltered(), *args, **kwargs)
143 143 return wrapper
144 144
145 145 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
146 146 'unbundle'}
147 147 legacycaps = moderncaps.union({'changegroupsubset'})
148 148
149 149 class localpeer(repository.peer):
150 150 '''peer for a local repo; reflects only the most recent API'''
151 151
152 152 def __init__(self, repo, caps=None):
153 153 super(localpeer, self).__init__()
154 154
155 155 if caps is None:
156 156 caps = moderncaps.copy()
157 157 self._repo = repo.filtered('served')
158 158 self._ui = repo.ui
159 159 self._caps = repo._restrictcapabilities(caps)
160 160
161 161 # Begin of _basepeer interface.
162 162
163 163 @util.propertycache
164 164 def ui(self):
165 165 return self._ui
166 166
167 167 def url(self):
168 168 return self._repo.url()
169 169
170 170 def local(self):
171 171 return self._repo
172 172
173 173 def peer(self):
174 174 return self
175 175
176 176 def canpush(self):
177 177 return True
178 178
179 179 def close(self):
180 180 self._repo.close()
181 181
182 182 # End of _basepeer interface.
183 183
184 184 # Begin of _basewirecommands interface.
185 185
186 186 def branchmap(self):
187 187 return self._repo.branchmap()
188 188
189 189 def capabilities(self):
190 190 return self._caps
191 191
192 192 def debugwireargs(self, one, two, three=None, four=None, five=None):
193 193 """Used to test argument passing over the wire"""
194 194 return "%s %s %s %s %s" % (one, two, three, four, five)
195 195
196 196 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
197 197 **kwargs):
198 198 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
199 199 common=common, bundlecaps=bundlecaps,
200 200 **kwargs)
201 201 cb = util.chunkbuffer(chunks)
202 202
203 203 if exchange.bundle2requested(bundlecaps):
204 204 # When requesting a bundle2, getbundle returns a stream to make the
205 205 # wire level function happier. We need to build a proper object
206 206 # from it in local peer.
207 207 return bundle2.getunbundler(self.ui, cb)
208 208 else:
209 209 return changegroup.getunbundler('01', cb, None)
210 210
211 211 def heads(self):
212 212 return self._repo.heads()
213 213
214 214 def known(self, nodes):
215 215 return self._repo.known(nodes)
216 216
217 217 def listkeys(self, namespace):
218 218 return self._repo.listkeys(namespace)
219 219
220 220 def lookup(self, key):
221 221 return self._repo.lookup(key)
222 222
223 223 def pushkey(self, namespace, key, old, new):
224 224 return self._repo.pushkey(namespace, key, old, new)
225 225
226 226 def stream_out(self):
227 227 raise error.Abort(_('cannot perform stream clone against local '
228 228 'peer'))
229 229
230 230 def unbundle(self, cg, heads, url):
231 231 """apply a bundle on a repo
232 232
233 233 This function handles the repo locking itself."""
234 234 try:
235 235 try:
236 236 cg = exchange.readbundle(self.ui, cg, None)
237 237 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
238 238 if util.safehasattr(ret, 'getchunks'):
239 239 # This is a bundle20 object, turn it into an unbundler.
240 240 # This little dance should be dropped eventually when the
241 241 # API is finally improved.
242 242 stream = util.chunkbuffer(ret.getchunks())
243 243 ret = bundle2.getunbundler(self.ui, stream)
244 244 return ret
245 245 except Exception as exc:
246 246 # If the exception contains output salvaged from a bundle2
247 247 # reply, we need to make sure it is printed before continuing
248 248 # to fail. So we build a bundle2 with such output and consume
249 249 # it directly.
250 250 #
251 251 # This is not very elegant but allows a "simple" solution for
252 252 # issue4594
253 253 output = getattr(exc, '_bundle2salvagedoutput', ())
254 254 if output:
255 255 bundler = bundle2.bundle20(self._repo.ui)
256 256 for out in output:
257 257 bundler.addpart(out)
258 258 stream = util.chunkbuffer(bundler.getchunks())
259 259 b = bundle2.getunbundler(self.ui, stream)
260 260 bundle2.processbundle(self._repo, b)
261 261 raise
262 262 except error.PushRaced as exc:
263 263 raise error.ResponseError(_('push failed:'), str(exc))
264 264
265 265 # End of _basewirecommands interface.
266 266
267 267 # Begin of peer interface.
268 268
269 269 def iterbatch(self):
270 270 return peer.localiterbatcher(self)
271 271
272 272 # End of peer interface.
273 273
274 274 class locallegacypeer(repository.legacypeer, localpeer):
275 275 '''peer extension which implements legacy methods too; used for tests with
276 276 restricted capabilities'''
277 277
278 278 def __init__(self, repo):
279 279 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
280 280
281 281 # Begin of baselegacywirecommands interface.
282 282
283 283 def between(self, pairs):
284 284 return self._repo.between(pairs)
285 285
286 286 def branches(self, nodes):
287 287 return self._repo.branches(nodes)
288 288
289 289 def changegroup(self, basenodes, source):
290 290 outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
291 291 missingheads=self._repo.heads())
292 292 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
293 293
294 294 def changegroupsubset(self, bases, heads, source):
295 295 outgoing = discovery.outgoing(self._repo, missingroots=bases,
296 296 missingheads=heads)
297 297 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
298 298
299 299 # End of baselegacywirecommands interface.
300 300
301 301 # Increment the sub-version when the revlog v2 format changes to lock out old
302 302 # clients.
303 303 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
304 304
305 305 class localrepository(object):
306 306
307 307 supportedformats = {
308 308 'revlogv1',
309 309 'generaldelta',
310 310 'treemanifest',
311 311 'manifestv2',
312 312 REVLOGV2_REQUIREMENT,
313 313 }
314 314 _basesupported = supportedformats | {
315 315 'store',
316 316 'fncache',
317 317 'shared',
318 318 'relshared',
319 319 'dotencode',
320 320 'exp-sparse',
321 321 }
322 322 openerreqs = {
323 323 'revlogv1',
324 324 'generaldelta',
325 325 'treemanifest',
326 326 'manifestv2',
327 327 }
328 328
329 329 # a list of (ui, featureset) functions.
330 330 # only functions defined in modules of enabled extensions are invoked
331 331 featuresetupfuncs = set()
332 332
333 333 # list of prefixes for files which can be written without 'wlock'
334 334 # Extensions should extend this list when needed
335 335 _wlockfreeprefix = {
336 336 # We might consider requiring 'wlock' for the next
337 337 # two, but pretty much all the existing code assumes
338 338 # wlock is not needed, so we keep them excluded for
339 339 # now.
340 340 'hgrc',
341 341 'requires',
342 342 # XXX cache is a complicated business; someone
343 343 # should investigate this in depth at some point
344 344 'cache/',
345 345 # XXX shouldn't dirstate be covered by the wlock?
346 346 'dirstate',
347 347 # XXX bisect was still a bit too messy at the time
348 348 # this changeset was introduced. Someone should fix
349 349 # the remaining bit and drop this line
350 350 'bisect.state',
351 351 }
352 352
353 353 def __init__(self, baseui, path, create=False):
354 354 self.requirements = set()
355 355 self.filtername = None
356 356 # wvfs: rooted at the repository root, used to access the working copy
357 357 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
358 358 # vfs: rooted at .hg, used to access repo files outside of .hg/store
359 359 self.vfs = None
360 360 # svfs: usually rooted at .hg/store, used to access repository history
361 361 # If this is a shared repository, this vfs may point to another
362 362 # repository's .hg/store directory.
363 363 self.svfs = None
364 364 self.root = self.wvfs.base
365 365 self.path = self.wvfs.join(".hg")
366 366 self.origroot = path
367 367 # These auditors are not used by the vfs; at the time of
368 368 # writing, their only user is basectx.match
369 - self.auditor = pathutil.pathauditor(self.root, self._checknested)
370 - self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
371 - realfs=False, cached=True)
369 + self.auditor = pathutil.pathauditor(
370 + self.root, callback=self._checknested)
371 + self.nofsauditor = pathutil.pathauditor(
372 + self.root, callback=self._checknested, realfs=False, cached=True)
372 373 self.baseui = baseui
373 374 self.ui = baseui.copy()
374 375 self.ui.copy = baseui.copy # prevent copying repo configuration
375 376 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
376 377 if (self.ui.configbool('devel', 'all-warnings') or
377 378 self.ui.configbool('devel', 'check-locks')):
378 379 self.vfs.audit = self._getvfsward(self.vfs.audit)
380 381 # A list of callbacks to shape the phase if no data were found.
381 382 # Callbacks are in the form: func(repo, roots) --> processed root.
382 383 # This list is to be filled by extensions during repo setup
382 383 self._phasedefaults = []
383 384 try:
384 385 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
385 386 self._loadextensions()
386 387 except IOError:
387 388 pass
388 389
389 390 if self.featuresetupfuncs:
390 391 self.supported = set(self._basesupported) # use private copy
391 392 extmods = set(m.__name__ for n, m
392 393 in extensions.extensions(self.ui))
393 394 for setupfunc in self.featuresetupfuncs:
394 395 if setupfunc.__module__ in extmods:
395 396 setupfunc(self.ui, self.supported)
396 397 else:
397 398 self.supported = self._basesupported
398 399 color.setup(self.ui)
399 400
400 401 # Add compression engines.
401 402 for name in util.compengines:
402 403 engine = util.compengines[name]
403 404 if engine.revlogheader():
404 405 self.supported.add('exp-compression-%s' % name)
405 406
406 407 if not self.vfs.isdir():
407 408 if create:
408 409 self.requirements = newreporequirements(self)
409 410
410 411 if not self.wvfs.exists():
411 412 self.wvfs.makedirs()
412 413 self.vfs.makedir(notindexed=True)
413 414
414 415 if 'store' in self.requirements:
415 416 self.vfs.mkdir("store")
416 417
417 418 # create an invalid changelog
418 419 self.vfs.append(
419 420 "00changelog.i",
420 421 '\0\0\0\2' # represents revlogv2
421 422 ' dummy changelog to prevent using the old repo layout'
422 423 )
423 424 else:
424 425 raise error.RepoError(_("repository %s not found") % path)
425 426 elif create:
426 427 raise error.RepoError(_("repository %s already exists") % path)
427 428 else:
428 429 try:
429 430 self.requirements = scmutil.readrequires(
430 431 self.vfs, self.supported)
431 432 except IOError as inst:
432 433 if inst.errno != errno.ENOENT:
433 434 raise
434 435
435 436 cachepath = self.vfs.join('cache')
436 437 self.sharedpath = self.path
437 438 try:
438 439 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
439 440 if 'relshared' in self.requirements:
440 441 sharedpath = self.vfs.join(sharedpath)
441 442 vfs = vfsmod.vfs(sharedpath, realpath=True)
442 443 cachepath = vfs.join('cache')
443 444 s = vfs.base
444 445 if not vfs.exists():
445 446 raise error.RepoError(
446 447 _('.hg/sharedpath points to nonexistent directory %s') % s)
447 448 self.sharedpath = s
448 449 except IOError as inst:
449 450 if inst.errno != errno.ENOENT:
450 451 raise
451 452
452 453 if 'exp-sparse' in self.requirements and not sparse.enabled:
453 454 raise error.RepoError(_('repository is using sparse feature but '
454 455 'sparse is not enabled; enable the '
455 456 '"sparse" extensions to access'))
456 457
457 458 self.store = store.store(
458 459 self.requirements, self.sharedpath,
459 460 lambda base: vfsmod.vfs(base, cacheaudited=True))
460 461 self.spath = self.store.path
461 462 self.svfs = self.store.vfs
462 463 self.sjoin = self.store.join
463 464 self.vfs.createmode = self.store.createmode
464 465 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
465 466 self.cachevfs.createmode = self.store.createmode
466 467 if (self.ui.configbool('devel', 'all-warnings') or
467 468 self.ui.configbool('devel', 'check-locks')):
468 469 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
469 470 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
470 471 else: # standard vfs
471 472 self.svfs.audit = self._getsvfsward(self.svfs.audit)
472 473 self._applyopenerreqs()
473 474 if create:
474 475 self._writerequirements()
475 476
476 477 self._dirstatevalidatewarned = False
477 478
478 479 self._branchcaches = {}
479 480 self._revbranchcache = None
480 481 self.filterpats = {}
481 482 self._datafilters = {}
482 483 self._transref = self._lockref = self._wlockref = None
483 484
484 485 # A cache for various files under .hg/ that tracks file changes,
485 486 # (used by the filecache decorator)
486 487 #
487 488 # Maps a property name to its util.filecacheentry
488 489 self._filecache = {}
489 490
490 491 # hold sets of revision to be filtered
491 492 # should be cleared when something might have changed the filter value:
492 493 # - new changesets,
493 494 # - phase change,
494 495 # - new obsolescence marker,
495 496 # - working directory parent change,
496 497 # - bookmark changes
497 498 self.filteredrevcache = {}
498 499
499 500 # post-dirstate-status hooks
500 501 self._postdsstatus = []
501 502
502 503 # Cache of types representing filtered repos.
503 504 self._filteredrepotypes = weakref.WeakKeyDictionary()
504 505
505 506 # generic mapping between names and nodes
506 507 self.names = namespaces.namespaces()
507 508
508 509 # Key to signature value.
509 510 self._sparsesignaturecache = {}
510 511 # Signature to cached matcher instance.
511 512 self._sparsematchercache = {}
512 513
513 514 def _getvfsward(self, origfunc):
514 515 """build a ward for self.vfs"""
515 516 rref = weakref.ref(self)
516 517 def checkvfs(path, mode=None):
517 518 ret = origfunc(path, mode=mode)
518 519 repo = rref()
519 520 if (repo is None
520 521 or not util.safehasattr(repo, '_wlockref')
521 522 or not util.safehasattr(repo, '_lockref')):
522 523 return
523 524 if mode in (None, 'r', 'rb'):
524 525 return
525 526 if path.startswith(repo.path):
526 527 # truncate name relative to the repository (.hg)
527 528 path = path[len(repo.path) + 1:]
528 529 if path.startswith('cache/'):
529 530 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
530 531 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
531 532 if path.startswith('journal.'):
532 533 # journal is covered by 'lock'
533 534 if repo._currentlock(repo._lockref) is None:
534 535 repo.ui.develwarn('write with no lock: "%s"' % path,
535 536 stacklevel=2, config='check-locks')
536 537 elif repo._currentlock(repo._wlockref) is None:
537 538 # rest of vfs files are covered by 'wlock'
538 539 #
539 540 # exclude special files
540 541 for prefix in self._wlockfreeprefix:
541 542 if path.startswith(prefix):
542 543 return
543 544 repo.ui.develwarn('write with no wlock: "%s"' % path,
544 545 stacklevel=2, config='check-locks')
545 546 return ret
546 547 return checkvfs
547 548
548 549 def _getsvfsward(self, origfunc):
549 550 """build a ward for self.svfs"""
550 551 rref = weakref.ref(self)
551 552 def checksvfs(path, mode=None):
552 553 ret = origfunc(path, mode=mode)
553 554 repo = rref()
554 555 if repo is None or not util.safehasattr(repo, '_lockref'):
555 556 return
556 557 if mode in (None, 'r', 'rb'):
557 558 return
558 559 if path.startswith(repo.sharedpath):
559 560 # truncate name relative to the repository (.hg)
560 561 path = path[len(repo.sharedpath) + 1:]
561 562 if repo._currentlock(repo._lockref) is None:
562 563 repo.ui.develwarn('write with no lock: "%s"' % path,
563 564 stacklevel=3)
564 565 return ret
565 566 return checksvfs
566 567
567 568 def close(self):
568 569 self._writecaches()
569 570
570 571 def _loadextensions(self):
571 572 extensions.loadall(self.ui)
572 573
573 574 def _writecaches(self):
574 575 if self._revbranchcache:
575 576 self._revbranchcache.write()
576 577
577 578 def _restrictcapabilities(self, caps):
578 579 if self.ui.configbool('experimental', 'bundle2-advertise'):
579 580 caps = set(caps)
580 581 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
581 582 caps.add('bundle2=' + urlreq.quote(capsblob))
582 583 return caps
583 584
584 585 def _applyopenerreqs(self):
585 586 self.svfs.options = dict((r, 1) for r in self.requirements
586 587 if r in self.openerreqs)
587 588 # experimental config: format.chunkcachesize
588 589 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
589 590 if chunkcachesize is not None:
590 591 self.svfs.options['chunkcachesize'] = chunkcachesize
591 592 # experimental config: format.maxchainlen
592 593 maxchainlen = self.ui.configint('format', 'maxchainlen')
593 594 if maxchainlen is not None:
594 595 self.svfs.options['maxchainlen'] = maxchainlen
595 596 # experimental config: format.manifestcachesize
596 597 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
597 598 if manifestcachesize is not None:
598 599 self.svfs.options['manifestcachesize'] = manifestcachesize
599 600 # experimental config: format.aggressivemergedeltas
600 601 aggressivemergedeltas = self.ui.configbool('format',
601 602 'aggressivemergedeltas')
602 603 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
603 604 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
604 605 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
605 606 if 0 <= chainspan:
606 607 self.svfs.options['maxdeltachainspan'] = chainspan
607 608 mmapindexthreshold = self.ui.configbytes('experimental',
608 609 'mmapindexthreshold')
609 610 if mmapindexthreshold is not None:
610 611 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
611 612 withsparseread = self.ui.configbool('experimental', 'sparse-read')
612 613 srdensitythres = float(self.ui.config('experimental',
613 614 'sparse-read.density-threshold'))
614 615 srmingapsize = self.ui.configbytes('experimental',
615 616 'sparse-read.min-gap-size')
616 617 self.svfs.options['with-sparse-read'] = withsparseread
617 618 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
618 619 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
619 620
620 621 for r in self.requirements:
621 622 if r.startswith('exp-compression-'):
622 623 self.svfs.options['compengine'] = r[len('exp-compression-'):]
623 624
624 625 # TODO move "revlogv2" to openerreqs once finalized.
625 626 if REVLOGV2_REQUIREMENT in self.requirements:
626 627 self.svfs.options['revlogv2'] = True
627 628
628 629 def _writerequirements(self):
629 630 scmutil.writerequires(self.vfs, self.requirements)
630 631
631 632 def _checknested(self, path):
632 633 """Determine if path is a legal nested repository."""
633 634 if not path.startswith(self.root):
634 635 return False
635 636 subpath = path[len(self.root) + 1:]
636 637 normsubpath = util.pconvert(subpath)
637 638
638 639 # XXX: Checking against the current working copy is wrong in
639 640 # the sense that it can reject things like
640 641 #
641 642 # $ hg cat -r 10 sub/x.txt
642 643 #
643 644 # if sub/ is no longer a subrepository in the working copy
644 645 # parent revision.
645 646 #
646 647 # However, it can of course also allow things that would have
647 648 # been rejected before, such as the above cat command if sub/
648 649 # is a subrepository now, but was a normal directory before.
649 650 # The old path auditor would have rejected by mistake since it
650 651 # panics when it sees sub/.hg/.
651 652 #
652 653 # All in all, checking against the working copy seems sensible
653 654 # since we want to prevent access to nested repositories on
654 655 # the filesystem *now*.
655 656 ctx = self[None]
656 657 parts = util.splitpath(subpath)
657 658 while parts:
658 659 prefix = '/'.join(parts)
659 660 if prefix in ctx.substate:
660 661 if prefix == normsubpath:
661 662 return True
662 663 else:
663 664 sub = ctx.sub(prefix)
664 665 return sub.checknested(subpath[len(prefix) + 1:])
665 666 else:
666 667 parts.pop()
667 668 return False
668 669
669 670 def peer(self):
670 671 return localpeer(self) # not cached to avoid reference cycle
671 672
672 673 def unfiltered(self):
673 674 """Return unfiltered version of the repository
674 675
675 676 Intended to be overwritten by filtered repo."""
676 677 return self
677 678
678 679 def filtered(self, name):
679 680 """Return a filtered version of a repository"""
680 681 # Python <3.4 easily leaks types via __mro__. See
681 682 # https://bugs.python.org/issue17950. We cache dynamically
682 683 # created types so this method doesn't leak on every
683 684 # invocation.
684 685
685 686 key = self.unfiltered().__class__
686 687 if key not in self._filteredrepotypes:
687 688 # Build a new type with the repoview mixin and the base
688 689 # class of this repo. Give it a name containing the
689 690 # filter name to aid debugging.
690 691 bases = (repoview.repoview, key)
691 692 cls = type(r'%sfilteredrepo' % name, bases, {})
692 693 self._filteredrepotypes[key] = cls
693 694
694 695 return self._filteredrepotypes[key](self, name)
695 696
696 697 @repofilecache('bookmarks', 'bookmarks.current')
697 698 def _bookmarks(self):
698 699 return bookmarks.bmstore(self)
699 700
700 701 @property
701 702 def _activebookmark(self):
702 703 return self._bookmarks.active
703 704
704 705 # _phaserevs and _phasesets depend on changelog. what we need is to
705 706 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
706 707 # can't be easily expressed in filecache mechanism.
707 708 @storecache('phaseroots', '00changelog.i')
708 709 def _phasecache(self):
709 710 return phases.phasecache(self, self._phasedefaults)
710 711
711 712 @storecache('obsstore')
712 713 def obsstore(self):
713 714 return obsolete.makestore(self.ui, self)
714 715
715 716 @storecache('00changelog.i')
716 717 def changelog(self):
717 718 return changelog.changelog(self.svfs,
718 719 trypending=txnutil.mayhavepending(self.root))
719 720
720 721 def _constructmanifest(self):
721 722 # This is a temporary function while we migrate from manifest to
722 723 # manifestlog. It allows bundlerepo and unionrepo to intercept the
723 724 # manifest creation.
724 725 return manifest.manifestrevlog(self.svfs)
725 726
726 727 @storecache('00manifest.i')
727 728 def manifestlog(self):
728 729 return manifest.manifestlog(self.svfs, self)
729 730
730 731 @repofilecache('dirstate')
731 732 def dirstate(self):
732 733 sparsematchfn = lambda: sparse.matcher(self)
733 734
734 735 return dirstate.dirstate(self.vfs, self.ui, self.root,
735 736 self._dirstatevalidate, sparsematchfn)
736 737
737 738 def _dirstatevalidate(self, node):
738 739 try:
739 740 self.changelog.rev(node)
740 741 return node
741 742 except error.LookupError:
742 743 if not self._dirstatevalidatewarned:
743 744 self._dirstatevalidatewarned = True
744 745 self.ui.warn(_("warning: ignoring unknown"
745 746 " working parent %s!\n") % short(node))
746 747 return nullid
747 748
748 749 def __getitem__(self, changeid):
749 750 if changeid is None:
750 751 return context.workingctx(self)
751 752 if isinstance(changeid, slice):
752 753 # wdirrev isn't contiguous so the slice shouldn't include it
753 754 return [context.changectx(self, i)
754 755 for i in xrange(*changeid.indices(len(self)))
755 756 if i not in self.changelog.filteredrevs]
756 757 try:
757 758 return context.changectx(self, changeid)
758 759 except error.WdirUnsupported:
759 760 return context.workingctx(self)
760 761
761 762 def __contains__(self, changeid):
762 763 """True if the given changeid exists
763 764
764 765 error.LookupError is raised if an ambiguous node is specified.
765 766 """
766 767 try:
767 768 self[changeid]
768 769 return True
769 770 except error.RepoLookupError:
770 771 return False
771 772
772 773 def __nonzero__(self):
773 774 return True
774 775
775 776 __bool__ = __nonzero__
776 777
777 778 def __len__(self):
778 779 return len(self.changelog)
779 780
780 781 def __iter__(self):
781 782 return iter(self.changelog)
782 783
783 784 def revs(self, expr, *args):
784 785 '''Find revisions matching a revset.
785 786
786 787 The revset is specified as a string ``expr`` that may contain
787 788 %-formatting to escape certain types. See ``revsetlang.formatspec``.
788 789
789 790 Revset aliases from the configuration are not expanded. To expand
790 791 user aliases, consider calling ``scmutil.revrange()`` or
791 792 ``repo.anyrevs([expr], user=True)``.
792 793
793 794 Returns a revset.abstractsmartset, which is a list-like interface
794 795 that contains integer revisions.
795 796 '''
796 797 expr = revsetlang.formatspec(expr, *args)
797 798 m = revset.match(None, expr)
798 799 return m(self)
799 800
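A usage sketch for revs() (illustrative; the %d and %s escapes follow the revsetlang.formatspec conventions referenced in the docstring above):

    # integer revisions on branch 'default' that are ancestors of rev 42;
    # user-defined revset aliases are deliberately not expanded here
    subset = repo.revs('ancestors(%d) and branch(%s)', 42, 'default')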
800 801 def set(self, expr, *args):
801 802 '''Find revisions matching a revset and emit changectx instances.
802 803
803 804 This is a convenience wrapper around ``revs()`` that iterates the
804 805 result and is a generator of changectx instances.
805 806
806 807 Revset aliases from the configuration are not expanded. To expand
807 808 user aliases, consider calling ``scmutil.revrange()``.
808 809 '''
809 810 for r in self.revs(expr, *args):
810 811 yield self[r]
811 812
812 813 def anyrevs(self, specs, user=False, localalias=None):
813 814 '''Find revisions matching one of the given revsets.
814 815
815 816 Revset aliases from the configuration are not expanded by default. To
816 817 expand user aliases, specify ``user=True``. To provide some local
817 818 definitions overriding user aliases, set ``localalias`` to
818 819 ``{name: definitionstring}``.
819 820 '''
820 821 if user:
821 822 m = revset.matchany(self.ui, specs, repo=self,
822 823 localalias=localalias)
823 824 else:
824 825 m = revset.matchany(None, specs, localalias=localalias)
825 826 return m(self)
826 827
827 828 def url(self):
828 829 return 'file:' + self.root
829 830
830 831 def hook(self, name, throw=False, **args):
831 832 """Call a hook, passing this repo instance.
832 833
833 834 This a convenience method to aid invoking hooks. Extensions likely
834 835 won't call this unless they have registered a custom hook or are
835 836 replacing code that is expected to call a hook.
836 837 """
837 838 return hook.hook(self.ui, self, name, throw, **args)
838 839
839 840 @filteredpropertycache
840 841 def _tagscache(self):
841 842 '''Returns a tagscache object that contains various tags related
842 843 caches.'''
843 844
844 845 # This simplifies its cache management by having one decorated
845 846 # function (this one) and the rest simply fetch things from it.
846 847 class tagscache(object):
847 848 def __init__(self):
848 849 # These two define the set of tags for this repository. tags
849 850 # maps tag name to node; tagtypes maps tag name to 'global' or
850 851 # 'local'. (Global tags are defined by .hgtags across all
851 852 # heads, and local tags are defined in .hg/localtags.)
852 853 # They constitute the in-memory cache of tags.
853 854 self.tags = self.tagtypes = None
854 855
855 856 self.nodetagscache = self.tagslist = None
856 857
857 858 cache = tagscache()
858 859 cache.tags, cache.tagtypes = self._findtags()
859 860
860 861 return cache
861 862
862 863 def tags(self):
863 864 '''return a mapping of tag to node'''
864 865 t = {}
865 866 if self.changelog.filteredrevs:
866 867 tags, tt = self._findtags()
867 868 else:
868 869 tags = self._tagscache.tags
869 870 for k, v in tags.iteritems():
870 871 try:
871 872 # ignore tags to unknown nodes
872 873 self.changelog.rev(v)
873 874 t[k] = v
874 875 except (error.LookupError, ValueError):
875 876 pass
876 877 return t
877 878
878 879 def _findtags(self):
879 880 '''Do the hard work of finding tags. Return a pair of dicts
880 881 (tags, tagtypes) where tags maps tag name to node, and tagtypes
881 882 maps tag name to a string like \'global\' or \'local\'.
882 883 Subclasses or extensions are free to add their own tags, but
883 884 should be aware that the returned dicts will be retained for the
884 885 duration of the localrepo object.'''
885 886
886 887 # XXX what tagtype should subclasses/extensions use? Currently
887 888 # mq and bookmarks add tags, but do not set the tagtype at all.
888 889 # Should each extension invent its own tag type? Should there
889 890 # be one tagtype for all such "virtual" tags? Or is the status
890 891 # quo fine?
891 892
892 893
893 894 # map tag name to (node, hist)
894 895 alltags = tagsmod.findglobaltags(self.ui, self)
895 896 # map tag name to tag type
896 897 tagtypes = dict((tag, 'global') for tag in alltags)
897 898
898 899 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
899 900
900 901 # Build the return dicts. Have to re-encode tag names because
901 902 # the tags module always uses UTF-8 (in order not to lose info
902 903 # writing to the cache), but the rest of Mercurial wants them in
903 904 # local encoding.
904 905 tags = {}
905 906 for (name, (node, hist)) in alltags.iteritems():
906 907 if node != nullid:
907 908 tags[encoding.tolocal(name)] = node
908 909 tags['tip'] = self.changelog.tip()
909 910 tagtypes = dict([(encoding.tolocal(name), value)
910 911 for (name, value) in tagtypes.iteritems()])
911 912 return (tags, tagtypes)
912 913
913 914 def tagtype(self, tagname):
914 915 '''
915 916 return the type of the given tag. result can be:
916 917
917 918 'local' : a local tag
918 919 'global' : a global tag
919 920 None : tag does not exist
920 921 '''
921 922
922 923 return self._tagscache.tagtypes.get(tagname)
923 924
924 925 def tagslist(self):
925 926 '''return a list of tags ordered by revision'''
926 927 if not self._tagscache.tagslist:
927 928 l = []
928 929 for t, n in self.tags().iteritems():
929 930 l.append((self.changelog.rev(n), t, n))
930 931 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
931 932
932 933 return self._tagscache.tagslist
933 934
934 935 def nodetags(self, node):
935 936 '''return the tags associated with a node'''
936 937 if not self._tagscache.nodetagscache:
937 938 nodetagscache = {}
938 939 for t, n in self._tagscache.tags.iteritems():
939 940 nodetagscache.setdefault(n, []).append(t)
940 941 for tags in nodetagscache.itervalues():
941 942 tags.sort()
942 943 self._tagscache.nodetagscache = nodetagscache
943 944 return self._tagscache.nodetagscache.get(node, [])
944 945
945 946 def nodebookmarks(self, node):
946 947 """return the list of bookmarks pointing to the specified node"""
947 948 marks = []
948 949 for bookmark, n in self._bookmarks.iteritems():
949 950 if n == node:
950 951 marks.append(bookmark)
951 952 return sorted(marks)
952 953
953 954 def branchmap(self):
954 955 '''returns a dictionary {branch: [branchheads]} with branchheads
955 956 ordered by increasing revision number'''
956 957 branchmap.updatecache(self)
957 958 return self._branchcaches[self.filtername]
958 959
959 960 @unfilteredmethod
960 961 def revbranchcache(self):
961 962 if not self._revbranchcache:
962 963 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
963 964 return self._revbranchcache
964 965
965 966 def branchtip(self, branch, ignoremissing=False):
966 967 '''return the tip node for a given branch
967 968
968 969 If ignoremissing is True, then this method will not raise an error.
969 970 This is helpful for callers that only expect None for a missing branch
970 971 (e.g. namespace).
971 972
972 973 '''
973 974 try:
974 975 return self.branchmap().branchtip(branch)
975 976 except KeyError:
976 977 if not ignoremissing:
977 978 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
978 979 else:
979 980 pass
980 981
981 982 def lookup(self, key):
982 983 return self[key].node()
983 984
984 985 def lookupbranch(self, key, remote=None):
985 986 repo = remote or self
986 987 if key in repo.branchmap():
987 988 return key
988 989
989 990 repo = (remote and remote.local()) and remote or self
990 991 return repo[key].branch()
991 992
992 993 def known(self, nodes):
993 994 cl = self.changelog
994 995 nm = cl.nodemap
995 996 filtered = cl.filteredrevs
996 997 result = []
997 998 for n in nodes:
998 999 r = nm.get(n)
999 1000 resp = not (r is None or r in filtered)
1000 1001 result.append(resp)
1001 1002 return result
1002 1003
1003 1004 def local(self):
1004 1005 return self
1005 1006
1006 1007 def publishing(self):
1007 1008 # it's safe (and desirable) to trust the publish flag unconditionally
1008 1009 # so that we don't finalize changes shared between users via ssh or nfs
1009 1010 return self.ui.configbool('phases', 'publish', untrusted=True)
1010 1011
1011 1012 def cancopy(self):
1012 1013 # so statichttprepo's override of local() works
1013 1014 if not self.local():
1014 1015 return False
1015 1016 if not self.publishing():
1016 1017 return True
1017 1018 # if publishing we can't copy if there is filtered content
1018 1019 return not self.filtered('visible').changelog.filteredrevs
1019 1020
1020 1021 def shared(self):
1021 1022 '''the type of shared repository (None if not shared)'''
1022 1023 if self.sharedpath != self.path:
1023 1024 return 'store'
1024 1025 return None
1025 1026
1026 1027 def wjoin(self, f, *insidef):
1027 1028 return self.vfs.reljoin(self.root, f, *insidef)
1028 1029
1029 1030 def file(self, f):
1030 1031 if f[0] == '/':
1031 1032 f = f[1:]
1032 1033 return filelog.filelog(self.svfs, f)
1033 1034
1034 1035 def changectx(self, changeid):
1035 1036 return self[changeid]
1036 1037
1037 1038 def setparents(self, p1, p2=nullid):
1038 1039 with self.dirstate.parentchange():
1039 1040 copies = self.dirstate.setparents(p1, p2)
1040 1041 pctx = self[p1]
1041 1042 if copies:
1042 1043 # Adjust copy records, the dirstate cannot do it, it
1043 1044 # requires access to parents manifests. Preserve them
1044 1045 # only for entries added to first parent.
1045 1046 for f in copies:
1046 1047 if f not in pctx and copies[f] in pctx:
1047 1048 self.dirstate.copy(copies[f], f)
1048 1049 if p2 == nullid:
1049 1050 for f, s in sorted(self.dirstate.copies().items()):
1050 1051 if f not in pctx and s not in pctx:
1051 1052 self.dirstate.copy(None, f)
1052 1053
1053 1054 def filectx(self, path, changeid=None, fileid=None):
1054 1055 """changeid can be a changeset revision, node, or tag.
1055 1056 fileid can be a file revision or node."""
1056 1057 return context.filectx(self, path, changeid, fileid)
1057 1058
1058 1059 def getcwd(self):
1059 1060 return self.dirstate.getcwd()
1060 1061
1061 1062 def pathto(self, f, cwd=None):
1062 1063 return self.dirstate.pathto(f, cwd)
1063 1064
1064 1065 def _loadfilter(self, filter):
1065 1066 if filter not in self.filterpats:
1066 1067 l = []
1067 1068 for pat, cmd in self.ui.configitems(filter):
1068 1069 if cmd == '!':
1069 1070 continue
1070 1071 mf = matchmod.match(self.root, '', [pat])
1071 1072 fn = None
1072 1073 params = cmd
1073 1074 for name, filterfn in self._datafilters.iteritems():
1074 1075 if cmd.startswith(name):
1075 1076 fn = filterfn
1076 1077 params = cmd[len(name):].lstrip()
1077 1078 break
1078 1079 if not fn:
1079 1080 fn = lambda s, c, **kwargs: util.filter(s, c)
1080 1081 # Wrap old filters not supporting keyword arguments
1081 1082 if not inspect.getargspec(fn)[2]:
1082 1083 oldfn = fn
1083 1084 fn = lambda s, c, **kwargs: oldfn(s, c)
1084 1085 l.append((mf, fn, params))
1085 1086 self.filterpats[filter] = l
1086 1087 return self.filterpats[filter]
1087 1088
1088 1089 def _filter(self, filterpats, filename, data):
1089 1090 for mf, fn, cmd in filterpats:
1090 1091 if mf(filename):
1091 1092 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1092 1093 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1093 1094 break
1094 1095
1095 1096 return data
1096 1097
1097 1098 @unfilteredpropertycache
1098 1099 def _encodefilterpats(self):
1099 1100 return self._loadfilter('encode')
1100 1101
1101 1102 @unfilteredpropertycache
1102 1103 def _decodefilterpats(self):
1103 1104 return self._loadfilter('decode')
1104 1105
1105 1106 def adddatafilter(self, name, filter):
1106 1107 self._datafilters[name] = filter
1107 1108
1108 1109 def wread(self, filename):
1109 1110 if self.wvfs.islink(filename):
1110 1111 data = self.wvfs.readlink(filename)
1111 1112 else:
1112 1113 data = self.wvfs.read(filename)
1113 1114 return self._filter(self._encodefilterpats, filename, data)
1114 1115
1115 1116 def wwrite(self, filename, data, flags, backgroundclose=False):
1116 1117 """write ``data`` into ``filename`` in the working directory
1117 1118
1118 1119 This returns the length of the written (maybe decoded) data.
1119 1120 """
1120 1121 data = self._filter(self._decodefilterpats, filename, data)
1121 1122 if 'l' in flags:
1122 1123 self.wvfs.symlink(data, filename)
1123 1124 else:
1124 1125 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
1125 1126 if 'x' in flags:
1126 1127 self.wvfs.setflags(filename, False, True)
1127 1128 return len(data)
1128 1129
1129 1130 def wwritedata(self, filename, data):
1130 1131 return self._filter(self._decodefilterpats, filename, data)
1131 1132
1132 1133 def currenttransaction(self):
1133 1134 """return the current transaction or None if non exists"""
1134 1135 if self._transref:
1135 1136 tr = self._transref()
1136 1137 else:
1137 1138 tr = None
1138 1139
1139 1140 if tr and tr.running():
1140 1141 return tr
1141 1142 return None
1142 1143
1143 1144 def transaction(self, desc, report=None):
1144 1145 if (self.ui.configbool('devel', 'all-warnings')
1145 1146 or self.ui.configbool('devel', 'check-locks')):
1146 1147 if self._currentlock(self._lockref) is None:
1147 1148 raise error.ProgrammingError('transaction requires locking')
1148 1149 tr = self.currenttransaction()
1149 1150 if tr is not None:
1150 1151 scmutil.registersummarycallback(self, tr, desc)
1151 1152 return tr.nest()
1152 1153
1153 1154 # abort here if the journal already exists
1154 1155 if self.svfs.exists("journal"):
1155 1156 raise error.RepoError(
1156 1157 _("abandoned transaction found"),
1157 1158 hint=_("run 'hg recover' to clean up transaction"))
1158 1159
1159 1160 idbase = "%.40f#%f" % (random.random(), time.time())
1160 1161 ha = hex(hashlib.sha1(idbase).digest())
1161 1162 txnid = 'TXN:' + ha
1162 1163 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1163 1164
1164 1165 self._writejournal(desc)
1165 1166 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1166 1167 if report:
1167 1168 rp = report
1168 1169 else:
1169 1170 rp = self.ui.warn
1170 1171 vfsmap = {'plain': self.vfs} # root of .hg/
1171 1172 # we must avoid cyclic reference between repo and transaction.
1172 1173 reporef = weakref.ref(self)
1173 1174 # Code to track tag movement
1174 1175 #
1175 1176 # Since tags are all handled as file content, it is actually quite hard
1176 1177 # to track these movements from a code perspective. So we fall back to
1177 1178 # tracking at the repository level. One could envision tracking changes
1178 1179 # to the '.hgtags' file through changegroup application, but that fails
1179 1180 # to cope with cases where a transaction exposes new heads without a
1180 1181 # changegroup being involved (e.g. phase movement).
1181 1182 #
1182 1183 # For now, we gate the feature behind a flag since it likely comes
1183 1184 # with performance impacts. The current code runs more often than needed
1184 1185 # and does not use caches as much as it could. The current focus is on
1185 1186 # the behavior of the feature, so we disable it by default. The flag
1186 1187 # will be removed when we are happy with the performance impact.
1187 1188 #
1188 1189 # Once this feature is no longer experimental move the following
1189 1190 # documentation to the appropriate help section:
1190 1191 #
1191 1192 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1192 1193 # tags (new or changed or deleted tags). In addition the details of
1193 1194 # these changes are made available in a file at:
1194 1195 # ``REPOROOT/.hg/changes/tags.changes``.
1195 1196 # Make sure you check for HG_TAG_MOVED before reading that file as it
1196 1197 # might exist from a previous transaction even if no tags were touched
1197 1198 # in this one. Changes are recorded in a line-based format::
1198 1199 #
1199 1200 # <action> <hex-node> <tag-name>\n
1200 1201 #
1201 1202 # Actions are defined as follows:
1202 1203 # "-R": tag is removed,
1203 1204 # "+A": tag is added,
1204 1205 # "-M": tag is moved (old value),
1205 1206 # "+M": tag is moved (new value),
1206 1207 tracktags = lambda x: None
1207 1208 # experimental config: experimental.hook-track-tags
1208 1209 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1209 1210 if desc != 'strip' and shouldtracktags:
1210 1211 oldheads = self.changelog.headrevs()
1211 1212 def tracktags(tr2):
1212 1213 repo = reporef()
1213 1214 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1214 1215 newheads = repo.changelog.headrevs()
1215 1216 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1216 1217 # note: we compare lists here.
1217 1218 # As we do it only once, building a set would not be cheaper
1218 1219 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1219 1220 if changes:
1220 1221 tr2.hookargs['tag_moved'] = '1'
1221 1222 with repo.vfs('changes/tags.changes', 'w',
1222 1223 atomictemp=True) as changesfile:
1223 1224 # note: we do not register the file to the transaction
1224 1225 # because we need it to still exist when the transaction
1225 1226 # is closed (for txnclose hooks)
1226 1227 tagsmod.writediff(changesfile, changes)
1227 1228 def validate(tr2):
1228 1229 """will run pre-closing hooks"""
1229 1230 # XXX the transaction API is a bit lacking here so we take a hacky
1230 1231 # path for now
1231 1232 #
1232 1233 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1233 1234 # dict is copied before these run. In addition we need the data
1234 1235 # available to in-memory hooks too.
1235 1236 #
1236 1237 # Moreover, we also need to make sure this runs before txnclose
1237 1238 # hooks and there is no "pending" mechanism that would execute
1238 1239 # logic only if hooks are about to run.
1239 1240 #
1240 1241 # Fixing this limitation of the transaction is also needed to track
1241 1242 # other families of changes (bookmarks, phases, obsolescence).
1242 1243 #
1243 1244 # This will have to be fixed before we remove the experimental
1244 1245 # gating.
1245 1246 tracktags(tr2)
1246 1247 repo = reporef()
1247 1248 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1248 1249 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1249 1250 args = tr.hookargs.copy()
1250 1251 args.update(bookmarks.preparehookargs(name, old, new))
1251 1252 repo.hook('pretxnclose-bookmark', throw=True,
1252 1253 txnname=desc,
1253 1254 **pycompat.strkwargs(args))
1254 1255 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1255 1256 cl = repo.unfiltered().changelog
1256 1257 for rev, (old, new) in tr.changes['phases'].items():
1257 1258 args = tr.hookargs.copy()
1258 1259 node = hex(cl.node(rev))
1259 1260 args.update(phases.preparehookargs(node, old, new))
1260 1261 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1261 1262 **pycompat.strkwargs(args))
1262 1263
1263 1264 repo.hook('pretxnclose', throw=True,
1264 1265 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1265 1266 def releasefn(tr, success):
1266 1267 repo = reporef()
1267 1268 if success:
1268 1269 # this should be explicitly invoked here, because
1269 1270 # in-memory changes aren't written out at closing
1270 1271 # transaction, if tr.addfilegenerator (via
1271 1272 # dirstate.write or so) isn't invoked while
1272 1273 # transaction running
1273 1274 repo.dirstate.write(None)
1274 1275 else:
1275 1276 # discard all changes (including ones already written
1276 1277 # out) in this transaction
1277 1278 repo.dirstate.restorebackup(None, 'journal.dirstate')
1278 1279
1279 1280 repo.invalidate(clearfilecache=True)
1280 1281
1281 1282 tr = transaction.transaction(rp, self.svfs, vfsmap,
1282 1283 "journal",
1283 1284 "undo",
1284 1285 aftertrans(renames),
1285 1286 self.store.createmode,
1286 1287 validator=validate,
1287 1288 releasefn=releasefn,
1288 1289 checkambigfiles=_cachedfiles)
1289 1290 tr.changes['revs'] = set()
1290 1291 tr.changes['obsmarkers'] = set()
1291 1292 tr.changes['phases'] = {}
1292 1293 tr.changes['bookmarks'] = {}
1293 1294
1294 1295 tr.hookargs['txnid'] = txnid
1295 1296 # note: writing the fncache only during finalize means that the file is
1296 1297 # outdated when running hooks. As fncache is used for streaming clone,
1297 1298 # this is not expected to break anything that happens during the hooks.
1298 1299 tr.addfinalize('flush-fncache', self.store.write)
1299 1300 def txnclosehook(tr2):
1300 1301 """To be run if transaction is successful, will schedule a hook run
1301 1302 """
1302 1303 # Don't reference tr2 in hook() so we don't hold a reference.
1303 1304 # This reduces memory consumption when there are multiple
1304 1305 # transactions per lock. This can likely go away if issue5045
1305 1306 # fixes the function accumulation.
1306 1307 hookargs = tr2.hookargs
1307 1308
1308 1309 def hookfunc():
1309 1310 repo = reporef()
1310 1311 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1311 1312 bmchanges = sorted(tr.changes['bookmarks'].items())
1312 1313 for name, (old, new) in bmchanges:
1313 1314 args = tr.hookargs.copy()
1314 1315 args.update(bookmarks.preparehookargs(name, old, new))
1315 1316 repo.hook('txnclose-bookmark', throw=False,
1316 1317 txnname=desc, **pycompat.strkwargs(args))
1317 1318
1318 1319 if hook.hashook(repo.ui, 'txnclose-phase'):
1319 1320 cl = repo.unfiltered().changelog
1320 1321 phasemv = sorted(tr.changes['phases'].items())
1321 1322 for rev, (old, new) in phasemv:
1322 1323 args = tr.hookargs.copy()
1323 1324 node = hex(cl.node(rev))
1324 1325 args.update(phases.preparehookargs(node, old, new))
1325 1326 repo.hook('txnclose-phase', throw=False, txnname=desc,
1326 1327 **pycompat.strkwargs(args))
1327 1328
1328 1329 repo.hook('txnclose', throw=False, txnname=desc,
1329 1330 **pycompat.strkwargs(hookargs))
1330 1331 reporef()._afterlock(hookfunc)
1331 1332 tr.addfinalize('txnclose-hook', txnclosehook)
1332 1333 tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
1333 1334 def txnaborthook(tr2):
1334 1335 """To be run if transaction is aborted
1335 1336 """
1336 1337 reporef().hook('txnabort', throw=False, txnname=desc,
1337 1338 **tr2.hookargs)
1338 1339 tr.addabort('txnabort-hook', txnaborthook)
1339 1340 # avoid eager cache invalidation. in-memory data should be identical
1340 1341 # to stored data if transaction has no error.
1341 1342 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1342 1343 self._transref = weakref.ref(tr)
1343 1344 scmutil.registersummarycallback(self, tr, desc)
1344 1345 return tr
1345 1346
1346 1347 def _journalfiles(self):
1347 1348 return ((self.svfs, 'journal'),
1348 1349 (self.vfs, 'journal.dirstate'),
1349 1350 (self.vfs, 'journal.branch'),
1350 1351 (self.vfs, 'journal.desc'),
1351 1352 (self.vfs, 'journal.bookmarks'),
1352 1353 (self.svfs, 'journal.phaseroots'))
1353 1354
1354 1355 def undofiles(self):
1355 1356 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1356 1357
1357 1358 @unfilteredmethod
1358 1359 def _writejournal(self, desc):
1359 1360 self.dirstate.savebackup(None, 'journal.dirstate')
1360 1361 self.vfs.write("journal.branch",
1361 1362 encoding.fromlocal(self.dirstate.branch()))
1362 1363 self.vfs.write("journal.desc",
1363 1364 "%d\n%s\n" % (len(self), desc))
1364 1365 self.vfs.write("journal.bookmarks",
1365 1366 self.vfs.tryread("bookmarks"))
1366 1367 self.svfs.write("journal.phaseroots",
1367 1368 self.svfs.tryread("phaseroots"))
1368 1369
1369 1370 def recover(self):
1370 1371 with self.lock():
1371 1372 if self.svfs.exists("journal"):
1372 1373 self.ui.status(_("rolling back interrupted transaction\n"))
1373 1374 vfsmap = {'': self.svfs,
1374 1375 'plain': self.vfs,}
1375 1376 transaction.rollback(self.svfs, vfsmap, "journal",
1376 1377 self.ui.warn,
1377 1378 checkambigfiles=_cachedfiles)
1378 1379 self.invalidate()
1379 1380 return True
1380 1381 else:
1381 1382 self.ui.warn(_("no interrupted transaction available\n"))
1382 1383 return False
1383 1384
1384 1385 def rollback(self, dryrun=False, force=False):
1385 1386 wlock = lock = dsguard = None
1386 1387 try:
1387 1388 wlock = self.wlock()
1388 1389 lock = self.lock()
1389 1390 if self.svfs.exists("undo"):
1390 1391 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1391 1392
1392 1393 return self._rollback(dryrun, force, dsguard)
1393 1394 else:
1394 1395 self.ui.warn(_("no rollback information available\n"))
1395 1396 return 1
1396 1397 finally:
1397 1398 release(dsguard, lock, wlock)
1398 1399
1399 1400 @unfilteredmethod # Until we get smarter cache management
1400 1401 def _rollback(self, dryrun, force, dsguard):
1401 1402 ui = self.ui
1402 1403 try:
1403 1404 args = self.vfs.read('undo.desc').splitlines()
1404 1405 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1405 1406 if len(args) >= 3:
1406 1407 detail = args[2]
1407 1408 oldtip = oldlen - 1
1408 1409
1409 1410 if detail and ui.verbose:
1410 1411 msg = (_('repository tip rolled back to revision %d'
1411 1412 ' (undo %s: %s)\n')
1412 1413 % (oldtip, desc, detail))
1413 1414 else:
1414 1415 msg = (_('repository tip rolled back to revision %d'
1415 1416 ' (undo %s)\n')
1416 1417 % (oldtip, desc))
1417 1418 except IOError:
1418 1419 msg = _('rolling back unknown transaction\n')
1419 1420 desc = None
1420 1421
1421 1422 if not force and self['.'] != self['tip'] and desc == 'commit':
1422 1423 raise error.Abort(
1423 1424 _('rollback of last commit while not checked out '
1424 1425 'may lose data'), hint=_('use -f to force'))
1425 1426
1426 1427 ui.status(msg)
1427 1428 if dryrun:
1428 1429 return 0
1429 1430
1430 1431 parents = self.dirstate.parents()
1431 1432 self.destroying()
1432 1433 vfsmap = {'plain': self.vfs, '': self.svfs}
1433 1434 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1434 1435 checkambigfiles=_cachedfiles)
1435 1436 if self.vfs.exists('undo.bookmarks'):
1436 1437 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1437 1438 if self.svfs.exists('undo.phaseroots'):
1438 1439 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1439 1440 self.invalidate()
1440 1441
1441 1442 parentgone = (parents[0] not in self.changelog.nodemap or
1442 1443 parents[1] not in self.changelog.nodemap)
1443 1444 if parentgone:
1444 1445 # prevent dirstateguard from overwriting already restored one
1445 1446 dsguard.close()
1446 1447
1447 1448 self.dirstate.restorebackup(None, 'undo.dirstate')
1448 1449 try:
1449 1450 branch = self.vfs.read('undo.branch')
1450 1451 self.dirstate.setbranch(encoding.tolocal(branch))
1451 1452 except IOError:
1452 1453 ui.warn(_('named branch could not be reset: '
1453 1454 'current branch is still \'%s\'\n')
1454 1455 % self.dirstate.branch())
1455 1456
1456 1457 parents = tuple([p.rev() for p in self[None].parents()])
1457 1458 if len(parents) > 1:
1458 1459 ui.status(_('working directory now based on '
1459 1460 'revisions %d and %d\n') % parents)
1460 1461 else:
1461 1462 ui.status(_('working directory now based on '
1462 1463 'revision %d\n') % parents)
1463 1464 mergemod.mergestate.clean(self, self['.'].node())
1464 1465
1465 1466 # TODO: if we know which new heads may result from this rollback, pass
1466 1467 # them to destroy(), which will prevent the branchhead cache from being
1467 1468 # invalidated.
1468 1469 self.destroyed()
1469 1470 return 0
1470 1471
1471 1472 def _buildcacheupdater(self, newtransaction):
1472 1473 """called during transaction to build the callback updating cache
1473 1474
1474 1475 Lives on the repository to help extensions that might want to augment
1475 1476 this logic. For this purpose, the created transaction is passed to the
1476 1477 method.
1477 1478 """
1478 1479 # we must avoid cyclic reference between repo and transaction.
1479 1480 reporef = weakref.ref(self)
1480 1481 def updater(tr):
1481 1482 repo = reporef()
1482 1483 repo.updatecaches(tr)
1483 1484 return updater
1484 1485
1485 1486 @unfilteredmethod
1486 1487 def updatecaches(self, tr=None):
1487 1488 """warm appropriate caches
1488 1489
1489 1490 If this function is called after a transaction has closed, the transaction
1490 1491 will be available in the 'tr' argument. This can be used to selectively
1491 1492 update caches relevant to the changes in that transaction.
1492 1493 """
1493 1494 if tr is not None and tr.hookargs.get('source') == 'strip':
1494 1495 # During strip, many caches are invalid but
1495 1496 # later call to `destroyed` will refresh them.
1496 1497 return
1497 1498
1498 1499 if tr is None or tr.changes['revs']:
1499 1500 # updating the unfiltered branchmap should refresh all the others,
1500 1501 self.ui.debug('updating the branch cache\n')
1501 1502 branchmap.updatecache(self.filtered('served'))
1502 1503
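    # A hedged sketch of how an extension might hook into this cache warming
    # with extensions.wrapfunction; 'warmmycache' is a hypothetical helper:
    #
    #   def _updatecaches(orig, self, tr=None):
    #       orig(self, tr)
    #       if tr is None or tr.changes['revs']:
    #           warmmycache(self)
    #   extensions.wrapfunction(localrepo.localrepository, 'updatecaches',
    #                           _updatecaches)
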
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has been.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

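    # Minimal sketch of the retry behaviour above: a first non-blocking
    # attempt (timeout 0) fails fast, and if 'wait' is set a second attempt
    # blocks for up to ui.timeout seconds (600 by default):
    #
    #   try:
    #       l = lockmod.lock(vfs, lockname, 0)     # raises LockHeld at once
    #   except error.LockHeld:
    #       l = lockmod.lock(vfs, lockname, 600)   # poll for up to 600s
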
    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

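    # Illustrative use of _afterlock (it mirrors the commit hook further
    # down); 'myhook' is a hypothetical hook name:
    #
    #   def runhook():
    #       repo.hook('myhook')
    #   repo._afterlock(runhook)   # fires once the last lock is released
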
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return the lock.
        Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

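    # Typical store-lock usage (a hedged sketch): acquire, mutate the store,
    # and release in a finally block so errors cannot leak the lock:
    #
    #   lock = repo.lock()
    #   try:
    #       pass    # commit, strip, write to .hg/store, ...
    #   finally:
    #       lock.release()
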
    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

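    # Lock-ordering sketch: when both locks are needed, take wlock first and
    # release in reverse order (lockmod.release ignores None entries):
    #
    #   wlock = lock = None
    #   try:
    #       wlock = repo.wlock()
    #       lock = repo.lock()
    #       pass    # do the work that touches .hg and .hg/store
    #   finally:
    #       lockmod.release(lock, wlock)
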
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

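    # The copy metadata recorded above is a small dict stored in the filelog
    # entry; a hedged example for a rename of 'foo' to 'bar':
    #
    #   meta = {'copy': 'foo', 'copyrev': hex(crev)}
    #
    # With fparent1 set to nullid, readers treat 'foo'@crev as the effective
    # parent of the new 'bar' revision.
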
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

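    # Hedged usage sketch for commit(); file name and message are examples:
    #
    #   m = matchmod.exact(repo.root, '', ['a.txt'])
    #   node = repo.commit(text='fix a.txt', user='user@example.com', match=m)
    #   if node is None:
    #       pass    # nothing to commit; empty commits are refused by default
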
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit to its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the phase boundary does not alter the parent
                # changesets; if a parent has a higher phase, the result
                # will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

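    # commitctx() also accepts purely in-memory contexts; a hedged sketch
    # using context.memctx to commit without touching the working directory
    # (file name, content and message are example values):
    #
    #   def getfctx(repo, memctx, path):
    #       return context.memfilectx(repo, path, 'content\n')
    #   ctx = context.memctx(repo, (repo['.'].node(), nullid),
    #                        'add newfile', ['newfile'], getfctx,
    #                        user='user@example.com')
    #   node = repo.commitctx(ctx)
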
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

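    # status() convenience form (hedged sketch): with the defaults it
    # compares the working directory against its first parent:
    #
    #   st = repo.status()
    #   if st.modified or st.added or st.removed:
    #       pass    # the working directory is dirty
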
    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the
        wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meantime. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

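    # Callback shape for addpostdsstatus (hedged sketch); 'fixup' is a
    # hypothetical extension helper:
    #
    #   def fixup(wctx, status):
    #       dirstate = wctx.repo().dirstate   # resolve fresh, never cache
    #       pass                              # adjust entries as needed
    #   repo.addpostdsstatus(fixup)
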
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

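    # between() samples each top->bottom chain at exponentially growing
    # distances below 'top' (1, 2, 4, 8, ... first parents), which the old
    # discovery protocol uses to binary-search for common nodes. Sketch of
    # the distances kept for one pair:
    #
    #   kept = [1, 2, 4, 8, 16, ...]   # appended when i == f, f doubling
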
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of a pushop with repo, remote, outgoing
        methods, which are called before pushing changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

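    # pushkey usage sketch (hedged): setting a bookmark through the pushkey
    # protocol; values in the 'bookmarks' namespace are hex nodes, and the
    # empty string stands for "no previous value":
    #
    #   ok = repo.pushkey('bookmarks', 'mybook', '', hex(newnode))
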
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

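# Sketch of how the closure above is consumed: the transaction machinery
# calls it once the transaction is finalized, turning journal files into
# the undo files read back by rollback (example rename list):
#
#   a = aftertrans([(repo.vfs, 'journal.dirstate', 'undo.dirstate')])
#   a()   # renames journal.dirstate to undo.dirstate
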
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2'):
        requirements.add('manifestv2')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)

    return requirements
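
# Hedged summary, assuming stock format.* configuration: every knob above is
# at its default, so a fresh repository gets the requirement set
#
#   {'revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta'}
#
# and enabling e.g. zstd via [experimental] format.compression = zstd would
# additionally contribute 'exp-compression-zstd'.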