localrepo: use revsymbol in lookupbranch() too...
Martin von Zweigbergk
r37370:92171562 default
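Before the diff body, a condensed sketch of what this one-line change does, distilled from the hunk below rather than taken from separate repository code: `lookupbranch()` now resolves non-branch keys through the same `scmutil.revsymbol()` helper that `lookup()` already uses, instead of the bare `repo[key]` lookup.

```python
# Sketch of lookupbranch() after this changeset (simplified; 'repo' and
# 'scmutil' are the names used in the diff below).
def lookupbranch(repo, key):
    if key in repo.branchmap():
        # the key is itself a branch name
        return key
    # otherwise resolve the symbol (hash, tag, bookmark, ...) centrally,
    # exactly as lookup() does one method above, and report its branch
    return scmutil.revsymbol(repo, key).branch()
```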
@@ -1,2330 +1,2330 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import os
13 13 import random
14 14 import time
15 15 import weakref
16 16
17 17 from .i18n import _
18 18 from .node import (
19 19 hex,
20 20 nullid,
21 21 short,
22 22 )
23 23 from .thirdparty.zope import (
24 24 interface as zi,
25 25 )
26 26 from . import (
27 27 bookmarks,
28 28 branchmap,
29 29 bundle2,
30 30 changegroup,
31 31 changelog,
32 32 color,
33 33 context,
34 34 dirstate,
35 35 dirstateguard,
36 36 discovery,
37 37 encoding,
38 38 error,
39 39 exchange,
40 40 extensions,
41 41 filelog,
42 42 hook,
43 43 lock as lockmod,
44 44 manifest,
45 45 match as matchmod,
46 46 merge as mergemod,
47 47 mergeutil,
48 48 namespaces,
49 49 narrowspec,
50 50 obsolete,
51 51 pathutil,
52 52 peer,
53 53 phases,
54 54 pushkey,
55 55 pycompat,
56 56 repository,
57 57 repoview,
58 58 revset,
59 59 revsetlang,
60 60 scmutil,
61 61 sparse,
62 62 store,
63 63 subrepoutil,
64 64 tags as tagsmod,
65 65 transaction,
66 66 txnutil,
67 67 util,
68 68 vfs as vfsmod,
69 69 )
70 70 from .utils import (
71 71 procutil,
72 72 stringutil,
73 73 )
74 74
75 75 release = lockmod.release
76 76 urlerr = util.urlerr
77 77 urlreq = util.urlreq
78 78
79 79 # set of (path, vfs-location) tuples. vfs-location is:
80 80 # - 'plain' for vfs relative paths
81 81 # - '' for svfs relative paths
82 82 _cachedfiles = set()
83 83
84 84 class _basefilecache(scmutil.filecache):
85 85 """All filecache usage on repo are done for logic that should be unfiltered
86 86 """
87 87 def __get__(self, repo, type=None):
88 88 if repo is None:
89 89 return self
90 90 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
91 91 def __set__(self, repo, value):
92 92 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
93 93 def __delete__(self, repo):
94 94 return super(_basefilecache, self).__delete__(repo.unfiltered())
95 95
96 96 class repofilecache(_basefilecache):
97 97 """filecache for files in .hg but outside of .hg/store"""
98 98 def __init__(self, *paths):
99 99 super(repofilecache, self).__init__(*paths)
100 100 for path in paths:
101 101 _cachedfiles.add((path, 'plain'))
102 102
103 103 def join(self, obj, fname):
104 104 return obj.vfs.join(fname)
105 105
106 106 class storecache(_basefilecache):
107 107 """filecache for files in the store"""
108 108 def __init__(self, *paths):
109 109 super(storecache, self).__init__(*paths)
110 110 for path in paths:
111 111 _cachedfiles.add((path, ''))
112 112
113 113 def join(self, obj, fname):
114 114 return obj.sjoin(fname)
115 115
116 116 def isfilecached(repo, name):
117 117 """check if a repo has already cached "name" filecache-ed property
118 118
119 119 This returns (cachedobj-or-None, iscached) tuple.
120 120 """
121 121 cacheentry = repo.unfiltered()._filecache.get(name, None)
122 122 if not cacheentry:
123 123 return None, False
124 124 return cacheentry.obj, True
125 125
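A hypothetical usage sketch for `isfilecached()`: the helper only reports on the `_filecache` map and never populates it. The property name `'_bookmarks'` below is one of the `@repofilecache` properties defined later in this file; `bookmarksifloaded` is a made-up caller.

```python
def bookmarksifloaded(repo):
    # Returns the bmstore only if the '_bookmarks' filecache property has
    # already been populated; never triggers a load itself.
    obj, cached = isfilecached(repo, '_bookmarks')
    return obj if cached else None
```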
126 126 class unfilteredpropertycache(util.propertycache):
127 127 """propertycache that apply to unfiltered repo only"""
128 128
129 129 def __get__(self, repo, type=None):
130 130 unfi = repo.unfiltered()
131 131 if unfi is repo:
132 132 return super(unfilteredpropertycache, self).__get__(unfi)
133 133 return getattr(unfi, self.name)
134 134
135 135 class filteredpropertycache(util.propertycache):
136 136 """propertycache that must take filtering in account"""
137 137
138 138 def cachevalue(self, obj, value):
139 139 object.__setattr__(obj, self.name, value)
140 140
141 141
142 142 def hasunfilteredcache(repo, name):
143 143 """check if a repo has an unfilteredpropertycache value for <name>"""
144 144 return name in vars(repo.unfiltered())
145 145
146 146 def unfilteredmethod(orig):
147 147 """decorate method that always need to be run on unfiltered version"""
148 148 def wrapper(repo, *args, **kwargs):
149 149 return orig(repo.unfiltered(), *args, **kwargs)
150 150 return wrapper
151 151
152 152 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
153 153 'unbundle'}
154 154 legacycaps = moderncaps.union({'changegroupsubset'})
155 155
156 156 class localpeer(repository.peer):
157 157 '''peer for a local repo; reflects only the most recent API'''
158 158
159 159 def __init__(self, repo, caps=None):
160 160 super(localpeer, self).__init__()
161 161
162 162 if caps is None:
163 163 caps = moderncaps.copy()
164 164 self._repo = repo.filtered('served')
165 165 self.ui = repo.ui
166 166 self._caps = repo._restrictcapabilities(caps)
167 167
168 168 # Begin of _basepeer interface.
169 169
170 170 def url(self):
171 171 return self._repo.url()
172 172
173 173 def local(self):
174 174 return self._repo
175 175
176 176 def peer(self):
177 177 return self
178 178
179 179 def canpush(self):
180 180 return True
181 181
182 182 def close(self):
183 183 self._repo.close()
184 184
185 185 # End of _basepeer interface.
186 186
187 187 # Begin of _basewirecommands interface.
188 188
189 189 def branchmap(self):
190 190 return self._repo.branchmap()
191 191
192 192 def capabilities(self):
193 193 return self._caps
194 194
195 195 def debugwireargs(self, one, two, three=None, four=None, five=None):
196 196 """Used to test argument passing over the wire"""
197 197 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
198 198 pycompat.bytestr(four),
199 199 pycompat.bytestr(five))
200 200
201 201 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
202 202 **kwargs):
203 203 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
204 204 common=common, bundlecaps=bundlecaps,
205 205 **kwargs)[1]
206 206 cb = util.chunkbuffer(chunks)
207 207
208 208 if exchange.bundle2requested(bundlecaps):
209 209 # When requesting a bundle2, getbundle returns a stream to make the
210 210 # wire-level function happier. We need to build a proper object
211 211 # from it in the local peer.
212 212 return bundle2.getunbundler(self.ui, cb)
213 213 else:
214 214 return changegroup.getunbundler('01', cb, None)
215 215
216 216 def heads(self):
217 217 return self._repo.heads()
218 218
219 219 def known(self, nodes):
220 220 return self._repo.known(nodes)
221 221
222 222 def listkeys(self, namespace):
223 223 return self._repo.listkeys(namespace)
224 224
225 225 def lookup(self, key):
226 226 return self._repo.lookup(key)
227 227
228 228 def pushkey(self, namespace, key, old, new):
229 229 return self._repo.pushkey(namespace, key, old, new)
230 230
231 231 def stream_out(self):
232 232 raise error.Abort(_('cannot perform stream clone against local '
233 233 'peer'))
234 234
235 235 def unbundle(self, cg, heads, url):
236 236 """apply a bundle on a repo
237 237
238 238 This function handles the repo locking itself."""
239 239 try:
240 240 try:
241 241 cg = exchange.readbundle(self.ui, cg, None)
242 242 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
243 243 if util.safehasattr(ret, 'getchunks'):
244 244 # This is a bundle20 object, turn it into an unbundler.
245 245 # This little dance should be dropped eventually when the
246 246 # API is finally improved.
247 247 stream = util.chunkbuffer(ret.getchunks())
248 248 ret = bundle2.getunbundler(self.ui, stream)
249 249 return ret
250 250 except Exception as exc:
251 251 # If the exception contains output salvaged from a bundle2
252 252 # reply, we need to make sure it is printed before continuing
253 253 # to fail. So we build a bundle2 with such output and consume
254 254 # it directly.
255 255 #
256 256 # This is not very elegant but allows a "simple" solution for
257 257 # issue4594
258 258 output = getattr(exc, '_bundle2salvagedoutput', ())
259 259 if output:
260 260 bundler = bundle2.bundle20(self._repo.ui)
261 261 for out in output:
262 262 bundler.addpart(out)
263 263 stream = util.chunkbuffer(bundler.getchunks())
264 264 b = bundle2.getunbundler(self.ui, stream)
265 265 bundle2.processbundle(self._repo, b)
266 266 raise
267 267 except error.PushRaced as exc:
268 268 raise error.ResponseError(_('push failed:'),
269 269 stringutil.forcebytestr(exc))
270 270
271 271 # End of _basewirecommands interface.
272 272
273 273 # Begin of peer interface.
274 274
275 275 def iterbatch(self):
276 276 return peer.localiterbatcher(self)
277 277
278 278 # End of peer interface.
279 279
280 280 class locallegacypeer(repository.legacypeer, localpeer):
281 281 '''peer extension which implements legacy methods too; used for tests with
282 282 restricted capabilities'''
283 283
284 284 def __init__(self, repo):
285 285 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
286 286
287 287 # Begin of baselegacywirecommands interface.
288 288
289 289 def between(self, pairs):
290 290 return self._repo.between(pairs)
291 291
292 292 def branches(self, nodes):
293 293 return self._repo.branches(nodes)
294 294
295 295 def changegroup(self, basenodes, source):
296 296 outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
297 297 missingheads=self._repo.heads())
298 298 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
299 299
300 300 def changegroupsubset(self, bases, heads, source):
301 301 outgoing = discovery.outgoing(self._repo, missingroots=bases,
302 302 missingheads=heads)
303 303 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
304 304
305 305 # End of baselegacywirecommands interface.
306 306
307 307 # Increment the sub-version when the revlog v2 format changes to lock out old
308 308 # clients.
309 309 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
310 310
311 311 # Functions receiving (ui, features) that extensions can register to impact
312 312 # the ability to load repositories with custom requirements. Only
313 313 # functions defined in loaded extensions are called.
314 314 #
315 315 # The function receives a set of requirement strings that the repository
316 316 # is capable of opening. Functions will typically add elements to the
317 317 # set to reflect that the extension knows how to handle those requirements.
318 318 featuresetupfuncs = set()
319 319
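A hypothetical extension module showing how `featuresetupfuncs` is meant to be used, per the comment above; the registered function is only invoked if its module is among the loaded extensions (see the `__module__` check in `__init__` below). The requirement string `'exp-myfeature'` is made up for illustration.

```python
# In a hypothetical extension module:
from mercurial import localrepo

def featuresetup(ui, supported):
    # advertise that this extension can open repos with this requirement
    supported.add('exp-myfeature')

def uisetup(ui):
    localrepo.featuresetupfuncs.add(featuresetup)
```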
320 320 @zi.implementer(repository.completelocalrepository)
321 321 class localrepository(object):
322 322
323 323 # obsolete experimental requirements:
324 324 # - manifestv2: An experimental new manifest format that allowed
325 325 # for stem compression of long paths. Experiment ended up not
326 326 # being successful (repository sizes went up due to worse delta
327 327 # chains), and the code was deleted in 4.6.
328 328 supportedformats = {
329 329 'revlogv1',
330 330 'generaldelta',
331 331 'treemanifest',
332 332 REVLOGV2_REQUIREMENT,
333 333 }
334 334 _basesupported = supportedformats | {
335 335 'store',
336 336 'fncache',
337 337 'shared',
338 338 'relshared',
339 339 'dotencode',
340 340 'exp-sparse',
341 341 }
342 342 openerreqs = {
343 343 'revlogv1',
344 344 'generaldelta',
345 345 'treemanifest',
346 346 }
347 347
348 348 # list of prefixes for files which can be written without 'wlock'
349 349 # Extensions should extend this list when needed
350 350 _wlockfreeprefix = {
351 351 # We might consider requiring 'wlock' for the next
352 352 # two, but pretty much all the existing code assumes
353 353 # wlock is not needed, so we keep them excluded for
354 354 # now.
355 355 'hgrc',
356 356 'requires',
357 357 # XXX cache is a complicated business; someone
358 358 # should investigate this in depth at some point
359 359 'cache/',
360 360 # XXX shouldn't be dirstate covered by the wlock?
361 361 'dirstate',
362 362 # XXX bisect was still a bit too messy at the time
363 363 # this changeset was introduced. Someone should fix
364 364 # the remaining bit and drop this line
365 365 'bisect.state',
366 366 }
367 367
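A hypothetical sketch of an extension acting on the comment above and registering its own wlock-free file prefix; the prefix `'myext.state'` is made up for illustration.

```python
from mercurial import localrepo

def uisetup(ui):
    # files starting with this prefix may be written without 'wlock'
    localrepo.localrepository._wlockfreeprefix.add('myext.state')
```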
368 368 def __init__(self, baseui, path, create=False):
369 369 self.requirements = set()
370 370 self.filtername = None
371 371 # wvfs: rooted at the repository root, used to access the working copy
372 372 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
373 373 # vfs: rooted at .hg, used to access repo files outside of .hg/store
374 374 self.vfs = None
375 375 # svfs: usually rooted at .hg/store, used to access repository history
376 376 # If this is a shared repository, this vfs may point to another
377 377 # repository's .hg/store directory.
378 378 self.svfs = None
379 379 self.root = self.wvfs.base
380 380 self.path = self.wvfs.join(".hg")
381 381 self.origroot = path
382 382 # This is only used by context.workingctx.match in order to
383 383 # detect files in subrepos.
384 384 self.auditor = pathutil.pathauditor(
385 385 self.root, callback=self._checknested)
386 386 # This is only used by context.basectx.match in order to detect
387 387 # files in subrepos.
388 388 self.nofsauditor = pathutil.pathauditor(
389 389 self.root, callback=self._checknested, realfs=False, cached=True)
390 390 self.baseui = baseui
391 391 self.ui = baseui.copy()
392 392 self.ui.copy = baseui.copy # prevent copying repo configuration
393 393 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
394 394 if (self.ui.configbool('devel', 'all-warnings') or
395 395 self.ui.configbool('devel', 'check-locks')):
396 396 self.vfs.audit = self._getvfsward(self.vfs.audit)
397 397 # A list of callbacks to shape the phase if no data were found.
398 398 # Callbacks are in the form: func(repo, roots) --> processed root.
399 399 # This list is to be filled by extensions during repo setup
400 400 self._phasedefaults = []
401 401 try:
402 402 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
403 403 self._loadextensions()
404 404 except IOError:
405 405 pass
406 406
407 407 if featuresetupfuncs:
408 408 self.supported = set(self._basesupported) # use private copy
409 409 extmods = set(m.__name__ for n, m
410 410 in extensions.extensions(self.ui))
411 411 for setupfunc in featuresetupfuncs:
412 412 if setupfunc.__module__ in extmods:
413 413 setupfunc(self.ui, self.supported)
414 414 else:
415 415 self.supported = self._basesupported
416 416 color.setup(self.ui)
417 417
418 418 # Add compression engines.
419 419 for name in util.compengines:
420 420 engine = util.compengines[name]
421 421 if engine.revlogheader():
422 422 self.supported.add('exp-compression-%s' % name)
423 423
424 424 if not self.vfs.isdir():
425 425 if create:
426 426 self.requirements = newreporequirements(self)
427 427
428 428 if not self.wvfs.exists():
429 429 self.wvfs.makedirs()
430 430 self.vfs.makedir(notindexed=True)
431 431
432 432 if 'store' in self.requirements:
433 433 self.vfs.mkdir("store")
434 434
435 435 # create an invalid changelog
436 436 self.vfs.append(
437 437 "00changelog.i",
438 438 '\0\0\0\2' # represents revlogv2
439 439 ' dummy changelog to prevent using the old repo layout'
440 440 )
441 441 else:
442 442 raise error.RepoError(_("repository %s not found") % path)
443 443 elif create:
444 444 raise error.RepoError(_("repository %s already exists") % path)
445 445 else:
446 446 try:
447 447 self.requirements = scmutil.readrequires(
448 448 self.vfs, self.supported)
449 449 except IOError as inst:
450 450 if inst.errno != errno.ENOENT:
451 451 raise
452 452
453 453 cachepath = self.vfs.join('cache')
454 454 self.sharedpath = self.path
455 455 try:
456 456 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
457 457 if 'relshared' in self.requirements:
458 458 sharedpath = self.vfs.join(sharedpath)
459 459 vfs = vfsmod.vfs(sharedpath, realpath=True)
460 460 cachepath = vfs.join('cache')
461 461 s = vfs.base
462 462 if not vfs.exists():
463 463 raise error.RepoError(
464 464 _('.hg/sharedpath points to nonexistent directory %s') % s)
465 465 self.sharedpath = s
466 466 except IOError as inst:
467 467 if inst.errno != errno.ENOENT:
468 468 raise
469 469
470 470 if 'exp-sparse' in self.requirements and not sparse.enabled:
471 471 raise error.RepoError(_('repository is using sparse feature but '
472 472 'sparse is not enabled; enable the '
473 473 '"sparse" extensions to access'))
474 474
475 475 self.store = store.store(
476 476 self.requirements, self.sharedpath,
477 477 lambda base: vfsmod.vfs(base, cacheaudited=True))
478 478 self.spath = self.store.path
479 479 self.svfs = self.store.vfs
480 480 self.sjoin = self.store.join
481 481 self.vfs.createmode = self.store.createmode
482 482 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
483 483 self.cachevfs.createmode = self.store.createmode
484 484 if (self.ui.configbool('devel', 'all-warnings') or
485 485 self.ui.configbool('devel', 'check-locks')):
486 486 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
487 487 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
488 488 else: # standard vfs
489 489 self.svfs.audit = self._getsvfsward(self.svfs.audit)
490 490 self._applyopenerreqs()
491 491 if create:
492 492 self._writerequirements()
493 493
494 494 self._dirstatevalidatewarned = False
495 495
496 496 self._branchcaches = {}
497 497 self._revbranchcache = None
498 498 self._filterpats = {}
499 499 self._datafilters = {}
500 500 self._transref = self._lockref = self._wlockref = None
501 501
502 502 # A cache for various files under .hg/ that tracks file changes
503 503 # (used by the filecache decorator)
504 504 #
505 505 # Maps a property name to its util.filecacheentry
506 506 self._filecache = {}
507 507
508 508 # holds sets of revisions to be filtered;
509 509 # should be cleared when something might have changed the filter value:
510 510 # - new changesets,
511 511 # - phase change,
512 512 # - new obsolescence marker,
513 513 # - working directory parent change,
514 514 # - bookmark changes
515 515 self.filteredrevcache = {}
516 516
517 517 # post-dirstate-status hooks
518 518 self._postdsstatus = []
519 519
520 520 # generic mapping between names and nodes
521 521 self.names = namespaces.namespaces()
522 522
523 523 # Key to signature value.
524 524 self._sparsesignaturecache = {}
525 525 # Signature to cached matcher instance.
526 526 self._sparsematchercache = {}
527 527
528 528 def _getvfsward(self, origfunc):
529 529 """build a ward for self.vfs"""
530 530 rref = weakref.ref(self)
531 531 def checkvfs(path, mode=None):
532 532 ret = origfunc(path, mode=mode)
533 533 repo = rref()
534 534 if (repo is None
535 535 or not util.safehasattr(repo, '_wlockref')
536 536 or not util.safehasattr(repo, '_lockref')):
537 537 return
538 538 if mode in (None, 'r', 'rb'):
539 539 return
540 540 if path.startswith(repo.path):
541 541 # truncate name relative to the repository (.hg)
542 542 path = path[len(repo.path) + 1:]
543 543 if path.startswith('cache/'):
544 544 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
545 545 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
546 546 if path.startswith('journal.'):
547 547 # journal is covered by 'lock'
548 548 if repo._currentlock(repo._lockref) is None:
549 549 repo.ui.develwarn('write with no lock: "%s"' % path,
550 550 stacklevel=2, config='check-locks')
551 551 elif repo._currentlock(repo._wlockref) is None:
552 552 # rest of vfs files are covered by 'wlock'
553 553 #
554 554 # exclude special files
555 555 for prefix in self._wlockfreeprefix:
556 556 if path.startswith(prefix):
557 557 return
558 558 repo.ui.develwarn('write with no wlock: "%s"' % path,
559 559 stacklevel=2, config='check-locks')
560 560 return ret
561 561 return checkvfs
562 562
563 563 def _getsvfsward(self, origfunc):
564 564 """build a ward for self.svfs"""
565 565 rref = weakref.ref(self)
566 566 def checksvfs(path, mode=None):
567 567 ret = origfunc(path, mode=mode)
568 568 repo = rref()
569 569 if repo is None or not util.safehasattr(repo, '_lockref'):
570 570 return
571 571 if mode in (None, 'r', 'rb'):
572 572 return
573 573 if path.startswith(repo.sharedpath):
574 574 # truncate name relative to the repository (.hg)
575 575 path = path[len(repo.sharedpath) + 1:]
576 576 if repo._currentlock(repo._lockref) is None:
577 577 repo.ui.develwarn('write with no lock: "%s"' % path,
578 578 stacklevel=3)
579 579 return ret
580 580 return checksvfs
581 581
582 582 def close(self):
583 583 self._writecaches()
584 584
585 585 def _loadextensions(self):
586 586 extensions.loadall(self.ui)
587 587
588 588 def _writecaches(self):
589 589 if self._revbranchcache:
590 590 self._revbranchcache.write()
591 591
592 592 def _restrictcapabilities(self, caps):
593 593 if self.ui.configbool('experimental', 'bundle2-advertise'):
594 594 caps = set(caps)
595 595 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
596 596 role='client'))
597 597 caps.add('bundle2=' + urlreq.quote(capsblob))
598 598 return caps
599 599
600 600 def _applyopenerreqs(self):
601 601 self.svfs.options = dict((r, 1) for r in self.requirements
602 602 if r in self.openerreqs)
603 603 # experimental config: format.chunkcachesize
604 604 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
605 605 if chunkcachesize is not None:
606 606 self.svfs.options['chunkcachesize'] = chunkcachesize
607 607 # experimental config: format.maxchainlen
608 608 maxchainlen = self.ui.configint('format', 'maxchainlen')
609 609 if maxchainlen is not None:
610 610 self.svfs.options['maxchainlen'] = maxchainlen
611 611 # experimental config: format.manifestcachesize
612 612 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
613 613 if manifestcachesize is not None:
614 614 self.svfs.options['manifestcachesize'] = manifestcachesize
615 615 # experimental config: format.aggressivemergedeltas
616 616 aggressivemergedeltas = self.ui.configbool('format',
617 617 'aggressivemergedeltas')
618 618 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
619 619 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
620 620 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
621 621 if 0 <= chainspan:
622 622 self.svfs.options['maxdeltachainspan'] = chainspan
623 623 mmapindexthreshold = self.ui.configbytes('experimental',
624 624 'mmapindexthreshold')
625 625 if mmapindexthreshold is not None:
626 626 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
627 627 withsparseread = self.ui.configbool('experimental', 'sparse-read')
628 628 srdensitythres = float(self.ui.config('experimental',
629 629 'sparse-read.density-threshold'))
630 630 srmingapsize = self.ui.configbytes('experimental',
631 631 'sparse-read.min-gap-size')
632 632 self.svfs.options['with-sparse-read'] = withsparseread
633 633 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
634 634 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
635 635
636 636 for r in self.requirements:
637 637 if r.startswith('exp-compression-'):
638 638 self.svfs.options['compengine'] = r[len('exp-compression-'):]
639 639
640 640 # TODO move "revlogv2" to openerreqs once finalized.
641 641 if REVLOGV2_REQUIREMENT in self.requirements:
642 642 self.svfs.options['revlogv2'] = True
643 643
644 644 def _writerequirements(self):
645 645 scmutil.writerequires(self.vfs, self.requirements)
646 646
647 647 def _checknested(self, path):
648 648 """Determine if path is a legal nested repository."""
649 649 if not path.startswith(self.root):
650 650 return False
651 651 subpath = path[len(self.root) + 1:]
652 652 normsubpath = util.pconvert(subpath)
653 653
654 654 # XXX: Checking against the current working copy is wrong in
655 655 # the sense that it can reject things like
656 656 #
657 657 # $ hg cat -r 10 sub/x.txt
658 658 #
659 659 # if sub/ is no longer a subrepository in the working copy
660 660 # parent revision.
661 661 #
662 662 # However, it can of course also allow things that would have
663 663 # been rejected before, such as the above cat command if sub/
664 664 # is a subrepository now, but was a normal directory before.
665 665 # The old path auditor would have rejected by mistake since it
666 666 # panics when it sees sub/.hg/.
667 667 #
668 668 # All in all, checking against the working copy seems sensible
669 669 # since we want to prevent access to nested repositories on
670 670 # the filesystem *now*.
671 671 ctx = self[None]
672 672 parts = util.splitpath(subpath)
673 673 while parts:
674 674 prefix = '/'.join(parts)
675 675 if prefix in ctx.substate:
676 676 if prefix == normsubpath:
677 677 return True
678 678 else:
679 679 sub = ctx.sub(prefix)
680 680 return sub.checknested(subpath[len(prefix) + 1:])
681 681 else:
682 682 parts.pop()
683 683 return False
684 684
685 685 def peer(self):
686 686 return localpeer(self) # not cached to avoid reference cycle
687 687
688 688 def unfiltered(self):
689 689 """Return unfiltered version of the repository
690 690
691 691 Intended to be overwritten by filtered repo."""
692 692 return self
693 693
694 694 def filtered(self, name, visibilityexceptions=None):
695 695 """Return a filtered version of a repository"""
696 696 cls = repoview.newtype(self.unfiltered().__class__)
697 697 return cls(self, name, visibilityexceptions)
698 698
699 699 @repofilecache('bookmarks', 'bookmarks.current')
700 700 def _bookmarks(self):
701 701 return bookmarks.bmstore(self)
702 702
703 703 @property
704 704 def _activebookmark(self):
705 705 return self._bookmarks.active
706 706
707 707 # _phasesets depend on changelog. what we need is to call
708 708 # _phasecache.invalidate() if '00changelog.i' was changed, but it
709 709 # can't be easily expressed in filecache mechanism.
710 710 @storecache('phaseroots', '00changelog.i')
711 711 def _phasecache(self):
712 712 return phases.phasecache(self, self._phasedefaults)
713 713
714 714 @storecache('obsstore')
715 715 def obsstore(self):
716 716 return obsolete.makestore(self.ui, self)
717 717
718 718 @storecache('00changelog.i')
719 719 def changelog(self):
720 720 return changelog.changelog(self.svfs,
721 721 trypending=txnutil.mayhavepending(self.root))
722 722
723 723 def _constructmanifest(self):
724 724 # This is a temporary function while we migrate from manifest to
725 725 # manifestlog. It allows bundlerepo and unionrepo to intercept the
726 726 # manifest creation.
727 727 return manifest.manifestrevlog(self.svfs)
728 728
729 729 @storecache('00manifest.i')
730 730 def manifestlog(self):
731 731 return manifest.manifestlog(self.svfs, self)
732 732
733 733 @repofilecache('dirstate')
734 734 def dirstate(self):
735 735 sparsematchfn = lambda: sparse.matcher(self)
736 736
737 737 return dirstate.dirstate(self.vfs, self.ui, self.root,
738 738 self._dirstatevalidate, sparsematchfn)
739 739
740 740 def _dirstatevalidate(self, node):
741 741 try:
742 742 self.changelog.rev(node)
743 743 return node
744 744 except error.LookupError:
745 745 if not self._dirstatevalidatewarned:
746 746 self._dirstatevalidatewarned = True
747 747 self.ui.warn(_("warning: ignoring unknown"
748 748 " working parent %s!\n") % short(node))
749 749 return nullid
750 750
751 751 @repofilecache(narrowspec.FILENAME)
752 752 def narrowpats(self):
753 753 """matcher patterns for this repository's narrowspec
754 754
755 755 A tuple of (includes, excludes).
756 756 """
757 757 source = self
758 758 if self.shared():
759 759 from . import hg
760 760 source = hg.sharedreposource(self)
761 761 return narrowspec.load(source)
762 762
763 763 @repofilecache(narrowspec.FILENAME)
764 764 def _narrowmatch(self):
765 765 if changegroup.NARROW_REQUIREMENT not in self.requirements:
766 766 return matchmod.always(self.root, '')
767 767 include, exclude = self.narrowpats
768 768 return narrowspec.match(self.root, include=include, exclude=exclude)
769 769
770 770 # TODO(martinvonz): make this property-like instead?
771 771 def narrowmatch(self):
772 772 return self._narrowmatch
773 773
774 774 def setnarrowpats(self, newincludes, newexcludes):
775 775 target = self
776 776 if self.shared():
777 777 from . import hg
778 778 target = hg.sharedreposource(self)
779 779 narrowspec.save(target, newincludes, newexcludes)
780 780 self.invalidate(clearfilecache=True)
781 781
782 782 def __getitem__(self, changeid):
783 783 if changeid is None:
784 784 return context.workingctx(self)
785 785 if isinstance(changeid, context.basectx):
786 786 return changeid
787 787 if isinstance(changeid, slice):
788 788 # wdirrev isn't contiguous so the slice shouldn't include it
789 789 return [context.changectx(self, i)
790 790 for i in xrange(*changeid.indices(len(self)))
791 791 if i not in self.changelog.filteredrevs]
792 792 try:
793 793 return context.changectx(self, changeid)
794 794 except error.WdirUnsupported:
795 795 return context.workingctx(self)
796 796
797 797 def __contains__(self, changeid):
798 798 """True if the given changeid exists
799 799
800 800 error.LookupError is raised if an ambiguous node is specified.
801 801 """
802 802 try:
803 803 self[changeid]
804 804 return True
805 805 except error.RepoLookupError:
806 806 return False
807 807
808 808 def __nonzero__(self):
809 809 return True
810 810
811 811 __bool__ = __nonzero__
812 812
813 813 def __len__(self):
814 814 # no need to pay the cost of repoview.changelog
815 815 unfi = self.unfiltered()
816 816 return len(unfi.changelog)
817 817
818 818 def __iter__(self):
819 819 return iter(self.changelog)
820 820
821 821 def revs(self, expr, *args):
822 822 '''Find revisions matching a revset.
823 823
824 824 The revset is specified as a string ``expr`` that may contain
825 825 %-formatting to escape certain types. See ``revsetlang.formatspec``.
826 826
827 827 Revset aliases from the configuration are not expanded. To expand
828 828 user aliases, consider calling ``scmutil.revrange()`` or
829 829 ``repo.anyrevs([expr], user=True)``.
830 830
831 831 Returns a revset.abstractsmartset, which is a list-like interface
832 832 that contains integer revisions.
833 833 '''
834 834 expr = revsetlang.formatspec(expr, *args)
835 835 m = revset.match(None, expr)
836 836 return m(self)
837 837
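A usage sketch for `revs()` based on the docstring above; the `%n` placeholder follows `revsetlang.formatspec` and takes a binary node, and `draftancestors` is a made-up helper.

```python
def draftancestors(repo, node):
    # integer revisions, computed lazily by the returned smartset
    for rev in repo.revs('ancestors(%n) and draft()', node):
        yield repo[rev]  # repo.set(...) is the changectx equivalent
```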
838 838 def set(self, expr, *args):
839 839 '''Find revisions matching a revset and emit changectx instances.
840 840
841 841 This is a convenience wrapper around ``revs()`` that iterates the
842 842 result and is a generator of changectx instances.
843 843
844 844 Revset aliases from the configuration are not expanded. To expand
845 845 user aliases, consider calling ``scmutil.revrange()``.
846 846 '''
847 847 for r in self.revs(expr, *args):
848 848 yield self[r]
849 849
850 850 def anyrevs(self, specs, user=False, localalias=None):
851 851 '''Find revisions matching one of the given revsets.
852 852
853 853 Revset aliases from the configuration are not expanded by default. To
854 854 expand user aliases, specify ``user=True``. To provide some local
855 855 definitions overriding user aliases, set ``localalias`` to
856 856 ``{name: definitionstring}``.
857 857 '''
858 858 if user:
859 859 m = revset.matchany(self.ui, specs, repo=self,
860 860 localalias=localalias)
861 861 else:
862 862 m = revset.matchany(None, specs, localalias=localalias)
863 863 return m(self)
864 864
865 865 def url(self):
866 866 return 'file:' + self.root
867 867
868 868 def hook(self, name, throw=False, **args):
869 869 """Call a hook, passing this repo instance.
870 870
871 871 This a convenience method to aid invoking hooks. Extensions likely
872 872 won't call this unless they have registered a custom hook or are
873 873 replacing code that is expected to call a hook.
874 874 """
875 875 return hook.hook(self.ui, self, name, throw, **args)
876 876
877 877 @filteredpropertycache
878 878 def _tagscache(self):
879 879 '''Returns a tagscache object that contains various tag-related
880 880 caches.'''
881 881
882 882 # This simplifies its cache management by having one decorated
883 883 # function (this one) and the rest simply fetch things from it.
884 884 class tagscache(object):
885 885 def __init__(self):
886 886 # These two define the set of tags for this repository. tags
887 887 # maps tag name to node; tagtypes maps tag name to 'global' or
888 888 # 'local'. (Global tags are defined by .hgtags across all
889 889 # heads, and local tags are defined in .hg/localtags.)
890 890 # They constitute the in-memory cache of tags.
891 891 self.tags = self.tagtypes = None
892 892
893 893 self.nodetagscache = self.tagslist = None
894 894
895 895 cache = tagscache()
896 896 cache.tags, cache.tagtypes = self._findtags()
897 897
898 898 return cache
899 899
900 900 def tags(self):
901 901 '''return a mapping of tag to node'''
902 902 t = {}
903 903 if self.changelog.filteredrevs:
904 904 tags, tt = self._findtags()
905 905 else:
906 906 tags = self._tagscache.tags
907 907 for k, v in tags.iteritems():
908 908 try:
909 909 # ignore tags to unknown nodes
910 910 self.changelog.rev(v)
911 911 t[k] = v
912 912 except (error.LookupError, ValueError):
913 913 pass
914 914 return t
915 915
916 916 def _findtags(self):
917 917 '''Do the hard work of finding tags. Return a pair of dicts
918 918 (tags, tagtypes) where tags maps tag name to node, and tagtypes
919 919 maps tag name to a string like \'global\' or \'local\'.
920 920 Subclasses or extensions are free to add their own tags, but
921 921 should be aware that the returned dicts will be retained for the
922 922 duration of the localrepo object.'''
923 923
924 924 # XXX what tagtype should subclasses/extensions use? Currently
925 925 # mq and bookmarks add tags, but do not set the tagtype at all.
926 926 # Should each extension invent its own tag type? Should there
927 927 # be one tagtype for all such "virtual" tags? Or is the status
928 928 # quo fine?
929 929
930 930
931 931 # map tag name to (node, hist)
932 932 alltags = tagsmod.findglobaltags(self.ui, self)
933 933 # map tag name to tag type
934 934 tagtypes = dict((tag, 'global') for tag in alltags)
935 935
936 936 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
937 937
938 938 # Build the return dicts. Have to re-encode tag names because
939 939 # the tags module always uses UTF-8 (in order not to lose info
940 940 # writing to the cache), but the rest of Mercurial wants them in
941 941 # local encoding.
942 942 tags = {}
943 943 for (name, (node, hist)) in alltags.iteritems():
944 944 if node != nullid:
945 945 tags[encoding.tolocal(name)] = node
946 946 tags['tip'] = self.changelog.tip()
947 947 tagtypes = dict([(encoding.tolocal(name), value)
948 948 for (name, value) in tagtypes.iteritems()])
949 949 return (tags, tagtypes)
950 950
951 951 def tagtype(self, tagname):
952 952 '''
953 953 return the type of the given tag. result can be:
954 954
955 955 'local' : a local tag
956 956 'global' : a global tag
957 957 None : tag does not exist
958 958 '''
959 959
960 960 return self._tagscache.tagtypes.get(tagname)
961 961
962 962 def tagslist(self):
963 963 '''return a list of tags ordered by revision'''
964 964 if not self._tagscache.tagslist:
965 965 l = []
966 966 for t, n in self.tags().iteritems():
967 967 l.append((self.changelog.rev(n), t, n))
968 968 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
969 969
970 970 return self._tagscache.tagslist
971 971
972 972 def nodetags(self, node):
973 973 '''return the tags associated with a node'''
974 974 if not self._tagscache.nodetagscache:
975 975 nodetagscache = {}
976 976 for t, n in self._tagscache.tags.iteritems():
977 977 nodetagscache.setdefault(n, []).append(t)
978 978 for tags in nodetagscache.itervalues():
979 979 tags.sort()
980 980 self._tagscache.nodetagscache = nodetagscache
981 981 return self._tagscache.nodetagscache.get(node, [])
982 982
983 983 def nodebookmarks(self, node):
984 984 """return the list of bookmarks pointing to the specified node"""
985 985 marks = []
986 986 for bookmark, n in self._bookmarks.iteritems():
987 987 if n == node:
988 988 marks.append(bookmark)
989 989 return sorted(marks)
990 990
991 991 def branchmap(self):
992 992 '''returns a dictionary {branch: [branchheads]} with branchheads
993 993 ordered by increasing revision number'''
994 994 branchmap.updatecache(self)
995 995 return self._branchcaches[self.filtername]
996 996
997 997 @unfilteredmethod
998 998 def revbranchcache(self):
999 999 if not self._revbranchcache:
1000 1000 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1001 1001 return self._revbranchcache
1002 1002
1003 1003 def branchtip(self, branch, ignoremissing=False):
1004 1004 '''return the tip node for a given branch
1005 1005
1006 1006 If ignoremissing is True, then this method will not raise an error.
1007 1007 This is helpful for callers that only expect None for a missing branch
1008 1008 (e.g. namespace).
1009 1009
1010 1010 '''
1011 1011 try:
1012 1012 return self.branchmap().branchtip(branch)
1013 1013 except KeyError:
1014 1014 if not ignoremissing:
1015 1015 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1016 1016 else:
1017 1017 pass
1018 1018
1019 1019 def lookup(self, key):
1020 1020 return scmutil.revsymbol(self, key).node()
1021 1021
1022 1022 def lookupbranch(self, key):
1023 1023 if key in self.branchmap():
1024 1024 return key
1025 1025
1026 return self[key].branch()
1026 return scmutil.revsymbol(self, key).branch()
1027 1027
1028 1028 def known(self, nodes):
1029 1029 cl = self.changelog
1030 1030 nm = cl.nodemap
1031 1031 filtered = cl.filteredrevs
1032 1032 result = []
1033 1033 for n in nodes:
1034 1034 r = nm.get(n)
1035 1035 resp = not (r is None or r in filtered)
1036 1036 result.append(resp)
1037 1037 return result
1038 1038
1039 1039 def local(self):
1040 1040 return self
1041 1041
1042 1042 def publishing(self):
1043 1043 # it's safe (and desirable) to trust the publish flag unconditionally
1044 1044 # so that we don't finalize changes shared between users via ssh or nfs
1045 1045 return self.ui.configbool('phases', 'publish', untrusted=True)
1046 1046
1047 1047 def cancopy(self):
1048 1048 # so statichttprepo's override of local() works
1049 1049 if not self.local():
1050 1050 return False
1051 1051 if not self.publishing():
1052 1052 return True
1053 1053 # if publishing we can't copy if there is filtered content
1054 1054 return not self.filtered('visible').changelog.filteredrevs
1055 1055
1056 1056 def shared(self):
1057 1057 '''the type of shared repository (None if not shared)'''
1058 1058 if self.sharedpath != self.path:
1059 1059 return 'store'
1060 1060 return None
1061 1061
1062 1062 def wjoin(self, f, *insidef):
1063 1063 return self.vfs.reljoin(self.root, f, *insidef)
1064 1064
1065 1065 def file(self, f):
1066 1066 if f[0] == '/':
1067 1067 f = f[1:]
1068 1068 return filelog.filelog(self.svfs, f)
1069 1069
1070 1070 def setparents(self, p1, p2=nullid):
1071 1071 with self.dirstate.parentchange():
1072 1072 copies = self.dirstate.setparents(p1, p2)
1073 1073 pctx = self[p1]
1074 1074 if copies:
1075 1075 # Adjust copy records; the dirstate cannot do it, as it
1076 1076 # requires access to the parents' manifests. Preserve them
1077 1077 # only for entries added to the first parent.
1078 1078 for f in copies:
1079 1079 if f not in pctx and copies[f] in pctx:
1080 1080 self.dirstate.copy(copies[f], f)
1081 1081 if p2 == nullid:
1082 1082 for f, s in sorted(self.dirstate.copies().items()):
1083 1083 if f not in pctx and s not in pctx:
1084 1084 self.dirstate.copy(None, f)
1085 1085
1086 1086 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1087 1087 """changeid can be a changeset revision, node, or tag.
1088 1088 fileid can be a file revision or node."""
1089 1089 return context.filectx(self, path, changeid, fileid,
1090 1090 changectx=changectx)
1091 1091
1092 1092 def getcwd(self):
1093 1093 return self.dirstate.getcwd()
1094 1094
1095 1095 def pathto(self, f, cwd=None):
1096 1096 return self.dirstate.pathto(f, cwd)
1097 1097
1098 1098 def _loadfilter(self, filter):
1099 1099 if filter not in self._filterpats:
1100 1100 l = []
1101 1101 for pat, cmd in self.ui.configitems(filter):
1102 1102 if cmd == '!':
1103 1103 continue
1104 1104 mf = matchmod.match(self.root, '', [pat])
1105 1105 fn = None
1106 1106 params = cmd
1107 1107 for name, filterfn in self._datafilters.iteritems():
1108 1108 if cmd.startswith(name):
1109 1109 fn = filterfn
1110 1110 params = cmd[len(name):].lstrip()
1111 1111 break
1112 1112 if not fn:
1113 1113 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1114 1114 # Wrap old filters not supporting keyword arguments
1115 1115 if not pycompat.getargspec(fn)[2]:
1116 1116 oldfn = fn
1117 1117 fn = lambda s, c, **kwargs: oldfn(s, c)
1118 1118 l.append((mf, fn, params))
1119 1119 self._filterpats[filter] = l
1120 1120 return self._filterpats[filter]
1121 1121
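A hypothetical sketch of the data-filter hook defined above: `adddatafilter()` registers a named filter function, and `_loadfilter()` matches it when an `[encode]`/`[decode]` config command starts with that name. The filter name `'upper:'` and function are made up for illustration.

```python
def upper(s, cmd, **kwargs):
    # toy filter; 'cmd' carries everything after the filter name
    return s.upper()

repo.adddatafilter('upper:', upper)
# with e.g. '[encode] **.txt = upper:' in the config, wread() would now
# route *.txt contents through this function
```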
1122 1122 def _filter(self, filterpats, filename, data):
1123 1123 for mf, fn, cmd in filterpats:
1124 1124 if mf(filename):
1125 1125 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1126 1126 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1127 1127 break
1128 1128
1129 1129 return data
1130 1130
1131 1131 @unfilteredpropertycache
1132 1132 def _encodefilterpats(self):
1133 1133 return self._loadfilter('encode')
1134 1134
1135 1135 @unfilteredpropertycache
1136 1136 def _decodefilterpats(self):
1137 1137 return self._loadfilter('decode')
1138 1138
1139 1139 def adddatafilter(self, name, filter):
1140 1140 self._datafilters[name] = filter
1141 1141
1142 1142 def wread(self, filename):
1143 1143 if self.wvfs.islink(filename):
1144 1144 data = self.wvfs.readlink(filename)
1145 1145 else:
1146 1146 data = self.wvfs.read(filename)
1147 1147 return self._filter(self._encodefilterpats, filename, data)
1148 1148
1149 1149 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1150 1150 """write ``data`` into ``filename`` in the working directory
1151 1151
1152 1152 This returns the length of the written (maybe decoded) data.
1153 1153 """
1154 1154 data = self._filter(self._decodefilterpats, filename, data)
1155 1155 if 'l' in flags:
1156 1156 self.wvfs.symlink(data, filename)
1157 1157 else:
1158 1158 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1159 1159 **kwargs)
1160 1160 if 'x' in flags:
1161 1161 self.wvfs.setflags(filename, False, True)
1162 1162 else:
1163 1163 self.wvfs.setflags(filename, False, False)
1164 1164 return len(data)
1165 1165
1166 1166 def wwritedata(self, filename, data):
1167 1167 return self._filter(self._decodefilterpats, filename, data)
1168 1168
1169 1169 def currenttransaction(self):
1170 1170 """return the current transaction or None if non exists"""
1171 1171 if self._transref:
1172 1172 tr = self._transref()
1173 1173 else:
1174 1174 tr = None
1175 1175
1176 1176 if tr and tr.running():
1177 1177 return tr
1178 1178 return None
1179 1179
1180 1180 def transaction(self, desc, report=None):
1181 1181 if (self.ui.configbool('devel', 'all-warnings')
1182 1182 or self.ui.configbool('devel', 'check-locks')):
1183 1183 if self._currentlock(self._lockref) is None:
1184 1184 raise error.ProgrammingError('transaction requires locking')
1185 1185 tr = self.currenttransaction()
1186 1186 if tr is not None:
1187 1187 return tr.nest(name=desc)
1188 1188
1189 1189 # abort here if the journal already exists
1190 1190 if self.svfs.exists("journal"):
1191 1191 raise error.RepoError(
1192 1192 _("abandoned transaction found"),
1193 1193 hint=_("run 'hg recover' to clean up transaction"))
1194 1194
1195 1195 idbase = "%.40f#%f" % (random.random(), time.time())
1196 1196 ha = hex(hashlib.sha1(idbase).digest())
1197 1197 txnid = 'TXN:' + ha
1198 1198 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1199 1199
1200 1200 self._writejournal(desc)
1201 1201 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1202 1202 if report:
1203 1203 rp = report
1204 1204 else:
1205 1205 rp = self.ui.warn
1206 1206 vfsmap = {'plain': self.vfs} # root of .hg/
1207 1207 # we must avoid a cyclic reference between repo and transaction.
1208 1208 reporef = weakref.ref(self)
1209 1209 # Code to track tag movement
1210 1210 #
1211 1211 # Since tags are all handled as file content, it is actually quite hard
1212 1212 # to track these movements from a code perspective. So we fall back to
1213 1213 # tracking at the repository level. One could envision tracking changes
1214 1214 # to the '.hgtags' file through changegroup apply, but that fails to
1215 1215 # cope with cases where a transaction exposes new heads without a
1216 1216 # changegroup being involved (eg: phase movement).
1217 1217 #
1218 1218 # For now, we gate the feature behind a flag since this likely comes
1219 1219 # with performance impacts. The current code runs more often than needed
1220 1220 # and does not use caches as much as it could. The current focus is on
1221 1221 # the behavior of the feature so we disable it by default. The flag
1222 1222 # will be removed when we are happy with the performance impact.
1223 1223 #
1224 1224 # Once this feature is no longer experimental move the following
1225 1225 # documentation to the appropriate help section:
1226 1226 #
1227 1227 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1228 1228 # tags (new or changed or deleted tags). In addition the details of
1229 1229 # these changes are made available in a file at:
1230 1230 # ``REPOROOT/.hg/changes/tags.changes``.
1231 1231 # Make sure you check for HG_TAG_MOVED before reading that file as it
1232 1232 # might exist from a previous transaction even if no tags were touched
1233 1233 # in this one. Changes are recorded in a line-based format::
1234 1234 #
1235 1235 # <action> <hex-node> <tag-name>\n
1236 1236 #
1237 1237 # Actions are defined as follows:
1238 1238 # "-R": tag is removed,
1239 1239 # "+A": tag is added,
1240 1240 # "-M": tag is moved (old value),
1241 1241 # "+M": tag is moved (new value),
1242 1242 tracktags = lambda x: None
1243 1243 # experimental config: experimental.hook-track-tags
1244 1244 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1245 1245 if desc != 'strip' and shouldtracktags:
1246 1246 oldheads = self.changelog.headrevs()
1247 1247 def tracktags(tr2):
1248 1248 repo = reporef()
1249 1249 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1250 1250 newheads = repo.changelog.headrevs()
1251 1251 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1252 1252 # note: we compare lists here.
1253 1253 # As we do it only once, building a set would not be cheaper
1254 1254 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1255 1255 if changes:
1256 1256 tr2.hookargs['tag_moved'] = '1'
1257 1257 with repo.vfs('changes/tags.changes', 'w',
1258 1258 atomictemp=True) as changesfile:
1259 1259 # note: we do not register the file with the transaction
1260 1260 # because we need it to still exist when the transaction
1261 1261 # is closed (for txnclose hooks)
1262 1262 tagsmod.writediff(changesfile, changes)
1263 1263 def validate(tr2):
1264 1264 """will run pre-closing hooks"""
1265 1265 # XXX the transaction API is a bit lacking here so we take a hacky
1266 1266 # path for now
1267 1267 #
1268 1268 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1269 1269 # dict is copied before these run. In addition we need the data
1270 1270 # available to in-memory hooks too.
1271 1271 #
1272 1272 # Moreover, we also need to make sure this runs before txnclose
1273 1273 # hooks and there is no "pending" mechanism that would execute
1274 1274 # logic only if hooks are about to run.
1275 1275 #
1276 1276 # Fixing this limitation of the transaction is also needed to track
1277 1277 # other families of changes (bookmarks, phases, obsolescence).
1278 1278 #
1279 1279 # This will have to be fixed before we remove the experimental
1280 1280 # gating.
1281 1281 tracktags(tr2)
1282 1282 repo = reporef()
1283 1283 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1284 1284 scmutil.enforcesinglehead(repo, tr2, desc)
1285 1285 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1286 1286 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1287 1287 args = tr.hookargs.copy()
1288 1288 args.update(bookmarks.preparehookargs(name, old, new))
1289 1289 repo.hook('pretxnclose-bookmark', throw=True,
1290 1290 txnname=desc,
1291 1291 **pycompat.strkwargs(args))
1292 1292 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1293 1293 cl = repo.unfiltered().changelog
1294 1294 for rev, (old, new) in tr.changes['phases'].items():
1295 1295 args = tr.hookargs.copy()
1296 1296 node = hex(cl.node(rev))
1297 1297 args.update(phases.preparehookargs(node, old, new))
1298 1298 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1299 1299 **pycompat.strkwargs(args))
1300 1300
1301 1301 repo.hook('pretxnclose', throw=True,
1302 1302 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1303 1303 def releasefn(tr, success):
1304 1304 repo = reporef()
1305 1305 if success:
1306 1306 # this should be explicitly invoked here, because
1307 1307 # in-memory changes aren't written out when closing
1308 1308 # the transaction if tr.addfilegenerator (via
1309 1309 # dirstate.write or so) wasn't invoked while the
1310 1310 # transaction was running
1311 1311 repo.dirstate.write(None)
1312 1312 else:
1313 1313 # discard all changes (including ones already written
1314 1314 # out) in this transaction
1315 1315 repo.dirstate.restorebackup(None, 'journal.dirstate')
1316 1316
1317 1317 repo.invalidate(clearfilecache=True)
1318 1318
1319 1319 tr = transaction.transaction(rp, self.svfs, vfsmap,
1320 1320 "journal",
1321 1321 "undo",
1322 1322 aftertrans(renames),
1323 1323 self.store.createmode,
1324 1324 validator=validate,
1325 1325 releasefn=releasefn,
1326 1326 checkambigfiles=_cachedfiles,
1327 1327 name=desc)
1328 1328 tr.changes['revs'] = xrange(0, 0)
1329 1329 tr.changes['obsmarkers'] = set()
1330 1330 tr.changes['phases'] = {}
1331 1331 tr.changes['bookmarks'] = {}
1332 1332
1333 1333 tr.hookargs['txnid'] = txnid
1334 1334 # note: writing the fncache only during finalize means that the file is
1335 1335 # outdated when running hooks. As fncache is used for streaming clone,
1336 1336 # this is not expected to break anything that happens during the hooks.
1337 1337 tr.addfinalize('flush-fncache', self.store.write)
1338 1338 def txnclosehook(tr2):
1339 1339 """To be run if transaction is successful, will schedule a hook run
1340 1340 """
1341 1341 # Don't reference tr2 in hook() so we don't hold a reference.
1342 1342 # This reduces memory consumption when there are multiple
1343 1343 # transactions per lock. This can likely go away if issue5045
1344 1344 # fixes the function accumulation.
1345 1345 hookargs = tr2.hookargs
1346 1346
1347 1347 def hookfunc():
1348 1348 repo = reporef()
1349 1349 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1350 1350 bmchanges = sorted(tr.changes['bookmarks'].items())
1351 1351 for name, (old, new) in bmchanges:
1352 1352 args = tr.hookargs.copy()
1353 1353 args.update(bookmarks.preparehookargs(name, old, new))
1354 1354 repo.hook('txnclose-bookmark', throw=False,
1355 1355 txnname=desc, **pycompat.strkwargs(args))
1356 1356
1357 1357 if hook.hashook(repo.ui, 'txnclose-phase'):
1358 1358 cl = repo.unfiltered().changelog
1359 1359 phasemv = sorted(tr.changes['phases'].items())
1360 1360 for rev, (old, new) in phasemv:
1361 1361 args = tr.hookargs.copy()
1362 1362 node = hex(cl.node(rev))
1363 1363 args.update(phases.preparehookargs(node, old, new))
1364 1364 repo.hook('txnclose-phase', throw=False, txnname=desc,
1365 1365 **pycompat.strkwargs(args))
1366 1366
1367 1367 repo.hook('txnclose', throw=False, txnname=desc,
1368 1368 **pycompat.strkwargs(hookargs))
1369 1369 reporef()._afterlock(hookfunc)
1370 1370 tr.addfinalize('txnclose-hook', txnclosehook)
1371 1371 # Include a leading "-" to make it happen before the transaction summary
1372 1372 # reports registered via scmutil.registersummarycallback() whose names
1373 1373 # are 00-txnreport etc. That way, the caches will be warm when the
1374 1374 # callbacks run.
1375 1375 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1376 1376 def txnaborthook(tr2):
1377 1377 """To be run if transaction is aborted
1378 1378 """
1379 1379 reporef().hook('txnabort', throw=False, txnname=desc,
1380 1380 **pycompat.strkwargs(tr2.hookargs))
1381 1381 tr.addabort('txnabort-hook', txnaborthook)
1382 1382 # avoid eager cache invalidation. in-memory data should be identical
1383 1383 # to stored data if transaction has no error.
1384 1384 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1385 1385 self._transref = weakref.ref(tr)
1386 1386 scmutil.registersummarycallback(self, tr, desc)
1387 1387 return tr
1388 1388
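A hypothetical caller-side sketch of the transaction API defined above; per the devel checks at the top of transaction(), the store lock must already be held. The desc string `'my-operation'` and `dosomething` are made up for illustration.

```python
def dosomething(repo):
    with repo.lock():
        tr = repo.transaction('my-operation')
        try:
            # ... write store data, registering files with tr ...
            tr.close()    # runs validate/pretxnclose, then txnclose hooks
        finally:
            tr.release()  # aborts (txnabort hooks) if close() never ran
```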
1389 1389 def _journalfiles(self):
1390 1390 return ((self.svfs, 'journal'),
1391 1391 (self.vfs, 'journal.dirstate'),
1392 1392 (self.vfs, 'journal.branch'),
1393 1393 (self.vfs, 'journal.desc'),
1394 1394 (self.vfs, 'journal.bookmarks'),
1395 1395 (self.svfs, 'journal.phaseroots'))
1396 1396
1397 1397 def undofiles(self):
1398 1398 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1399 1399
1400 1400 @unfilteredmethod
1401 1401 def _writejournal(self, desc):
1402 1402 self.dirstate.savebackup(None, 'journal.dirstate')
1403 1403 self.vfs.write("journal.branch",
1404 1404 encoding.fromlocal(self.dirstate.branch()))
1405 1405 self.vfs.write("journal.desc",
1406 1406 "%d\n%s\n" % (len(self), desc))
1407 1407 self.vfs.write("journal.bookmarks",
1408 1408 self.vfs.tryread("bookmarks"))
1409 1409 self.svfs.write("journal.phaseroots",
1410 1410 self.svfs.tryread("phaseroots"))
1411 1411
1412 1412 def recover(self):
1413 1413 with self.lock():
1414 1414 if self.svfs.exists("journal"):
1415 1415 self.ui.status(_("rolling back interrupted transaction\n"))
1416 1416 vfsmap = {'': self.svfs,
1417 1417 'plain': self.vfs,}
1418 1418 transaction.rollback(self.svfs, vfsmap, "journal",
1419 1419 self.ui.warn,
1420 1420 checkambigfiles=_cachedfiles)
1421 1421 self.invalidate()
1422 1422 return True
1423 1423 else:
1424 1424 self.ui.warn(_("no interrupted transaction available\n"))
1425 1425 return False
1426 1426
1427 1427 def rollback(self, dryrun=False, force=False):
1428 1428 wlock = lock = dsguard = None
1429 1429 try:
1430 1430 wlock = self.wlock()
1431 1431 lock = self.lock()
1432 1432 if self.svfs.exists("undo"):
1433 1433 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1434 1434
1435 1435 return self._rollback(dryrun, force, dsguard)
1436 1436 else:
1437 1437 self.ui.warn(_("no rollback information available\n"))
1438 1438 return 1
1439 1439 finally:
1440 1440 release(dsguard, lock, wlock)
1441 1441
1442 1442 @unfilteredmethod # Until we get smarter cache management
1443 1443 def _rollback(self, dryrun, force, dsguard):
1444 1444 ui = self.ui
1445 1445 try:
1446 1446 args = self.vfs.read('undo.desc').splitlines()
1447 1447 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1448 1448 if len(args) >= 3:
1449 1449 detail = args[2]
1450 1450 oldtip = oldlen - 1
1451 1451
1452 1452 if detail and ui.verbose:
1453 1453 msg = (_('repository tip rolled back to revision %d'
1454 1454 ' (undo %s: %s)\n')
1455 1455 % (oldtip, desc, detail))
1456 1456 else:
1457 1457 msg = (_('repository tip rolled back to revision %d'
1458 1458 ' (undo %s)\n')
1459 1459 % (oldtip, desc))
1460 1460 except IOError:
1461 1461 msg = _('rolling back unknown transaction\n')
1462 1462 desc = None
1463 1463
1464 1464 if not force and self['.'] != self['tip'] and desc == 'commit':
1465 1465 raise error.Abort(
1466 1466 _('rollback of last commit while not checked out '
1467 1467 'may lose data'), hint=_('use -f to force'))
1468 1468
1469 1469 ui.status(msg)
1470 1470 if dryrun:
1471 1471 return 0
1472 1472
1473 1473 parents = self.dirstate.parents()
1474 1474 self.destroying()
1475 1475 vfsmap = {'plain': self.vfs, '': self.svfs}
1476 1476 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1477 1477 checkambigfiles=_cachedfiles)
1478 1478 if self.vfs.exists('undo.bookmarks'):
1479 1479 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1480 1480 if self.svfs.exists('undo.phaseroots'):
1481 1481 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1482 1482 self.invalidate()
1483 1483
1484 1484 parentgone = (parents[0] not in self.changelog.nodemap or
1485 1485 parents[1] not in self.changelog.nodemap)
1486 1486 if parentgone:
1487 1487 # prevent dirstateguard from overwriting already restored one
1488 1488 dsguard.close()
1489 1489
1490 1490 self.dirstate.restorebackup(None, 'undo.dirstate')
1491 1491 try:
1492 1492 branch = self.vfs.read('undo.branch')
1493 1493 self.dirstate.setbranch(encoding.tolocal(branch))
1494 1494 except IOError:
1495 1495 ui.warn(_('named branch could not be reset: '
1496 1496 'current branch is still \'%s\'\n')
1497 1497 % self.dirstate.branch())
1498 1498
1499 1499 parents = tuple([p.rev() for p in self[None].parents()])
1500 1500 if len(parents) > 1:
1501 1501 ui.status(_('working directory now based on '
1502 1502 'revisions %d and %d\n') % parents)
1503 1503 else:
1504 1504 ui.status(_('working directory now based on '
1505 1505 'revision %d\n') % parents)
1506 1506 mergemod.mergestate.clean(self, self['.'].node())
1507 1507
1508 1508 # TODO: if we know which new heads may result from this rollback, pass
1509 1509 # them to destroy(), which will prevent the branchhead cache from being
1510 1510 # invalidated.
1511 1511 self.destroyed()
1512 1512 return 0
1513 1513
1514 1514 def _buildcacheupdater(self, newtransaction):
1515 1515 """called during transaction to build the callback updating cache
1516 1516
1517 1517 Lives on the repository to help extension who might want to augment
1518 1518 this logic. For this purpose, the created transaction is passed to the
1519 1519 method.
1520 1520 """
1521 1521 # we must avoid cyclic reference between repo and transaction.
1522 1522 reporef = weakref.ref(self)
1523 1523 def updater(tr):
1524 1524 repo = reporef()
1525 1525 repo.updatecaches(tr)
1526 1526 return updater
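
# Illustrative, self-contained sketch (simplified stand-ins, not the real
# localrepository) of why updater() above holds only a weak reference: the
# transaction keeps the callback alive, so a strong reference back to the
# repo would create a reference cycle.

import gc
import weakref

class fakerepo(object):
    def updatecaches(self, tr):
        print('warming caches for %s' % tr)
    def buildupdater(self):
        reporef = weakref.ref(self)      # weak: no repo <-> callback cycle
        def updater(tr):
            repo = reporef()
            if repo is not None:         # the repo may have been collected
                repo.updatecaches(tr)
        return updater

repo = fakerepo()
cb = repo.buildupdater()
cb('tr1')            # -> warming caches for tr1
del repo             # drop the only strong reference
gc.collect()
cb('tr2')            # does nothing; the weakref is now dead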
1527 1527
1528 1528 @unfilteredmethod
1529 1529 def updatecaches(self, tr=None, full=False):
1530 1530 """warm appropriate caches
1531 1531
1532 1532 If this function is called after a transaction has closed, the
1533 1533 transaction will be available in the 'tr' argument. This can be used
1534 1534 to selectively update caches relevant to the changes in that transaction.
1535 1535
1536 1536 If 'full' is set, make sure all caches the function knows about have
1537 1537 up-to-date data, even the ones usually loaded more lazily.
1538 1538 """
1539 1539 if tr is not None and tr.hookargs.get('source') == 'strip':
1540 1540 # During strip, many caches are invalid but
1541 1541 # later call to `destroyed` will refresh them.
1542 1542 return
1543 1543
1544 1544 if tr is None or tr.changes['revs']:
1545 1545 # updating the unfiltered branchmap should refresh all the others,
1546 1546 self.ui.debug('updating the branch cache\n')
1547 1547 branchmap.updatecache(self.filtered('served'))
1548 1548
1549 1549 if full:
1550 1550 rbc = self.revbranchcache()
1551 1551 for r in self.changelog:
1552 1552 rbc.branchinfo(r)
1553 1553 rbc.write()
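
# Sketch of a caller warming everything through the method above ('repo'
# is assumed to be an open localrepository); the try/finally locking style
# mirrors the pattern used elsewhere in this file.

wlock = lock = None
try:
    wlock = repo.wlock()
    lock = repo.lock()
    repo.updatecaches(full=True)   # branch cache plus rev-branch cache
finally:
    release(lock, wlock)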
1554 1554
1555 1555 def invalidatecaches(self):
1556 1556
1557 1557 if '_tagscache' in vars(self):
1558 1558 # can't use delattr on proxy
1559 1559 del self.__dict__['_tagscache']
1560 1560
1561 1561 self.unfiltered()._branchcaches.clear()
1562 1562 self.invalidatevolatilesets()
1563 1563 self._sparsesignaturecache.clear()
1564 1564
1565 1565 def invalidatevolatilesets(self):
1566 1566 self.filteredrevcache.clear()
1567 1567 obsolete.clearobscaches(self)
1568 1568
1569 1569 def invalidatedirstate(self):
1570 1570 '''Invalidates the dirstate, causing the next call to dirstate
1571 1571 to check if it was modified since the last time it was read,
1572 1572 rereading it if it has.
1573 1573
1574 1574 This is different from dirstate.invalidate() in that it doesn't
1575 1575 always reread the dirstate. Use dirstate.invalidate() if you want to
1576 1576 explicitly read the dirstate again (i.e. restoring it to a previous
1577 1577 known good state).'''
1578 1578 if hasunfilteredcache(self, 'dirstate'):
1579 1579 for k in self.dirstate._filecache:
1580 1580 try:
1581 1581 delattr(self.dirstate, k)
1582 1582 except AttributeError:
1583 1583 pass
1584 1584 delattr(self.unfiltered(), 'dirstate')
1585 1585
1586 1586 def invalidate(self, clearfilecache=False):
1587 1587 '''Invalidates both store and non-store parts other than dirstate
1588 1588
1589 1589 If a transaction is running, invalidation of store is omitted,
1590 1590 because discarding in-memory changes might cause inconsistency
1591 1591 (e.g. an incomplete fncache causes unintentional failure, but a
1592 1592 redundant one doesn't).
1593 1593 '''
1594 1594 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1595 1595 for k in list(self._filecache.keys()):
1596 1596 # dirstate is invalidated separately in invalidatedirstate()
1597 1597 if k == 'dirstate':
1598 1598 continue
1599 1599 if (k == 'changelog' and
1600 1600 self.currenttransaction() and
1601 1601 self.changelog._delayed):
1602 1602 # The changelog object may store unwritten revisions. We don't
1603 1603 # want to lose them.
1604 1604 # TODO: Solve the problem instead of working around it.
1605 1605 continue
1606 1606
1607 1607 if clearfilecache:
1608 1608 del self._filecache[k]
1609 1609 try:
1610 1610 delattr(unfiltered, k)
1611 1611 except AttributeError:
1612 1612 pass
1613 1613 self.invalidatecaches()
1614 1614 if not self.currenttransaction():
1615 1615 # TODO: Changing contents of store outside transaction
1616 1616 # causes inconsistency. We should make in-memory store
1617 1617 # changes detectable, and abort if changed.
1618 1618 self.store.invalidatecaches()
1619 1619
1620 1620 def invalidateall(self):
1621 1621 '''Fully invalidates both store and non-store parts, causing the
1622 1622 subsequent operation to reread any outside changes.'''
1623 1623 # extension should hook this to invalidate its caches
1624 1624 self.invalidate()
1625 1625 self.invalidatedirstate()
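
# Minimal, self-contained sketch of the cached-property pattern the
# invalidate* methods above rely on: a non-data descriptor caches its value
# in the instance __dict__, and deleting that entry forces a reload on the
# next access (names here are illustrative, not the real filecache).

class cachedproperty(object):
    def __init__(self, func):
        self.func = func
        self.name = func.__name__
    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        value = self.func(obj)
        obj.__dict__[self.name] = value   # shadows the descriptor from now on
        return value

class fakerepo(object):
    @cachedproperty
    def changelog(self):
        print('reloading changelog from disk')
        return object()

repo = fakerepo()
repo.changelog                    # reloads
repo.changelog                    # cached: no output
del repo.__dict__['changelog']    # "invalidate": next access reloads
repo.changelog                    # reloads again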
1626 1626
1627 1627 @unfilteredmethod
1628 1628 def _refreshfilecachestats(self, tr):
1629 1629 """Reload stats of cached files so that they are flagged as valid"""
1630 1630 for k, ce in self._filecache.items():
1631 1631 k = pycompat.sysstr(k)
1632 1632 if k == r'dirstate' or k not in self.__dict__:
1633 1633 continue
1634 1634 ce.refresh()
1635 1635
1636 1636 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1637 1637 inheritchecker=None, parentenvvar=None):
1638 1638 parentlock = None
1639 1639 # the contents of parentenvvar are used by the underlying lock to
1640 1640 # determine whether it can be inherited
1641 1641 if parentenvvar is not None:
1642 1642 parentlock = encoding.environ.get(parentenvvar)
1643 1643
1644 1644 timeout = 0
1645 1645 warntimeout = 0
1646 1646 if wait:
1647 1647 timeout = self.ui.configint("ui", "timeout")
1648 1648 warntimeout = self.ui.configint("ui", "timeout.warn")
1649 1649
1650 1650 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1651 1651 releasefn=releasefn,
1652 1652 acquirefn=acquirefn, desc=desc,
1653 1653 inheritchecker=inheritchecker,
1654 1654 parentlock=parentlock)
1655 1655 return l
1656 1656
1657 1657 def _afterlock(self, callback):
1658 1658 """add a callback to be run when the repository is fully unlocked
1659 1659
1660 1660 The callback will be executed when the outermost lock is released
1661 1661 (with wlock being higher level than 'lock')."""
1662 1662 for ref in (self._wlockref, self._lockref):
1663 1663 l = ref and ref()
1664 1664 if l and l.held:
1665 1665 l.postrelease.append(callback)
1666 1666 break
1667 1667 else: # no lock has been found.
1668 1668 callback()
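
# Self-contained sketch of the _afterlock() contract above: if a lock is
# held, the callback is queued on its postrelease list; otherwise it runs
# immediately (simplified stand-in classes, not the real lock module).

class fakelock(object):
    def __init__(self):
        self.held = True
        self.postrelease = []
    def release(self):
        self.held = False
        for cb in self.postrelease:
            cb()

def afterlock(locks, callback):
    for l in locks:
        if l is not None and l.held:
            l.postrelease.append(callback)
            break
    else:                       # no lock has been found: run right away
        callback()

def report():
    print('all locks released')

l = fakelock()
afterlock([l], report)   # queued: nothing printed yet
l.release()              # -> all locks released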
1669 1669
1670 1670 def lock(self, wait=True):
1671 1671 '''Lock the repository store (.hg/store) and return a weak reference
1672 1672 to the lock. Use this before modifying the store (e.g. committing or
1673 1673 stripping). If you are opening a transaction, get a lock as well.
1674 1674
1675 1675 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1676 1676 'wlock' first to avoid a dead-lock hazard.'''
1677 1677 l = self._currentlock(self._lockref)
1678 1678 if l is not None:
1679 1679 l.lock()
1680 1680 return l
1681 1681
1682 1682 l = self._lock(self.svfs, "lock", wait, None,
1683 1683 self.invalidate, _('repository %s') % self.origroot)
1684 1684 self._lockref = weakref.ref(l)
1685 1685 return l
1686 1686
1687 1687 def _wlockchecktransaction(self):
1688 1688 if self.currenttransaction() is not None:
1689 1689 raise error.LockInheritanceContractViolation(
1690 1690 'wlock cannot be inherited in the middle of a transaction')
1691 1691
1692 1692 def wlock(self, wait=True):
1693 1693 '''Lock the non-store parts of the repository (everything under
1694 1694 .hg except .hg/store) and return a weak reference to the lock.
1695 1695
1696 1696 Use this before modifying files in .hg.
1697 1697
1698 1698 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1699 1699 'wlock' first to avoid a dead-lock hazard.'''
1700 1700 l = self._wlockref and self._wlockref()
1701 1701 if l is not None and l.held:
1702 1702 l.lock()
1703 1703 return l
1704 1704
1705 1705 # We do not need to check for non-waiting lock acquisition. Such
1706 1706 # acquisition would not cause a dead-lock, as it would just fail.
1707 1707 if wait and (self.ui.configbool('devel', 'all-warnings')
1708 1708 or self.ui.configbool('devel', 'check-locks')):
1709 1709 if self._currentlock(self._lockref) is not None:
1710 1710 self.ui.develwarn('"wlock" acquired after "lock"')
1711 1711
1712 1712 def unlock():
1713 1713 if self.dirstate.pendingparentchange():
1714 1714 self.dirstate.invalidate()
1715 1715 else:
1716 1716 self.dirstate.write(None)
1717 1717
1718 1718 self._filecache['dirstate'].refresh()
1719 1719
1720 1720 l = self._lock(self.vfs, "wlock", wait, unlock,
1721 1721 self.invalidatedirstate, _('working directory of %s') %
1722 1722 self.origroot,
1723 1723 inheritchecker=self._wlockchecktransaction,
1724 1724 parentenvvar='HG_WLOCK_LOCKER')
1725 1725 self._wlockref = weakref.ref(l)
1726 1726 return l
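
# Sketch of the acquisition order both docstrings above require ('repo' is
# assumed to be an open localrepository); this is the same pattern commit()
# uses below: wlock before lock, released in reverse order.

wlock = lock = None
try:
    wlock = repo.wlock()
    lock = repo.lock()
    # ... modify the store and the working copy ...
finally:
    release(lock, wlock)   # lockmod.release, imported at the top of this file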
1727 1727
1728 1728 def _currentlock(self, lockref):
1729 1729 """Returns the lock if it's held, or None if it's not."""
1730 1730 if lockref is None:
1731 1731 return None
1732 1732 l = lockref()
1733 1733 if l is None or not l.held:
1734 1734 return None
1735 1735 return l
1736 1736
1737 1737 def currentwlock(self):
1738 1738 """Returns the wlock if it's held, or None if it's not."""
1739 1739 return self._currentlock(self._wlockref)
1740 1740
1741 1741 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1742 1742 """
1743 1743 commit an individual file as part of a larger transaction
1744 1744 """
1745 1745
1746 1746 fname = fctx.path()
1747 1747 fparent1 = manifest1.get(fname, nullid)
1748 1748 fparent2 = manifest2.get(fname, nullid)
1749 1749 if isinstance(fctx, context.filectx):
1750 1750 node = fctx.filenode()
1751 1751 if node in [fparent1, fparent2]:
1752 1752 self.ui.debug('reusing %s filelog entry\n' % fname)
1753 1753 if manifest1.flags(fname) != fctx.flags():
1754 1754 changelist.append(fname)
1755 1755 return node
1756 1756
1757 1757 flog = self.file(fname)
1758 1758 meta = {}
1759 1759 copy = fctx.renamed()
1760 1760 if copy and copy[0] != fname:
1761 1761 # Mark the new revision of this file as a copy of another
1762 1762 # file. This copy data will effectively act as a parent
1763 1763 # of this new revision. If this is a merge, the first
1764 1764 # parent will be the nullid (meaning "look up the copy data")
1765 1765 # and the second one will be the other parent. For example:
1766 1766 #
1767 1767 # 0 --- 1 --- 3 rev1 changes file foo
1768 1768 # \ / rev2 renames foo to bar and changes it
1769 1769 # \- 2 -/ rev3 should have bar with all changes and
1770 1770 # should record that bar descends from
1771 1771 # bar in rev2 and foo in rev1
1772 1772 #
1773 1773 # this allows this merge to succeed:
1774 1774 #
1775 1775 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1776 1776 # \ / merging rev3 and rev4 should use bar@rev2
1777 1777 # \- 2 --- 4 as the merge base
1778 1778 #
1779 1779
1780 1780 cfname = copy[0]
1781 1781 crev = manifest1.get(cfname)
1782 1782 newfparent = fparent2
1783 1783
1784 1784 if manifest2: # branch merge
1785 1785 if fparent2 == nullid or crev is None: # copied on remote side
1786 1786 if cfname in manifest2:
1787 1787 crev = manifest2[cfname]
1788 1788 newfparent = fparent1
1789 1789
1790 1790 # Here, we used to search backwards through history to try to find
1791 1791 # where the file copy came from if the source of a copy was not in
1792 1792 # the parent directory. However, this doesn't actually make sense to
1793 1793 # do (what does a copy from something not in your working copy even
1794 1794 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1795 1795 # the user that copy information was dropped, so if they didn't
1796 1796 # expect this outcome it can be fixed, but this is the correct
1797 1797 # behavior in this circumstance.
1798 1798
1799 1799 if crev:
1800 1800 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1801 1801 meta["copy"] = cfname
1802 1802 meta["copyrev"] = hex(crev)
1803 1803 fparent1, fparent2 = nullid, newfparent
1804 1804 else:
1805 1805 self.ui.warn(_("warning: can't find ancestor for '%s' "
1806 1806 "copied from '%s'!\n") % (fname, cfname))
1807 1807
1808 1808 elif fparent1 == nullid:
1809 1809 fparent1, fparent2 = fparent2, nullid
1810 1810 elif fparent2 != nullid:
1811 1811 # is one parent an ancestor of the other?
1812 1812 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1813 1813 if fparent1 in fparentancestors:
1814 1814 fparent1, fparent2 = fparent2, nullid
1815 1815 elif fparent2 in fparentancestors:
1816 1816 fparent2 = nullid
1817 1817
1818 1818 # is the file changed?
1819 1819 text = fctx.data()
1820 1820 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1821 1821 changelist.append(fname)
1822 1822 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1823 1823 # are just the flags changed during merge?
1824 1824 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1825 1825 changelist.append(fname)
1826 1826
1827 1827 return fparent1
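
# Shape of the filelog metadata recorded above for a rename (values are
# purely illustrative): instead of a real first parent, the new revision
# of 'bar' carries a pointer back to the source 'foo'.

meta = {
    'copy': 'foo',        # path the file was copied/renamed from
    'copyrev': 'a1b2c3',  # hex filelog node of the source (illustrative)
}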
1828 1828
1829 1829 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1830 1830 """check for commit arguments that aren't committable"""
1831 1831 if match.isexact() or match.prefix():
1832 1832 matched = set(status.modified + status.added + status.removed)
1833 1833
1834 1834 for f in match.files():
1835 1835 f = self.dirstate.normalize(f)
1836 1836 if f == '.' or f in matched or f in wctx.substate:
1837 1837 continue
1838 1838 if f in status.deleted:
1839 1839 fail(f, _('file not found!'))
1840 1840 if f in vdirs: # visited directory
1841 1841 d = f + '/'
1842 1842 for mf in matched:
1843 1843 if mf.startswith(d):
1844 1844 break
1845 1845 else:
1846 1846 fail(f, _("no match under directory!"))
1847 1847 elif f not in self.dirstate:
1848 1848 fail(f, _("file not tracked!"))
1849 1849
1850 1850 @unfilteredmethod
1851 1851 def commit(self, text="", user=None, date=None, match=None, force=False,
1852 1852 editor=False, extra=None):
1853 1853 """Add a new revision to current repository.
1854 1854
1855 1855 Revision information is gathered from the working directory;
1856 1856 match can be used to filter the committed files. If editor is
1857 1857 supplied, it is called to get a commit message.
1858 1858 """
1859 1859 if extra is None:
1860 1860 extra = {}
1861 1861
1862 1862 def fail(f, msg):
1863 1863 raise error.Abort('%s: %s' % (f, msg))
1864 1864
1865 1865 if not match:
1866 1866 match = matchmod.always(self.root, '')
1867 1867
1868 1868 if not force:
1869 1869 vdirs = []
1870 1870 match.explicitdir = vdirs.append
1871 1871 match.bad = fail
1872 1872
1873 1873 wlock = lock = tr = None
1874 1874 try:
1875 1875 wlock = self.wlock()
1876 1876 lock = self.lock() # for recent changelog (see issue4368)
1877 1877
1878 1878 wctx = self[None]
1879 1879 merge = len(wctx.parents()) > 1
1880 1880
1881 1881 if not force and merge and not match.always():
1882 1882 raise error.Abort(_('cannot partially commit a merge '
1883 1883 '(do not specify files or patterns)'))
1884 1884
1885 1885 status = self.status(match=match, clean=force)
1886 1886 if force:
1887 1887 status.modified.extend(status.clean) # mq may commit clean files
1888 1888
1889 1889 # check subrepos
1890 1890 subs, commitsubs, newstate = subrepoutil.precommit(
1891 1891 self.ui, wctx, status, match, force=force)
1892 1892
1893 1893 # make sure all explicit patterns are matched
1894 1894 if not force:
1895 1895 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1896 1896
1897 1897 cctx = context.workingcommitctx(self, status,
1898 1898 text, user, date, extra)
1899 1899
1900 1900 # internal config: ui.allowemptycommit
1901 1901 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1902 1902 or extra.get('close') or merge or cctx.files()
1903 1903 or self.ui.configbool('ui', 'allowemptycommit'))
1904 1904 if not allowemptycommit:
1905 1905 return None
1906 1906
1907 1907 if merge and cctx.deleted():
1908 1908 raise error.Abort(_("cannot commit merge with missing files"))
1909 1909
1910 1910 ms = mergemod.mergestate.read(self)
1911 1911 mergeutil.checkunresolved(ms)
1912 1912
1913 1913 if editor:
1914 1914 cctx._text = editor(self, cctx, subs)
1915 1915 edited = (text != cctx._text)
1916 1916
1917 1917 # Save commit message in case this transaction gets rolled back
1918 1918 # (e.g. by a pretxncommit hook). Leave the content alone on
1919 1919 # the assumption that the user will use the same editor again.
1920 1920 msgfn = self.savecommitmessage(cctx._text)
1921 1921
1922 1922 # commit subs and write new state
1923 1923 if subs:
1924 1924 for s in sorted(commitsubs):
1925 1925 sub = wctx.sub(s)
1926 1926 self.ui.status(_('committing subrepository %s\n') %
1927 1927 subrepoutil.subrelpath(sub))
1928 1928 sr = sub.commit(cctx._text, user, date)
1929 1929 newstate[s] = (newstate[s][0], sr)
1930 1930 subrepoutil.writestate(self, newstate)
1931 1931
1932 1932 p1, p2 = self.dirstate.parents()
1933 1933 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1934 1934 try:
1935 1935 self.hook("precommit", throw=True, parent1=hookp1,
1936 1936 parent2=hookp2)
1937 1937 tr = self.transaction('commit')
1938 1938 ret = self.commitctx(cctx, True)
1939 1939 except: # re-raises
1940 1940 if edited:
1941 1941 self.ui.write(
1942 1942 _('note: commit message saved in %s\n') % msgfn)
1943 1943 raise
1944 1944 # update bookmarks, dirstate and mergestate
1945 1945 bookmarks.update(self, [p1, p2], ret)
1946 1946 cctx.markcommitted(ret)
1947 1947 ms.reset()
1948 1948 tr.close()
1949 1949
1950 1950 finally:
1951 1951 lockmod.release(tr, lock, wlock)
1952 1952
1953 1953 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1954 1954 # hack for commands that use a temporary commit (e.g. histedit):
1955 1955 # the temporary commit may have been stripped before the hook runs
1956 1956 if self.changelog.hasnode(ret):
1957 1957 self.hook("commit", node=node, parent1=parent1,
1958 1958 parent2=parent2)
1959 1959 self._afterlock(commithook)
1960 1960 return ret
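
# Minimal sketch of driving the commit() API above (paths and message are
# assumptions; 'repo' is an open localrepository): commit two explicit
# files, the way a command would, and detect the nothing-changed case.

match = matchmod.exact(repo.root, '', ['a.txt', 'b.txt'])
node = repo.commit(text='example message', user='alice', match=match)
if node is None:
    repo.ui.status('nothing changed\n')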
1961 1961
1962 1962 @unfilteredmethod
1963 1963 def commitctx(self, ctx, error=False):
1964 1964 """Add a new revision to current repository.
1965 1965 Revision information is passed via the context argument.
1966 1966 """
1967 1967
1968 1968 tr = None
1969 1969 p1, p2 = ctx.p1(), ctx.p2()
1970 1970 user = ctx.user()
1971 1971
1972 1972 lock = self.lock()
1973 1973 try:
1974 1974 tr = self.transaction("commit")
1975 1975 trp = weakref.proxy(tr)
1976 1976
1977 1977 if ctx.manifestnode():
1978 1978 # reuse an existing manifest revision
1979 1979 mn = ctx.manifestnode()
1980 1980 files = ctx.files()
1981 1981 elif ctx.files():
1982 1982 m1ctx = p1.manifestctx()
1983 1983 m2ctx = p2.manifestctx()
1984 1984 mctx = m1ctx.copy()
1985 1985
1986 1986 m = mctx.read()
1987 1987 m1 = m1ctx.read()
1988 1988 m2 = m2ctx.read()
1989 1989
1990 1990 # check in files
1991 1991 added = []
1992 1992 changed = []
1993 1993 removed = list(ctx.removed())
1994 1994 linkrev = len(self)
1995 1995 self.ui.note(_("committing files:\n"))
1996 1996 for f in sorted(ctx.modified() + ctx.added()):
1997 1997 self.ui.note(f + "\n")
1998 1998 try:
1999 1999 fctx = ctx[f]
2000 2000 if fctx is None:
2001 2001 removed.append(f)
2002 2002 else:
2003 2003 added.append(f)
2004 2004 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2005 2005 trp, changed)
2006 2006 m.setflag(f, fctx.flags())
2007 2007 except OSError as inst:
2008 2008 self.ui.warn(_("trouble committing %s!\n") % f)
2009 2009 raise
2010 2010 except IOError as inst:
2011 2011 errcode = getattr(inst, 'errno', errno.ENOENT)
2012 2012 if error or errcode and errcode != errno.ENOENT:
2013 2013 self.ui.warn(_("trouble committing %s!\n") % f)
2014 2014 raise
2015 2015
2016 2016 # update manifest
2017 2017 self.ui.note(_("committing manifest\n"))
2018 2018 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2019 2019 drop = [f for f in removed if f in m]
2020 2020 for f in drop:
2021 2021 del m[f]
2022 2022 mn = mctx.write(trp, linkrev,
2023 2023 p1.manifestnode(), p2.manifestnode(),
2024 2024 added, drop)
2025 2025 files = changed + removed
2026 2026 else:
2027 2027 mn = p1.manifestnode()
2028 2028 files = []
2029 2029
2030 2030 # update changelog
2031 2031 self.ui.note(_("committing changelog\n"))
2032 2032 self.changelog.delayupdate(tr)
2033 2033 n = self.changelog.add(mn, files, ctx.description(),
2034 2034 trp, p1.node(), p2.node(),
2035 2035 user, ctx.date(), ctx.extra().copy())
2036 2036 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2037 2037 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2038 2038 parent2=xp2)
2039 2039 # set the new commit in its proper phase
2040 2040 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2041 2041 if targetphase:
2042 2042 # retracting the boundary does not alter the parent changesets.
2043 2043 # if a parent has a higher phase, the resulting phase will
2044 2044 # be compliant anyway
2045 2045 #
2046 2046 # if minimal phase was 0 we don't need to retract anything
2047 2047 phases.registernew(self, tr, targetphase, [n])
2048 2048 tr.close()
2049 2049 return n
2050 2050 finally:
2051 2051 if tr:
2052 2052 tr.release()
2053 2053 lock.release()
2054 2054
2055 2055 @unfilteredmethod
2056 2056 def destroying(self):
2057 2057 '''Inform the repository that nodes are about to be destroyed.
2058 2058 Intended for use by strip and rollback, so there's a common
2059 2059 place for anything that has to be done before destroying history.
2060 2060
2061 2061 This is mostly useful for saving state that is in memory and waiting
2062 2062 to be flushed when the current lock is released. Because a call to
2063 2063 destroyed is imminent, the repo will be invalidated causing those
2064 2064 changes to stay in memory (waiting for the next unlock), or vanish
2065 2065 completely.
2066 2066 '''
2067 2067 # When using the same lock to commit and strip, the phasecache is left
2068 2068 # dirty after committing. Then when we strip, the repo is invalidated,
2069 2069 # causing those changes to disappear.
2070 2070 if '_phasecache' in vars(self):
2071 2071 self._phasecache.write()
2072 2072
2073 2073 @unfilteredmethod
2074 2074 def destroyed(self):
2075 2075 '''Inform the repository that nodes have been destroyed.
2076 2076 Intended for use by strip and rollback, so there's a common
2077 2077 place for anything that has to be done after destroying history.
2078 2078 '''
2079 2079 # When one tries to:
2080 2080 # 1) destroy nodes thus calling this method (e.g. strip)
2081 2081 # 2) use phasecache somewhere (e.g. commit)
2082 2082 #
2083 2083 # then 2) will fail because the phasecache contains nodes that were
2084 2084 # removed. We can either remove phasecache from the filecache,
2085 2085 # causing it to reload next time it is accessed, or simply filter
2086 2086 # the removed nodes now and write the updated cache.
2087 2087 self._phasecache.filterunknown(self)
2088 2088 self._phasecache.write()
2089 2089
2090 2090 # refresh all repository caches
2091 2091 self.updatecaches()
2092 2092
2093 2093 # Ensure the persistent tag cache is updated. Doing it now
2094 2094 # means that the tag cache only has to worry about destroyed
2095 2095 # heads immediately after a strip/rollback. That in turn
2096 2096 # guarantees that "cachetip == currenttip" (comparing both rev
2097 2097 # and node) always means no nodes have been added or destroyed.
2098 2098
2099 2099 # XXX this is suboptimal when qrefresh'ing: we strip the current
2100 2100 # head, refresh the tag cache, then immediately add a new head.
2101 2101 # But I think doing it this way is necessary for the "instant
2102 2102 # tag cache retrieval" case to work.
2103 2103 self.invalidate()
2104 2104
2105 2105 def status(self, node1='.', node2=None, match=None,
2106 2106 ignored=False, clean=False, unknown=False,
2107 2107 listsubrepos=False):
2108 2108 '''a convenience method that calls node1.status(node2)'''
2109 2109 return self[node1].status(node2, match, ignored, clean, unknown,
2110 2110 listsubrepos)
2111 2111
2112 2112 def addpostdsstatus(self, ps):
2113 2113 """Add a callback to run within the wlock, at the point at which status
2114 2114 fixups happen.
2115 2115
2116 2116 On status completion, callback(wctx, status) will be called with the
2117 2117 wlock held, unless the dirstate has changed from underneath or the wlock
2118 2118 couldn't be grabbed.
2119 2119
2120 2120 Callbacks should not capture and use a cached copy of the dirstate --
2121 2121 it might change in the meantime. Instead, they should access the
2122 2122 dirstate via wctx.repo().dirstate.
2123 2123
2124 2124 This list is emptied out after each status run -- extensions should
2125 2125 make sure they add to this list each time dirstate.status is called.
2126 2126 Extensions should also make sure they don't call this for statuses
2127 2127 that don't involve the dirstate.
2128 2128 """
2129 2129
2130 2130 # The list is located here for uniqueness reasons -- it is actually
2131 2131 # managed by the workingctx, but that isn't unique per-repo.
2132 2132 self._postdsstatus.append(ps)
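
# Illustrative callback honoring the contract documented above (the body
# is an assumption, not real extension code): it reaches the dirstate
# through wctx.repo() instead of capturing a cached copy.

def poststatus(wctx, status):
    repo = wctx.repo()               # never use a captured dirstate
    repo.ui.note('%d files modified\n' % len(status.modified))

repo.addpostdsstatus(poststatus)     # re-registered before each status run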
2133 2133
2134 2134 def postdsstatus(self):
2135 2135 """Used by workingctx to get the list of post-dirstate-status hooks."""
2136 2136 return self._postdsstatus
2137 2137
2138 2138 def clearpostdsstatus(self):
2139 2139 """Used by workingctx to clear post-dirstate-status hooks."""
2140 2140 del self._postdsstatus[:]
2141 2141
2142 2142 def heads(self, start=None):
2143 2143 if start is None:
2144 2144 cl = self.changelog
2145 2145 headrevs = reversed(cl.headrevs())
2146 2146 return [cl.node(rev) for rev in headrevs]
2147 2147
2148 2148 heads = self.changelog.heads(start)
2149 2149 # sort the output in rev descending order
2150 2150 return sorted(heads, key=self.changelog.rev, reverse=True)
2151 2151
2152 2152 def branchheads(self, branch=None, start=None, closed=False):
2153 2153 '''return a (possibly filtered) list of heads for the given branch
2154 2154
2155 2155 Heads are returned in topological order, from newest to oldest.
2156 2156 If branch is None, use the dirstate branch.
2157 2157 If start is not None, return only heads reachable from start.
2158 2158 If closed is True, return heads that are marked as closed as well.
2159 2159 '''
2160 2160 if branch is None:
2161 2161 branch = self[None].branch()
2162 2162 branches = self.branchmap()
2163 2163 if branch not in branches:
2164 2164 return []
2165 2165 # the cache returns heads ordered lowest to highest
2166 2166 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2167 2167 if start is not None:
2168 2168 # filter out the heads that cannot be reached from startrev
2169 2169 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2170 2170 bheads = [h for h in bheads if h in fbheads]
2171 2171 return bheads
2172 2172
2173 2173 def branches(self, nodes):
2174 2174 if not nodes:
2175 2175 nodes = [self.changelog.tip()]
2176 2176 b = []
2177 2177 for n in nodes:
2178 2178 t = n
2179 2179 while True:
2180 2180 p = self.changelog.parents(n)
2181 2181 if p[1] != nullid or p[0] == nullid:
2182 2182 b.append((t, n, p[0], p[1]))
2183 2183 break
2184 2184 n = p[0]
2185 2185 return b
2186 2186
2187 2187 def between(self, pairs):
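"""for each (top, bottom) pair, walk the first-parent chain from top
towards bottom and collect the nodes found at exponentially growing
distances (1, 2, 4, 8, ...) from top, returning one list of sampled
nodes per pair"""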
2188 2188 r = []
2189 2189
2190 2190 for top, bottom in pairs:
2191 2191 n, l, i = top, [], 0
2192 2192 f = 1
2193 2193
2194 2194 while n != bottom and n != nullid:
2195 2195 p = self.changelog.parents(n)[0]
2196 2196 if i == f:
2197 2197 l.append(n)
2198 2198 f = f * 2
2199 2199 n = p
2200 2200 i += 1
2201 2201
2202 2202 r.append(l)
2203 2203
2204 2204 return r
2205 2205
2206 2206 def checkpush(self, pushop):
2207 2207 """Extensions can override this function if additional checks have
2208 2208 to be performed before pushing, or call it if they override push
2209 2209 command.
2210 2210 """
2211 2211
2212 2212 @unfilteredpropertycache
2213 2213 def prepushoutgoinghooks(self):
2214 2214 """Return util.hooks consists of a pushop with repo, remote, outgoing
2215 2215 methods, which are called before pushing changesets.
2216 2216 """
2217 2217 return util.hooks()
2218 2218
2219 2219 def pushkey(self, namespace, key, old, new):
2220 2220 try:
2221 2221 tr = self.currenttransaction()
2222 2222 hookargs = {}
2223 2223 if tr is not None:
2224 2224 hookargs.update(tr.hookargs)
2225 2225 hookargs = pycompat.strkwargs(hookargs)
2226 2226 hookargs[r'namespace'] = namespace
2227 2227 hookargs[r'key'] = key
2228 2228 hookargs[r'old'] = old
2229 2229 hookargs[r'new'] = new
2230 2230 self.hook('prepushkey', throw=True, **hookargs)
2231 2231 except error.HookAbort as exc:
2232 2232 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2233 2233 if exc.hint:
2234 2234 self.ui.write_err(_("(%s)\n") % exc.hint)
2235 2235 return False
2236 2236 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2237 2237 ret = pushkey.push(self, namespace, key, old, new)
2238 2238 def runhook():
2239 2239 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2240 2240 ret=ret)
2241 2241 self._afterlock(runhook)
2242 2242 return ret
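
# Illustrative pushkey call (bookmark name and 'node' are assumptions):
# publishing a bookmark through the generic pushkey mechanism above; an
# empty old value means the key is expected not to exist yet.

ok = repo.pushkey('bookmarks', 'feature', '', hex(node))
if not ok:
    repo.ui.warn('bookmark was not updated\n')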
2243 2243
2244 2244 def listkeys(self, namespace):
2245 2245 self.hook('prelistkeys', throw=True, namespace=namespace)
2246 2246 self.ui.debug('listing keys for "%s"\n' % namespace)
2247 2247 values = pushkey.list(self, namespace)
2248 2248 self.hook('listkeys', namespace=namespace, values=values)
2249 2249 return values
2250 2250
2251 2251 def debugwireargs(self, one, two, three=None, four=None, five=None):
2252 2252 '''used to test argument passing over the wire'''
2253 2253 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2254 2254 pycompat.bytestr(four),
2255 2255 pycompat.bytestr(five))
2256 2256
2257 2257 def savecommitmessage(self, text):
2258 2258 fp = self.vfs('last-message.txt', 'wb')
2259 2259 try:
2260 2260 fp.write(text)
2261 2261 finally:
2262 2262 fp.close()
2263 2263 return self.pathto(fp.name[len(self.root) + 1:])
2264 2264
2265 2265 # used to avoid circular references so destructors work
2266 2266 def aftertrans(files):
2267 2267 renamefiles = [tuple(t) for t in files]
2268 2268 def a():
2269 2269 for vfs, src, dest in renamefiles:
2270 2270 # if src and dest refer to the same file, vfs.rename is a no-op,
2271 2271 # leaving both src and dest on disk. delete dest to make sure
2272 2272 # the rename couldn't be such a no-op.
2273 2273 vfs.tryunlink(dest)
2274 2274 try:
2275 2275 vfs.rename(src, dest)
2276 2276 except OSError: # journal file does not yet exist
2277 2277 pass
2278 2278 return a
2279 2279
2280 2280 def undoname(fn):
2281 2281 base, name = os.path.split(fn)
2282 2282 assert name.startswith('journal')
2283 2283 return os.path.join(base, name.replace('journal', 'undo', 1))
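
# Quick sanity checks of the journal -> undo mapping performed above
# (assuming POSIX-style path separators):

assert undoname('.hg/store/journal') == '.hg/store/undo'
assert undoname('.hg/journal.dirstate') == '.hg/undo.dirstate'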
2284 2284
2285 2285 def instance(ui, path, create):
2286 2286 return localrepository(ui, util.urllocalpath(path), create)
2287 2287
2288 2288 def islocal(path):
2289 2289 return True
2290 2290
2291 2291 def newreporequirements(repo):
2292 2292 """Determine the set of requirements for a new local repository.
2293 2293
2294 2294 Extensions can wrap this function to specify custom requirements for
2295 2295 new repositories.
2296 2296 """
2297 2297 ui = repo.ui
2298 2298 requirements = {'revlogv1'}
2299 2299 if ui.configbool('format', 'usestore'):
2300 2300 requirements.add('store')
2301 2301 if ui.configbool('format', 'usefncache'):
2302 2302 requirements.add('fncache')
2303 2303 if ui.configbool('format', 'dotencode'):
2304 2304 requirements.add('dotencode')
2305 2305
2306 2306 compengine = ui.config('experimental', 'format.compression')
2307 2307 if compengine not in util.compengines:
2308 2308 raise error.Abort(_('compression engine %s defined by '
2309 2309 'experimental.format.compression not available') %
2310 2310 compengine,
2311 2311 hint=_('run "hg debuginstall" to list available '
2312 2312 'compression engines'))
2313 2313
2314 2314 # zlib is the historical default and doesn't need an explicit requirement.
2315 2315 if compengine != 'zlib':
2316 2316 requirements.add('exp-compression-%s' % compengine)
2317 2317
2318 2318 if scmutil.gdinitconfig(ui):
2319 2319 requirements.add('generaldelta')
2320 2320 if ui.configbool('experimental', 'treemanifest'):
2321 2321 requirements.add('treemanifest')
2322 2322
2323 2323 revlogv2 = ui.config('experimental', 'revlogv2')
2324 2324 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2325 2325 requirements.remove('revlogv1')
2326 2326 # generaldelta is implied by revlogv2.
2327 2327 requirements.discard('generaldelta')
2328 2328 requirements.add(REVLOGV2_REQUIREMENT)
2329 2329
2330 2330 return requirements
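
# Sketch of an extension customizing the requirements above, as the
# docstring suggests (the module layout and the requirement name are
# assumptions, not real extension code):

from mercurial import extensions, localrepo

def _newreporequirements(orig, repo):
    requirements = orig(repo)
    requirements.add('exp-myfeature')   # hypothetical custom requirement
    return requirements

def uisetup(ui):
    extensions.wrapfunction(localrepo, 'newreporequirements',
                            _newreporequirements)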