localrepo: unconditionally enable general delta with sparse revlogs...
Boris Feld
r38783:17da52bb stable
@@ -1,2395 +1,2397 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 )
24 24 from . import (
25 25 bookmarks,
26 26 branchmap,
27 27 bundle2,
28 28 changegroup,
29 29 changelog,
30 30 color,
31 31 context,
32 32 dirstate,
33 33 dirstateguard,
34 34 discovery,
35 35 encoding,
36 36 error,
37 37 exchange,
38 38 extensions,
39 39 filelog,
40 40 hook,
41 41 lock as lockmod,
42 42 manifest,
43 43 match as matchmod,
44 44 merge as mergemod,
45 45 mergeutil,
46 46 namespaces,
47 47 narrowspec,
48 48 obsolete,
49 49 pathutil,
50 50 phases,
51 51 pushkey,
52 52 pycompat,
53 53 repository,
54 54 repoview,
55 55 revset,
56 56 revsetlang,
57 57 scmutil,
58 58 sparse,
59 59 store,
60 60 subrepoutil,
61 61 tags as tagsmod,
62 62 transaction,
63 63 txnutil,
64 64 util,
65 65 vfs as vfsmod,
66 66 )
67 67 from .utils import (
68 68 interfaceutil,
69 69 procutil,
70 70 stringutil,
71 71 )
72 72
73 73 release = lockmod.release
74 74 urlerr = util.urlerr
75 75 urlreq = util.urlreq
76 76
77 77 # set of (path, vfs-location) tuples. vfs-location is:
78 78 # - 'plain' for vfs relative paths
79 79 # - '' for svfs relative paths
80 80 _cachedfiles = set()
81 81
82 82 class _basefilecache(scmutil.filecache):
83 83 """All filecache usage on repo are done for logic that should be unfiltered
84 84 """
85 85 def __get__(self, repo, type=None):
86 86 if repo is None:
87 87 return self
88 88 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
89 89 def __set__(self, repo, value):
90 90 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
91 91 def __delete__(self, repo):
92 92 return super(_basefilecache, self).__delete__(repo.unfiltered())
93 93
94 94 class repofilecache(_basefilecache):
95 95 """filecache for files in .hg but outside of .hg/store"""
96 96 def __init__(self, *paths):
97 97 super(repofilecache, self).__init__(*paths)
98 98 for path in paths:
99 99 _cachedfiles.add((path, 'plain'))
100 100
101 101 def join(self, obj, fname):
102 102 return obj.vfs.join(fname)
103 103
104 104 class storecache(_basefilecache):
105 105 """filecache for files in the store"""
106 106 def __init__(self, *paths):
107 107 super(storecache, self).__init__(*paths)
108 108 for path in paths:
109 109 _cachedfiles.add((path, ''))
110 110
111 111 def join(self, obj, fname):
112 112 return obj.sjoin(fname)
113 113
114 114 def isfilecached(repo, name):
115 115 """check if a repo has already cached "name" filecache-ed property
116 116
117 117 This returns (cachedobj-or-None, iscached) tuple.
118 118 """
119 119 cacheentry = repo.unfiltered()._filecache.get(name, None)
120 120 if not cacheentry:
121 121 return None, False
122 122 return cacheentry.obj, True
123 123
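The plumbing above is what lets expensive properties be rebuilt only when their backing file changes on disk. A minimal sketch of how this file uses it further down, plus the probing helper (`repo` is any localrepository instance):

    class localrepository(object):
        @storecache('00changelog.i')      # tracked under .hg/store
        def changelog(self):
            ...

        @repofilecache('dirstate')        # tracked under .hg
        def dirstate(self):
            ...

    # (cached object or None, whether it was cached)
    obj, iscached = isfilecached(repo, 'changelog')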
124 124 class unfilteredpropertycache(util.propertycache):
125 125 """propertycache that apply to unfiltered repo only"""
126 126
127 127 def __get__(self, repo, type=None):
128 128 unfi = repo.unfiltered()
129 129 if unfi is repo:
130 130 return super(unfilteredpropertycache, self).__get__(unfi)
131 131 return getattr(unfi, self.name)
132 132
133 133 class filteredpropertycache(util.propertycache):
134 134 """propertycache that must take filtering in account"""
135 135
136 136 def cachevalue(self, obj, value):
137 137 object.__setattr__(obj, self.name, value)
138 138
139 139
140 140 def hasunfilteredcache(repo, name):
141 141 """check if a repo has an unfilteredpropertycache value for <name>"""
142 142 return name in vars(repo.unfiltered())
143 143
144 144 def unfilteredmethod(orig):
145 145 """decorate method that always need to be run on unfiltered version"""
146 146 def wrapper(repo, *args, **kwargs):
147 147 return orig(repo.unfiltered(), *args, **kwargs)
148 148 return wrapper
149 149
150 150 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
151 151 'unbundle'}
152 152 legacycaps = moderncaps.union({'changegroupsubset'})
153 153
154 154 @interfaceutil.implementer(repository.ipeercommandexecutor)
155 155 class localcommandexecutor(object):
156 156 def __init__(self, peer):
157 157 self._peer = peer
158 158 self._sent = False
159 159 self._closed = False
160 160
161 161 def __enter__(self):
162 162 return self
163 163
164 164 def __exit__(self, exctype, excvalue, exctb):
165 165 self.close()
166 166
167 167 def callcommand(self, command, args):
168 168 if self._sent:
169 169 raise error.ProgrammingError('callcommand() cannot be used after '
170 170 'sendcommands()')
171 171
172 172 if self._closed:
173 173 raise error.ProgrammingError('callcommand() cannot be used after '
174 174 'close()')
175 175
176 176 # We don't need to support anything fancy. Just call the named
177 177 # method on the peer and return a resolved future.
178 178 fn = getattr(self._peer, pycompat.sysstr(command))
179 179
180 180 f = pycompat.futures.Future()
181 181
182 182 try:
183 183 result = fn(**pycompat.strkwargs(args))
184 184 except Exception:
185 185 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
186 186 else:
187 187 f.set_result(result)
188 188
189 189 return f
190 190
191 191 def sendcommands(self):
192 192 self._sent = True
193 193
194 194 def close(self):
195 195 self._closed = True
196 196
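A sketch of the calling convention this executor implements; the guards above are what make misordered calls fail loudly. For a local peer the future is already resolved when callcommand() returns:

    with repo.peer().commandexecutor() as e:
        f = e.callcommand('lookup', {'key': 'tip'})
        node = f.result()    # re-raises if the command raised
    # further callcommand() calls would now raise ProgrammingError,
    # since close() has marked the executor closed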
197 197 @interfaceutil.implementer(repository.ipeercommands)
198 198 class localpeer(repository.peer):
199 199 '''peer for a local repo; reflects only the most recent API'''
200 200
201 201 def __init__(self, repo, caps=None):
202 202 super(localpeer, self).__init__()
203 203
204 204 if caps is None:
205 205 caps = moderncaps.copy()
206 206 self._repo = repo.filtered('served')
207 207 self.ui = repo.ui
208 208 self._caps = repo._restrictcapabilities(caps)
209 209
210 210 # Begin of _basepeer interface.
211 211
212 212 def url(self):
213 213 return self._repo.url()
214 214
215 215 def local(self):
216 216 return self._repo
217 217
218 218 def peer(self):
219 219 return self
220 220
221 221 def canpush(self):
222 222 return True
223 223
224 224 def close(self):
225 225 self._repo.close()
226 226
227 227 # End of _basepeer interface.
228 228
229 229 # Begin of _basewirecommands interface.
230 230
231 231 def branchmap(self):
232 232 return self._repo.branchmap()
233 233
234 234 def capabilities(self):
235 235 return self._caps
236 236
237 237 def clonebundles(self):
238 238 return self._repo.tryread('clonebundles.manifest')
239 239
240 240 def debugwireargs(self, one, two, three=None, four=None, five=None):
241 241 """Used to test argument passing over the wire"""
242 242 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
243 243 pycompat.bytestr(four),
244 244 pycompat.bytestr(five))
245 245
246 246 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
247 247 **kwargs):
248 248 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
249 249 common=common, bundlecaps=bundlecaps,
250 250 **kwargs)[1]
251 251 cb = util.chunkbuffer(chunks)
252 252
253 253 if exchange.bundle2requested(bundlecaps):
254 254 # When requesting a bundle2, getbundle returns a stream to make the
255 255 # wire level function happier. We need to build a proper object
256 256 # from it in local peer.
257 257 return bundle2.getunbundler(self.ui, cb)
258 258 else:
259 259 return changegroup.getunbundler('01', cb, None)
260 260
261 261 def heads(self):
262 262 return self._repo.heads()
263 263
264 264 def known(self, nodes):
265 265 return self._repo.known(nodes)
266 266
267 267 def listkeys(self, namespace):
268 268 return self._repo.listkeys(namespace)
269 269
270 270 def lookup(self, key):
271 271 return self._repo.lookup(key)
272 272
273 273 def pushkey(self, namespace, key, old, new):
274 274 return self._repo.pushkey(namespace, key, old, new)
275 275
276 276 def stream_out(self):
277 277 raise error.Abort(_('cannot perform stream clone against local '
278 278 'peer'))
279 279
280 280 def unbundle(self, bundle, heads, url):
281 281 """apply a bundle on a repo
282 282
283 283 This function handles the repo locking itself."""
284 284 try:
285 285 try:
286 286 bundle = exchange.readbundle(self.ui, bundle, None)
287 287 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
288 288 if util.safehasattr(ret, 'getchunks'):
289 289 # This is a bundle20 object, turn it into an unbundler.
290 290 # This little dance should be dropped eventually when the
291 291 # API is finally improved.
292 292 stream = util.chunkbuffer(ret.getchunks())
293 293 ret = bundle2.getunbundler(self.ui, stream)
294 294 return ret
295 295 except Exception as exc:
296 296 # If the exception contains output salvaged from a bundle2
297 297 # reply, we need to make sure it is printed before continuing
298 298 # to fail. So we build a bundle2 with such output and consume
299 299 # it directly.
300 300 #
301 301 # This is not very elegant but allows a "simple" solution for
302 302 # issue4594
303 303 output = getattr(exc, '_bundle2salvagedoutput', ())
304 304 if output:
305 305 bundler = bundle2.bundle20(self._repo.ui)
306 306 for out in output:
307 307 bundler.addpart(out)
308 308 stream = util.chunkbuffer(bundler.getchunks())
309 309 b = bundle2.getunbundler(self.ui, stream)
310 310 bundle2.processbundle(self._repo, b)
311 311 raise
312 312 except error.PushRaced as exc:
313 313 raise error.ResponseError(_('push failed:'),
314 314 stringutil.forcebytestr(exc))
315 315
316 316 # End of _basewirecommands interface.
317 317
318 318 # Begin of peer interface.
319 319
320 320 def commandexecutor(self):
321 321 return localcommandexecutor(self)
322 322
323 323 # End of peer interface.
324 324
325 325 @interfaceutil.implementer(repository.ipeerlegacycommands)
326 326 class locallegacypeer(localpeer):
327 327 '''peer extension which implements legacy methods too; used for tests with
328 328 restricted capabilities'''
329 329
330 330 def __init__(self, repo):
331 331 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
332 332
333 333 # Begin of baselegacywirecommands interface.
334 334
335 335 def between(self, pairs):
336 336 return self._repo.between(pairs)
337 337
338 338 def branches(self, nodes):
339 339 return self._repo.branches(nodes)
340 340
341 341 def changegroup(self, nodes, source):
342 342 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
343 343 missingheads=self._repo.heads())
344 344 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
345 345
346 346 def changegroupsubset(self, bases, heads, source):
347 347 outgoing = discovery.outgoing(self._repo, missingroots=bases,
348 348 missingheads=heads)
349 349 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
350 350
351 351 # End of baselegacywirecommands interface.
352 352
353 353 # Increment the sub-version when the revlog v2 format changes to lock out old
354 354 # clients.
355 355 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
356 356
357 357 # A repository with the sparserevlog feature will have delta chains that
358 358 # can spread over a larger span. Sparse reading cuts these large spans into
359 359 # pieces, so that each piece isn't too big.
360 360 # Without the sparserevlog capability, reading from the repository could use
361 361 # huge amounts of memory, because the whole span would be read at once,
362 362 # including all the intermediate revisions that aren't pertinent for the chain.
363 363 # This is why once a repository has enabled sparse-read, it becomes required.
364 364 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
365 365
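The requirement string is persisted in `.hg/requires` (one entry per line) like the rest of `self.requirements`. A minimal sketch of probing for it from a plain script, assuming a non-shared repository at `path`:

    import os

    def has_sparse_revlog(path):
        with open(os.path.join(path, '.hg', 'requires')) as fp:
            return 'sparserevlog' in fp.read().splitlines()

Since sparse delta chains presuppose deltas against arbitrary base revisions, _applyopenerreqs() below now forces the generaldelta opener option on whenever this requirement is present.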
366 366 # Functions receiving (ui, features) that extensions can register to impact
367 367 # the ability to load repositories with custom requirements. Only
368 368 # functions defined in loaded extensions are called.
369 369 #
370 370 # The function receives a set of requirement strings that the repository
371 371 # is capable of opening. Functions will typically add elements to the
372 372 # set to reflect that the extension knows how to handle those requirements.
373 373 featuresetupfuncs = set()
374 374
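A sketch of how an extension plugs into this hook; the `__module__` check below (in __init__) means the function must live in the loaded extension's own module. The extension and requirement names here are hypothetical:

    # myext.py
    from mercurial import localrepo

    def featuresetup(ui, features):
        # declare that this extension can open repos with this requirement
        features.add('exp-myext-feature')

    def uisetup(ui):
        localrepo.featuresetupfuncs.add(featuresetup)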
375 375 @interfaceutil.implementer(repository.completelocalrepository)
376 376 class localrepository(object):
377 377
378 378 # obsolete experimental requirements:
379 379 # - manifestv2: An experimental new manifest format that allowed
380 380 # for stem compression of long paths. Experiment ended up not
381 381 # being successful (repository sizes went up due to worse delta
382 382 # chains), and the code was deleted in 4.6.
383 383 supportedformats = {
384 384 'revlogv1',
385 385 'generaldelta',
386 386 'treemanifest',
387 387 REVLOGV2_REQUIREMENT,
388 388 SPARSEREVLOG_REQUIREMENT,
389 389 }
390 390 _basesupported = supportedformats | {
391 391 'store',
392 392 'fncache',
393 393 'shared',
394 394 'relshared',
395 395 'dotencode',
396 396 'exp-sparse',
397 397 }
398 398 openerreqs = {
399 399 'revlogv1',
400 400 'generaldelta',
401 401 'treemanifest',
402 402 }
403 403
404 404 # list of prefixes for files which can be written without 'wlock'
405 405 # Extensions should extend this list when needed
406 406 _wlockfreeprefix = {
407 407 # We might consider requiring 'wlock' for the next
408 408 # two, but pretty much all the existing code assumes
409 409 # wlock is not needed so we keep them excluded for
410 410 # now.
411 411 'hgrc',
412 412 'requires',
413 413 # XXX cache is a complicated business; someone
414 414 # should investigate this in depth at some point
415 415 'cache/',
416 416 # XXX shouldn't dirstate be covered by the wlock?
417 417 'dirstate',
418 418 # XXX bisect was still a bit too messy at the time
419 419 # this changeset was introduced. Someone should fix
420 420 # the remaining bit and drop this line
421 421 'bisect.state',
422 422 }
423 423
424 424 def __init__(self, baseui, path, create=False, intents=None):
425 425 self.requirements = set()
426 426 self.filtername = None
427 427 # wvfs: rooted at the repository root, used to access the working copy
428 428 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
429 429 # vfs: rooted at .hg, used to access repo files outside of .hg/store
430 430 self.vfs = None
431 431 # svfs: usually rooted at .hg/store, used to access repository history
432 432 # If this is a shared repository, this vfs may point to another
433 433 # repository's .hg/store directory.
434 434 self.svfs = None
435 435 self.root = self.wvfs.base
436 436 self.path = self.wvfs.join(".hg")
437 437 self.origroot = path
438 438 # This is only used by context.workingctx.match in order to
439 439 # detect files in subrepos.
440 440 self.auditor = pathutil.pathauditor(
441 441 self.root, callback=self._checknested)
442 442 # This is only used by context.basectx.match in order to detect
443 443 # files in subrepos.
444 444 self.nofsauditor = pathutil.pathauditor(
445 445 self.root, callback=self._checknested, realfs=False, cached=True)
446 446 self.baseui = baseui
447 447 self.ui = baseui.copy()
448 448 self.ui.copy = baseui.copy # prevent copying repo configuration
449 449 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
450 450 if (self.ui.configbool('devel', 'all-warnings') or
451 451 self.ui.configbool('devel', 'check-locks')):
452 452 self.vfs.audit = self._getvfsward(self.vfs.audit)
453 453 # A list of callbacks to shape the phase if no data were found.
454 454 # Callbacks are in the form: func(repo, roots) --> processed root.
455 455 # This list is to be filled by extensions during repo setup.
456 456 self._phasedefaults = []
457 457 try:
458 458 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
459 459 self._loadextensions()
460 460 except IOError:
461 461 pass
462 462
463 463 if featuresetupfuncs:
464 464 self.supported = set(self._basesupported) # use private copy
465 465 extmods = set(m.__name__ for n, m
466 466 in extensions.extensions(self.ui))
467 467 for setupfunc in featuresetupfuncs:
468 468 if setupfunc.__module__ in extmods:
469 469 setupfunc(self.ui, self.supported)
470 470 else:
471 471 self.supported = self._basesupported
472 472 color.setup(self.ui)
473 473
474 474 # Add compression engines.
475 475 for name in util.compengines:
476 476 engine = util.compengines[name]
477 477 if engine.revlogheader():
478 478 self.supported.add('exp-compression-%s' % name)
479 479
480 480 if not self.vfs.isdir():
481 481 if create:
482 482 self.requirements = newreporequirements(self)
483 483
484 484 if not self.wvfs.exists():
485 485 self.wvfs.makedirs()
486 486 self.vfs.makedir(notindexed=True)
487 487
488 488 if 'store' in self.requirements:
489 489 self.vfs.mkdir("store")
490 490
491 491 # create an invalid changelog
492 492 self.vfs.append(
493 493 "00changelog.i",
494 494 '\0\0\0\2' # represents revlogv2
495 495 ' dummy changelog to prevent using the old repo layout'
496 496 )
497 497 else:
498 498 raise error.RepoError(_("repository %s not found") % path)
499 499 elif create:
500 500 raise error.RepoError(_("repository %s already exists") % path)
501 501 else:
502 502 try:
503 503 self.requirements = scmutil.readrequires(
504 504 self.vfs, self.supported)
505 505 except IOError as inst:
506 506 if inst.errno != errno.ENOENT:
507 507 raise
508 508
509 509 cachepath = self.vfs.join('cache')
510 510 self.sharedpath = self.path
511 511 try:
512 512 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
513 513 if 'relshared' in self.requirements:
514 514 sharedpath = self.vfs.join(sharedpath)
515 515 vfs = vfsmod.vfs(sharedpath, realpath=True)
516 516 cachepath = vfs.join('cache')
517 517 s = vfs.base
518 518 if not vfs.exists():
519 519 raise error.RepoError(
520 520 _('.hg/sharedpath points to nonexistent directory %s') % s)
521 521 self.sharedpath = s
522 522 except IOError as inst:
523 523 if inst.errno != errno.ENOENT:
524 524 raise
525 525
526 526 if 'exp-sparse' in self.requirements and not sparse.enabled:
527 527 raise error.RepoError(_('repository is using sparse feature but '
528 528 'sparse is not enabled; enable the '
529 529 '"sparse" extensions to access'))
530 530
531 531 self.store = store.store(
532 532 self.requirements, self.sharedpath,
533 533 lambda base: vfsmod.vfs(base, cacheaudited=True))
534 534 self.spath = self.store.path
535 535 self.svfs = self.store.vfs
536 536 self.sjoin = self.store.join
537 537 self.vfs.createmode = self.store.createmode
538 538 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
539 539 self.cachevfs.createmode = self.store.createmode
540 540 if (self.ui.configbool('devel', 'all-warnings') or
541 541 self.ui.configbool('devel', 'check-locks')):
542 542 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
543 543 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
544 544 else: # standard vfs
545 545 self.svfs.audit = self._getsvfsward(self.svfs.audit)
546 546 self._applyopenerreqs()
547 547 if create:
548 548 self._writerequirements()
549 549
550 550 self._dirstatevalidatewarned = False
551 551
552 552 self._branchcaches = {}
553 553 self._revbranchcache = None
554 554 self._filterpats = {}
555 555 self._datafilters = {}
556 556 self._transref = self._lockref = self._wlockref = None
557 557
558 558 # A cache for various files under .hg/ that tracks file changes,
559 559 # (used by the filecache decorator)
560 560 #
561 561 # Maps a property name to its util.filecacheentry
562 562 self._filecache = {}
563 563
564 564 # hold sets of revisions to be filtered
565 565 # should be cleared when something might have changed the filter value:
566 566 # - new changesets,
567 567 # - phase change,
568 568 # - new obsolescence marker,
569 569 # - working directory parent change,
570 570 # - bookmark changes
571 571 self.filteredrevcache = {}
572 572
573 573 # post-dirstate-status hooks
574 574 self._postdsstatus = []
575 575
576 576 # generic mapping between names and nodes
577 577 self.names = namespaces.namespaces()
578 578
579 579 # Key to signature value.
580 580 self._sparsesignaturecache = {}
581 581 # Signature to cached matcher instance.
582 582 self._sparsematchercache = {}
583 583
584 584 def _getvfsward(self, origfunc):
585 585 """build a ward for self.vfs"""
586 586 rref = weakref.ref(self)
587 587 def checkvfs(path, mode=None):
588 588 ret = origfunc(path, mode=mode)
589 589 repo = rref()
590 590 if (repo is None
591 591 or not util.safehasattr(repo, '_wlockref')
592 592 or not util.safehasattr(repo, '_lockref')):
593 593 return
594 594 if mode in (None, 'r', 'rb'):
595 595 return
596 596 if path.startswith(repo.path):
597 597 # truncate name relative to the repository (.hg)
598 598 path = path[len(repo.path) + 1:]
599 599 if path.startswith('cache/'):
600 600 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
601 601 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
602 602 if path.startswith('journal.'):
603 603 # journal is covered by 'lock'
604 604 if repo._currentlock(repo._lockref) is None:
605 605 repo.ui.develwarn('write with no lock: "%s"' % path,
606 606 stacklevel=2, config='check-locks')
607 607 elif repo._currentlock(repo._wlockref) is None:
608 608 # rest of vfs files are covered by 'wlock'
609 609 #
610 610 # exclude special files
611 611 for prefix in self._wlockfreeprefix:
612 612 if path.startswith(prefix):
613 613 return
614 614 repo.ui.develwarn('write with no wlock: "%s"' % path,
615 615 stacklevel=2, config='check-locks')
616 616 return ret
617 617 return checkvfs
618 618
619 619 def _getsvfsward(self, origfunc):
620 620 """build a ward for self.svfs"""
621 621 rref = weakref.ref(self)
622 622 def checksvfs(path, mode=None):
623 623 ret = origfunc(path, mode=mode)
624 624 repo = rref()
625 625 if repo is None or not util.safehasattr(repo, '_lockref'):
626 626 return
627 627 if mode in (None, 'r', 'rb'):
628 628 return
629 629 if path.startswith(repo.sharedpath):
630 630 # truncate name relative to the repository (.hg)
631 631 path = path[len(repo.sharedpath) + 1:]
632 632 if repo._currentlock(repo._lockref) is None:
633 633 repo.ui.develwarn('write with no lock: "%s"' % path,
634 634 stacklevel=3)
635 635 return ret
636 636 return checksvfs
637 637
638 638 def close(self):
639 639 self._writecaches()
640 640
641 641 def _loadextensions(self):
642 642 extensions.loadall(self.ui)
643 643
644 644 def _writecaches(self):
645 645 if self._revbranchcache:
646 646 self._revbranchcache.write()
647 647
648 648 def _restrictcapabilities(self, caps):
649 649 if self.ui.configbool('experimental', 'bundle2-advertise'):
650 650 caps = set(caps)
651 651 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
652 652 role='client'))
653 653 caps.add('bundle2=' + urlreq.quote(capsblob))
654 654 return caps
655 655
656 656 def _applyopenerreqs(self):
657 657 self.svfs.options = dict((r, 1) for r in self.requirements
658 658 if r in self.openerreqs)
659 659 # experimental config: format.chunkcachesize
660 660 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
661 661 if chunkcachesize is not None:
662 662 self.svfs.options['chunkcachesize'] = chunkcachesize
663 663 # experimental config: format.maxchainlen
664 664 maxchainlen = self.ui.configint('format', 'maxchainlen')
665 665 if maxchainlen is not None:
666 666 self.svfs.options['maxchainlen'] = maxchainlen
667 667 # experimental config: format.manifestcachesize
668 668 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
669 669 if manifestcachesize is not None:
670 670 self.svfs.options['manifestcachesize'] = manifestcachesize
671 671 deltabothparents = self.ui.configbool('storage',
672 672 'revlog.optimize-delta-parent-choice')
673 673 self.svfs.options['deltabothparents'] = deltabothparents
674 674 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
675 675 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
676 676 if 0 <= chainspan:
677 677 self.svfs.options['maxdeltachainspan'] = chainspan
678 678 mmapindexthreshold = self.ui.configbytes('experimental',
679 679 'mmapindexthreshold')
680 680 if mmapindexthreshold is not None:
681 681 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
682 682 withsparseread = self.ui.configbool('experimental', 'sparse-read')
683 683 srdensitythres = float(self.ui.config('experimental',
684 684 'sparse-read.density-threshold'))
685 685 srmingapsize = self.ui.configbytes('experimental',
686 686 'sparse-read.min-gap-size')
687 687 self.svfs.options['with-sparse-read'] = withsparseread
688 688 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
689 689 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
690 690 sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
691 691 self.svfs.options['sparse-revlog'] = sparserevlog
692 if sparserevlog:
693 self.svfs.options['generaldelta'] = True
692 694
693 695 for r in self.requirements:
694 696 if r.startswith('exp-compression-'):
695 697 self.svfs.options['compengine'] = r[len('exp-compression-'):]
696 698
697 699 # TODO move "revlogv2" to openerreqs once finalized.
698 700 if REVLOGV2_REQUIREMENT in self.requirements:
699 701 self.svfs.options['revlogv2'] = True
700 702
701 703 def _writerequirements(self):
702 704 scmutil.writerequires(self.vfs, self.requirements)
703 705
704 706 def _checknested(self, path):
705 707 """Determine if path is a legal nested repository."""
706 708 if not path.startswith(self.root):
707 709 return False
708 710 subpath = path[len(self.root) + 1:]
709 711 normsubpath = util.pconvert(subpath)
710 712
711 713 # XXX: Checking against the current working copy is wrong in
712 714 # the sense that it can reject things like
713 715 #
714 716 # $ hg cat -r 10 sub/x.txt
715 717 #
716 718 # if sub/ is no longer a subrepository in the working copy
717 719 # parent revision.
718 720 #
719 721 # However, it can of course also allow things that would have
720 722 # been rejected before, such as the above cat command if sub/
721 723 # is a subrepository now, but was a normal directory before.
722 724 # The old path auditor would have rejected by mistake since it
723 725 # panics when it sees sub/.hg/.
724 726 #
725 727 # All in all, checking against the working copy seems sensible
726 728 # since we want to prevent access to nested repositories on
727 729 # the filesystem *now*.
728 730 ctx = self[None]
729 731 parts = util.splitpath(subpath)
730 732 while parts:
731 733 prefix = '/'.join(parts)
732 734 if prefix in ctx.substate:
733 735 if prefix == normsubpath:
734 736 return True
735 737 else:
736 738 sub = ctx.sub(prefix)
737 739 return sub.checknested(subpath[len(prefix) + 1:])
738 740 else:
739 741 parts.pop()
740 742 return False
741 743
742 744 def peer(self):
743 745 return localpeer(self) # not cached to avoid reference cycle
744 746
745 747 def unfiltered(self):
746 748 """Return unfiltered version of the repository
747 749
748 750 Intended to be overwritten by filtered repo."""
749 751 return self
750 752
751 753 def filtered(self, name, visibilityexceptions=None):
752 754 """Return a filtered version of a repository"""
753 755 cls = repoview.newtype(self.unfiltered().__class__)
754 756 return cls(self, name, visibilityexceptions)
755 757
756 758 @repofilecache('bookmarks', 'bookmarks.current')
757 759 def _bookmarks(self):
758 760 return bookmarks.bmstore(self)
759 761
760 762 @property
761 763 def _activebookmark(self):
762 764 return self._bookmarks.active
763 765
764 766 # _phasesets depend on changelog. what we need is to call
765 767 # _phasecache.invalidate() if '00changelog.i' was changed, but it
766 768 # can't be easily expressed in the filecache mechanism.
767 769 @storecache('phaseroots', '00changelog.i')
768 770 def _phasecache(self):
769 771 return phases.phasecache(self, self._phasedefaults)
770 772
771 773 @storecache('obsstore')
772 774 def obsstore(self):
773 775 return obsolete.makestore(self.ui, self)
774 776
775 777 @storecache('00changelog.i')
776 778 def changelog(self):
777 779 return changelog.changelog(self.svfs,
778 780 trypending=txnutil.mayhavepending(self.root))
779 781
780 782 def _constructmanifest(self):
781 783 # This is a temporary function while we migrate from manifest to
782 784 # manifestlog. It allows bundlerepo and unionrepo to intercept the
783 785 # manifest creation.
784 786 return manifest.manifestrevlog(self.svfs)
785 787
786 788 @storecache('00manifest.i')
787 789 def manifestlog(self):
788 790 return manifest.manifestlog(self.svfs, self)
789 791
790 792 @repofilecache('dirstate')
791 793 def dirstate(self):
792 794 return self._makedirstate()
793 795
794 796 def _makedirstate(self):
795 797 """Extension point for wrapping the dirstate per-repo."""
796 798 sparsematchfn = lambda: sparse.matcher(self)
797 799
798 800 return dirstate.dirstate(self.vfs, self.ui, self.root,
799 801 self._dirstatevalidate, sparsematchfn)
800 802
801 803 def _dirstatevalidate(self, node):
802 804 try:
803 805 self.changelog.rev(node)
804 806 return node
805 807 except error.LookupError:
806 808 if not self._dirstatevalidatewarned:
807 809 self._dirstatevalidatewarned = True
808 810 self.ui.warn(_("warning: ignoring unknown"
809 811 " working parent %s!\n") % short(node))
810 812 return nullid
811 813
812 814 @repofilecache(narrowspec.FILENAME)
813 815 def narrowpats(self):
814 816 """matcher patterns for this repository's narrowspec
815 817
816 818 A tuple of (includes, excludes).
817 819 """
818 820 source = self
819 821 if self.shared():
820 822 from . import hg
821 823 source = hg.sharedreposource(self)
822 824 return narrowspec.load(source)
823 825
824 826 @repofilecache(narrowspec.FILENAME)
825 827 def _narrowmatch(self):
826 828 if changegroup.NARROW_REQUIREMENT not in self.requirements:
827 829 return matchmod.always(self.root, '')
828 830 include, exclude = self.narrowpats
829 831 return narrowspec.match(self.root, include=include, exclude=exclude)
830 832
831 833 # TODO(martinvonz): make this property-like instead?
832 834 def narrowmatch(self):
833 835 return self._narrowmatch
834 836
835 837 def setnarrowpats(self, newincludes, newexcludes):
836 838 target = self
837 839 if self.shared():
838 840 from . import hg
839 841 target = hg.sharedreposource(self)
840 842 narrowspec.save(target, newincludes, newexcludes)
841 843 self.invalidate(clearfilecache=True)
842 844
843 845 def __getitem__(self, changeid):
844 846 if changeid is None:
845 847 return context.workingctx(self)
846 848 if isinstance(changeid, context.basectx):
847 849 return changeid
848 850 if isinstance(changeid, slice):
849 851 # wdirrev isn't contiguous so the slice shouldn't include it
850 852 return [context.changectx(self, i)
851 853 for i in xrange(*changeid.indices(len(self)))
852 854 if i not in self.changelog.filteredrevs]
853 855 try:
854 856 return context.changectx(self, changeid)
855 857 except error.WdirUnsupported:
856 858 return context.workingctx(self)
857 859
858 860 def __contains__(self, changeid):
859 861 """True if the given changeid exists
860 862
861 863 error.LookupError is raised if an ambiguous node is specified.
862 864 """
863 865 try:
864 866 self[changeid]
865 867 return True
866 868 except error.RepoLookupError:
867 869 return False
868 870
869 871 def __nonzero__(self):
870 872 return True
871 873
872 874 __bool__ = __nonzero__
873 875
874 876 def __len__(self):
875 877 # no need to pay the cost of repoview.changelog
876 878 unfi = self.unfiltered()
877 879 return len(unfi.changelog)
878 880
879 881 def __iter__(self):
880 882 return iter(self.changelog)
881 883
882 884 def revs(self, expr, *args):
883 885 '''Find revisions matching a revset.
884 886
885 887 The revset is specified as a string ``expr`` that may contain
886 888 %-formatting to escape certain types. See ``revsetlang.formatspec``.
887 889
888 890 Revset aliases from the configuration are not expanded. To expand
889 891 user aliases, consider calling ``scmutil.revrange()`` or
890 892 ``repo.anyrevs([expr], user=True)``.
891 893
892 894 Returns a revset.abstractsmartset, which is a list-like interface
893 895 that contains integer revisions.
894 896 '''
895 897 expr = revsetlang.formatspec(expr, *args)
896 898 m = revset.match(None, expr)
897 899 return m(self)
898 900
899 901 def set(self, expr, *args):
900 902 '''Find revisions matching a revset and emit changectx instances.
901 903
902 904 This is a convenience wrapper around ``revs()`` that iterates the
903 905 result and is a generator of changectx instances.
904 906
905 907 Revset aliases from the configuration are not expanded. To expand
906 908 user aliases, consider calling ``scmutil.revrange()``.
907 909 '''
908 910 for r in self.revs(expr, *args):
909 911 yield self[r]
910 912
911 913 def anyrevs(self, specs, user=False, localalias=None):
912 914 '''Find revisions matching one of the given revsets.
913 915
914 916 Revset aliases from the configuration are not expanded by default. To
915 917 expand user aliases, specify ``user=True``. To provide some local
916 918 definitions overriding user aliases, set ``localalias`` to
917 919 ``{name: definitionstring}``.
918 920 '''
919 921 if user:
920 922 m = revset.matchany(self.ui, specs,
921 923 lookup=revset.lookupfn(self),
922 924 localalias=localalias)
923 925 else:
924 926 m = revset.matchany(None, specs, localalias=localalias)
925 927 return m(self)
926 928
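A sketch of the three query helpers above in use; `%n`, `%d` and friends are the typed escapes handled by revsetlang.formatspec (`node` is a hypothetical binary node id, `mybranchheads` a hypothetical revset alias):

    # smartset of integer revisions; no alias expansion
    revs = repo.revs('ancestors(%n) and not public()', node)

    # same query, yielding changectx instances lazily
    for ctx in repo.set('ancestors(%n) and not public()', node):
        pass

    # also expand the user's [revsetalias] definitions
    revs = repo.anyrevs(['mybranchheads'], user=True)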
927 929 def url(self):
928 930 return 'file:' + self.root
929 931
930 932 def hook(self, name, throw=False, **args):
931 933 """Call a hook, passing this repo instance.
932 934
933 935 This is a convenience method to aid invoking hooks. Extensions likely
934 936 won't call this unless they have registered a custom hook or are
935 937 replacing code that is expected to call a hook.
936 938 """
937 939 return hook.hook(self.ui, self, name, throw, **args)
938 940
939 941 @filteredpropertycache
940 942 def _tagscache(self):
941 943 '''Returns a tagscache object that contains various tag-related
942 944 caches.'''
943 945
944 946 # This simplifies its cache management by having one decorated
945 947 # function (this one) and the rest simply fetch things from it.
946 948 class tagscache(object):
947 949 def __init__(self):
948 950 # These two define the set of tags for this repository. tags
949 951 # maps tag name to node; tagtypes maps tag name to 'global' or
950 952 # 'local'. (Global tags are defined by .hgtags across all
951 953 # heads, and local tags are defined in .hg/localtags.)
952 954 # They constitute the in-memory cache of tags.
953 955 self.tags = self.tagtypes = None
954 956
955 957 self.nodetagscache = self.tagslist = None
956 958
957 959 cache = tagscache()
958 960 cache.tags, cache.tagtypes = self._findtags()
959 961
960 962 return cache
961 963
962 964 def tags(self):
963 965 '''return a mapping of tag to node'''
964 966 t = {}
965 967 if self.changelog.filteredrevs:
966 968 tags, tt = self._findtags()
967 969 else:
968 970 tags = self._tagscache.tags
969 971 for k, v in tags.iteritems():
970 972 try:
971 973 # ignore tags to unknown nodes
972 974 self.changelog.rev(v)
973 975 t[k] = v
974 976 except (error.LookupError, ValueError):
975 977 pass
976 978 return t
977 979
978 980 def _findtags(self):
979 981 '''Do the hard work of finding tags. Return a pair of dicts
980 982 (tags, tagtypes) where tags maps tag name to node, and tagtypes
981 983 maps tag name to a string like \'global\' or \'local\'.
982 984 Subclasses or extensions are free to add their own tags, but
983 985 should be aware that the returned dicts will be retained for the
984 986 duration of the localrepo object.'''
985 987
986 988 # XXX what tagtype should subclasses/extensions use? Currently
987 989 # mq and bookmarks add tags, but do not set the tagtype at all.
988 990 # Should each extension invent its own tag type? Should there
989 991 # be one tagtype for all such "virtual" tags? Or is the status
990 992 # quo fine?
991 993
992 994
993 995 # map tag name to (node, hist)
994 996 alltags = tagsmod.findglobaltags(self.ui, self)
995 997 # map tag name to tag type
996 998 tagtypes = dict((tag, 'global') for tag in alltags)
997 999
998 1000 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
999 1001
1000 1002 # Build the return dicts. Have to re-encode tag names because
1001 1003 # the tags module always uses UTF-8 (in order not to lose info
1002 1004 # writing to the cache), but the rest of Mercurial wants them in
1003 1005 # local encoding.
1004 1006 tags = {}
1005 1007 for (name, (node, hist)) in alltags.iteritems():
1006 1008 if node != nullid:
1007 1009 tags[encoding.tolocal(name)] = node
1008 1010 tags['tip'] = self.changelog.tip()
1009 1011 tagtypes = dict([(encoding.tolocal(name), value)
1010 1012 for (name, value) in tagtypes.iteritems()])
1011 1013 return (tags, tagtypes)
1012 1014
1013 1015 def tagtype(self, tagname):
1014 1016 '''
1015 1017 return the type of the given tag. result can be:
1016 1018
1017 1019 'local' : a local tag
1018 1020 'global' : a global tag
1019 1021 None : tag does not exist
1020 1022 '''
1021 1023
1022 1024 return self._tagscache.tagtypes.get(tagname)
1023 1025
1024 1026 def tagslist(self):
1025 1027 '''return a list of tags ordered by revision'''
1026 1028 if not self._tagscache.tagslist:
1027 1029 l = []
1028 1030 for t, n in self.tags().iteritems():
1029 1031 l.append((self.changelog.rev(n), t, n))
1030 1032 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1031 1033
1032 1034 return self._tagscache.tagslist
1033 1035
1034 1036 def nodetags(self, node):
1035 1037 '''return the tags associated with a node'''
1036 1038 if not self._tagscache.nodetagscache:
1037 1039 nodetagscache = {}
1038 1040 for t, n in self._tagscache.tags.iteritems():
1039 1041 nodetagscache.setdefault(n, []).append(t)
1040 1042 for tags in nodetagscache.itervalues():
1041 1043 tags.sort()
1042 1044 self._tagscache.nodetagscache = nodetagscache
1043 1045 return self._tagscache.nodetagscache.get(node, [])
1044 1046
1045 1047 def nodebookmarks(self, node):
1046 1048 """return the list of bookmarks pointing to the specified node"""
1047 1049 return self._bookmarks.names(node)
1048 1050
1049 1051 def branchmap(self):
1050 1052 '''returns a dictionary {branch: [branchheads]} with branchheads
1051 1053 ordered by increasing revision number'''
1052 1054 branchmap.updatecache(self)
1053 1055 return self._branchcaches[self.filtername]
1054 1056
1055 1057 @unfilteredmethod
1056 1058 def revbranchcache(self):
1057 1059 if not self._revbranchcache:
1058 1060 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1059 1061 return self._revbranchcache
1060 1062
1061 1063 def branchtip(self, branch, ignoremissing=False):
1062 1064 '''return the tip node for a given branch
1063 1065
1064 1066 If ignoremissing is True, then this method will not raise an error.
1065 1067 This is helpful for callers that only expect None for a missing branch
1066 1068 (e.g. namespace).
1067 1069
1068 1070 '''
1069 1071 try:
1070 1072 return self.branchmap().branchtip(branch)
1071 1073 except KeyError:
1072 1074 if not ignoremissing:
1073 1075 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1074 1076 else:
1075 1077 pass
1076 1078
1077 1079 def lookup(self, key):
1078 1080 return scmutil.revsymbol(self, key).node()
1079 1081
1080 1082 def lookupbranch(self, key):
1081 1083 if key in self.branchmap():
1082 1084 return key
1083 1085
1084 1086 return scmutil.revsymbol(self, key).branch()
1085 1087
1086 1088 def known(self, nodes):
1087 1089 cl = self.changelog
1088 1090 nm = cl.nodemap
1089 1091 filtered = cl.filteredrevs
1090 1092 result = []
1091 1093 for n in nodes:
1092 1094 r = nm.get(n)
1093 1095 resp = not (r is None or r in filtered)
1094 1096 result.append(resp)
1095 1097 return result
1096 1098
1097 1099 def local(self):
1098 1100 return self
1099 1101
1100 1102 def publishing(self):
1101 1103 # it's safe (and desirable) to trust the publish flag unconditionally
1102 1104 # so that we don't finalize changes shared between users via ssh or nfs
1103 1105 return self.ui.configbool('phases', 'publish', untrusted=True)
1104 1106
1105 1107 def cancopy(self):
1106 1108 # so statichttprepo's override of local() works
1107 1109 if not self.local():
1108 1110 return False
1109 1111 if not self.publishing():
1110 1112 return True
1111 1113 # if publishing we can't copy if there is filtered content
1112 1114 return not self.filtered('visible').changelog.filteredrevs
1113 1115
1114 1116 def shared(self):
1115 1117 '''the type of shared repository (None if not shared)'''
1116 1118 if self.sharedpath != self.path:
1117 1119 return 'store'
1118 1120 return None
1119 1121
1120 1122 def wjoin(self, f, *insidef):
1121 1123 return self.vfs.reljoin(self.root, f, *insidef)
1122 1124
1123 1125 def file(self, f):
1124 1126 if f[0] == '/':
1125 1127 f = f[1:]
1126 1128 return filelog.filelog(self.svfs, f)
1127 1129
1128 1130 def setparents(self, p1, p2=nullid):
1129 1131 with self.dirstate.parentchange():
1130 1132 copies = self.dirstate.setparents(p1, p2)
1131 1133 pctx = self[p1]
1132 1134 if copies:
1133 1135 # Adjust copy records; the dirstate cannot do it, as it
1134 1136 # requires access to the parents' manifests. Preserve them
1135 1137 # only for entries added to the first parent.
1136 1138 for f in copies:
1137 1139 if f not in pctx and copies[f] in pctx:
1138 1140 self.dirstate.copy(copies[f], f)
1139 1141 if p2 == nullid:
1140 1142 for f, s in sorted(self.dirstate.copies().items()):
1141 1143 if f not in pctx and s not in pctx:
1142 1144 self.dirstate.copy(None, f)
1143 1145
1144 1146 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1145 1147 """changeid can be a changeset revision, node, or tag.
1146 1148 fileid can be a file revision or node."""
1147 1149 return context.filectx(self, path, changeid, fileid,
1148 1150 changectx=changectx)
1149 1151
1150 1152 def getcwd(self):
1151 1153 return self.dirstate.getcwd()
1152 1154
1153 1155 def pathto(self, f, cwd=None):
1154 1156 return self.dirstate.pathto(f, cwd)
1155 1157
1156 1158 def _loadfilter(self, filter):
1157 1159 if filter not in self._filterpats:
1158 1160 l = []
1159 1161 for pat, cmd in self.ui.configitems(filter):
1160 1162 if cmd == '!':
1161 1163 continue
1162 1164 mf = matchmod.match(self.root, '', [pat])
1163 1165 fn = None
1164 1166 params = cmd
1165 1167 for name, filterfn in self._datafilters.iteritems():
1166 1168 if cmd.startswith(name):
1167 1169 fn = filterfn
1168 1170 params = cmd[len(name):].lstrip()
1169 1171 break
1170 1172 if not fn:
1171 1173 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1172 1174 # Wrap old filters not supporting keyword arguments
1173 1175 if not pycompat.getargspec(fn)[2]:
1174 1176 oldfn = fn
1175 1177 fn = lambda s, c, **kwargs: oldfn(s, c)
1176 1178 l.append((mf, fn, params))
1177 1179 self._filterpats[filter] = l
1178 1180 return self._filterpats[filter]
1179 1181
1180 1182 def _filter(self, filterpats, filename, data):
1181 1183 for mf, fn, cmd in filterpats:
1182 1184 if mf(filename):
1183 1185 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1184 1186 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1185 1187 break
1186 1188
1187 1189 return data
1188 1190
1189 1191 @unfilteredpropertycache
1190 1192 def _encodefilterpats(self):
1191 1193 return self._loadfilter('encode')
1192 1194
1193 1195 @unfilteredpropertycache
1194 1196 def _decodefilterpats(self):
1195 1197 return self._loadfilter('decode')
1196 1198
1197 1199 def adddatafilter(self, name, filter):
1198 1200 self._datafilters[name] = filter
1199 1201
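A sketch of wiring a custom data filter through the hook above. _loadfilter() matches the registered name as a prefix of the command configured under [encode]/[decode] and calls it as fn(data, params, ui=..., repo=..., filename=...); the filter name and behavior here are hypothetical:

    def crlffilter(s, params, ui=None, repo=None, filename=None, **kwargs):
        return s.replace('\r\n', '\n')

    repo.adddatafilter('crlf:', crlffilter)
    # an hgrc would then map patterns to it, e.g.
    # [encode]
    # **.txt = crlf: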
1200 1202 def wread(self, filename):
1201 1203 if self.wvfs.islink(filename):
1202 1204 data = self.wvfs.readlink(filename)
1203 1205 else:
1204 1206 data = self.wvfs.read(filename)
1205 1207 return self._filter(self._encodefilterpats, filename, data)
1206 1208
1207 1209 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1208 1210 """write ``data`` into ``filename`` in the working directory
1209 1211
1210 1212 This returns length of written (maybe decoded) data.
1211 1213 """
1212 1214 data = self._filter(self._decodefilterpats, filename, data)
1213 1215 if 'l' in flags:
1214 1216 self.wvfs.symlink(data, filename)
1215 1217 else:
1216 1218 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1217 1219 **kwargs)
1218 1220 if 'x' in flags:
1219 1221 self.wvfs.setflags(filename, False, True)
1220 1222 else:
1221 1223 self.wvfs.setflags(filename, False, False)
1222 1224 return len(data)
1223 1225
1224 1226 def wwritedata(self, filename, data):
1225 1227 return self._filter(self._decodefilterpats, filename, data)
1226 1228
1227 1229 def currenttransaction(self):
1228 1230 """return the current transaction or None if non exists"""
1229 1231 if self._transref:
1230 1232 tr = self._transref()
1231 1233 else:
1232 1234 tr = None
1233 1235
1234 1236 if tr and tr.running():
1235 1237 return tr
1236 1238 return None
1237 1239
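transaction() below enforces the lock-before-transaction ordering checked by the devel knobs; a sketch of the usual driving pattern (the transaction name is hypothetical):

    with repo.wlock(), repo.lock(), repo.transaction('my-operation') as tr:
        # nested repo.transaction() calls return tr.nest() on the same
        # transaction; it closes on normal exit and aborts on exception
        ...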
1238 1240 def transaction(self, desc, report=None):
1239 1241 if (self.ui.configbool('devel', 'all-warnings')
1240 1242 or self.ui.configbool('devel', 'check-locks')):
1241 1243 if self._currentlock(self._lockref) is None:
1242 1244 raise error.ProgrammingError('transaction requires locking')
1243 1245 tr = self.currenttransaction()
1244 1246 if tr is not None:
1245 1247 return tr.nest(name=desc)
1246 1248
1247 1249 # abort here if the journal already exists
1248 1250 if self.svfs.exists("journal"):
1249 1251 raise error.RepoError(
1250 1252 _("abandoned transaction found"),
1251 1253 hint=_("run 'hg recover' to clean up transaction"))
1252 1254
1253 1255 idbase = "%.40f#%f" % (random.random(), time.time())
1254 1256 ha = hex(hashlib.sha1(idbase).digest())
1255 1257 txnid = 'TXN:' + ha
1256 1258 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1257 1259
1258 1260 self._writejournal(desc)
1259 1261 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1260 1262 if report:
1261 1263 rp = report
1262 1264 else:
1263 1265 rp = self.ui.warn
1264 1266 vfsmap = {'plain': self.vfs} # root of .hg/
1265 1267 # we must avoid cyclic reference between repo and transaction.
1266 1268 reporef = weakref.ref(self)
1267 1269 # Code to track tag movement
1268 1270 #
1269 1271 # Since tags are all handled as file content, it is actually quite hard
1270 1272 # to track this movement from a code perspective. So we fall back to
1271 1273 # tracking at the repository level. One could envision tracking changes
1272 1274 # to the '.hgtags' file through changegroup apply but that fails to
1273 1275 # cope with cases where a transaction exposes new heads without a
1274 1276 # changegroup being involved (eg: phase movement).
1275 1277 #
1276 1278 # For now, we gate the feature behind a flag since this likely comes
1277 1279 # with performance impacts. The current code runs more often than needed
1278 1280 # and does not use caches as much as it could. The current focus is on
1279 1281 # the behavior of the feature so we disable it by default. The flag
1280 1282 # will be removed when we are happy with the performance impact.
1281 1283 #
1282 1284 # Once this feature is no longer experimental move the following
1283 1285 # documentation to the appropriate help section:
1284 1286 #
1285 1287 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1286 1288 # tags (new or changed or deleted tags). In addition the details of
1287 1289 # these changes are made available in a file at:
1288 1290 # ``REPOROOT/.hg/changes/tags.changes``.
1289 1291 # Make sure you check for HG_TAG_MOVED before reading that file as it
1290 1292 # might exist from a previous transaction even if no tags were touched
1291 1293 # in this one. Changes are recorded in a line-based format::
1292 1294 #
1293 1295 # <action> <hex-node> <tag-name>\n
1294 1296 #
1295 1297 # Actions are defined as follows:
1296 1298 # "-R": tag is removed,
1297 1299 # "+A": tag is added,
1298 1300 # "-M": tag is moved (old value),
1299 1301 # "+M": tag is moved (new value),
1300 1302 tracktags = lambda x: None
1301 1303 # experimental config: experimental.hook-track-tags
1302 1304 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1303 1305 if desc != 'strip' and shouldtracktags:
1304 1306 oldheads = self.changelog.headrevs()
1305 1307 def tracktags(tr2):
1306 1308 repo = reporef()
1307 1309 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1308 1310 newheads = repo.changelog.headrevs()
1309 1311 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1310 1312 # notes: we compare lists here.
1311 1313 # As we do it only once, building a set would not be cheaper
1312 1314 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1313 1315 if changes:
1314 1316 tr2.hookargs['tag_moved'] = '1'
1315 1317 with repo.vfs('changes/tags.changes', 'w',
1316 1318 atomictemp=True) as changesfile:
1317 1319 # note: we do not register the file with the transaction
1318 1320 # because we need it to still exist when the transaction
1319 1321 # is closed (for txnclose hooks)
1320 1322 tagsmod.writediff(changesfile, changes)
1321 1323 def validate(tr2):
1322 1324 """will run pre-closing hooks"""
1323 1325 # XXX the transaction API is a bit lacking here so we take a hacky
1324 1326 # path for now
1325 1327 #
1326 1328 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
1327 1329 # dict is copied before these run. In addition we needs the data
1328 1330 # available to in memory hooks too.
1329 1331 #
1330 1332 # Moreover, we also need to make sure this runs before txnclose
1331 1333 # hooks and there is no "pending" mechanism that would execute
1332 1334 # logic only if hooks are about to run.
1333 1335 #
1334 1336 # Fixing this limitation of the transaction is also needed to track
1335 1337 # other families of changes (bookmarks, phases, obsolescence).
1336 1338 #
1337 1339 # This will have to be fixed before we remove the experimental
1338 1340 # gating.
1339 1341 tracktags(tr2)
1340 1342 repo = reporef()
1341 1343 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1342 1344 scmutil.enforcesinglehead(repo, tr2, desc)
1343 1345 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1344 1346 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1345 1347 args = tr.hookargs.copy()
1346 1348 args.update(bookmarks.preparehookargs(name, old, new))
1347 1349 repo.hook('pretxnclose-bookmark', throw=True,
1348 1350 txnname=desc,
1349 1351 **pycompat.strkwargs(args))
1350 1352 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1351 1353 cl = repo.unfiltered().changelog
1352 1354 for rev, (old, new) in tr.changes['phases'].items():
1353 1355 args = tr.hookargs.copy()
1354 1356 node = hex(cl.node(rev))
1355 1357 args.update(phases.preparehookargs(node, old, new))
1356 1358 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1357 1359 **pycompat.strkwargs(args))
1358 1360
1359 1361 repo.hook('pretxnclose', throw=True,
1360 1362 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1361 1363 def releasefn(tr, success):
1362 1364 repo = reporef()
1363 1365 if success:
1364 1366 # this should be explicitly invoked here, because
1365 1367 # in-memory changes aren't written out when closing the
1366 1368 # transaction if tr.addfilegenerator (via
1367 1369 # dirstate.write or so) wasn't invoked while the
1368 1370 # transaction was running
1369 1371 repo.dirstate.write(None)
1370 1372 else:
1371 1373 # discard all changes (including ones already written
1372 1374 # out) in this transaction
1373 1375 repo.dirstate.restorebackup(None, 'journal.dirstate')
1374 1376
1375 1377 repo.invalidate(clearfilecache=True)
1376 1378
1377 1379 tr = transaction.transaction(rp, self.svfs, vfsmap,
1378 1380 "journal",
1379 1381 "undo",
1380 1382 aftertrans(renames),
1381 1383 self.store.createmode,
1382 1384 validator=validate,
1383 1385 releasefn=releasefn,
1384 1386 checkambigfiles=_cachedfiles,
1385 1387 name=desc)
1386 1388 tr.changes['revs'] = xrange(0, 0)
1387 1389 tr.changes['obsmarkers'] = set()
1388 1390 tr.changes['phases'] = {}
1389 1391 tr.changes['bookmarks'] = {}
1390 1392
1391 1393 tr.hookargs['txnid'] = txnid
1392 1394 # note: writing the fncache only during finalize means that the file is
1393 1395 # outdated when running hooks. As fncache is used for streaming clone,
1394 1396 # this is not expected to break anything that happens during the hooks.
1395 1397 tr.addfinalize('flush-fncache', self.store.write)
1396 1398 def txnclosehook(tr2):
1397 1399 """To be run if transaction is successful, will schedule a hook run
1398 1400 """
1399 1401 # Don't reference tr2 in hook() so we don't hold a reference.
1400 1402 # This reduces memory consumption when there are multiple
1401 1403 # transactions per lock. This can likely go away if issue5045
1402 1404 # fixes the function accumulation.
1403 1405 hookargs = tr2.hookargs
1404 1406
1405 1407 def hookfunc():
1406 1408 repo = reporef()
1407 1409 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1408 1410 bmchanges = sorted(tr.changes['bookmarks'].items())
1409 1411 for name, (old, new) in bmchanges:
1410 1412 args = tr.hookargs.copy()
1411 1413 args.update(bookmarks.preparehookargs(name, old, new))
1412 1414 repo.hook('txnclose-bookmark', throw=False,
1413 1415 txnname=desc, **pycompat.strkwargs(args))
1414 1416
1415 1417 if hook.hashook(repo.ui, 'txnclose-phase'):
1416 1418 cl = repo.unfiltered().changelog
1417 1419 phasemv = sorted(tr.changes['phases'].items())
1418 1420 for rev, (old, new) in phasemv:
1419 1421 args = tr.hookargs.copy()
1420 1422 node = hex(cl.node(rev))
1421 1423 args.update(phases.preparehookargs(node, old, new))
1422 1424 repo.hook('txnclose-phase', throw=False, txnname=desc,
1423 1425 **pycompat.strkwargs(args))
1424 1426
1425 1427 repo.hook('txnclose', throw=False, txnname=desc,
1426 1428 **pycompat.strkwargs(hookargs))
1427 1429 reporef()._afterlock(hookfunc)
1428 1430 tr.addfinalize('txnclose-hook', txnclosehook)
1429 1431 # Include a leading "-" to make it happen before the transaction summary
1430 1432 # reports registered via scmutil.registersummarycallback() whose names
1431 1433 # are 00-txnreport etc. That way, the caches will be warm when the
1432 1434 # callbacks run.
1433 1435 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1434 1436 def txnaborthook(tr2):
1435 1437 """To be run if transaction is aborted
1436 1438 """
1437 1439 reporef().hook('txnabort', throw=False, txnname=desc,
1438 1440 **pycompat.strkwargs(tr2.hookargs))
1439 1441 tr.addabort('txnabort-hook', txnaborthook)
1440 1442 # avoid eager cache invalidation. in-memory data should be identical
1441 1443 # to stored data if transaction has no error.
1442 1444 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1443 1445 self._transref = weakref.ref(tr)
1444 1446 scmutil.registersummarycallback(self, tr, desc)
1445 1447 return tr
1446 1448
1447 1449 def _journalfiles(self):
1448 1450 return ((self.svfs, 'journal'),
1449 1451 (self.vfs, 'journal.dirstate'),
1450 1452 (self.vfs, 'journal.branch'),
1451 1453 (self.vfs, 'journal.desc'),
1452 1454 (self.vfs, 'journal.bookmarks'),
1453 1455 (self.svfs, 'journal.phaseroots'))
1454 1456
1455 1457 def undofiles(self):
1456 1458 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1457 1459
1458 1460 @unfilteredmethod
1459 1461 def _writejournal(self, desc):
1460 1462 self.dirstate.savebackup(None, 'journal.dirstate')
1461 1463 self.vfs.write("journal.branch",
1462 1464 encoding.fromlocal(self.dirstate.branch()))
1463 1465 self.vfs.write("journal.desc",
1464 1466 "%d\n%s\n" % (len(self), desc))
1465 1467 self.vfs.write("journal.bookmarks",
1466 1468 self.vfs.tryread("bookmarks"))
1467 1469 self.svfs.write("journal.phaseroots",
1468 1470 self.svfs.tryread("phaseroots"))
1469 1471
1470 1472 def recover(self):
1471 1473 with self.lock():
1472 1474 if self.svfs.exists("journal"):
1473 1475 self.ui.status(_("rolling back interrupted transaction\n"))
1474 1476 vfsmap = {'': self.svfs,
1475 1477 'plain': self.vfs,}
1476 1478 transaction.rollback(self.svfs, vfsmap, "journal",
1477 1479 self.ui.warn,
1478 1480 checkambigfiles=_cachedfiles)
1479 1481 self.invalidate()
1480 1482 return True
1481 1483 else:
1482 1484 self.ui.warn(_("no interrupted transaction available\n"))
1483 1485 return False
1484 1486
    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction has closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['revs']:
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

    def invalidatecaches(self):
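        # Drop the in-memory caches (tags, branch heads, filtered revisions,
        # obsolescence caches, sparse signatures) so they are rebuilt from
        # disk on next access.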

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
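        # Illustrative use (hypothetical callback):
        #
        #   def callback():
        #       repo.ui.status('fully unlocked\n')
        #   repo._afterlock(callback)
        #
        # If no lock is currently held, the callback runs immediately.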
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
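        # Illustrative acquisition order (a sketch; locks are re-entrant and
        # are released in the reverse order of acquisition):
        #
        #   wlock = repo.wlock()
        #   lock = repo.lock()
        #   try:
        #       ...
        #   finally:
        #       release(lock, wlock)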
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """
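        # Returns the new filelog node, or the first parent's node when the
        # file content, flags and copy metadata are unchanged (in which case
        # no new filelog revision is written).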

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
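        # Illustrative call (hypothetical values):
        #
        #   node = repo.commit(text='fix a bug', user='alice <a@example.org>')
        #
        # Returns the node of the new changeset, or None when there was
        # nothing to commit and empty commits are not allowed.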
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """
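        # The write is bottom-up: filelogs first (via _filecommit), then the
        # manifest, then the changelog entry, all within a single 'commit'
        # transaction so an error leaves no partially committed state behind.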

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the phase boundary does not alter the parent
                # changesets; if a parent already has a higher phase, the
                # resulting phase will be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the
        wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure it adds to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
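        # For each node, follow first parents until a merge or a root is
        # reached, and report a (start, end, p1-of-end, p2-of-end) tuple for
        # that linear segment (a helper for the legacy wire protocol).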
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
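        # For each (top, bottom) pair, walk first parents from top towards
        # bottom, sampling nodes at exponentially growing distances
        # (1, 2, 4, ...); the legacy discovery protocol uses these samples
        # to narrow down the common ancestor.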
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of a pushop with repo, remote,
        outgoing methods, which are called before pushing changesets.
        """
        return util.hooks()

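    # pushkey is a generic key/value update protocol (used, for example, for
    # bookmarks and phases). The 'prepushkey' hook can veto an update by
    # raising a HookAbort, which is reported to the user and makes pushkey()
    # return False.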
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create, intents=None):
    return localrepository(ui, util.urllocalpath(path), create,
                           intents=intents)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
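    # For example, with stock defaults this is expected to yield
    # {'revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta'}
    # (assuming format.usegeneraldelta and the format.use* options are left
    # at their defaults).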
    ui = repo.ui
    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    # experimental config: format.sparse-revlog
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)

    return requirements