cachevfs: add a vfs dedicated to cache...
Boris Feld - r33533:4133c0b0 default
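The patch itself is two added lines in the hunk below: `localrepository.__init__` gains a `cachevfs` attribute rooted at `.hg/cache`, reusing the store's createmode. A minimal sketch of the intended access pattern (not from the patch; the repository path and cache-file name are hypothetical):

from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui.load(), '/path/to/repo')  # hypothetical path
# read a cache file; tryread() returns '' if it does not exist yet
data = repo.cachevfs.tryread('somecache')                # hypothetical name
# write it back; this lands in .hg/cache/somecache with store permissions
repo.cachevfs.write('somecache', data)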
@@ -1,2247 +1,2249 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 )
24 24 from . import (
25 25 bookmarks,
26 26 branchmap,
27 27 bundle2,
28 28 changegroup,
29 29 changelog,
30 30 color,
31 31 context,
32 32 dirstate,
33 33 dirstateguard,
34 34 encoding,
35 35 error,
36 36 exchange,
37 37 extensions,
38 38 filelog,
39 39 hook,
40 40 lock as lockmod,
41 41 manifest,
42 42 match as matchmod,
43 43 merge as mergemod,
44 44 mergeutil,
45 45 namespaces,
46 46 obsolete,
47 47 pathutil,
48 48 peer,
49 49 phases,
50 50 pushkey,
51 51 pycompat,
52 52 repoview,
53 53 revset,
54 54 revsetlang,
55 55 scmutil,
56 56 sparse,
57 57 store,
58 58 subrepo,
59 59 tags as tagsmod,
60 60 transaction,
61 61 txnutil,
62 62 util,
63 63 vfs as vfsmod,
64 64 )
65 65
66 66 release = lockmod.release
67 67 urlerr = util.urlerr
68 68 urlreq = util.urlreq
69 69
70 70 # set of (path, vfs-location) tuples. vfs-location is:
71 71 # - 'plain' for vfs relative paths
72 72 # - '' for svfs relative paths
73 73 _cachedfiles = set()
74 74
75 75 class _basefilecache(scmutil.filecache):
76 76 """All filecache usage on repo are done for logic that should be unfiltered
77 77 """
78 78 def __get__(self, repo, type=None):
79 79 if repo is None:
80 80 return self
81 81 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
82 82 def __set__(self, repo, value):
83 83 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
84 84 def __delete__(self, repo):
85 85 return super(_basefilecache, self).__delete__(repo.unfiltered())
86 86
87 87 class repofilecache(_basefilecache):
88 88 """filecache for files in .hg but outside of .hg/store"""
89 89 def __init__(self, *paths):
90 90 super(repofilecache, self).__init__(*paths)
91 91 for path in paths:
92 92 _cachedfiles.add((path, 'plain'))
93 93
94 94 def join(self, obj, fname):
95 95 return obj.vfs.join(fname)
96 96
97 97 class storecache(_basefilecache):
98 98 """filecache for files in the store"""
99 99 def __init__(self, *paths):
100 100 super(storecache, self).__init__(*paths)
101 101 for path in paths:
102 102 _cachedfiles.add((path, ''))
103 103
104 104 def join(self, obj, fname):
105 105 return obj.sjoin(fname)
106 106
107 107 def isfilecached(repo, name):
108 108 """check if a repo has already cached "name" filecache-ed property
109 109
110 110 This returns (cachedobj-or-None, iscached) tuple.
111 111 """
112 112 cacheentry = repo.unfiltered()._filecache.get(name, None)
113 113 if not cacheentry:
114 114 return None, False
115 115 return cacheentry.obj, True
116 116
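A quick sketch of how the cache machinery above behaves at runtime, assuming a repository opened at a hypothetical path (`_bookmarks` is a real repofilecache-ed property defined later in this file):

from mercurial import hg, localrepo, ui as uimod

repo = hg.repository(uimod.ui.load(), '/path/to/repo')    # hypothetical path
obj, cached = localrepo.isfilecached(repo, '_bookmarks')  # likely (None, False)
repo._bookmarks              # first access reads .hg/bookmarks and caches it
obj, cached = localrepo.isfilecached(repo, '_bookmarks')  # now (bmstore, True)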
117 117 class unfilteredpropertycache(util.propertycache):
118 118 """propertycache that apply to unfiltered repo only"""
119 119
120 120 def __get__(self, repo, type=None):
121 121 unfi = repo.unfiltered()
122 122 if unfi is repo:
123 123 return super(unfilteredpropertycache, self).__get__(unfi)
124 124 return getattr(unfi, self.name)
125 125
126 126 class filteredpropertycache(util.propertycache):
127 127 """propertycache that must take filtering in account"""
128 128
129 129 def cachevalue(self, obj, value):
130 130 object.__setattr__(obj, self.name, value)
131 131
132 132
133 133 def hasunfilteredcache(repo, name):
134 134 """check if a repo has an unfilteredpropertycache value for <name>"""
135 135 return name in vars(repo.unfiltered())
136 136
137 137 def unfilteredmethod(orig):
138 138 """decorate method that always need to be run on unfiltered version"""
139 139 def wrapper(repo, *args, **kwargs):
140 140 return orig(repo.unfiltered(), *args, **kwargs)
141 141 return wrapper
142 142
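For illustration, the decorator also works on a plain function taking a repo as its first argument (`countallrevs` is hypothetical, not part of this file):

from mercurial.localrepo import unfilteredmethod

@unfilteredmethod
def countallrevs(repo):
    # the wrapper passes repo.unfiltered(), so hidden changesets count too
    return len(repo)

Called as `countallrevs(repo.filtered('visible'))`, the body still sees the unfiltered repository.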
143 143 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
144 144 'unbundle'}
145 145 legacycaps = moderncaps.union({'changegroupsubset'})
146 146
147 147 class localpeer(peer.peerrepository):
148 148 '''peer for a local repo; reflects only the most recent API'''
149 149
150 150 def __init__(self, repo, caps=None):
151 151 if caps is None:
152 152 caps = moderncaps.copy()
153 153 peer.peerrepository.__init__(self)
154 154 self._repo = repo.filtered('served')
155 155 self.ui = repo.ui
156 156 self._caps = repo._restrictcapabilities(caps)
157 157 self.requirements = repo.requirements
158 158 self.supportedformats = repo.supportedformats
159 159
160 160 def close(self):
161 161 self._repo.close()
162 162
163 163 def _capabilities(self):
164 164 return self._caps
165 165
166 166 def local(self):
167 167 return self._repo
168 168
169 169 def canpush(self):
170 170 return True
171 171
172 172 def url(self):
173 173 return self._repo.url()
174 174
175 175 def lookup(self, key):
176 176 return self._repo.lookup(key)
177 177
178 178 def branchmap(self):
179 179 return self._repo.branchmap()
180 180
181 181 def heads(self):
182 182 return self._repo.heads()
183 183
184 184 def known(self, nodes):
185 185 return self._repo.known(nodes)
186 186
187 187 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
188 188 **kwargs):
189 189 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
190 190 common=common, bundlecaps=bundlecaps,
191 191 **kwargs)
192 192 cb = util.chunkbuffer(chunks)
193 193
194 194 if exchange.bundle2requested(bundlecaps):
195 195 # When requesting a bundle2, getbundle returns a stream to make the
196 196 # wire level function happier. We need to build a proper object
197 197 # from it in the local peer.
198 198 return bundle2.getunbundler(self.ui, cb)
199 199 else:
200 200 return changegroup.getunbundler('01', cb, None)
201 201
202 202 # TODO We might want to move the next two calls into legacypeer and add
203 203 # unbundle instead.
204 204
205 205 def unbundle(self, cg, heads, url):
206 206 """apply a bundle on a repo
207 207
208 208 This function handles the repo locking itself."""
209 209 try:
210 210 try:
211 211 cg = exchange.readbundle(self.ui, cg, None)
212 212 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
213 213 if util.safehasattr(ret, 'getchunks'):
214 214 # This is a bundle20 object, turn it into an unbundler.
215 215 # This little dance should be dropped eventually when the
216 216 # API is finally improved.
217 217 stream = util.chunkbuffer(ret.getchunks())
218 218 ret = bundle2.getunbundler(self.ui, stream)
219 219 return ret
220 220 except Exception as exc:
221 221 # If the exception contains output salvaged from a bundle2
222 222 # reply, we need to make sure it is printed before continuing
223 223 # to fail. So we build a bundle2 with such output and consume
224 224 # it directly.
225 225 #
226 226 # This is not very elegant but allows a "simple" solution for
227 227 # issue4594
228 228 output = getattr(exc, '_bundle2salvagedoutput', ())
229 229 if output:
230 230 bundler = bundle2.bundle20(self._repo.ui)
231 231 for out in output:
232 232 bundler.addpart(out)
233 233 stream = util.chunkbuffer(bundler.getchunks())
234 234 b = bundle2.getunbundler(self.ui, stream)
235 235 bundle2.processbundle(self._repo, b)
236 236 raise
237 237 except error.PushRaced as exc:
238 238 raise error.ResponseError(_('push failed:'), str(exc))
239 239
240 240 def lock(self):
241 241 return self._repo.lock()
242 242
243 243 def pushkey(self, namespace, key, old, new):
244 244 return self._repo.pushkey(namespace, key, old, new)
245 245
246 246 def listkeys(self, namespace):
247 247 return self._repo.listkeys(namespace)
248 248
249 249 def debugwireargs(self, one, two, three=None, four=None, five=None):
250 250 '''used to test argument passing over the wire'''
251 251 return "%s %s %s %s %s" % (one, two, three, four, five)
252 252
253 253 class locallegacypeer(localpeer):
254 254 '''peer extension which implements legacy methods too; used for tests with
255 255 restricted capabilities'''
256 256
257 257 def __init__(self, repo):
258 258 localpeer.__init__(self, repo, caps=legacycaps)
259 259
260 260 def branches(self, nodes):
261 261 return self._repo.branches(nodes)
262 262
263 263 def between(self, pairs):
264 264 return self._repo.between(pairs)
265 265
266 266 def changegroup(self, basenodes, source):
267 267 return changegroup.changegroup(self._repo, basenodes, source)
268 268
269 269 def changegroupsubset(self, bases, heads, source):
270 270 return changegroup.changegroupsubset(self._repo, bases, heads, source)
271 271
272 272 # Increment the sub-version when the revlog v2 format changes to lock out old
273 273 # clients.
274 274 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
275 275
276 276 class localrepository(object):
277 277
278 278 supportedformats = {
279 279 'revlogv1',
280 280 'generaldelta',
281 281 'treemanifest',
282 282 'manifestv2',
283 283 REVLOGV2_REQUIREMENT,
284 284 }
285 285 _basesupported = supportedformats | {
286 286 'store',
287 287 'fncache',
288 288 'shared',
289 289 'relshared',
290 290 'dotencode',
291 291 }
292 292 openerreqs = {
293 293 'revlogv1',
294 294 'generaldelta',
295 295 'treemanifest',
296 296 'manifestv2',
297 297 }
298 298
299 299 # a list of (ui, featureset) functions.
300 300 # only functions defined in modules of enabled extensions are invoked
301 301 featuresetupfuncs = set()
302 302
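A sketch of how an extension typically registers itself here (the feature name is hypothetical); per the check in `__init__` below, the function only runs if its defining module is an enabled extension:

from mercurial import localrepo

def featuresetup(ui, features):
    # advertise support for a hypothetical extra requirement
    features.add('exp-myfeature')

def uisetup(ui):
    localrepo.localrepository.featuresetupfuncs.add(featuresetup)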
303 303 # list of prefixes for files which can be written without 'wlock'
304 304 # Extensions should extend this list when needed
305 305 _wlockfreeprefix = {
306 306 # We might consider requiring 'wlock' for the next
307 307 # two, but pretty much all the existing code assumes
308 308 # wlock is not needed so we keep them excluded for
309 309 # now.
310 310 'hgrc',
311 311 'requires',
312 312 # XXX cache is a complicated business; someone
313 313 # should investigate this in depth at some point
314 314 'cache/',
315 315 # XXX shouldn't dirstate be covered by the wlock?
316 316 'dirstate',
317 317 # XXX bisect was still a bit too messy at the time
318 318 # this changeset was introduced. Someone should fix
319 319 # the remaining bit and drop this line
320 320 'bisect.state',
321 321 }
322 322
323 323 def __init__(self, baseui, path, create=False):
324 324 self.requirements = set()
325 325 self.filtername = None
326 326 # wvfs: rooted at the repository root, used to access the working copy
327 327 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
328 328 # vfs: rooted at .hg, used to access repo files outside of .hg/store
329 329 self.vfs = None
330 330 # svfs: usually rooted at .hg/store, used to access repository history
331 331 # If this is a shared repository, this vfs may point to another
332 332 # repository's .hg/store directory.
333 333 self.svfs = None
334 334 self.root = self.wvfs.base
335 335 self.path = self.wvfs.join(".hg")
336 336 self.origroot = path
337 337 # These auditors are not used by the vfs,
338 338 # only used when writing this comment: basectx.match
339 339 self.auditor = pathutil.pathauditor(self.root, self._checknested)
340 340 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
341 341 realfs=False)
342 342 self.baseui = baseui
343 343 self.ui = baseui.copy()
344 344 self.ui.copy = baseui.copy # prevent copying repo configuration
345 345 self.vfs = vfsmod.vfs(self.path)
346 346 if (self.ui.configbool('devel', 'all-warnings') or
347 347 self.ui.configbool('devel', 'check-locks')):
348 348 self.vfs.audit = self._getvfsward(self.vfs.audit)
349 349 # A list of callbacks to shape the phases if no data were found.
350 350 # Callbacks are in the form: func(repo, roots) --> processed root.
351 351 # This list is to be filled by extensions during repo setup
352 352 self._phasedefaults = []
353 353 try:
354 354 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
355 355 self._loadextensions()
356 356 except IOError:
357 357 pass
358 358
359 359 if self.featuresetupfuncs:
360 360 self.supported = set(self._basesupported) # use private copy
361 361 extmods = set(m.__name__ for n, m
362 362 in extensions.extensions(self.ui))
363 363 for setupfunc in self.featuresetupfuncs:
364 364 if setupfunc.__module__ in extmods:
365 365 setupfunc(self.ui, self.supported)
366 366 else:
367 367 self.supported = self._basesupported
368 368 color.setup(self.ui)
369 369
370 370 # Add compression engines.
371 371 for name in util.compengines:
372 372 engine = util.compengines[name]
373 373 if engine.revlogheader():
374 374 self.supported.add('exp-compression-%s' % name)
375 375
376 376 if not self.vfs.isdir():
377 377 if create:
378 378 self.requirements = newreporequirements(self)
379 379
380 380 if not self.wvfs.exists():
381 381 self.wvfs.makedirs()
382 382 self.vfs.makedir(notindexed=True)
383 383
384 384 if 'store' in self.requirements:
385 385 self.vfs.mkdir("store")
386 386
387 387 # create an invalid changelog
388 388 self.vfs.append(
389 389 "00changelog.i",
390 390 '\0\0\0\2' # represents revlogv2
391 391 ' dummy changelog to prevent using the old repo layout'
392 392 )
393 393 else:
394 394 raise error.RepoError(_("repository %s not found") % path)
395 395 elif create:
396 396 raise error.RepoError(_("repository %s already exists") % path)
397 397 else:
398 398 try:
399 399 self.requirements = scmutil.readrequires(
400 400 self.vfs, self.supported)
401 401 except IOError as inst:
402 402 if inst.errno != errno.ENOENT:
403 403 raise
404 404
405 405 self.sharedpath = self.path
406 406 try:
407 407 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
408 408 if 'relshared' in self.requirements:
409 409 sharedpath = self.vfs.join(sharedpath)
410 410 vfs = vfsmod.vfs(sharedpath, realpath=True)
411 411 s = vfs.base
412 412 if not vfs.exists():
413 413 raise error.RepoError(
414 414 _('.hg/sharedpath points to nonexistent directory %s') % s)
415 415 self.sharedpath = s
416 416 except IOError as inst:
417 417 if inst.errno != errno.ENOENT:
418 418 raise
419 419
420 420 self.store = store.store(
421 421 self.requirements, self.sharedpath, vfsmod.vfs)
422 422 self.spath = self.store.path
423 423 self.svfs = self.store.vfs
424 424 self.sjoin = self.store.join
425 425 self.vfs.createmode = self.store.createmode
426 self.cachevfs = vfsmod.vfs(self.vfs.join('cache'))
427 self.cachevfs.createmode = self.store.createmode
426 428 if (self.ui.configbool('devel', 'all-warnings') or
427 429 self.ui.configbool('devel', 'check-locks')):
428 430 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
429 431 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
430 432 else: # standard vfs
431 433 self.svfs.audit = self._getsvfsward(self.svfs.audit)
432 434 self._applyopenerreqs()
433 435 if create:
434 436 self._writerequirements()
435 437
436 438 self._dirstatevalidatewarned = False
437 439
438 440 self._branchcaches = {}
439 441 self._revbranchcache = None
440 442 self.filterpats = {}
441 443 self._datafilters = {}
442 444 self._transref = self._lockref = self._wlockref = None
443 445
444 446 # A cache for various files under .hg/ that tracks file changes,
445 447 # (used by the filecache decorator)
446 448 #
447 449 # Maps a property name to its util.filecacheentry
448 450 self._filecache = {}
449 451
450 452 # hold sets of revision to be filtered
451 453 # should be cleared when something might have changed the filter value:
452 454 # - new changesets,
453 455 # - phase change,
454 456 # - new obsolescence marker,
455 457 # - working directory parent change,
456 458 # - bookmark changes
457 459 self.filteredrevcache = {}
458 460
459 461 # post-dirstate-status hooks
460 462 self._postdsstatus = []
461 463
462 464 # Cache of types representing filtered repos.
463 465 self._filteredrepotypes = weakref.WeakKeyDictionary()
464 466
465 467 # generic mapping between names and nodes
466 468 self.names = namespaces.namespaces()
467 469
468 470 # Key to signature value.
469 471 self._sparsesignaturecache = {}
470 472 # Signature to cached matcher instance.
471 473 self._sparsematchercache = {}
472 474
473 475 def _getvfsward(self, origfunc):
474 476 """build a ward for self.vfs"""
475 477 rref = weakref.ref(self)
476 478 def checkvfs(path, mode=None):
477 479 ret = origfunc(path, mode=mode)
478 480 repo = rref()
479 481 if (repo is None
480 482 or not util.safehasattr(repo, '_wlockref')
481 483 or not util.safehasattr(repo, '_lockref')):
482 484 return
483 485 if mode in (None, 'r', 'rb'):
484 486 return
485 487 if path.startswith(repo.path):
486 488 # truncate name relative to the repository (.hg)
487 489 path = path[len(repo.path) + 1:]
488 490 if path.startswith('journal.'):
489 491 # journal is covered by 'lock'
490 492 if repo._currentlock(repo._lockref) is None:
491 493 repo.ui.develwarn('write with no lock: "%s"' % path,
492 494 stacklevel=2, config='check-locks')
493 495 elif repo._currentlock(repo._wlockref) is None:
494 496 # rest of vfs files are covered by 'wlock'
495 497 #
496 498 # exclude special files
497 499 for prefix in self._wlockfreeprefix:
498 500 if path.startswith(prefix):
499 501 return
500 502 repo.ui.develwarn('write with no wlock: "%s"' % path,
501 503 stacklevel=2, config='check-locks')
502 504 return ret
503 505 return checkvfs
504 506
505 507 def _getsvfsward(self, origfunc):
506 508 """build a ward for self.svfs"""
507 509 rref = weakref.ref(self)
508 510 def checksvfs(path, mode=None):
509 511 ret = origfunc(path, mode=mode)
510 512 repo = rref()
511 513 if repo is None or not util.safehasattr(repo, '_lockref'):
512 514 return
513 515 if mode in (None, 'r', 'rb'):
514 516 return
515 517 if path.startswith(repo.sharedpath):
516 518 # truncate name relative to the repository (.hg)
517 519 path = path[len(repo.sharedpath) + 1:]
518 520 if repo._currentlock(repo._lockref) is None:
519 521 repo.ui.develwarn('write with no lock: "%s"' % path,
520 522 stacklevel=3)
521 523 return ret
522 524 return checksvfs
523 525
524 526 def close(self):
525 527 self._writecaches()
526 528
527 529 def _loadextensions(self):
528 530 extensions.loadall(self.ui)
529 531
530 532 def _writecaches(self):
531 533 if self._revbranchcache:
532 534 self._revbranchcache.write()
533 535
534 536 def _restrictcapabilities(self, caps):
535 537 if self.ui.configbool('experimental', 'bundle2-advertise'):
536 538 caps = set(caps)
537 539 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
538 540 caps.add('bundle2=' + urlreq.quote(capsblob))
539 541 return caps
540 542
541 543 def _applyopenerreqs(self):
542 544 self.svfs.options = dict((r, 1) for r in self.requirements
543 545 if r in self.openerreqs)
544 546 # experimental config: format.chunkcachesize
545 547 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
546 548 if chunkcachesize is not None:
547 549 self.svfs.options['chunkcachesize'] = chunkcachesize
548 550 # experimental config: format.maxchainlen
549 551 maxchainlen = self.ui.configint('format', 'maxchainlen')
550 552 if maxchainlen is not None:
551 553 self.svfs.options['maxchainlen'] = maxchainlen
552 554 # experimental config: format.manifestcachesize
553 555 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
554 556 if manifestcachesize is not None:
555 557 self.svfs.options['manifestcachesize'] = manifestcachesize
556 558 # experimental config: format.aggressivemergedeltas
557 559 aggressivemergedeltas = self.ui.configbool('format',
558 560 'aggressivemergedeltas')
559 561 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
560 562 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
561 563 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan', -1)
562 564 if 0 <= chainspan:
563 565 self.svfs.options['maxdeltachainspan'] = chainspan
564 566
565 567 for r in self.requirements:
566 568 if r.startswith('exp-compression-'):
567 569 self.svfs.options['compengine'] = r[len('exp-compression-'):]
568 570
569 571 # TODO move "revlogv2" to openerreqs once finalized.
570 572 if REVLOGV2_REQUIREMENT in self.requirements:
571 573 self.svfs.options['revlogv2'] = True
572 574
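For reference, a hedged summary of the config knobs consulted above and the svfs.options keys they feed (the values are illustrative, not defaults):

# [format]
# chunkcachesize = 65536        -> svfs.options['chunkcachesize']
# maxchainlen = 1000            -> svfs.options['maxchainlen']
# manifestcachesize = 4         -> svfs.options['manifestcachesize']
# aggressivemergedeltas = yes   -> svfs.options['aggressivemergedeltas']
# [experimental]
# maxdeltachainspan = 16MB      -> svfs.options['maxdeltachainspan']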
573 575 def _writerequirements(self):
574 576 scmutil.writerequires(self.vfs, self.requirements)
575 577
576 578 def _checknested(self, path):
577 579 """Determine if path is a legal nested repository."""
578 580 if not path.startswith(self.root):
579 581 return False
580 582 subpath = path[len(self.root) + 1:]
581 583 normsubpath = util.pconvert(subpath)
582 584
583 585 # XXX: Checking against the current working copy is wrong in
584 586 # the sense that it can reject things like
585 587 #
586 588 # $ hg cat -r 10 sub/x.txt
587 589 #
588 590 # if sub/ is no longer a subrepository in the working copy
589 591 # parent revision.
590 592 #
591 593 # However, it can of course also allow things that would have
592 594 # been rejected before, such as the above cat command if sub/
593 595 # is a subrepository now, but was a normal directory before.
594 596 # The old path auditor would have rejected by mistake since it
595 597 # panics when it sees sub/.hg/.
596 598 #
597 599 # All in all, checking against the working copy seems sensible
598 600 # since we want to prevent access to nested repositories on
599 601 # the filesystem *now*.
600 602 ctx = self[None]
601 603 parts = util.splitpath(subpath)
602 604 while parts:
603 605 prefix = '/'.join(parts)
604 606 if prefix in ctx.substate:
605 607 if prefix == normsubpath:
606 608 return True
607 609 else:
608 610 sub = ctx.sub(prefix)
609 611 return sub.checknested(subpath[len(prefix) + 1:])
610 612 else:
611 613 parts.pop()
612 614 return False
613 615
614 616 def peer(self):
615 617 return localpeer(self) # not cached to avoid reference cycle
616 618
617 619 def unfiltered(self):
618 620 """Return unfiltered version of the repository
619 621
620 622 Intended to be overwritten by filtered repo."""
621 623 return self
622 624
623 625 def filtered(self, name):
624 626 """Return a filtered version of a repository"""
625 627 # Python <3.4 easily leaks types via __mro__. See
626 628 # https://bugs.python.org/issue17950. We cache dynamically
627 629 # created types so this method doesn't leak on every
628 630 # invocation.
629 631
630 632 key = self.unfiltered().__class__
631 633 if key not in self._filteredrepotypes:
632 634 # Build a new type with the repoview mixin and the base
633 635 # class of this repo. Give it a name containing the
634 636 # filter name to aid debugging.
635 637 bases = (repoview.repoview, key)
636 638 cls = type(r'%sfilteredrepo' % name, bases, {})
637 639 self._filteredrepotypes[key] = cls
638 640
639 641 return self._filteredrepotypes[key](self, name)
640 642
641 643 @repofilecache('bookmarks', 'bookmarks.current')
642 644 def _bookmarks(self):
643 645 return bookmarks.bmstore(self)
644 646
645 647 @property
646 648 def _activebookmark(self):
647 649 return self._bookmarks.active
648 650
649 651 # _phaserevs and _phasesets depend on changelog. what we need is to
650 652 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
651 653 # can't be easily expressed in filecache mechanism.
652 654 @storecache('phaseroots', '00changelog.i')
653 655 def _phasecache(self):
654 656 return phases.phasecache(self, self._phasedefaults)
655 657
656 658 @storecache('obsstore')
657 659 def obsstore(self):
658 660 return obsolete.makestore(self.ui, self)
659 661
660 662 @storecache('00changelog.i')
661 663 def changelog(self):
662 664 return changelog.changelog(self.svfs,
663 665 trypending=txnutil.mayhavepending(self.root))
664 666
665 667 def _constructmanifest(self):
666 668 # This is a temporary function while we migrate from manifest to
667 669 # manifestlog. It allows bundlerepo and unionrepo to intercept the
668 670 # manifest creation.
669 671 return manifest.manifestrevlog(self.svfs)
670 672
671 673 @storecache('00manifest.i')
672 674 def manifestlog(self):
673 675 return manifest.manifestlog(self.svfs, self)
674 676
675 677 @repofilecache('dirstate')
676 678 def dirstate(self):
677 679 sparsematchfn = lambda: sparse.matcher(self)
678 680
679 681 return dirstate.dirstate(self.vfs, self.ui, self.root,
680 682 self._dirstatevalidate, sparsematchfn)
681 683
682 684 def _dirstatevalidate(self, node):
683 685 try:
684 686 self.changelog.rev(node)
685 687 return node
686 688 except error.LookupError:
687 689 if not self._dirstatevalidatewarned:
688 690 self._dirstatevalidatewarned = True
689 691 self.ui.warn(_("warning: ignoring unknown"
690 692 " working parent %s!\n") % short(node))
691 693 return nullid
692 694
693 695 def __getitem__(self, changeid):
694 696 if changeid is None:
695 697 return context.workingctx(self)
696 698 if isinstance(changeid, slice):
697 699 # wdirrev isn't contiguous so the slice shouldn't include it
698 700 return [context.changectx(self, i)
699 701 for i in xrange(*changeid.indices(len(self)))
700 702 if i not in self.changelog.filteredrevs]
701 703 try:
702 704 return context.changectx(self, changeid)
703 705 except error.WdirUnsupported:
704 706 return context.workingctx(self)
705 707
706 708 def __contains__(self, changeid):
707 709 """True if the given changeid exists
708 710
709 711 error.LookupError is raised if an ambiguous node is specified.
710 712 """
711 713 try:
712 714 self[changeid]
713 715 return True
714 716 except error.RepoLookupError:
715 717 return False
716 718
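The lookup styles `__getitem__` accepts, as a sketch (assumes `repo` opened as in the earlier sketches):

wctx = repo[None]        # working directory context
tip = repo['tip']        # symbolic name, tag, bookmark or hex hash
first = repo[0]          # revision number
recent = repo[-3:]       # a slice yields a list of changectx, skipping
                         # filtered revisions
exists = 'tip' in repo   # __contains__ goes through the same lookup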
717 719 def __nonzero__(self):
718 720 return True
719 721
720 722 __bool__ = __nonzero__
721 723
722 724 def __len__(self):
723 725 return len(self.changelog)
724 726
725 727 def __iter__(self):
726 728 return iter(self.changelog)
727 729
728 730 def revs(self, expr, *args):
729 731 '''Find revisions matching a revset.
730 732
731 733 The revset is specified as a string ``expr`` that may contain
732 734 %-formatting to escape certain types. See ``revsetlang.formatspec``.
733 735
734 736 Revset aliases from the configuration are not expanded. To expand
735 737 user aliases, consider calling ``scmutil.revrange()`` or
736 738 ``repo.anyrevs([expr], user=True)``.
737 739
738 740 Returns a revset.abstractsmartset, which is a list-like interface
739 741 that contains integer revisions.
740 742 '''
741 743 expr = revsetlang.formatspec(expr, *args)
742 744 m = revset.match(None, expr)
743 745 return m(self)
744 746
745 747 def set(self, expr, *args):
746 748 '''Find revisions matching a revset and emit changectx instances.
747 749
748 750 This is a convenience wrapper around ``revs()`` that iterates the
749 751 result and is a generator of changectx instances.
750 752
751 753 Revset aliases from the configuration are not expanded. To expand
752 754 user aliases, consider calling ``scmutil.revrange()``.
753 755 '''
754 756 for r in self.revs(expr, *args):
755 757 yield self[r]
756 758
757 759 def anyrevs(self, specs, user=False, localalias=None):
758 760 '''Find revisions matching one of the given revsets.
759 761
760 762 Revset aliases from the configuration are not expanded by default. To
761 763 expand user aliases, specify ``user=True``. To provide some local
762 764 definitions overriding user aliases, set ``localalias`` to
763 765 ``{name: definitionstring}``.
764 766 '''
765 767 if user:
766 768 m = revset.matchany(self.ui, specs, repo=self,
767 769 localalias=localalias)
768 770 else:
769 771 m = revset.matchany(None, specs, localalias=localalias)
770 772 return m(self)
771 773
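A usage sketch for the three entry points above (the revset strings are illustrative):

heads = list(repo.revs('heads(%r)', 'all()'))    # integer revision numbers
for ctx in repo.set('%ld and public()', heads):  # changectx generator
    print(ctx.hex())
revs = repo.anyrevs(['tip'], user=True)          # user revset aliases expanded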
772 774 def url(self):
773 775 return 'file:' + self.root
774 776
775 777 def hook(self, name, throw=False, **args):
776 778 """Call a hook, passing this repo instance.
777 779
778 780 This is a convenience method to aid invoking hooks. Extensions likely
779 781 won't call this unless they have registered a custom hook or are
780 782 replacing code that is expected to call a hook.
781 783 """
782 784 return hook.hook(self.ui, self, name, throw, **args)
783 785
784 786 @filteredpropertycache
785 787 def _tagscache(self):
786 788 '''Returns a tagscache object that contains various tags related
787 789 caches.'''
788 790
789 791 # This simplifies its cache management by having one decorated
790 792 # function (this one) and the rest simply fetch things from it.
791 793 class tagscache(object):
792 794 def __init__(self):
793 795 # These two define the set of tags for this repository. tags
794 796 # maps tag name to node; tagtypes maps tag name to 'global' or
795 797 # 'local'. (Global tags are defined by .hgtags across all
796 798 # heads, and local tags are defined in .hg/localtags.)
797 799 # They constitute the in-memory cache of tags.
798 800 self.tags = self.tagtypes = None
799 801
800 802 self.nodetagscache = self.tagslist = None
801 803
802 804 cache = tagscache()
803 805 cache.tags, cache.tagtypes = self._findtags()
804 806
805 807 return cache
806 808
807 809 def tags(self):
808 810 '''return a mapping of tag to node'''
809 811 t = {}
810 812 if self.changelog.filteredrevs:
811 813 tags, tt = self._findtags()
812 814 else:
813 815 tags = self._tagscache.tags
814 816 for k, v in tags.iteritems():
815 817 try:
816 818 # ignore tags to unknown nodes
817 819 self.changelog.rev(v)
818 820 t[k] = v
819 821 except (error.LookupError, ValueError):
820 822 pass
821 823 return t
822 824
823 825 def _findtags(self):
824 826 '''Do the hard work of finding tags. Return a pair of dicts
825 827 (tags, tagtypes) where tags maps tag name to node, and tagtypes
826 828 maps tag name to a string like \'global\' or \'local\'.
827 829 Subclasses or extensions are free to add their own tags, but
828 830 should be aware that the returned dicts will be retained for the
829 831 duration of the localrepo object.'''
830 832
831 833 # XXX what tagtype should subclasses/extensions use? Currently
832 834 # mq and bookmarks add tags, but do not set the tagtype at all.
833 835 # Should each extension invent its own tag type? Should there
834 836 # be one tagtype for all such "virtual" tags? Or is the status
835 837 # quo fine?
836 838
837 839
838 840 # map tag name to (node, hist)
839 841 alltags = tagsmod.findglobaltags(self.ui, self)
840 842 # map tag name to tag type
841 843 tagtypes = dict((tag, 'global') for tag in alltags)
842 844
843 845 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
844 846
845 847 # Build the return dicts. Have to re-encode tag names because
846 848 # the tags module always uses UTF-8 (in order not to lose info
847 849 # writing to the cache), but the rest of Mercurial wants them in
848 850 # local encoding.
849 851 tags = {}
850 852 for (name, (node, hist)) in alltags.iteritems():
851 853 if node != nullid:
852 854 tags[encoding.tolocal(name)] = node
853 855 tags['tip'] = self.changelog.tip()
854 856 tagtypes = dict([(encoding.tolocal(name), value)
855 857 for (name, value) in tagtypes.iteritems()])
856 858 return (tags, tagtypes)
857 859
858 860 def tagtype(self, tagname):
859 861 '''
860 862 return the type of the given tag. result can be:
861 863
862 864 'local' : a local tag
863 865 'global' : a global tag
864 866 None : tag does not exist
865 867 '''
866 868
867 869 return self._tagscache.tagtypes.get(tagname)
868 870
869 871 def tagslist(self):
870 872 '''return a list of tags ordered by revision'''
871 873 if not self._tagscache.tagslist:
872 874 l = []
873 875 for t, n in self.tags().iteritems():
874 876 l.append((self.changelog.rev(n), t, n))
875 877 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
876 878
877 879 return self._tagscache.tagslist
878 880
879 881 def nodetags(self, node):
880 882 '''return the tags associated with a node'''
881 883 if not self._tagscache.nodetagscache:
882 884 nodetagscache = {}
883 885 for t, n in self._tagscache.tags.iteritems():
884 886 nodetagscache.setdefault(n, []).append(t)
885 887 for tags in nodetagscache.itervalues():
886 888 tags.sort()
887 889 self._tagscache.nodetagscache = nodetagscache
888 890 return self._tagscache.nodetagscache.get(node, [])
889 891
890 892 def nodebookmarks(self, node):
891 893 """return the list of bookmarks pointing to the specified node"""
892 894 marks = []
893 895 for bookmark, n in self._bookmarks.iteritems():
894 896 if n == node:
895 897 marks.append(bookmark)
896 898 return sorted(marks)
897 899
898 900 def branchmap(self):
899 901 '''returns a dictionary {branch: [branchheads]} with branchheads
900 902 ordered by increasing revision number'''
901 903 branchmap.updatecache(self)
902 904 return self._branchcaches[self.filtername]
903 905
904 906 @unfilteredmethod
905 907 def revbranchcache(self):
906 908 if not self._revbranchcache:
907 909 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
908 910 return self._revbranchcache
909 911
910 912 def branchtip(self, branch, ignoremissing=False):
911 913 '''return the tip node for a given branch
912 914
913 915 If ignoremissing is True, then this method will not raise an error.
914 916 This is helpful for callers that only expect None for a missing branch
915 917 (e.g. namespace).
916 918
917 919 '''
918 920 try:
919 921 return self.branchmap().branchtip(branch)
920 922 except KeyError:
921 923 if not ignoremissing:
922 924 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
923 925 else:
924 926 pass
925 927
926 928 def lookup(self, key):
927 929 return self[key].node()
928 930
929 931 def lookupbranch(self, key, remote=None):
930 932 repo = remote or self
931 933 if key in repo.branchmap():
932 934 return key
933 935
934 936 repo = (remote and remote.local()) and remote or self
935 937 return repo[key].branch()
936 938
937 939 def known(self, nodes):
938 940 cl = self.changelog
939 941 nm = cl.nodemap
940 942 filtered = cl.filteredrevs
941 943 result = []
942 944 for n in nodes:
943 945 r = nm.get(n)
944 946 resp = not (r is None or r in filtered)
945 947 result.append(resp)
946 948 return result
947 949
948 950 def local(self):
949 951 return self
950 952
951 953 def publishing(self):
952 954 # it's safe (and desirable) to trust the publish flag unconditionally
953 955 # so that we don't finalize changes shared between users via ssh or nfs
954 956 return self.ui.configbool('phases', 'publish', untrusted=True)
955 957
956 958 def cancopy(self):
957 959 # so statichttprepo's override of local() works
958 960 if not self.local():
959 961 return False
960 962 if not self.publishing():
961 963 return True
962 964 # if publishing we can't copy if there is filtered content
963 965 return not self.filtered('visible').changelog.filteredrevs
964 966
965 967 def shared(self):
966 968 '''the type of shared repository (None if not shared)'''
967 969 if self.sharedpath != self.path:
968 970 return 'store'
969 971 return None
970 972
971 973 def wjoin(self, f, *insidef):
972 974 return self.vfs.reljoin(self.root, f, *insidef)
973 975
974 976 def file(self, f):
975 977 if f[0] == '/':
976 978 f = f[1:]
977 979 return filelog.filelog(self.svfs, f)
978 980
979 981 def changectx(self, changeid):
980 982 return self[changeid]
981 983
982 984 def setparents(self, p1, p2=nullid):
983 985 with self.dirstate.parentchange():
984 986 copies = self.dirstate.setparents(p1, p2)
985 987 pctx = self[p1]
986 988 if copies:
987 989 # Adjust copy records, the dirstate cannot do it, it
988 990 # requires access to parents manifests. Preserve them
989 991 # only for entries added to first parent.
990 992 for f in copies:
991 993 if f not in pctx and copies[f] in pctx:
992 994 self.dirstate.copy(copies[f], f)
993 995 if p2 == nullid:
994 996 for f, s in sorted(self.dirstate.copies().items()):
995 997 if f not in pctx and s not in pctx:
996 998 self.dirstate.copy(None, f)
997 999
998 1000 def filectx(self, path, changeid=None, fileid=None):
999 1001 """changeid can be a changeset revision, node, or tag.
1000 1002 fileid can be a file revision or node."""
1001 1003 return context.filectx(self, path, changeid, fileid)
1002 1004
1003 1005 def getcwd(self):
1004 1006 return self.dirstate.getcwd()
1005 1007
1006 1008 def pathto(self, f, cwd=None):
1007 1009 return self.dirstate.pathto(f, cwd)
1008 1010
1009 1011 def _loadfilter(self, filter):
1010 1012 if filter not in self.filterpats:
1011 1013 l = []
1012 1014 for pat, cmd in self.ui.configitems(filter):
1013 1015 if cmd == '!':
1014 1016 continue
1015 1017 mf = matchmod.match(self.root, '', [pat])
1016 1018 fn = None
1017 1019 params = cmd
1018 1020 for name, filterfn in self._datafilters.iteritems():
1019 1021 if cmd.startswith(name):
1020 1022 fn = filterfn
1021 1023 params = cmd[len(name):].lstrip()
1022 1024 break
1023 1025 if not fn:
1024 1026 fn = lambda s, c, **kwargs: util.filter(s, c)
1025 1027 # Wrap old filters not supporting keyword arguments
1026 1028 if not inspect.getargspec(fn)[2]:
1027 1029 oldfn = fn
1028 1030 fn = lambda s, c, **kwargs: oldfn(s, c)
1029 1031 l.append((mf, fn, params))
1030 1032 self.filterpats[filter] = l
1031 1033 return self.filterpats[filter]
1032 1034
1033 1035 def _filter(self, filterpats, filename, data):
1034 1036 for mf, fn, cmd in filterpats:
1035 1037 if mf(filename):
1036 1038 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1037 1039 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1038 1040 break
1039 1041
1040 1042 return data
1041 1043
1042 1044 @unfilteredpropertycache
1043 1045 def _encodefilterpats(self):
1044 1046 return self._loadfilter('encode')
1045 1047
1046 1048 @unfilteredpropertycache
1047 1049 def _decodefilterpats(self):
1048 1050 return self._loadfilter('decode')
1049 1051
1050 1052 def adddatafilter(self, name, filter):
1051 1053 self._datafilters[name] = filter
1052 1054
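A sketch of registering a custom data filter (names hypothetical). With a matching `[encode]` or `[decode]` entry in the config, `_loadfilter` above resolves the command prefix to the registered function:

def crlffilter(s, params, **kwargs):
    # s: file data; params: the config value after the filter name;
    # kwargs carries ui/repo/filename for keyword-aware filters
    return s.replace('\r\n', '\n')

repo.adddatafilter('crlf:', crlffilter)
# with e.g.   [decode]
#             **.txt = crlf:
# wwrite() now passes *.txt data through crlffilter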
1053 1055 def wread(self, filename):
1054 1056 if self.wvfs.islink(filename):
1055 1057 data = self.wvfs.readlink(filename)
1056 1058 else:
1057 1059 data = self.wvfs.read(filename)
1058 1060 return self._filter(self._encodefilterpats, filename, data)
1059 1061
1060 1062 def wwrite(self, filename, data, flags, backgroundclose=False):
1061 1063 """write ``data`` into ``filename`` in the working directory
1062 1064
1063 1065 This returns the length of the written (maybe decoded) data.
1064 1066 """
1065 1067 data = self._filter(self._decodefilterpats, filename, data)
1066 1068 if 'l' in flags:
1067 1069 self.wvfs.symlink(data, filename)
1068 1070 else:
1069 1071 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
1070 1072 if 'x' in flags:
1071 1073 self.wvfs.setflags(filename, False, True)
1072 1074 return len(data)
1073 1075
1074 1076 def wwritedata(self, filename, data):
1075 1077 return self._filter(self._decodefilterpats, filename, data)
1076 1078
1077 1079 def currenttransaction(self):
1078 1080 """return the current transaction or None if non exists"""
1079 1081 if self._transref:
1080 1082 tr = self._transref()
1081 1083 else:
1082 1084 tr = None
1083 1085
1084 1086 if tr and tr.running():
1085 1087 return tr
1086 1088 return None
1087 1089
1088 1090 def transaction(self, desc, report=None):
1089 1091 if (self.ui.configbool('devel', 'all-warnings')
1090 1092 or self.ui.configbool('devel', 'check-locks')):
1091 1093 if self._currentlock(self._lockref) is None:
1092 1094 raise error.ProgrammingError('transaction requires locking')
1093 1095 tr = self.currenttransaction()
1094 1096 if tr is not None:
1095 1097 return tr.nest()
1096 1098
1097 1099 # abort here if the journal already exists
1098 1100 if self.svfs.exists("journal"):
1099 1101 raise error.RepoError(
1100 1102 _("abandoned transaction found"),
1101 1103 hint=_("run 'hg recover' to clean up transaction"))
1102 1104
1103 1105 idbase = "%.40f#%f" % (random.random(), time.time())
1104 1106 ha = hex(hashlib.sha1(idbase).digest())
1105 1107 txnid = 'TXN:' + ha
1106 1108 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1107 1109
1108 1110 self._writejournal(desc)
1109 1111 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1110 1112 if report:
1111 1113 rp = report
1112 1114 else:
1113 1115 rp = self.ui.warn
1114 1116 vfsmap = {'plain': self.vfs} # root of .hg/
1115 1117 # we must avoid cyclic reference between repo and transaction.
1116 1118 reporef = weakref.ref(self)
1117 1119 # Code to track tag movement
1118 1120 #
1119 1121 # Since tags are all handled as file content, it is actually quite hard
1120 1122 # to track these movements from a code perspective. So we fall back to
1121 1123 # tracking at the repository level. One could envision tracking changes
1122 1124 # to the '.hgtags' file through changegroup application, but that fails
1123 1125 # to cope with cases where a transaction exposes new heads without a
1124 1126 # changegroup being involved (eg: phase movement).
1125 1127 #
1126 1128 # For now, we gate the feature behind a flag since this likely comes
1127 1129 # with performance impacts. The current code runs more often than needed
1128 1130 # and does not use caches as much as it could. The current focus is on
1129 1131 # the behavior of the feature so we disable it by default. The flag
1130 1132 # will be removed when we are happy with the performance impact.
1131 1133 #
1132 1134 # Once this feature is no longer experimental move the following
1133 1135 # documentation to the appropriate help section:
1134 1136 #
1135 1137 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1136 1138 # tags (new or changed or deleted tags). In addition the details of
1137 1139 # these changes are made available in a file at:
1138 1140 # ``REPOROOT/.hg/changes/tags.changes``.
1139 1141 # Make sure you check for HG_TAG_MOVED before reading that file as it
1140 1142 # might exist from a previous transaction even if no tags were touched
1141 1143 # in this one. Changes are recorded in a line-based format::
1142 1144 #
1143 1145 # <action> <hex-node> <tag-name>\n
1144 1146 #
1145 1147 # Actions are defined as follows:
1146 1148 # "-R": tag is removed,
1147 1149 # "+A": tag is added,
1148 1150 # "-M": tag is moved (old value),
1149 1151 # "+M": tag is moved (new value),
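# For illustration (truncated, hypothetical nodes), a transaction that
# moves tag "v1.2" and adds tag "v1.3" would record::
#
#   -M 6d2c1f... v1.2
#   +M a9f1e0... v1.2
#   +A 03be7c... v1.3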
1150 1152 tracktags = lambda x: None
1151 1153 # experimental config: experimental.hook-track-tags
1152 1154 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1153 1155 if desc != 'strip' and shouldtracktags:
1154 1156 oldheads = self.changelog.headrevs()
1155 1157 def tracktags(tr2):
1156 1158 repo = reporef()
1157 1159 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1158 1160 newheads = repo.changelog.headrevs()
1159 1161 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1160 1162 # note: we compare lists here.
1161 1163 # As we do it only once, building a set would not be cheaper
1162 1164 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1163 1165 if changes:
1164 1166 tr2.hookargs['tag_moved'] = '1'
1165 1167 with repo.vfs('changes/tags.changes', 'w',
1166 1168 atomictemp=True) as changesfile:
1167 1169 # note: we do not register the file with the transaction
1168 1170 # because we need it to still exist when the transaction
1169 1171 # is closed (for txnclose hooks)
1170 1172 tagsmod.writediff(changesfile, changes)
1171 1173 def validate(tr2):
1172 1174 """will run pre-closing hooks"""
1173 1175 # XXX the transaction API is a bit lacking here so we take a hacky
1174 1176 # path for now
1175 1177 #
1176 1178 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1177 1179 # dict is copied before these run. In addition we need the data
1178 1180 # available to in-memory hooks too.
1179 1181 #
1180 1182 # Moreover, we also need to make sure this runs before txnclose
1181 1183 # hooks and there is no "pending" mechanism that would execute
1182 1184 # logic only if hooks are about to run.
1183 1185 #
1184 1186 # Fixing this limitation of the transaction is also needed to track
1185 1187 # other families of changes (bookmarks, phases, obsolescence).
1186 1188 #
1187 1189 # This will have to be fixed before we remove the experimental
1188 1190 # gating.
1189 1191 tracktags(tr2)
1190 1192 reporef().hook('pretxnclose', throw=True,
1191 1193 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1192 1194 def releasefn(tr, success):
1193 1195 repo = reporef()
1194 1196 if success:
1195 1197 # this should be explicitly invoked here, because
1196 1198 # in-memory changes aren't written out at closing
1197 1199 # transaction, if tr.addfilegenerator (via
1198 1200 # dirstate.write or so) isn't invoked while
1199 1201 # transaction running
1200 1202 repo.dirstate.write(None)
1201 1203 else:
1202 1204 # discard all changes (including ones already written
1203 1205 # out) in this transaction
1204 1206 repo.dirstate.restorebackup(None, 'journal.dirstate')
1205 1207
1206 1208 repo.invalidate(clearfilecache=True)
1207 1209
1208 1210 tr = transaction.transaction(rp, self.svfs, vfsmap,
1209 1211 "journal",
1210 1212 "undo",
1211 1213 aftertrans(renames),
1212 1214 self.store.createmode,
1213 1215 validator=validate,
1214 1216 releasefn=releasefn,
1215 1217 checkambigfiles=_cachedfiles)
1216 1218 tr.changes['revs'] = set()
1217 1219 tr.changes['obsmarkers'] = set()
1218 1220 tr.changes['phases'] = {}
1219 1221 tr.changes['bookmarks'] = {}
1220 1222
1221 1223 tr.hookargs['txnid'] = txnid
1222 1224 # note: writing the fncache only during finalize means that the file is
1223 1225 # outdated when running hooks. As fncache is used for streaming clones,
1224 1226 # this is not expected to break anything that happens during the hooks.
1225 1227 tr.addfinalize('flush-fncache', self.store.write)
1226 1228 def txnclosehook(tr2):
1227 1229 """To be run if transaction is successful, will schedule a hook run
1228 1230 """
1229 1231 # Don't reference tr2 in hook() so we don't hold a reference.
1230 1232 # This reduces memory consumption when there are multiple
1231 1233 # transactions per lock. This can likely go away if issue5045
1232 1234 # fixes the function accumulation.
1233 1235 hookargs = tr2.hookargs
1234 1236
1235 1237 def hook():
1236 1238 reporef().hook('txnclose', throw=False, txnname=desc,
1237 1239 **pycompat.strkwargs(hookargs))
1238 1240 reporef()._afterlock(hook)
1239 1241 tr.addfinalize('txnclose-hook', txnclosehook)
1240 1242 tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
1241 1243 def txnaborthook(tr2):
1242 1244 """To be run if transaction is aborted
1243 1245 """
1244 1246 reporef().hook('txnabort', throw=False, txnname=desc,
1245 1247 **tr2.hookargs)
1246 1248 tr.addabort('txnabort-hook', txnaborthook)
1247 1249 # avoid eager cache invalidation. in-memory data should be identical
1248 1250 # to stored data if transaction has no error.
1249 1251 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1250 1252 self._transref = weakref.ref(tr)
1251 1253 return tr
1252 1254
1253 1255 def _journalfiles(self):
1254 1256 return ((self.svfs, 'journal'),
1255 1257 (self.vfs, 'journal.dirstate'),
1256 1258 (self.vfs, 'journal.branch'),
1257 1259 (self.vfs, 'journal.desc'),
1258 1260 (self.vfs, 'journal.bookmarks'),
1259 1261 (self.svfs, 'journal.phaseroots'))
1260 1262
1261 1263 def undofiles(self):
1262 1264 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1263 1265
1264 1266 @unfilteredmethod
1265 1267 def _writejournal(self, desc):
1266 1268 self.dirstate.savebackup(None, 'journal.dirstate')
1267 1269 self.vfs.write("journal.branch",
1268 1270 encoding.fromlocal(self.dirstate.branch()))
1269 1271 self.vfs.write("journal.desc",
1270 1272 "%d\n%s\n" % (len(self), desc))
1271 1273 self.vfs.write("journal.bookmarks",
1272 1274 self.vfs.tryread("bookmarks"))
1273 1275 self.svfs.write("journal.phaseroots",
1274 1276 self.svfs.tryread("phaseroots"))
1275 1277
1276 1278 def recover(self):
1277 1279 with self.lock():
1278 1280 if self.svfs.exists("journal"):
1279 1281 self.ui.status(_("rolling back interrupted transaction\n"))
1280 1282 vfsmap = {'': self.svfs,
1281 1283 'plain': self.vfs,}
1282 1284 transaction.rollback(self.svfs, vfsmap, "journal",
1283 1285 self.ui.warn,
1284 1286 checkambigfiles=_cachedfiles)
1285 1287 self.invalidate()
1286 1288 return True
1287 1289 else:
1288 1290 self.ui.warn(_("no interrupted transaction available\n"))
1289 1291 return False
1290 1292
1291 1293 def rollback(self, dryrun=False, force=False):
1292 1294 wlock = lock = dsguard = None
1293 1295 try:
1294 1296 wlock = self.wlock()
1295 1297 lock = self.lock()
1296 1298 if self.svfs.exists("undo"):
1297 1299 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1298 1300
1299 1301 return self._rollback(dryrun, force, dsguard)
1300 1302 else:
1301 1303 self.ui.warn(_("no rollback information available\n"))
1302 1304 return 1
1303 1305 finally:
1304 1306 release(dsguard, lock, wlock)
1305 1307
1306 1308 @unfilteredmethod # Until we get smarter cache management
1307 1309 def _rollback(self, dryrun, force, dsguard):
1308 1310 ui = self.ui
1309 1311 try:
1310 1312 args = self.vfs.read('undo.desc').splitlines()
1311 1313 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1312 1314 if len(args) >= 3:
1313 1315 detail = args[2]
1314 1316 oldtip = oldlen - 1
1315 1317
1316 1318 if detail and ui.verbose:
1317 1319 msg = (_('repository tip rolled back to revision %d'
1318 1320 ' (undo %s: %s)\n')
1319 1321 % (oldtip, desc, detail))
1320 1322 else:
1321 1323 msg = (_('repository tip rolled back to revision %d'
1322 1324 ' (undo %s)\n')
1323 1325 % (oldtip, desc))
1324 1326 except IOError:
1325 1327 msg = _('rolling back unknown transaction\n')
1326 1328 desc = None
1327 1329
1328 1330 if not force and self['.'] != self['tip'] and desc == 'commit':
1329 1331 raise error.Abort(
1330 1332 _('rollback of last commit while not checked out '
1331 1333 'may lose data'), hint=_('use -f to force'))
1332 1334
1333 1335 ui.status(msg)
1334 1336 if dryrun:
1335 1337 return 0
1336 1338
1337 1339 parents = self.dirstate.parents()
1338 1340 self.destroying()
1339 1341 vfsmap = {'plain': self.vfs, '': self.svfs}
1340 1342 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1341 1343 checkambigfiles=_cachedfiles)
1342 1344 if self.vfs.exists('undo.bookmarks'):
1343 1345 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1344 1346 if self.svfs.exists('undo.phaseroots'):
1345 1347 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1346 1348 self.invalidate()
1347 1349
1348 1350 parentgone = (parents[0] not in self.changelog.nodemap or
1349 1351 parents[1] not in self.changelog.nodemap)
1350 1352 if parentgone:
1351 1353 # prevent dirstateguard from overwriting already restored one
1352 1354 dsguard.close()
1353 1355
1354 1356 self.dirstate.restorebackup(None, 'undo.dirstate')
1355 1357 try:
1356 1358 branch = self.vfs.read('undo.branch')
1357 1359 self.dirstate.setbranch(encoding.tolocal(branch))
1358 1360 except IOError:
1359 1361 ui.warn(_('named branch could not be reset: '
1360 1362 'current branch is still \'%s\'\n')
1361 1363 % self.dirstate.branch())
1362 1364
1363 1365 parents = tuple([p.rev() for p in self[None].parents()])
1364 1366 if len(parents) > 1:
1365 1367 ui.status(_('working directory now based on '
1366 1368 'revisions %d and %d\n') % parents)
1367 1369 else:
1368 1370 ui.status(_('working directory now based on '
1369 1371 'revision %d\n') % parents)
1370 1372 mergemod.mergestate.clean(self, self['.'].node())
1371 1373
1372 1374 # TODO: if we know which new heads may result from this rollback, pass
1373 1375 # them to destroy(), which will prevent the branchhead cache from being
1374 1376 # invalidated.
1375 1377 self.destroyed()
1376 1378 return 0
1377 1379
1378 1380 def _buildcacheupdater(self, newtransaction):
1379 1381 """called during transaction to build the callback updating cache
1380 1382
1381 1383 Lives on the repository to help extensions that might want to augment
1382 1384 this logic. For this purpose, the created transaction is passed to the
1383 1385 method.
1384 1386 """
1385 1387 # we must avoid cyclic reference between repo and transaction.
1386 1388 reporef = weakref.ref(self)
1387 1389 def updater(tr):
1388 1390 repo = reporef()
1389 1391 repo.updatecaches(tr)
1390 1392 return updater
1391 1393
1392 1394 @unfilteredmethod
1393 1395 def updatecaches(self, tr=None):
1394 1396 """warm appropriate caches
1395 1397
1396 1398 If this function is called after a transaction has closed, the transaction
1397 1399 will be available in the 'tr' argument. This can be used to selectively
1398 1400 update caches relevant to the changes in that transaction.
1399 1401 """
1400 1402 if tr is not None and tr.hookargs.get('source') == 'strip':
1401 1403 # During strip, many caches are invalid but
1402 1404 # a later call to `destroyed` will refresh them.
1403 1405 return
1404 1406
1405 1407 if tr is None or tr.changes['revs']:
1406 1408 # updating the unfiltered branchmap should refresh all the others,
1407 1409 self.ui.debug('updating the branch cache\n')
1408 1410 branchmap.updatecache(self.filtered('served'))
1409 1411
1410 1412 def invalidatecaches(self):
1411 1413
1412 1414 if '_tagscache' in vars(self):
1413 1415 # can't use delattr on proxy
1414 1416 del self.__dict__['_tagscache']
1415 1417
1416 1418 self.unfiltered()._branchcaches.clear()
1417 1419 self.invalidatevolatilesets()
1418 1420 self._sparsesignaturecache.clear()
1419 1421
1420 1422 def invalidatevolatilesets(self):
1421 1423 self.filteredrevcache.clear()
1422 1424 obsolete.clearobscaches(self)
1423 1425
1424 1426 def invalidatedirstate(self):
1425 1427 '''Invalidates the dirstate, causing the next call to dirstate
1426 1428 to check if it was modified since the last time it was read,
1427 1429 rereading it if it has.
1428 1430
1429 1431 This is different from dirstate.invalidate() in that it doesn't always
1430 1432 reread the dirstate. Use dirstate.invalidate() if you want to
1431 1433 explicitly read the dirstate again (i.e. restoring it to a previous
1432 1434 known good state).'''
1433 1435 if hasunfilteredcache(self, 'dirstate'):
1434 1436 for k in self.dirstate._filecache:
1435 1437 try:
1436 1438 delattr(self.dirstate, k)
1437 1439 except AttributeError:
1438 1440 pass
1439 1441 delattr(self.unfiltered(), 'dirstate')
1440 1442
1441 1443 def invalidate(self, clearfilecache=False):
1442 1444 '''Invalidates both store and non-store parts other than dirstate
1443 1445
1444 1446 If a transaction is running, invalidation of store is omitted,
1445 1447 because discarding in-memory changes might cause inconsistency
1446 1448 (e.g. incomplete fncache causes unintentional failure, but
1447 1449 a redundant one doesn't).
1448 1450 '''
1449 1451 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1450 1452 for k in list(self._filecache.keys()):
1451 1453 # dirstate is invalidated separately in invalidatedirstate()
1452 1454 if k == 'dirstate':
1453 1455 continue
1454 1456
1455 1457 if clearfilecache:
1456 1458 del self._filecache[k]
1457 1459 try:
1458 1460 delattr(unfiltered, k)
1459 1461 except AttributeError:
1460 1462 pass
1461 1463 self.invalidatecaches()
1462 1464 if not self.currenttransaction():
1463 1465 # TODO: Changing contents of store outside transaction
1464 1466 # causes inconsistency. We should make in-memory store
1465 1467 # changes detectable, and abort if changed.
1466 1468 self.store.invalidatecaches()
1467 1469
1468 1470 def invalidateall(self):
1469 1471 '''Fully invalidates both store and non-store parts, causing the
1470 1472 subsequent operation to reread any outside changes.'''
1471 1473 # extension should hook this to invalidate its caches
1472 1474 self.invalidate()
1473 1475 self.invalidatedirstate()
1474 1476
1475 1477 @unfilteredmethod
1476 1478 def _refreshfilecachestats(self, tr):
1477 1479 """Reload stats of cached files so that they are flagged as valid"""
1478 1480 for k, ce in self._filecache.items():
1479 1481 if k == 'dirstate' or k not in self.__dict__:
1480 1482 continue
1481 1483 ce.refresh()
1482 1484
1483 1485 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1484 1486 inheritchecker=None, parentenvvar=None):
1485 1487 parentlock = None
1486 1488 # the contents of parentenvvar are used by the underlying lock to
1487 1489 # determine whether it can be inherited
1488 1490 if parentenvvar is not None:
1489 1491 parentlock = encoding.environ.get(parentenvvar)
1490 1492 try:
1491 1493 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1492 1494 acquirefn=acquirefn, desc=desc,
1493 1495 inheritchecker=inheritchecker,
1494 1496 parentlock=parentlock)
1495 1497 except error.LockHeld as inst:
1496 1498 if not wait:
1497 1499 raise
1498 1500 # show more details for new-style locks
1499 1501 if ':' in inst.locker:
1500 1502 host, pid = inst.locker.split(":", 1)
1501 1503 self.ui.warn(
1502 1504 _("waiting for lock on %s held by process %r "
1503 1505 "on host %r\n") % (desc, pid, host))
1504 1506 else:
1505 1507 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1506 1508 (desc, inst.locker))
1507 1509 # default to 600 seconds timeout
1508 1510 l = lockmod.lock(vfs, lockname,
1509 1511 int(self.ui.config("ui", "timeout")),
1510 1512 releasefn=releasefn, acquirefn=acquirefn,
1511 1513 desc=desc)
1512 1514 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1513 1515 return l
1514 1516
1515 1517 def _afterlock(self, callback):
1516 1518 """add a callback to be run when the repository is fully unlocked
1517 1519
1518 1520 The callback will be executed when the outermost lock is released
1519 1521 (with wlock being higher level than 'lock')."""
1520 1522 for ref in (self._wlockref, self._lockref):
1521 1523 l = ref and ref()
1522 1524 if l and l.held:
1523 1525 l.postrelease.append(callback)
1524 1526 break
1525 1527 else: # no lock has been found.
1526 1528 callback()
1527 1529
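A minimal sketch of registering a post-unlock callback through _afterlock()
(an internal helper; 'repo' is assumed):

    def notify():
        repo.ui.status('all repository locks released\n')
    repo._afterlock(notify)  # runs immediately if no lock is held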
1528 1530 def lock(self, wait=True):
1529 1531 '''Lock the repository store (.hg/store) and return a weak reference
1530 1532 to the lock. Use this before modifying the store (e.g. committing or
1531 1533 stripping). If you are opening a transaction, get a lock as well.
1532 1534
1533 1535 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1534 1536 'wlock' first to avoid a deadlock hazard.'''
1535 1537 l = self._currentlock(self._lockref)
1536 1538 if l is not None:
1537 1539 l.lock()
1538 1540 return l
1539 1541
1540 1542 l = self._lock(self.svfs, "lock", wait, None,
1541 1543 self.invalidate, _('repository %s') % self.origroot)
1542 1544 self._lockref = weakref.ref(l)
1543 1545 return l
1544 1546
1545 1547 def _wlockchecktransaction(self):
1546 1548 if self.currenttransaction() is not None:
1547 1549 raise error.LockInheritanceContractViolation(
1548 1550 'wlock cannot be inherited in the middle of a transaction')
1549 1551
1550 1552 def wlock(self, wait=True):
1551 1553 '''Lock the non-store parts of the repository (everything under
1552 1554 .hg except .hg/store) and return a weak reference to the lock.
1553 1555
1554 1556 Use this before modifying files in .hg.
1555 1557
1556 1558 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1557 1559 'wlock' first to avoid a deadlock hazard.'''
1558 1560 l = self._wlockref and self._wlockref()
1559 1561 if l is not None and l.held:
1560 1562 l.lock()
1561 1563 return l
1562 1564
1563 1565 # We do not need to check for non-waiting lock acquisition. Such
1564 1566 # acquisitions would not cause a deadlock; they would just fail.
1565 1567 if wait and (self.ui.configbool('devel', 'all-warnings')
1566 1568 or self.ui.configbool('devel', 'check-locks')):
1567 1569 if self._currentlock(self._lockref) is not None:
1568 1570 self.ui.develwarn('"wlock" acquired after "lock"')
1569 1571
1570 1572 def unlock():
1571 1573 if self.dirstate.pendingparentchange():
1572 1574 self.dirstate.invalidate()
1573 1575 else:
1574 1576 self.dirstate.write(None)
1575 1577
1576 1578 self._filecache['dirstate'].refresh()
1577 1579
1578 1580 l = self._lock(self.vfs, "wlock", wait, unlock,
1579 1581 self.invalidatedirstate, _('working directory of %s') %
1580 1582 self.origroot,
1581 1583 inheritchecker=self._wlockchecktransaction,
1582 1584 parentenvvar='HG_WLOCK_LOCKER')
1583 1585 self._wlockref = weakref.ref(l)
1584 1586 return l
1585 1587
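Putting the two locks together, the acquisition order documented above looks
like this in practice (a sketch; lockmod is mercurial.lock):

    wlock = lock = None
    try:
        wlock = repo.wlock()  # always take wlock first...
        lock = repo.lock()    # ...then the store lock
        # ... modify the store and the working directory ...
    finally:
        lockmod.release(lock, wlock)  # release in reverse order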
1586 1588 def _currentlock(self, lockref):
1587 1589 """Returns the lock if it's held, or None if it's not."""
1588 1590 if lockref is None:
1589 1591 return None
1590 1592 l = lockref()
1591 1593 if l is None or not l.held:
1592 1594 return None
1593 1595 return l
1594 1596
1595 1597 def currentwlock(self):
1596 1598 """Returns the wlock if it's held, or None if it's not."""
1597 1599 return self._currentlock(self._wlockref)
1598 1600
1599 1601 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1600 1602 """
1601 1603 commit an individual file as part of a larger transaction
1602 1604 """
1603 1605
1604 1606 fname = fctx.path()
1605 1607 fparent1 = manifest1.get(fname, nullid)
1606 1608 fparent2 = manifest2.get(fname, nullid)
1607 1609 if isinstance(fctx, context.filectx):
1608 1610 node = fctx.filenode()
1609 1611 if node in [fparent1, fparent2]:
1610 1612 self.ui.debug('reusing %s filelog entry\n' % fname)
1611 1613 if manifest1.flags(fname) != fctx.flags():
1612 1614 changelist.append(fname)
1613 1615 return node
1614 1616
1615 1617 flog = self.file(fname)
1616 1618 meta = {}
1617 1619 copy = fctx.renamed()
1618 1620 if copy and copy[0] != fname:
1619 1621 # Mark the new revision of this file as a copy of another
1620 1622 # file. This copy data will effectively act as a parent
1621 1623 # of this new revision. If this is a merge, the first
1622 1624 # parent will be the nullid (meaning "look up the copy data")
1623 1625 # and the second one will be the other parent. For example:
1624 1626 #
1625 1627 # 0 --- 1 --- 3 rev1 changes file foo
1626 1628 # \ / rev2 renames foo to bar and changes it
1627 1629 # \- 2 -/ rev3 should have bar with all changes and
1628 1630 # should record that bar descends from
1629 1631 # bar in rev2 and foo in rev1
1630 1632 #
1631 1633 # this allows this merge to succeed:
1632 1634 #
1633 1635 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1634 1636 # \ / merging rev3 and rev4 should use bar@rev2
1635 1637 # \- 2 --- 4 as the merge base
1636 1638 #
1637 1639
1638 1640 cfname = copy[0]
1639 1641 crev = manifest1.get(cfname)
1640 1642 newfparent = fparent2
1641 1643
1642 1644 if manifest2: # branch merge
1643 1645 if fparent2 == nullid or crev is None: # copied on remote side
1644 1646 if cfname in manifest2:
1645 1647 crev = manifest2[cfname]
1646 1648 newfparent = fparent1
1647 1649
1648 1650 # Here, we used to search backwards through history to try to find
1649 1651 # where the file copy came from if the source of a copy was not in
1650 1652 # the parent directory. However, this doesn't actually make sense to
1651 1653 # do (what does a copy from something not in your working copy even
1652 1654 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1653 1655 # the user that copy information was dropped, so if they didn't
1654 1656 # expect this outcome it can be fixed, but this is the correct
1655 1657 # behavior in this circumstance.
1656 1658
1657 1659 if crev:
1658 1660 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1659 1661 meta["copy"] = cfname
1660 1662 meta["copyrev"] = hex(crev)
1661 1663 fparent1, fparent2 = nullid, newfparent
1662 1664 else:
1663 1665 self.ui.warn(_("warning: can't find ancestor for '%s' "
1664 1666 "copied from '%s'!\n") % (fname, cfname))
1665 1667
1666 1668 elif fparent1 == nullid:
1667 1669 fparent1, fparent2 = fparent2, nullid
1668 1670 elif fparent2 != nullid:
1669 1671 # is one parent an ancestor of the other?
1670 1672 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1671 1673 if fparent1 in fparentancestors:
1672 1674 fparent1, fparent2 = fparent2, nullid
1673 1675 elif fparent2 in fparentancestors:
1674 1676 fparent2 = nullid
1675 1677
1676 1678 # is the file changed?
1677 1679 text = fctx.data()
1678 1680 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1679 1681 changelist.append(fname)
1680 1682 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1681 1683 # are just the flags changed during merge?
1682 1684 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1683 1685 changelist.append(fname)
1684 1686
1685 1687 return fparent1
1686 1688
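To make the copy bookkeeping above concrete: when 'foo' was renamed to 'bar',
the new filelog revision of 'bar' is written with nullid as its first parent
and metadata pointing back at 'foo', roughly (a sketch reusing the names from
_filecommit):

    meta = {'copy': 'foo', 'copyrev': hex(crev)}  # crev: 'foo' filelog node
    flog.add(text, meta, tr, linkrev, nullid, newfparent)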
1687 1689 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1688 1690 """check for commit arguments that aren't committable"""
1689 1691 if match.isexact() or match.prefix():
1690 1692 matched = set(status.modified + status.added + status.removed)
1691 1693
1692 1694 for f in match.files():
1693 1695 f = self.dirstate.normalize(f)
1694 1696 if f == '.' or f in matched or f in wctx.substate:
1695 1697 continue
1696 1698 if f in status.deleted:
1697 1699 fail(f, _('file not found!'))
1698 1700 if f in vdirs: # visited directory
1699 1701 d = f + '/'
1700 1702 for mf in matched:
1701 1703 if mf.startswith(d):
1702 1704 break
1703 1705 else:
1704 1706 fail(f, _("no match under directory!"))
1705 1707 elif f not in self.dirstate:
1706 1708 fail(f, _("file not tracked!"))
1707 1709
1708 1710 @unfilteredmethod
1709 1711 def commit(self, text="", user=None, date=None, match=None, force=False,
1710 1712 editor=False, extra=None):
1711 1713 """Add a new revision to current repository.
1712 1714
1713 1715 Revision information is gathered from the working directory,
1714 1716 match can be used to filter the committed files. If editor is
1715 1717 supplied, it is called to get a commit message.
1716 1718 """
1717 1719 if extra is None:
1718 1720 extra = {}
1719 1721
1720 1722 def fail(f, msg):
1721 1723 raise error.Abort('%s: %s' % (f, msg))
1722 1724
1723 1725 if not match:
1724 1726 match = matchmod.always(self.root, '')
1725 1727
1726 1728 if not force:
1727 1729 vdirs = []
1728 1730 match.explicitdir = vdirs.append
1729 1731 match.bad = fail
1730 1732
1731 1733 wlock = lock = tr = None
1732 1734 try:
1733 1735 wlock = self.wlock()
1734 1736 lock = self.lock() # for recent changelog (see issue4368)
1735 1737
1736 1738 wctx = self[None]
1737 1739 merge = len(wctx.parents()) > 1
1738 1740
1739 1741 if not force and merge and not match.always():
1740 1742 raise error.Abort(_('cannot partially commit a merge '
1741 1743 '(do not specify files or patterns)'))
1742 1744
1743 1745 status = self.status(match=match, clean=force)
1744 1746 if force:
1745 1747 status.modified.extend(status.clean) # mq may commit clean files
1746 1748
1747 1749 # check subrepos
1748 1750 subs = []
1749 1751 commitsubs = set()
1750 1752 newstate = wctx.substate.copy()
1751 1753 # only manage subrepos and .hgsubstate if .hgsub is present
1752 1754 if '.hgsub' in wctx:
1753 1755 # we'll decide whether to track this ourselves, thanks
1754 1756 for c in status.modified, status.added, status.removed:
1755 1757 if '.hgsubstate' in c:
1756 1758 c.remove('.hgsubstate')
1757 1759
1758 1760 # compare current state to last committed state
1759 1761 # build new substate based on last committed state
1760 1762 oldstate = wctx.p1().substate
1761 1763 for s in sorted(newstate.keys()):
1762 1764 if not match(s):
1763 1765 # ignore working copy, use old state if present
1764 1766 if s in oldstate:
1765 1767 newstate[s] = oldstate[s]
1766 1768 continue
1767 1769 if not force:
1768 1770 raise error.Abort(
1769 1771 _("commit with new subrepo %s excluded") % s)
1770 1772 dirtyreason = wctx.sub(s).dirtyreason(True)
1771 1773 if dirtyreason:
1772 1774 if not self.ui.configbool('ui', 'commitsubrepos'):
1773 1775 raise error.Abort(dirtyreason,
1774 1776 hint=_("use --subrepos for recursive commit"))
1775 1777 subs.append(s)
1776 1778 commitsubs.add(s)
1777 1779 else:
1778 1780 bs = wctx.sub(s).basestate()
1779 1781 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1780 1782 if oldstate.get(s, (None, None, None))[1] != bs:
1781 1783 subs.append(s)
1782 1784
1783 1785 # check for removed subrepos
1784 1786 for p in wctx.parents():
1785 1787 r = [s for s in p.substate if s not in newstate]
1786 1788 subs += [s for s in r if match(s)]
1787 1789 if subs:
1788 1790 if (not match('.hgsub') and
1789 1791 '.hgsub' in (wctx.modified() + wctx.added())):
1790 1792 raise error.Abort(
1791 1793 _("can't commit subrepos without .hgsub"))
1792 1794 status.modified.insert(0, '.hgsubstate')
1793 1795
1794 1796 elif '.hgsub' in status.removed:
1795 1797 # clean up .hgsubstate when .hgsub is removed
1796 1798 if ('.hgsubstate' in wctx and
1797 1799 '.hgsubstate' not in (status.modified + status.added +
1798 1800 status.removed)):
1799 1801 status.removed.insert(0, '.hgsubstate')
1800 1802
1801 1803 # make sure all explicit patterns are matched
1802 1804 if not force:
1803 1805 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1804 1806
1805 1807 cctx = context.workingcommitctx(self, status,
1806 1808 text, user, date, extra)
1807 1809
1808 1810 # internal config: ui.allowemptycommit
1809 1811 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1810 1812 or extra.get('close') or merge or cctx.files()
1811 1813 or self.ui.configbool('ui', 'allowemptycommit'))
1812 1814 if not allowemptycommit:
1813 1815 return None
1814 1816
1815 1817 if merge and cctx.deleted():
1816 1818 raise error.Abort(_("cannot commit merge with missing files"))
1817 1819
1818 1820 ms = mergemod.mergestate.read(self)
1819 1821 mergeutil.checkunresolved(ms)
1820 1822
1821 1823 if editor:
1822 1824 cctx._text = editor(self, cctx, subs)
1823 1825 edited = (text != cctx._text)
1824 1826
1825 1827 # Save commit message in case this transaction gets rolled back
1826 1828 # (e.g. by a pretxncommit hook). Leave the content alone on
1827 1829 # the assumption that the user will use the same editor again.
1828 1830 msgfn = self.savecommitmessage(cctx._text)
1829 1831
1830 1832 # commit subs and write new state
1831 1833 if subs:
1832 1834 for s in sorted(commitsubs):
1833 1835 sub = wctx.sub(s)
1834 1836 self.ui.status(_('committing subrepository %s\n') %
1835 1837 subrepo.subrelpath(sub))
1836 1838 sr = sub.commit(cctx._text, user, date)
1837 1839 newstate[s] = (newstate[s][0], sr)
1838 1840 subrepo.writestate(self, newstate)
1839 1841
1840 1842 p1, p2 = self.dirstate.parents()
1841 1843 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1842 1844 try:
1843 1845 self.hook("precommit", throw=True, parent1=hookp1,
1844 1846 parent2=hookp2)
1845 1847 tr = self.transaction('commit')
1846 1848 ret = self.commitctx(cctx, True)
1847 1849 except: # re-raises
1848 1850 if edited:
1849 1851 self.ui.write(
1850 1852 _('note: commit message saved in %s\n') % msgfn)
1851 1853 raise
1852 1854 # update bookmarks, dirstate and mergestate
1853 1855 bookmarks.update(self, [p1, p2], ret)
1854 1856 cctx.markcommitted(ret)
1855 1857 ms.reset()
1856 1858 tr.close()
1857 1859
1858 1860 finally:
1859 1861 lockmod.release(tr, lock, wlock)
1860 1862
1861 1863 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1862 1864 # hack for commands that use a temporary commit (e.g. histedit):
1863 1865 # the temporary commit may have been stripped before the hook runs
1864 1866 if self.changelog.hasnode(ret):
1865 1867 self.hook("commit", node=node, parent1=parent1,
1866 1868 parent2=parent2)
1867 1869 self._afterlock(commithook)
1868 1870 return ret
1869 1871
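A hedged example of driving commit() programmatically ('/path/to/repo' is
hypothetical):

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui.load(), '/path/to/repo')
    node = repo.commit(text='fix the widget', user='alice <alice@example.com>')
    if node is None:
        repo.ui.status('nothing changed\n')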
1870 1872 @unfilteredmethod
1871 1873 def commitctx(self, ctx, error=False):
1872 1874 """Add a new revision to current repository.
1873 1875 Revision information is passed via the context argument.
1874 1876 """
1875 1877
1876 1878 tr = None
1877 1879 p1, p2 = ctx.p1(), ctx.p2()
1878 1880 user = ctx.user()
1879 1881
1880 1882 lock = self.lock()
1881 1883 try:
1882 1884 tr = self.transaction("commit")
1883 1885 trp = weakref.proxy(tr)
1884 1886
1885 1887 if ctx.manifestnode():
1886 1888 # reuse an existing manifest revision
1887 1889 mn = ctx.manifestnode()
1888 1890 files = ctx.files()
1889 1891 elif ctx.files():
1890 1892 m1ctx = p1.manifestctx()
1891 1893 m2ctx = p2.manifestctx()
1892 1894 mctx = m1ctx.copy()
1893 1895
1894 1896 m = mctx.read()
1895 1897 m1 = m1ctx.read()
1896 1898 m2 = m2ctx.read()
1897 1899
1898 1900 # check in files
1899 1901 added = []
1900 1902 changed = []
1901 1903 removed = list(ctx.removed())
1902 1904 linkrev = len(self)
1903 1905 self.ui.note(_("committing files:\n"))
1904 1906 for f in sorted(ctx.modified() + ctx.added()):
1905 1907 self.ui.note(f + "\n")
1906 1908 try:
1907 1909 fctx = ctx[f]
1908 1910 if fctx is None:
1909 1911 removed.append(f)
1910 1912 else:
1911 1913 added.append(f)
1912 1914 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1913 1915 trp, changed)
1914 1916 m.setflag(f, fctx.flags())
1915 1917 except OSError as inst:
1916 1918 self.ui.warn(_("trouble committing %s!\n") % f)
1917 1919 raise
1918 1920 except IOError as inst:
1919 1921 errcode = getattr(inst, 'errno', errno.ENOENT)
1920 1922 if error or errcode and errcode != errno.ENOENT:
1921 1923 self.ui.warn(_("trouble committing %s!\n") % f)
1922 1924 raise
1923 1925
1924 1926 # update manifest
1925 1927 self.ui.note(_("committing manifest\n"))
1926 1928 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1927 1929 drop = [f for f in removed if f in m]
1928 1930 for f in drop:
1929 1931 del m[f]
1930 1932 mn = mctx.write(trp, linkrev,
1931 1933 p1.manifestnode(), p2.manifestnode(),
1932 1934 added, drop)
1933 1935 files = changed + removed
1934 1936 else:
1935 1937 mn = p1.manifestnode()
1936 1938 files = []
1937 1939
1938 1940 # update changelog
1939 1941 self.ui.note(_("committing changelog\n"))
1940 1942 self.changelog.delayupdate(tr)
1941 1943 n = self.changelog.add(mn, files, ctx.description(),
1942 1944 trp, p1.node(), p2.node(),
1943 1945 user, ctx.date(), ctx.extra().copy())
1944 1946 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1945 1947 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1946 1948 parent2=xp2)
1947 1949 # set the new commit in its proper phase
1948 1950 targetphase = subrepo.newcommitphase(self.ui, ctx)
1949 1951 if targetphase:
1950 1952 # retracting the boundary does not alter parent changesets.
1951 1953 # if a parent has a higher phase, the resulting phase will
1952 1954 # be compliant anyway
1953 1955 #
1954 1956 # if minimal phase was 0 we don't need to retract anything
1955 1957 phases.registernew(self, tr, targetphase, [n])
1956 1958 tr.close()
1957 1959 return n
1958 1960 finally:
1959 1961 if tr:
1960 1962 tr.release()
1961 1963 lock.release()
1962 1964
1963 1965 @unfilteredmethod
1964 1966 def destroying(self):
1965 1967 '''Inform the repository that nodes are about to be destroyed.
1966 1968 Intended for use by strip and rollback, so there's a common
1967 1969 place for anything that has to be done before destroying history.
1968 1970
1969 1971 This is mostly useful for saving state that is in memory and waiting
1970 1972 to be flushed when the current lock is released. Because a call to
1971 1973 destroyed is imminent, the repo will be invalidated, causing those
1972 1974 changes to either stay in memory (waiting for the next unlock) or vanish
1973 1975 completely.
1974 1976 '''
1975 1977 # When using the same lock to commit and strip, the phasecache is left
1976 1978 # dirty after committing. Then when we strip, the repo is invalidated,
1977 1979 # causing those changes to disappear.
1978 1980 if '_phasecache' in vars(self):
1979 1981 self._phasecache.write()
1980 1982
1981 1983 @unfilteredmethod
1982 1984 def destroyed(self):
1983 1985 '''Inform the repository that nodes have been destroyed.
1984 1986 Intended for use by strip and rollback, so there's a common
1985 1987 place for anything that has to be done after destroying history.
1986 1988 '''
1987 1989 # When one tries to:
1988 1990 # 1) destroy nodes thus calling this method (e.g. strip)
1989 1991 # 2) use phasecache somewhere (e.g. commit)
1990 1992 #
1991 1993 # then 2) will fail because the phasecache contains nodes that were
1992 1994 # removed. We can either remove phasecache from the filecache,
1993 1995 # causing it to reload next time it is accessed, or simply filter
1994 1996 # the removed nodes now and write the updated cache.
1995 1997 self._phasecache.filterunknown(self)
1996 1998 self._phasecache.write()
1997 1999
1998 2000 # refresh all repository caches
1999 2001 self.updatecaches()
2000 2002
2001 2003 # Ensure the persistent tag cache is updated. Doing it now
2002 2004 # means that the tag cache only has to worry about destroyed
2003 2005 # heads immediately after a strip/rollback. That in turn
2004 2006 # guarantees that "cachetip == currenttip" (comparing both rev
2005 2007 # and node) always means no nodes have been added or destroyed.
2006 2008
2007 2009 # XXX this is suboptimal when qrefresh'ing: we strip the current
2008 2010 # head, refresh the tag cache, then immediately add a new head.
2009 2011 # But I think doing it this way is necessary for the "instant
2010 2012 # tag cache retrieval" case to work.
2011 2013 self.invalidate()
2012 2014
2013 2015 def walk(self, match, node=None):
2014 2016 '''
2015 2017 walk recursively through the directory tree or a given
2016 2018 changeset, finding all files matched by the match
2017 2019 function
2018 2020 '''
2019 2021 self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
2020 2022 return self[node].walk(match)
2021 2023
2022 2024 def status(self, node1='.', node2=None, match=None,
2023 2025 ignored=False, clean=False, unknown=False,
2024 2026 listsubrepos=False):
2025 2027 '''a convenience method that calls node1.status(node2)'''
2026 2028 return self[node1].status(node2, match, ignored, clean, unknown,
2027 2029 listsubrepos)
2028 2030
2029 2031 def addpostdsstatus(self, ps):
2030 2032 """Add a callback to run within the wlock, at the point at which status
2031 2033 fixups happen.
2032 2034
2033 2035 On status completion, callback(wctx, status) will be called with the
2034 2036 wlock held, unless the dirstate has changed from underneath or the wlock
2035 2037 couldn't be grabbed.
2036 2038
2037 2039 Callbacks should not capture and use a cached copy of the dirstate --
2038 2040 it might change in the meantime. Instead, they should access the
2039 2041 dirstate via wctx.repo().dirstate.
2040 2042
2041 2043 This list is emptied out after each status run -- extensions should
2042 2044 make sure they add to this list each time dirstate.status is called.
2043 2045 Extensions should also make sure they don't call this for statuses
2044 2046 that don't involve the dirstate.
2045 2047 """
2046 2048
2047 2049 # The list is located here for uniqueness reasons -- it is actually
2048 2050 # managed by the workingctx, but that isn't unique per-repo.
2049 2051 self._postdsstatus.append(ps)
2050 2052
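A sketch of a post-dirstate-status callback matching the contract above
(callback(wctx, status), run under wlock):

    def fixup(wctx, status):
        repo = wctx.repo()  # do not capture a cached dirstate
        repo.ui.note('post-status: %d modified\n' % len(status.modified))
    repo.addpostdsstatus(fixup)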
2051 2053 def postdsstatus(self):
2052 2054 """Used by workingctx to get the list of post-dirstate-status hooks."""
2053 2055 return self._postdsstatus
2054 2056
2055 2057 def clearpostdsstatus(self):
2056 2058 """Used by workingctx to clear post-dirstate-status hooks."""
2057 2059 del self._postdsstatus[:]
2058 2060
2059 2061 def heads(self, start=None):
2060 2062 if start is None:
2061 2063 cl = self.changelog
2062 2064 headrevs = reversed(cl.headrevs())
2063 2065 return [cl.node(rev) for rev in headrevs]
2064 2066
2065 2067 heads = self.changelog.heads(start)
2066 2068 # sort the output in rev descending order
2067 2069 return sorted(heads, key=self.changelog.rev, reverse=True)
2068 2070
2069 2071 def branchheads(self, branch=None, start=None, closed=False):
2070 2072 '''return a (possibly filtered) list of heads for the given branch
2071 2073
2072 2074 Heads are returned in topological order, from newest to oldest.
2073 2075 If branch is None, use the dirstate branch.
2074 2076 If start is not None, return only heads reachable from start.
2075 2077 If closed is True, return heads that are marked as closed as well.
2076 2078 '''
2077 2079 if branch is None:
2078 2080 branch = self[None].branch()
2079 2081 branches = self.branchmap()
2080 2082 if branch not in branches:
2081 2083 return []
2082 2084 # the cache returns heads ordered lowest to highest
2083 2085 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2084 2086 if start is not None:
2085 2087 # filter out the heads that cannot be reached from startrev
2086 2088 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2087 2089 bheads = [h for h in bheads if h in fbheads]
2088 2090 return bheads
2089 2091
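For example (a sketch; 'short' is mercurial.node.short):

    for node in repo.branchheads('default', closed=False):
        repo.ui.write('%s\n' % short(node))  # newest to oldest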
2090 2092 def branches(self, nodes):
2091 2093 if not nodes:
2092 2094 nodes = [self.changelog.tip()]
2093 2095 b = []
2094 2096 for n in nodes:
2095 2097 t = n
2096 2098 while True:
2097 2099 p = self.changelog.parents(n)
2098 2100 if p[1] != nullid or p[0] == nullid:
2099 2101 b.append((t, n, p[0], p[1]))
2100 2102 break
2101 2103 n = p[0]
2102 2104 return b
2103 2105
2104 2106 def between(self, pairs):
2105 2107 r = []
2106 2108
2107 2109 for top, bottom in pairs:
2108 2110 n, l, i = top, [], 0
2109 2111 f = 1
2110 2112
2111 2113 while n != bottom and n != nullid:
2112 2114 p = self.changelog.parents(n)[0]
2113 2115 if i == f:
2114 2116 l.append(n)
2115 2117 f = f * 2
2116 2118 n = p
2117 2119 i += 1
2118 2120
2119 2121 r.append(l)
2120 2122
2121 2123 return r
2122 2124
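between() has no docstring, so for clarity: for each (top, bottom) pair it
walks first parents from top toward bottom and records the nodes at
exponentially spaced distances 1, 2, 4, 8, ...; this is the sampling used by
the legacy wire-protocol discovery. The spacing logic alone, as a sketch:

    def sampled_distances(depth):
        d, out = 1, []
        while d < depth:
            out.append(d)
            d *= 2
        return out  # e.g. depth=10 -> [1, 2, 4, 8]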
2123 2125 def checkpush(self, pushop):
2124 2126 """Extensions can override this function if additional checks have
2125 2127 to be performed before pushing, or call it if they override push
2126 2128 command.
2127 2129 """
2128 2130 pass
2129 2131
2130 2132 @unfilteredpropertycache
2131 2133 def prepushoutgoinghooks(self):
2132 2134 """Return util.hooks consists of a pushop with repo, remote, outgoing
2133 2135 methods, which are called before pushing changesets.
2134 2136 """
2135 2137 return util.hooks()
2136 2138
2137 2139 def pushkey(self, namespace, key, old, new):
2138 2140 try:
2139 2141 tr = self.currenttransaction()
2140 2142 hookargs = {}
2141 2143 if tr is not None:
2142 2144 hookargs.update(tr.hookargs)
2143 2145 hookargs['namespace'] = namespace
2144 2146 hookargs['key'] = key
2145 2147 hookargs['old'] = old
2146 2148 hookargs['new'] = new
2147 2149 self.hook('prepushkey', throw=True, **hookargs)
2148 2150 except error.HookAbort as exc:
2149 2151 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2150 2152 if exc.hint:
2151 2153 self.ui.write_err(_("(%s)\n") % exc.hint)
2152 2154 return False
2153 2155 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2154 2156 ret = pushkey.push(self, namespace, key, old, new)
2155 2157 def runhook():
2156 2158 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2157 2159 ret=ret)
2158 2160 self._afterlock(runhook)
2159 2161 return ret
2160 2162
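A hedged sketch of a pushkey update through this method, using the
'bookmarks' namespace (old='' creates the key; values are hex nodes):

    ok = repo.pushkey('bookmarks', 'feature-x', '', hex(node))
    if not ok:
        repo.ui.warn('bookmark update rejected by a hook\n')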
2161 2163 def listkeys(self, namespace):
2162 2164 self.hook('prelistkeys', throw=True, namespace=namespace)
2163 2165 self.ui.debug('listing keys for "%s"\n' % namespace)
2164 2166 values = pushkey.list(self, namespace)
2165 2167 self.hook('listkeys', namespace=namespace, values=values)
2166 2168 return values
2167 2169
2168 2170 def debugwireargs(self, one, two, three=None, four=None, five=None):
2169 2171 '''used to test argument passing over the wire'''
2170 2172 return "%s %s %s %s %s" % (one, two, three, four, five)
2171 2173
2172 2174 def savecommitmessage(self, text):
2173 2175 fp = self.vfs('last-message.txt', 'wb')
2174 2176 try:
2175 2177 fp.write(text)
2176 2178 finally:
2177 2179 fp.close()
2178 2180 return self.pathto(fp.name[len(self.root) + 1:])
2179 2181
2180 2182 # used to avoid circular references so destructors work
2181 2183 def aftertrans(files):
2182 2184 renamefiles = [tuple(t) for t in files]
2183 2185 def a():
2184 2186 for vfs, src, dest in renamefiles:
2185 2187 # if src and dest refer to the same file, vfs.rename is a no-op,
2186 2188 # leaving both src and dest on disk. delete dest to make sure
2187 2189 # the rename cannot be such a no-op.
2188 2190 vfs.tryunlink(dest)
2189 2191 try:
2190 2192 vfs.rename(src, dest)
2191 2193 except OSError: # journal file does not yet exist
2192 2194 pass
2193 2195 return a
2194 2196
2195 2197 def undoname(fn):
2196 2198 base, name = os.path.split(fn)
2197 2199 assert name.startswith('journal')
2198 2200 return os.path.join(base, name.replace('journal', 'undo', 1))
2199 2201
2200 2202 def instance(ui, path, create):
2201 2203 return localrepository(ui, util.urllocalpath(path), create)
2202 2204
2203 2205 def islocal(path):
2204 2206 return True
2205 2207
2206 2208 def newreporequirements(repo):
2207 2209 """Determine the set of requirements for a new local repository.
2208 2210
2209 2211 Extensions can wrap this function to specify custom requirements for
2210 2212 new repositories.
2211 2213 """
2212 2214 ui = repo.ui
2213 2215 requirements = {'revlogv1'}
2214 2216 if ui.configbool('format', 'usestore'):
2215 2217 requirements.add('store')
2216 2218 if ui.configbool('format', 'usefncache'):
2217 2219 requirements.add('fncache')
2218 2220 if ui.configbool('format', 'dotencode'):
2219 2221 requirements.add('dotencode')
2220 2222
2221 2223 compengine = ui.config('experimental', 'format.compression')
2222 2224 if compengine not in util.compengines:
2223 2225 raise error.Abort(_('compression engine %s defined by '
2224 2226 'experimental.format.compression not available') %
2225 2227 compengine,
2226 2228 hint=_('run "hg debuginstall" to list available '
2227 2229 'compression engines'))
2228 2230
2229 2231 # zlib is the historical default and doesn't need an explicit requirement.
2230 2232 if compengine != 'zlib':
2231 2233 requirements.add('exp-compression-%s' % compengine)
2232 2234
2233 2235 if scmutil.gdinitconfig(ui):
2234 2236 requirements.add('generaldelta')
2235 2237 if ui.configbool('experimental', 'treemanifest'):
2236 2238 requirements.add('treemanifest')
2237 2239 if ui.configbool('experimental', 'manifestv2'):
2238 2240 requirements.add('manifestv2')
2239 2241
2240 2242 revlogv2 = ui.config('experimental', 'revlogv2')
2241 2243 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2242 2244 requirements.remove('revlogv1')
2243 2245 # generaldelta is implied by revlogv2.
2244 2246 requirements.discard('generaldelta')
2245 2247 requirements.add(REVLOGV2_REQUIREMENT)
2246 2248
2247 2249 return requirements
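Since extensions are expected to wrap this function, a minimal sketch of doing
so ('exp-myfeature' is a made-up requirement name):

    from mercurial import extensions, localrepo

    def _newreporequirements(orig, repo):
        reqs = orig(repo)
        reqs.add('exp-myfeature')  # hypothetical custom requirement
        return reqs

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'newreporequirements',
                                _newreporequirements)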
@@ -1,193 +1,194 b''
1 1 # statichttprepo.py - simple http repository class for mercurial
2 2 #
3 3 # This provides read-only repo access to repositories exported via static http
4 4 #
5 5 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 from __future__ import absolute_import
11 11
12 12 import errno
13 13 import os
14 14
15 15 from .i18n import _
16 16 from . import (
17 17 byterange,
18 18 changelog,
19 19 error,
20 20 localrepo,
21 21 manifest,
22 22 namespaces,
23 23 scmutil,
24 24 store,
25 25 url,
26 26 util,
27 27 vfs as vfsmod,
28 28 )
29 29
30 30 urlerr = util.urlerr
31 31 urlreq = util.urlreq
32 32
33 33 class httprangereader(object):
34 34 def __init__(self, url, opener):
35 35 # we assume opener has HTTPRangeHandler
36 36 self.url = url
37 37 self.pos = 0
38 38 self.opener = opener
39 39 self.name = url
40 40
41 41 def __enter__(self):
42 42 return self
43 43
44 44 def __exit__(self, exc_type, exc_value, traceback):
45 45 self.close()
46 46
47 47 def seek(self, pos):
48 48 self.pos = pos
49 49 def read(self, bytes=None):
50 50 req = urlreq.request(self.url)
51 51 end = ''
52 52 if bytes:
53 53 end = self.pos + bytes - 1
54 54 if self.pos or end:
55 55 req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))
56 56
57 57 try:
58 58 f = self.opener.open(req)
59 59 data = f.read()
60 60 code = f.code
61 61 except urlerr.httperror as inst:
62 62 num = inst.code == 404 and errno.ENOENT or None
63 63 raise IOError(num, inst)
64 64 except urlerr.urlerror as inst:
65 65 raise IOError(None, inst.reason[1])
66 66
67 67 if code == 200:
68 68 # HTTPRangeHandler does nothing if remote does not support
69 69 # Range headers and returns the full entity. Let's slice it.
70 70 if bytes:
71 71 data = data[self.pos:self.pos + bytes]
72 72 else:
73 73 data = data[self.pos:]
74 74 elif bytes:
75 75 data = data[:bytes]
76 76 self.pos += len(data)
77 77 return data
78 78 def readlines(self):
79 79 return self.read().splitlines(True)
80 80 def __iter__(self):
81 81 return iter(self.readlines())
82 82 def close(self):
83 83 pass
84 84
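To illustrate the range arithmetic above (a sketch; 'rdr' is an
httprangereader instance):

    rdr.seek(12)
    data = rdr.read(20)  # sends 'Range: bytes=12-31'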
85 85 def build_opener(ui, authinfo):
86 86 # urllib cannot handle URLs with embedded user or passwd
87 87 urlopener = url.opener(ui, authinfo)
88 88 urlopener.add_handler(byterange.HTTPRangeHandler())
89 89
90 90 class statichttpvfs(vfsmod.abstractvfs):
91 91 def __init__(self, base):
92 92 self.base = base
93 93
94 94 def __call__(self, path, mode='r', *args, **kw):
95 95 if mode not in ('r', 'rb'):
96 96 raise IOError('Permission denied')
97 97 f = "/".join((self.base, urlreq.quote(path)))
98 98 return httprangereader(f, urlopener)
99 99
100 100 def join(self, path):
101 101 if path:
102 102 return os.path.join(self.base, path)
103 103 else:
104 104 return self.base
105 105
106 106 return statichttpvfs
107 107
108 108 class statichttppeer(localrepo.localpeer):
109 109 def local(self):
110 110 return None
111 111 def canpush(self):
112 112 return False
113 113
114 114 class statichttprepository(localrepo.localrepository):
115 115 supported = localrepo.localrepository._basesupported
116 116
117 117 def __init__(self, ui, path):
118 118 self._url = path
119 119 self.ui = ui
120 120
121 121 self.root = path
122 122 u = util.url(path.rstrip('/') + "/.hg")
123 123 self.path, authinfo = u.authinfo()
124 124
125 125 vfsclass = build_opener(ui, authinfo)
126 126 self.vfs = vfsclass(self.path)
127 self.cachevfs = vfsclass(self.vfs.join('cache'))
127 128 self._phasedefaults = []
128 129
129 130 self.names = namespaces.namespaces()
130 131 self.filtername = None
131 132
132 133 try:
133 134 requirements = scmutil.readrequires(self.vfs, self.supported)
134 135 except IOError as inst:
135 136 if inst.errno != errno.ENOENT:
136 137 raise
137 138 requirements = set()
138 139
139 140 # check if it is a non-empty old-style repository
140 141 try:
141 142 fp = self.vfs("00changelog.i")
142 143 fp.read(1)
143 144 fp.close()
144 145 except IOError as inst:
145 146 if inst.errno != errno.ENOENT:
146 147 raise
147 148 # we do not care about empty old-style repositories here
148 149 msg = _("'%s' does not appear to be an hg repository") % path
149 150 raise error.RepoError(msg)
150 151
151 152 # setup store
152 153 self.store = store.store(requirements, self.path, vfsclass)
153 154 self.spath = self.store.path
154 155 self.svfs = self.store.opener
155 156 self.sjoin = self.store.join
156 157 self._filecache = {}
157 158 self.requirements = requirements
158 159
159 160 self.manifestlog = manifest.manifestlog(self.svfs, self)
160 161 self.changelog = changelog.changelog(self.svfs)
161 162 self._tags = None
162 163 self.nodetagscache = None
163 164 self._branchcaches = {}
164 165 self._revbranchcache = None
165 166 self.encodepats = None
166 167 self.decodepats = None
167 168 self._transref = None
168 169 # Cache of types representing filtered repos.
169 170 self._filteredrepotypes = {}
170 171
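The cachevfs assignment added in __init__ above is the point of this change
for statichttprepo: cache files are now read through a dedicated vfs rooted at
'.hg/cache', built from the same read-only vfs class. A sketch of what a
lookup resolves to:

    # repo.cachevfs('branch2-served') opens
    # <url>/.hg/cache/branch2-served through httprangereader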
171 172 def _restrictcapabilities(self, caps):
172 173 caps = super(statichttprepository, self)._restrictcapabilities(caps)
173 174 return caps.difference(["pushkey"])
174 175
175 176 def url(self):
176 177 return self._url
177 178
178 179 def local(self):
179 180 return False
180 181
181 182 def peer(self):
182 183 return statichttppeer(self)
183 184
184 185 def lock(self, wait=True):
185 186 raise error.Abort(_('cannot lock static-http repository'))
186 187
187 188 def _writecaches(self):
188 189 pass # statichttprepository are read only
189 190
190 191 def instance(ui, path, create):
191 192 if create:
192 193 raise error.Abort(_('cannot create new static-http repository'))
193 194 return statichttprepository(ui, path[7:])