localrepo: add isfilecached to check filecache-ed property is already cached...
FUJIWARA Katsunori
r33382:b107a766 default
@@ -1,2143 +1,2153 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 )
24 24 from . import (
25 25 bookmarks,
26 26 branchmap,
27 27 bundle2,
28 28 changegroup,
29 29 changelog,
30 30 color,
31 31 context,
32 32 dirstate,
33 33 dirstateguard,
34 34 encoding,
35 35 error,
36 36 exchange,
37 37 extensions,
38 38 filelog,
39 39 hook,
40 40 lock as lockmod,
41 41 manifest,
42 42 match as matchmod,
43 43 merge as mergemod,
44 44 mergeutil,
45 45 namespaces,
46 46 obsolete,
47 47 pathutil,
48 48 peer,
49 49 phases,
50 50 pushkey,
51 51 pycompat,
52 52 repoview,
53 53 revset,
54 54 revsetlang,
55 55 scmutil,
56 56 sparse,
57 57 store,
58 58 subrepo,
59 59 tags as tagsmod,
60 60 transaction,
61 61 txnutil,
62 62 util,
63 63 vfs as vfsmod,
64 64 )
65 65
66 66 release = lockmod.release
67 67 urlerr = util.urlerr
68 68 urlreq = util.urlreq
69 69
70 70 # set of (path, vfs-location) tuples. vfs-location is:
71 71 # - 'plain' for vfs relative paths
72 72 # - '' for svfs relative paths
73 73 _cachedfiles = set()
74 74
75 75 class _basefilecache(scmutil.filecache):
76 76 """All filecache usage on repo are done for logic that should be unfiltered
77 77 """
78 78 def __get__(self, repo, type=None):
79 79 if repo is None:
80 80 return self
81 81 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
82 82 def __set__(self, repo, value):
83 83 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
84 84 def __delete__(self, repo):
85 85 return super(_basefilecache, self).__delete__(repo.unfiltered())
86 86
87 87 class repofilecache(_basefilecache):
88 88 """filecache for files in .hg but outside of .hg/store"""
89 89 def __init__(self, *paths):
90 90 super(repofilecache, self).__init__(*paths)
91 91 for path in paths:
92 92 _cachedfiles.add((path, 'plain'))
93 93
94 94 def join(self, obj, fname):
95 95 return obj.vfs.join(fname)
96 96
97 97 class storecache(_basefilecache):
98 98 """filecache for files in the store"""
99 99 def __init__(self, *paths):
100 100 super(storecache, self).__init__(*paths)
101 101 for path in paths:
102 102 _cachedfiles.add((path, ''))
103 103
104 104 def join(self, obj, fname):
105 105 return obj.sjoin(fname)
106 106
107 def isfilecached(repo, name):
108 """check if a repo has already cached "name" filecache-ed property
109
110 This returns (cachedobj-or-None, iscached) tuple.
111 """
112 cacheentry = repo.unfiltered()._filecache.get(name, None)
113 if not cacheentry:
114 return None, False
115 return cacheentry.obj, True
116
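# Editorial sketch, not part of this change; the helper name is
# hypothetical. isfilecached() lets callers act on an already-loaded value
# without triggering the (potentially expensive) filecache computation as
# a side effect of the check.
def _examplepeekbookmarks(repo):
    bmstore, iscached = isfilecached(repo, '_bookmarks')
    if not iscached:
        return None # not loaded yet; do not force a load
    return sorted(bmstore)
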
107 117 class unfilteredpropertycache(util.propertycache):
108 118 """propertycache that apply to unfiltered repo only"""
109 119
110 120 def __get__(self, repo, type=None):
111 121 unfi = repo.unfiltered()
112 122 if unfi is repo:
113 123 return super(unfilteredpropertycache, self).__get__(unfi)
114 124 return getattr(unfi, self.name)
115 125
116 126 class filteredpropertycache(util.propertycache):
117 127 """propertycache that must take filtering in account"""
118 128
119 129 def cachevalue(self, obj, value):
120 130 object.__setattr__(obj, self.name, value)
121 131
122 132
123 133 def hasunfilteredcache(repo, name):
124 134 """check if a repo has an unfilteredpropertycache value for <name>"""
125 135 return name in vars(repo.unfiltered())
126 136
127 137 def unfilteredmethod(orig):
128 138 """decorate method that always need to be run on unfiltered version"""
129 139 def wrapper(repo, *args, **kwargs):
130 140 return orig(repo.unfiltered(), *args, **kwargs)
131 141 return wrapper
132 142
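# Editorial sketch, not part of this change (hypothetical helper):
# hasunfilteredcache() plays the same "is it already materialized?" role
# for propertycache-based attributes as isfilecached() does for
# filecache-ed ones.
def _examplepeektags(repo):
    if hasunfilteredcache(repo, '_tagscache'):
        return repo.unfiltered()._tagscache.tags
    return None
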
133 143 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
134 144 'unbundle'}
135 145 legacycaps = moderncaps.union({'changegroupsubset'})
136 146
137 147 class localpeer(peer.peerrepository):
138 148 '''peer for a local repo; reflects only the most recent API'''
139 149
140 150 def __init__(self, repo, caps=None):
141 151 if caps is None:
142 152 caps = moderncaps.copy()
143 153 peer.peerrepository.__init__(self)
144 154 self._repo = repo.filtered('served')
145 155 self.ui = repo.ui
146 156 self._caps = repo._restrictcapabilities(caps)
147 157 self.requirements = repo.requirements
148 158 self.supportedformats = repo.supportedformats
149 159
150 160 def close(self):
151 161 self._repo.close()
152 162
153 163 def _capabilities(self):
154 164 return self._caps
155 165
156 166 def local(self):
157 167 return self._repo
158 168
159 169 def canpush(self):
160 170 return True
161 171
162 172 def url(self):
163 173 return self._repo.url()
164 174
165 175 def lookup(self, key):
166 176 return self._repo.lookup(key)
167 177
168 178 def branchmap(self):
169 179 return self._repo.branchmap()
170 180
171 181 def heads(self):
172 182 return self._repo.heads()
173 183
174 184 def known(self, nodes):
175 185 return self._repo.known(nodes)
176 186
177 187 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
178 188 **kwargs):
179 189 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
180 190 common=common, bundlecaps=bundlecaps,
181 191 **kwargs)
182 192 cb = util.chunkbuffer(chunks)
183 193
184 194 if exchange.bundle2requested(bundlecaps):
185 195 # When requesting a bundle2, getbundle returns a stream to make the
186 196 # wire level function happier. We need to build a proper object
187 197 # from it in local peer.
188 198 return bundle2.getunbundler(self.ui, cb)
189 199 else:
190 200 return changegroup.getunbundler('01', cb, None)
191 201
192 202 # TODO We might want to move the next two calls into legacypeer and add
193 203 # unbundle instead.
194 204
195 205 def unbundle(self, cg, heads, url):
196 206 """apply a bundle on a repo
197 207
198 208 This function handles the repo locking itself."""
199 209 try:
200 210 try:
201 211 cg = exchange.readbundle(self.ui, cg, None)
202 212 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
203 213 if util.safehasattr(ret, 'getchunks'):
204 214 # This is a bundle20 object, turn it into an unbundler.
205 215 # This little dance should be dropped eventually when the
206 216 # API is finally improved.
207 217 stream = util.chunkbuffer(ret.getchunks())
208 218 ret = bundle2.getunbundler(self.ui, stream)
209 219 return ret
210 220 except Exception as exc:
211 221 # If the exception contains output salvaged from a bundle2
212 222 # reply, we need to make sure it is printed before continuing
213 223 # to fail. So we build a bundle2 with such output and consume
214 224 # it directly.
215 225 #
216 226 # This is not very elegant but allows a "simple" solution for
217 227 # issue4594
218 228 output = getattr(exc, '_bundle2salvagedoutput', ())
219 229 if output:
220 230 bundler = bundle2.bundle20(self._repo.ui)
221 231 for out in output:
222 232 bundler.addpart(out)
223 233 stream = util.chunkbuffer(bundler.getchunks())
224 234 b = bundle2.getunbundler(self.ui, stream)
225 235 bundle2.processbundle(self._repo, b)
226 236 raise
227 237 except error.PushRaced as exc:
228 238 raise error.ResponseError(_('push failed:'), str(exc))
229 239
230 240 def lock(self):
231 241 return self._repo.lock()
232 242
233 243 def pushkey(self, namespace, key, old, new):
234 244 return self._repo.pushkey(namespace, key, old, new)
235 245
236 246 def listkeys(self, namespace):
237 247 return self._repo.listkeys(namespace)
238 248
239 249 def debugwireargs(self, one, two, three=None, four=None, five=None):
240 250 '''used to test argument passing over the wire'''
241 251 return "%s %s %s %s %s" % (one, two, three, four, five)
242 252
243 253 class locallegacypeer(localpeer):
244 254 '''peer extension which implements legacy methods too; used for tests with
245 255 restricted capabilities'''
246 256
247 257 def __init__(self, repo):
248 258 localpeer.__init__(self, repo, caps=legacycaps)
249 259
250 260 def branches(self, nodes):
251 261 return self._repo.branches(nodes)
252 262
253 263 def between(self, pairs):
254 264 return self._repo.between(pairs)
255 265
256 266 def changegroup(self, basenodes, source):
257 267 return changegroup.changegroup(self._repo, basenodes, source)
258 268
259 269 def changegroupsubset(self, bases, heads, source):
260 270 return changegroup.changegroupsubset(self._repo, bases, heads, source)
261 271
262 272 # Increment the sub-version when the revlog v2 format changes to lock out old
263 273 # clients.
264 274 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
265 275
266 276 class localrepository(object):
267 277
268 278 supportedformats = {
269 279 'revlogv1',
270 280 'generaldelta',
271 281 'treemanifest',
272 282 'manifestv2',
273 283 REVLOGV2_REQUIREMENT,
274 284 }
275 285 _basesupported = supportedformats | {
276 286 'store',
277 287 'fncache',
278 288 'shared',
279 289 'relshared',
280 290 'dotencode',
281 291 }
282 292 openerreqs = {
283 293 'revlogv1',
284 294 'generaldelta',
285 295 'treemanifest',
286 296 'manifestv2',
287 297 }
288 298
289 299 # a list of (ui, featureset) functions.
290 300 # only functions defined in modules of enabled extensions are invoked
291 301 featuresetupfuncs = set()
292 302
293 303 def __init__(self, baseui, path, create=False):
294 304 self.requirements = set()
295 305 self.filtername = None
296 306 # wvfs: rooted at the repository root, used to access the working copy
297 307 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
298 308 # vfs: rooted at .hg, used to access repo files outside of .hg/store
299 309 self.vfs = None
300 310 # svfs: usually rooted at .hg/store, used to access repository history
301 311 # If this is a shared repository, this vfs may point to another
302 312 # repository's .hg/store directory.
303 313 self.svfs = None
304 314 self.root = self.wvfs.base
305 315 self.path = self.wvfs.join(".hg")
306 316 self.origroot = path
307 317 # These auditors are not used by the vfs,
308 318 # only used when writing this comment: basectx.match
309 319 self.auditor = pathutil.pathauditor(self.root, self._checknested)
310 320 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
311 321 realfs=False)
312 322 self.vfs = vfsmod.vfs(self.path)
313 323 self.baseui = baseui
314 324 self.ui = baseui.copy()
315 325 self.ui.copy = baseui.copy # prevent copying repo configuration
316 326 # A list of callbacks to shape the phase if no data were found.
317 327 # Callbacks are in the form: func(repo, roots) --> processed root.
318 328 # This list is to be filled by extensions during repo setup
319 329 self._phasedefaults = []
320 330 try:
321 331 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
322 332 self._loadextensions()
323 333 except IOError:
324 334 pass
325 335
326 336 if self.featuresetupfuncs:
327 337 self.supported = set(self._basesupported) # use private copy
328 338 extmods = set(m.__name__ for n, m
329 339 in extensions.extensions(self.ui))
330 340 for setupfunc in self.featuresetupfuncs:
331 341 if setupfunc.__module__ in extmods:
332 342 setupfunc(self.ui, self.supported)
333 343 else:
334 344 self.supported = self._basesupported
335 345 color.setup(self.ui)
336 346
337 347 # Add compression engines.
338 348 for name in util.compengines:
339 349 engine = util.compengines[name]
340 350 if engine.revlogheader():
341 351 self.supported.add('exp-compression-%s' % name)
342 352
343 353 if not self.vfs.isdir():
344 354 if create:
345 355 self.requirements = newreporequirements(self)
346 356
347 357 if not self.wvfs.exists():
348 358 self.wvfs.makedirs()
349 359 self.vfs.makedir(notindexed=True)
350 360
351 361 if 'store' in self.requirements:
352 362 self.vfs.mkdir("store")
353 363
354 364 # create an invalid changelog
355 365 self.vfs.append(
356 366 "00changelog.i",
357 367 '\0\0\0\2' # represents revlogv2
358 368 ' dummy changelog to prevent using the old repo layout'
359 369 )
360 370 else:
361 371 raise error.RepoError(_("repository %s not found") % path)
362 372 elif create:
363 373 raise error.RepoError(_("repository %s already exists") % path)
364 374 else:
365 375 try:
366 376 self.requirements = scmutil.readrequires(
367 377 self.vfs, self.supported)
368 378 except IOError as inst:
369 379 if inst.errno != errno.ENOENT:
370 380 raise
371 381
372 382 self.sharedpath = self.path
373 383 try:
374 384 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
375 385 if 'relshared' in self.requirements:
376 386 sharedpath = self.vfs.join(sharedpath)
377 387 vfs = vfsmod.vfs(sharedpath, realpath=True)
378 388 s = vfs.base
379 389 if not vfs.exists():
380 390 raise error.RepoError(
381 391 _('.hg/sharedpath points to nonexistent directory %s') % s)
382 392 self.sharedpath = s
383 393 except IOError as inst:
384 394 if inst.errno != errno.ENOENT:
385 395 raise
386 396
387 397 self.store = store.store(
388 398 self.requirements, self.sharedpath, vfsmod.vfs)
389 399 self.spath = self.store.path
390 400 self.svfs = self.store.vfs
391 401 self.sjoin = self.store.join
392 402 self.vfs.createmode = self.store.createmode
393 403 self._applyopenerreqs()
394 404 if create:
395 405 self._writerequirements()
396 406
397 407 self._dirstatevalidatewarned = False
398 408
399 409 self._branchcaches = {}
400 410 self._revbranchcache = None
401 411 self.filterpats = {}
402 412 self._datafilters = {}
403 413 self._transref = self._lockref = self._wlockref = None
404 414
405 415 # A cache for various files under .hg/ that tracks file changes
406 416 # (used by the filecache decorator)
407 417 #
408 418 # Maps a property name to its util.filecacheentry
409 419 self._filecache = {}
410 420
411 421 # hold sets of revisions to be filtered
412 422 # should be cleared when something might have changed the filter value:
413 423 # - new changesets,
414 424 # - phase change,
415 425 # - new obsolescence marker,
416 426 # - working directory parent change,
417 427 # - bookmark changes
418 428 self.filteredrevcache = {}
419 429
420 430 # post-dirstate-status hooks
421 431 self._postdsstatus = []
422 432
423 433 # generic mapping between names and nodes
424 434 self.names = namespaces.namespaces()
425 435
426 436 # Key to signature value.
427 437 self._sparsesignaturecache = {}
428 438 # Signature to cached matcher instance.
429 439 self._sparsematchercache = {}
430 440
431 441 def close(self):
432 442 self._writecaches()
433 443
434 444 def _loadextensions(self):
435 445 extensions.loadall(self.ui)
436 446
437 447 def _writecaches(self):
438 448 if self._revbranchcache:
439 449 self._revbranchcache.write()
440 450
441 451 def _restrictcapabilities(self, caps):
442 452 if self.ui.configbool('experimental', 'bundle2-advertise', True):
443 453 caps = set(caps)
444 454 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
445 455 caps.add('bundle2=' + urlreq.quote(capsblob))
446 456 return caps
447 457
448 458 def _applyopenerreqs(self):
449 459 self.svfs.options = dict((r, 1) for r in self.requirements
450 460 if r in self.openerreqs)
451 461 # experimental config: format.chunkcachesize
452 462 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
453 463 if chunkcachesize is not None:
454 464 self.svfs.options['chunkcachesize'] = chunkcachesize
455 465 # experimental config: format.maxchainlen
456 466 maxchainlen = self.ui.configint('format', 'maxchainlen')
457 467 if maxchainlen is not None:
458 468 self.svfs.options['maxchainlen'] = maxchainlen
459 469 # experimental config: format.manifestcachesize
460 470 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
461 471 if manifestcachesize is not None:
462 472 self.svfs.options['manifestcachesize'] = manifestcachesize
463 473 # experimental config: format.aggressivemergedeltas
464 474 aggressivemergedeltas = self.ui.configbool('format',
465 475 'aggressivemergedeltas')
466 476 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
467 477 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
468 478 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan', -1)
469 479 if 0 <= chainspan:
470 480 self.svfs.options['maxdeltachainspan'] = chainspan
471 481
472 482 for r in self.requirements:
473 483 if r.startswith('exp-compression-'):
474 484 self.svfs.options['compengine'] = r[len('exp-compression-'):]
475 485
476 486 # TODO move "revlogv2" to openerreqs once finalized.
477 487 if REVLOGV2_REQUIREMENT in self.requirements:
478 488 self.svfs.options['revlogv2'] = True
479 489
480 490 def _writerequirements(self):
481 491 scmutil.writerequires(self.vfs, self.requirements)
482 492
483 493 def _checknested(self, path):
484 494 """Determine if path is a legal nested repository."""
485 495 if not path.startswith(self.root):
486 496 return False
487 497 subpath = path[len(self.root) + 1:]
488 498 normsubpath = util.pconvert(subpath)
489 499
490 500 # XXX: Checking against the current working copy is wrong in
491 501 # the sense that it can reject things like
492 502 #
493 503 # $ hg cat -r 10 sub/x.txt
494 504 #
495 505 # if sub/ is no longer a subrepository in the working copy
496 506 # parent revision.
497 507 #
498 508 # However, it can of course also allow things that would have
499 509 # been rejected before, such as the above cat command if sub/
500 510 # is a subrepository now, but was a normal directory before.
501 511 # The old path auditor would have rejected by mistake since it
502 512 # panics when it sees sub/.hg/.
503 513 #
504 514 # All in all, checking against the working copy seems sensible
505 515 # since we want to prevent access to nested repositories on
506 516 # the filesystem *now*.
507 517 ctx = self[None]
508 518 parts = util.splitpath(subpath)
509 519 while parts:
510 520 prefix = '/'.join(parts)
511 521 if prefix in ctx.substate:
512 522 if prefix == normsubpath:
513 523 return True
514 524 else:
515 525 sub = ctx.sub(prefix)
516 526 return sub.checknested(subpath[len(prefix) + 1:])
517 527 else:
518 528 parts.pop()
519 529 return False
520 530
521 531 def peer(self):
522 532 return localpeer(self) # not cached to avoid reference cycle
523 533
524 534 def unfiltered(self):
525 535 """Return unfiltered version of the repository
526 536
527 537 Intended to be overwritten by filtered repo."""
528 538 return self
529 539
530 540 def filtered(self, name):
531 541 """Return a filtered version of a repository"""
532 542 # build a new class with the mixin and the current class
533 543 # (possibly subclass of the repo)
534 544 class filteredrepo(repoview.repoview, self.unfiltered().__class__):
535 545 pass
536 546 return filteredrepo(self, name)
537 547
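    def _examplefiltering(self):
        # Editorial sketch, not part of this change (hypothetical method):
        # repoview names such as 'visible' and 'served' select predefined
        # filters; the view proxies this repo with some revisions hidden,
        # so it never exposes more revisions than the unfiltered repo.
        served = self.filtered('served')
        return len(served) <= len(self.unfiltered())
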
538 548 @repofilecache('bookmarks', 'bookmarks.current')
539 549 def _bookmarks(self):
540 550 return bookmarks.bmstore(self)
541 551
542 552 @property
543 553 def _activebookmark(self):
544 554 return self._bookmarks.active
545 555
546 556 # _phaserevs and _phasesets depend on changelog. What we need is to
547 557 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
548 558 # can't be easily expressed in the filecache mechanism.
549 559 @storecache('phaseroots', '00changelog.i')
550 560 def _phasecache(self):
551 561 return phases.phasecache(self, self._phasedefaults)
552 562
553 563 @storecache('obsstore')
554 564 def obsstore(self):
555 565 return obsolete.makestore(self.ui, self)
556 566
557 567 @storecache('00changelog.i')
558 568 def changelog(self):
559 569 return changelog.changelog(self.svfs,
560 570 trypending=txnutil.mayhavepending(self.root))
561 571
562 572 def _constructmanifest(self):
563 573 # This is a temporary function while we migrate from manifest to
564 574 # manifestlog. It allows bundlerepo and unionrepo to intercept the
565 575 # manifest creation.
566 576 return manifest.manifestrevlog(self.svfs)
567 577
568 578 @storecache('00manifest.i')
569 579 def manifestlog(self):
570 580 return manifest.manifestlog(self.svfs, self)
571 581
572 582 @repofilecache('dirstate')
573 583 def dirstate(self):
574 584 sparsematchfn = lambda: sparse.matcher(self)
575 585
576 586 return dirstate.dirstate(self.vfs, self.ui, self.root,
577 587 self._dirstatevalidate, sparsematchfn)
578 588
579 589 def _dirstatevalidate(self, node):
580 590 try:
581 591 self.changelog.rev(node)
582 592 return node
583 593 except error.LookupError:
584 594 if not self._dirstatevalidatewarned:
585 595 self._dirstatevalidatewarned = True
586 596 self.ui.warn(_("warning: ignoring unknown"
587 597 " working parent %s!\n") % short(node))
588 598 return nullid
589 599
590 600 def __getitem__(self, changeid):
591 601 if changeid is None:
592 602 return context.workingctx(self)
593 603 if isinstance(changeid, slice):
594 604 # wdirrev isn't contiguous so the slice shouldn't include it
595 605 return [context.changectx(self, i)
596 606 for i in xrange(*changeid.indices(len(self)))
597 607 if i not in self.changelog.filteredrevs]
598 608 try:
599 609 return context.changectx(self, changeid)
600 610 except error.WdirUnsupported:
601 611 return context.workingctx(self)
602 612
603 613 def __contains__(self, changeid):
604 614 """True if the given changeid exists
605 615
606 616 error.LookupError is raised if an ambiguous node is specified.
607 617 """
608 618 try:
609 619 self[changeid]
610 620 return True
611 621 except error.RepoLookupError:
612 622 return False
613 623
614 624 def __nonzero__(self):
615 625 return True
616 626
617 627 __bool__ = __nonzero__
618 628
619 629 def __len__(self):
620 630 return len(self.changelog)
621 631
622 632 def __iter__(self):
623 633 return iter(self.changelog)
624 634
625 635 def revs(self, expr, *args):
626 636 '''Find revisions matching a revset.
627 637
628 638 The revset is specified as a string ``expr`` that may contain
629 639 %-formatting to escape certain types. See ``revsetlang.formatspec``.
630 640
631 641 Revset aliases from the configuration are not expanded. To expand
632 642 user aliases, consider calling ``scmutil.revrange()`` or
633 643 ``repo.anyrevs([expr], user=True)``.
634 644
635 645 Returns a revset.abstractsmartset, which is a list-like interface
636 646 that contains integer revisions.
637 647 '''
638 648 expr = revsetlang.formatspec(expr, *args)
639 649 m = revset.match(None, expr)
640 650 return m(self)
641 651
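    def _examplebranchheads(self, branch):
        # Editorial sketch, not part of this change (hypothetical method):
        # %s is escaped by revsetlang.formatspec, so an arbitrary branch
        # name cannot inject revset syntax into the expression.
        return [self[r].node()
                for r in self.revs('head() and branch(%s)', branch)]
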
642 652 def set(self, expr, *args):
643 653 '''Find revisions matching a revset and emit changectx instances.
644 654
645 655 This is a convenience wrapper around ``revs()`` that iterates the
646 656 result and is a generator of changectx instances.
647 657
648 658 Revset aliases from the configuration are not expanded. To expand
649 659 user aliases, consider calling ``scmutil.revrange()``.
650 660 '''
651 661 for r in self.revs(expr, *args):
652 662 yield self[r]
653 663
654 664 def anyrevs(self, specs, user=False, localalias=None):
655 665 '''Find revisions matching one of the given revsets.
656 666
657 667 Revset aliases from the configuration are not expanded by default. To
658 668 expand user aliases, specify ``user=True``. To provide some local
659 669 definitions overriding user aliases, set ``localalias`` to
660 670 ``{name: definitionstring}``.
661 671 '''
662 672 if user:
663 673 m = revset.matchany(self.ui, specs, repo=self,
664 674 localalias=localalias)
665 675 else:
666 676 m = revset.matchany(None, specs, localalias=localalias)
667 677 return m(self)
668 678
669 679 def url(self):
670 680 return 'file:' + self.root
671 681
672 682 def hook(self, name, throw=False, **args):
673 683 """Call a hook, passing this repo instance.
674 684
675 685 This is a convenience method to aid invoking hooks. Extensions likely
676 686 won't call this unless they have registered a custom hook or are
677 687 replacing code that is expected to call a hook.
678 688 """
679 689 return hook.hook(self.ui, self, name, throw, **args)
680 690
681 691 @filteredpropertycache
682 692 def _tagscache(self):
683 693 '''Returns a tagscache object that contains various tags related
684 694 caches.'''
685 695
686 696 # This simplifies its cache management by having one decorated
687 697 # function (this one) and the rest simply fetch things from it.
688 698 class tagscache(object):
689 699 def __init__(self):
690 700 # These two define the set of tags for this repository. tags
691 701 # maps tag name to node; tagtypes maps tag name to 'global' or
692 702 # 'local'. (Global tags are defined by .hgtags across all
693 703 # heads, and local tags are defined in .hg/localtags.)
694 704 # They constitute the in-memory cache of tags.
695 705 self.tags = self.tagtypes = None
696 706
697 707 self.nodetagscache = self.tagslist = None
698 708
699 709 cache = tagscache()
700 710 cache.tags, cache.tagtypes = self._findtags()
701 711
702 712 return cache
703 713
704 714 def tags(self):
705 715 '''return a mapping of tag to node'''
706 716 t = {}
707 717 if self.changelog.filteredrevs:
708 718 tags, tt = self._findtags()
709 719 else:
710 720 tags = self._tagscache.tags
711 721 for k, v in tags.iteritems():
712 722 try:
713 723 # ignore tags to unknown nodes
714 724 self.changelog.rev(v)
715 725 t[k] = v
716 726 except (error.LookupError, ValueError):
717 727 pass
718 728 return t
719 729
720 730 def _findtags(self):
721 731 '''Do the hard work of finding tags. Return a pair of dicts
722 732 (tags, tagtypes) where tags maps tag name to node, and tagtypes
723 733 maps tag name to a string like \'global\' or \'local\'.
724 734 Subclasses or extensions are free to add their own tags, but
725 735 should be aware that the returned dicts will be retained for the
726 736 duration of the localrepo object.'''
727 737
728 738 # XXX what tagtype should subclasses/extensions use? Currently
729 739 # mq and bookmarks add tags, but do not set the tagtype at all.
730 740 # Should each extension invent its own tag type? Should there
731 741 # be one tagtype for all such "virtual" tags? Or is the status
732 742 # quo fine?
733 743
734 744
735 745 # map tag name to (node, hist)
736 746 alltags = tagsmod.findglobaltags(self.ui, self)
737 747 # map tag name to tag type
738 748 tagtypes = dict((tag, 'global') for tag in alltags)
739 749
740 750 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
741 751
742 752 # Build the return dicts. Have to re-encode tag names because
743 753 # the tags module always uses UTF-8 (in order not to lose info
744 754 # writing to the cache), but the rest of Mercurial wants them in
745 755 # local encoding.
746 756 tags = {}
747 757 for (name, (node, hist)) in alltags.iteritems():
748 758 if node != nullid:
749 759 tags[encoding.tolocal(name)] = node
750 760 tags['tip'] = self.changelog.tip()
751 761 tagtypes = dict([(encoding.tolocal(name), value)
752 762 for (name, value) in tagtypes.iteritems()])
753 763 return (tags, tagtypes)
754 764
755 765 def tagtype(self, tagname):
756 766 '''
757 767 return the type of the given tag. result can be:
758 768
759 769 'local' : a local tag
760 770 'global' : a global tag
761 771 None : tag does not exist
762 772 '''
763 773
764 774 return self._tagscache.tagtypes.get(tagname)
765 775
766 776 def tagslist(self):
767 777 '''return a list of tags ordered by revision'''
768 778 if not self._tagscache.tagslist:
769 779 l = []
770 780 for t, n in self.tags().iteritems():
771 781 l.append((self.changelog.rev(n), t, n))
772 782 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
773 783
774 784 return self._tagscache.tagslist
775 785
776 786 def nodetags(self, node):
777 787 '''return the tags associated with a node'''
778 788 if not self._tagscache.nodetagscache:
779 789 nodetagscache = {}
780 790 for t, n in self._tagscache.tags.iteritems():
781 791 nodetagscache.setdefault(n, []).append(t)
782 792 for tags in nodetagscache.itervalues():
783 793 tags.sort()
784 794 self._tagscache.nodetagscache = nodetagscache
785 795 return self._tagscache.nodetagscache.get(node, [])
786 796
787 797 def nodebookmarks(self, node):
788 798 """return the list of bookmarks pointing to the specified node"""
789 799 marks = []
790 800 for bookmark, n in self._bookmarks.iteritems():
791 801 if n == node:
792 802 marks.append(bookmark)
793 803 return sorted(marks)
794 804
795 805 def branchmap(self):
796 806 '''returns a dictionary {branch: [branchheads]} with branchheads
797 807 ordered by increasing revision number'''
798 808 branchmap.updatecache(self)
799 809 return self._branchcaches[self.filtername]
800 810
801 811 @unfilteredmethod
802 812 def revbranchcache(self):
803 813 if not self._revbranchcache:
804 814 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
805 815 return self._revbranchcache
806 816
807 817 def branchtip(self, branch, ignoremissing=False):
808 818 '''return the tip node for a given branch
809 819
810 820 If ignoremissing is True, then this method will not raise an error.
811 821 This is helpful for callers that only expect None for a missing branch
812 822 (e.g. namespace).
813 823
814 824 '''
815 825 try:
816 826 return self.branchmap().branchtip(branch)
817 827 except KeyError:
818 828 if not ignoremissing:
819 829 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
820 830 else:
821 831 pass
822 832
823 833 def lookup(self, key):
824 834 return self[key].node()
825 835
826 836 def lookupbranch(self, key, remote=None):
827 837 repo = remote or self
828 838 if key in repo.branchmap():
829 839 return key
830 840
831 841 repo = (remote and remote.local()) and remote or self
832 842 return repo[key].branch()
833 843
834 844 def known(self, nodes):
835 845 cl = self.changelog
836 846 nm = cl.nodemap
837 847 filtered = cl.filteredrevs
838 848 result = []
839 849 for n in nodes:
840 850 r = nm.get(n)
841 851 resp = not (r is None or r in filtered)
842 852 result.append(resp)
843 853 return result
844 854
845 855 def local(self):
846 856 return self
847 857
848 858 def publishing(self):
849 859 # it's safe (and desirable) to trust the publish flag unconditionally
850 860 # so that we don't finalize changes shared between users via ssh or nfs
851 861 return self.ui.configbool('phases', 'publish', True, untrusted=True)
852 862
853 863 def cancopy(self):
854 864 # so statichttprepo's override of local() works
855 865 if not self.local():
856 866 return False
857 867 if not self.publishing():
858 868 return True
859 869 # if publishing we can't copy if there is filtered content
860 870 return not self.filtered('visible').changelog.filteredrevs
861 871
862 872 def shared(self):
863 873 '''the type of shared repository (None if not shared)'''
864 874 if self.sharedpath != self.path:
865 875 return 'store'
866 876 return None
867 877
868 878 def wjoin(self, f, *insidef):
869 879 return self.vfs.reljoin(self.root, f, *insidef)
870 880
871 881 def file(self, f):
872 882 if f[0] == '/':
873 883 f = f[1:]
874 884 return filelog.filelog(self.svfs, f)
875 885
876 886 def changectx(self, changeid):
877 887 return self[changeid]
878 888
879 889 def setparents(self, p1, p2=nullid):
880 890 with self.dirstate.parentchange():
881 891 copies = self.dirstate.setparents(p1, p2)
882 892 pctx = self[p1]
883 893 if copies:
884 894 # Adjust copy records; the dirstate cannot do it, as it
885 895 # requires access to the parents' manifests. Preserve them
886 896 # only for entries added to the first parent.
887 897 for f in copies:
888 898 if f not in pctx and copies[f] in pctx:
889 899 self.dirstate.copy(copies[f], f)
890 900 if p2 == nullid:
891 901 for f, s in sorted(self.dirstate.copies().items()):
892 902 if f not in pctx and s not in pctx:
893 903 self.dirstate.copy(None, f)
894 904
895 905 def filectx(self, path, changeid=None, fileid=None):
896 906 """changeid can be a changeset revision, node, or tag.
897 907 fileid can be a file revision or node."""
898 908 return context.filectx(self, path, changeid, fileid)
899 909
900 910 def getcwd(self):
901 911 return self.dirstate.getcwd()
902 912
903 913 def pathto(self, f, cwd=None):
904 914 return self.dirstate.pathto(f, cwd)
905 915
906 916 def _loadfilter(self, filter):
907 917 if filter not in self.filterpats:
908 918 l = []
909 919 for pat, cmd in self.ui.configitems(filter):
910 920 if cmd == '!':
911 921 continue
912 922 mf = matchmod.match(self.root, '', [pat])
913 923 fn = None
914 924 params = cmd
915 925 for name, filterfn in self._datafilters.iteritems():
916 926 if cmd.startswith(name):
917 927 fn = filterfn
918 928 params = cmd[len(name):].lstrip()
919 929 break
920 930 if not fn:
921 931 fn = lambda s, c, **kwargs: util.filter(s, c)
922 932 # Wrap old filters not supporting keyword arguments
923 933 if not inspect.getargspec(fn)[2]:
924 934 oldfn = fn
925 935 fn = lambda s, c, **kwargs: oldfn(s, c)
926 936 l.append((mf, fn, params))
927 937 self.filterpats[filter] = l
928 938 return self.filterpats[filter]
929 939
930 940 def _filter(self, filterpats, filename, data):
931 941 for mf, fn, cmd in filterpats:
932 942 if mf(filename):
933 943 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
934 944 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
935 945 break
936 946
937 947 return data
938 948
939 949 @unfilteredpropertycache
940 950 def _encodefilterpats(self):
941 951 return self._loadfilter('encode')
942 952
943 953 @unfilteredpropertycache
944 954 def _decodefilterpats(self):
945 955 return self._loadfilter('decode')
946 956
947 957 def adddatafilter(self, name, filter):
948 958 self._datafilters[name] = filter
949 959
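    # Editorial sketch, not part of this change (hypothetical filter): a
    # registered data filter is selected when the command of an [encode]
    # or [decode] rule starts with its name, and is called with the data,
    # the remainder of the command string, and keyword arguments:
    #
    #     def upper(data, params, **kwargs):
    #         return data.upper()
    #     repo.adddatafilter('upper:', upper)
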
950 960 def wread(self, filename):
951 961 if self.wvfs.islink(filename):
952 962 data = self.wvfs.readlink(filename)
953 963 else:
954 964 data = self.wvfs.read(filename)
955 965 return self._filter(self._encodefilterpats, filename, data)
956 966
957 967 def wwrite(self, filename, data, flags, backgroundclose=False):
958 968 """write ``data`` into ``filename`` in the working directory
959 969
960 970 This returns length of written (maybe decoded) data.
961 971 """
962 972 data = self._filter(self._decodefilterpats, filename, data)
963 973 if 'l' in flags:
964 974 self.wvfs.symlink(data, filename)
965 975 else:
966 976 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
967 977 if 'x' in flags:
968 978 self.wvfs.setflags(filename, False, True)
969 979 return len(data)
970 980
971 981 def wwritedata(self, filename, data):
972 982 return self._filter(self._decodefilterpats, filename, data)
973 983
974 984 def currenttransaction(self):
975 985 """return the current transaction or None if non exists"""
976 986 if self._transref:
977 987 tr = self._transref()
978 988 else:
979 989 tr = None
980 990
981 991 if tr and tr.running():
982 992 return tr
983 993 return None
984 994
985 995 def transaction(self, desc, report=None):
986 996 if (self.ui.configbool('devel', 'all-warnings')
987 997 or self.ui.configbool('devel', 'check-locks')):
988 998 if self._currentlock(self._lockref) is None:
989 999 raise error.ProgrammingError('transaction requires locking')
990 1000 tr = self.currenttransaction()
991 1001 if tr is not None:
992 1002 return tr.nest()
993 1003
994 1004 # abort here if the journal already exists
995 1005 if self.svfs.exists("journal"):
996 1006 raise error.RepoError(
997 1007 _("abandoned transaction found"),
998 1008 hint=_("run 'hg recover' to clean up transaction"))
999 1009
1000 1010 idbase = "%.40f#%f" % (random.random(), time.time())
1001 1011 ha = hex(hashlib.sha1(idbase).digest())
1002 1012 txnid = 'TXN:' + ha
1003 1013 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1004 1014
1005 1015 self._writejournal(desc)
1006 1016 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1007 1017 if report:
1008 1018 rp = report
1009 1019 else:
1010 1020 rp = self.ui.warn
1011 1021 vfsmap = {'plain': self.vfs} # root of .hg/
1012 1022 # we must avoid cyclic reference between repo and transaction.
1013 1023 reporef = weakref.ref(self)
1014 1024 # Code to track tag movement
1015 1025 #
1016 1026 # Since tags are all handled as file content, it is actually quite hard
1017 1027 # to track their movement from a code perspective. So we fall back to
1018 1028 # tracking at the repository level. One could envision tracking changes
1019 1029 # to the '.hgtags' file through changegroup apply, but that fails to
1020 1030 # cope with cases where a transaction exposes new heads without a
1021 1031 # changegroup being involved (eg: phase movement).
1022 1032 #
1023 1033 # For now, we gate the feature behind a flag since it likely comes
1024 1034 # with performance impacts. The current code runs more often than
1025 1035 # needed and does not use caches as much as it could. The current focus
1026 1036 # is on the behavior of the feature, so we disable it by default. The
1027 1037 # flag will be removed when we are happy with the performance impact.
1028 1038 #
1029 1039 # Once this feature is no longer experimental move the following
1030 1040 # documentation to the appropriate help section:
1031 1041 #
1032 1042 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1033 1043 # tags (new or changed or deleted tags). In addition the details of
1034 1044 # these changes are made available in a file at:
1035 1045 # ``REPOROOT/.hg/changes/tags.changes``.
1036 1046 # Make sure you check for HG_TAG_MOVED before reading that file as it
1037 1047 # might exist from a previous transaction even if no tags were touched
1038 1048 # in this one. Changes are recorded in a line-based format::
1039 1049 #
1040 1050 # <action> <hex-node> <tag-name>\n
1041 1051 #
1042 1052 # Actions are defined as follows:
1043 1053 # "-R": tag is removed,
1044 1054 # "+A": tag is added,
1045 1055 # "-M": tag is moved (old value),
1046 1056 # "+M": tag is moved (new value),
1047 1057 tracktags = lambda x: None
1048 1058 # experimental config: experimental.hook-track-tags
1049 1059 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags',
1050 1060 False)
1051 1061 if desc != 'strip' and shouldtracktags:
1052 1062 oldheads = self.changelog.headrevs()
1053 1063 def tracktags(tr2):
1054 1064 repo = reporef()
1055 1065 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1056 1066 newheads = repo.changelog.headrevs()
1057 1067 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1058 1068 # note: we compare lists here;
1059 1069 # as we do it only once, building a set would not be cheaper
1060 1070 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1061 1071 if changes:
1062 1072 tr2.hookargs['tag_moved'] = '1'
1063 1073 with repo.vfs('changes/tags.changes', 'w',
1064 1074 atomictemp=True) as changesfile:
1065 1075 # note: we do not register the file with the transaction
1066 1076 # because we need it to still exist when the transaction
1067 1077 # is closed (for txnclose hooks)
1068 1078 tagsmod.writediff(changesfile, changes)
1069 1079 def validate(tr2):
1070 1080 """will run pre-closing hooks"""
1071 1081 # XXX the transaction API is a bit lacking here so we take a hacky
1072 1082 # path for now
1073 1083 #
1074 1084 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1075 1085 # dict is copied before these run. In addition we need the data
1076 1086 # available to in-memory hooks too.
1077 1087 #
1078 1088 # Moreover, we also need to make sure this runs before txnclose
1079 1089 # hooks and there is no "pending" mechanism that would execute
1080 1090 # logic only if hooks are about to run.
1081 1091 #
1082 1092 # Fixing this limitation of the transaction is also needed to track
1083 1093 # other families of changes (bookmarks, phases, obsolescence).
1084 1094 #
1085 1095 # This will have to be fixed before we remove the experimental
1086 1096 # gating.
1087 1097 tracktags(tr2)
1088 1098 reporef().hook('pretxnclose', throw=True,
1089 1099 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1090 1100 def releasefn(tr, success):
1091 1101 repo = reporef()
1092 1102 if success:
1093 1103 # this should be explicitly invoked here, because
1094 1104 # in-memory changes aren't written out when closing
1095 1105 # the transaction, if tr.addfilegenerator (via
1096 1106 # dirstate.write or so) isn't invoked while the
1097 1107 # transaction is running
1098 1108 repo.dirstate.write(None)
1099 1109 else:
1100 1110 # discard all changes (including ones already written
1101 1111 # out) in this transaction
1102 1112 repo.dirstate.restorebackup(None, prefix='journal.')
1103 1113
1104 1114 repo.invalidate(clearfilecache=True)
1105 1115
1106 1116 tr = transaction.transaction(rp, self.svfs, vfsmap,
1107 1117 "journal",
1108 1118 "undo",
1109 1119 aftertrans(renames),
1110 1120 self.store.createmode,
1111 1121 validator=validate,
1112 1122 releasefn=releasefn,
1113 1123 checkambigfiles=_cachedfiles)
1114 1124 tr.changes['revs'] = set()
1115 1125 tr.changes['obsmarkers'] = set()
1116 1126
1117 1127 tr.hookargs['txnid'] = txnid
1118 1128 # note: writing the fncache only during finalize means that the file is
1119 1129 # outdated when running hooks. As fncache is used for streaming clone,
1120 1130 # this is not expected to break anything that happens during the hooks.
1121 1131 tr.addfinalize('flush-fncache', self.store.write)
1122 1132 def txnclosehook(tr2):
1123 1133 """To be run if transaction is successful, will schedule a hook run
1124 1134 """
1125 1135 # Don't reference tr2 in hook() so we don't hold a reference.
1126 1136 # This reduces memory consumption when there are multiple
1127 1137 # transactions per lock. This can likely go away if issue5045
1128 1138 # fixes the function accumulation.
1129 1139 hookargs = tr2.hookargs
1130 1140
1131 1141 def hook():
1132 1142 reporef().hook('txnclose', throw=False, txnname=desc,
1133 1143 **pycompat.strkwargs(hookargs))
1134 1144 reporef()._afterlock(hook)
1135 1145 tr.addfinalize('txnclose-hook', txnclosehook)
1136 1146 tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
1137 1147 def txnaborthook(tr2):
1138 1148 """To be run if transaction is aborted
1139 1149 """
1140 1150 reporef().hook('txnabort', throw=False, txnname=desc,
1141 1151 **tr2.hookargs)
1142 1152 tr.addabort('txnabort-hook', txnaborthook)
1143 1153 # avoid eager cache invalidation. in-memory data should be identical
1144 1154 # to stored data if transaction has no error.
1145 1155 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1146 1156 self._transref = weakref.ref(tr)
1147 1157 return tr
1148 1158
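    # Editorial sketch, not part of this change: an in-process 'txnclose'
    # hook consuming the tags.changes file documented above (assuming
    # hookargs such as 'tag_moved' are passed through as keyword
    # arguments):
    #
    #     def txnclosehook(ui, repo, **kwargs):
    #         if kwargs.get('tag_moved') != '1':
    #             return
    #         with repo.vfs('changes/tags.changes') as fp:
    #             for line in fp:
    #                 action, hexnode, name = line.rstrip('\n').split(' ', 2)
    #                 ui.note('tag %s: %s %s\n' % (action, name, hexnode))
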
1149 1159 def _journalfiles(self):
1150 1160 return ((self.svfs, 'journal'),
1151 1161 (self.vfs, 'journal.dirstate'),
1152 1162 (self.vfs, 'journal.branch'),
1153 1163 (self.vfs, 'journal.desc'),
1154 1164 (self.vfs, 'journal.bookmarks'),
1155 1165 (self.svfs, 'journal.phaseroots'))
1156 1166
1157 1167 def undofiles(self):
1158 1168 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1159 1169
1160 1170 @unfilteredmethod
1161 1171 def _writejournal(self, desc):
1162 1172 self.dirstate.savebackup(None, prefix='journal.')
1163 1173 self.vfs.write("journal.branch",
1164 1174 encoding.fromlocal(self.dirstate.branch()))
1165 1175 self.vfs.write("journal.desc",
1166 1176 "%d\n%s\n" % (len(self), desc))
1167 1177 self.vfs.write("journal.bookmarks",
1168 1178 self.vfs.tryread("bookmarks"))
1169 1179 self.svfs.write("journal.phaseroots",
1170 1180 self.svfs.tryread("phaseroots"))
1171 1181
1172 1182 def recover(self):
1173 1183 with self.lock():
1174 1184 if self.svfs.exists("journal"):
1175 1185 self.ui.status(_("rolling back interrupted transaction\n"))
1176 1186 vfsmap = {'': self.svfs,
1177 1187 'plain': self.vfs,}
1178 1188 transaction.rollback(self.svfs, vfsmap, "journal",
1179 1189 self.ui.warn,
1180 1190 checkambigfiles=_cachedfiles)
1181 1191 self.invalidate()
1182 1192 return True
1183 1193 else:
1184 1194 self.ui.warn(_("no interrupted transaction available\n"))
1185 1195 return False
1186 1196
1187 1197 def rollback(self, dryrun=False, force=False):
1188 1198 wlock = lock = dsguard = None
1189 1199 try:
1190 1200 wlock = self.wlock()
1191 1201 lock = self.lock()
1192 1202 if self.svfs.exists("undo"):
1193 1203 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1194 1204
1195 1205 return self._rollback(dryrun, force, dsguard)
1196 1206 else:
1197 1207 self.ui.warn(_("no rollback information available\n"))
1198 1208 return 1
1199 1209 finally:
1200 1210 release(dsguard, lock, wlock)
1201 1211
1202 1212 @unfilteredmethod # Until we get smarter cache management
1203 1213 def _rollback(self, dryrun, force, dsguard):
1204 1214 ui = self.ui
1205 1215 try:
1206 1216 args = self.vfs.read('undo.desc').splitlines()
1207 1217 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1208 1218 if len(args) >= 3:
1209 1219 detail = args[2]
1210 1220 oldtip = oldlen - 1
1211 1221
1212 1222 if detail and ui.verbose:
1213 1223 msg = (_('repository tip rolled back to revision %d'
1214 1224 ' (undo %s: %s)\n')
1215 1225 % (oldtip, desc, detail))
1216 1226 else:
1217 1227 msg = (_('repository tip rolled back to revision %d'
1218 1228 ' (undo %s)\n')
1219 1229 % (oldtip, desc))
1220 1230 except IOError:
1221 1231 msg = _('rolling back unknown transaction\n')
1222 1232 desc = None
1223 1233
1224 1234 if not force and self['.'] != self['tip'] and desc == 'commit':
1225 1235 raise error.Abort(
1226 1236 _('rollback of last commit while not checked out '
1227 1237 'may lose data'), hint=_('use -f to force'))
1228 1238
1229 1239 ui.status(msg)
1230 1240 if dryrun:
1231 1241 return 0
1232 1242
1233 1243 parents = self.dirstate.parents()
1234 1244 self.destroying()
1235 1245 vfsmap = {'plain': self.vfs, '': self.svfs}
1236 1246 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1237 1247 checkambigfiles=_cachedfiles)
1238 1248 if self.vfs.exists('undo.bookmarks'):
1239 1249 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1240 1250 if self.svfs.exists('undo.phaseroots'):
1241 1251 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1242 1252 self.invalidate()
1243 1253
1244 1254 parentgone = (parents[0] not in self.changelog.nodemap or
1245 1255 parents[1] not in self.changelog.nodemap)
1246 1256 if parentgone:
1247 1257 # prevent dirstateguard from overwriting already restored one
1248 1258 dsguard.close()
1249 1259
1250 1260 self.dirstate.restorebackup(None, prefix='undo.')
1251 1261 try:
1252 1262 branch = self.vfs.read('undo.branch')
1253 1263 self.dirstate.setbranch(encoding.tolocal(branch))
1254 1264 except IOError:
1255 1265 ui.warn(_('named branch could not be reset: '
1256 1266 'current branch is still \'%s\'\n')
1257 1267 % self.dirstate.branch())
1258 1268
1259 1269 parents = tuple([p.rev() for p in self[None].parents()])
1260 1270 if len(parents) > 1:
1261 1271 ui.status(_('working directory now based on '
1262 1272 'revisions %d and %d\n') % parents)
1263 1273 else:
1264 1274 ui.status(_('working directory now based on '
1265 1275 'revision %d\n') % parents)
1266 1276 mergemod.mergestate.clean(self, self['.'].node())
1267 1277
1268 1278 # TODO: if we know which new heads may result from this rollback, pass
1269 1279 # them to destroy(), which will prevent the branchhead cache from being
1270 1280 # invalidated.
1271 1281 self.destroyed()
1272 1282 return 0
1273 1283
1274 1284 def _buildcacheupdater(self, newtransaction):
1275 1285 """called during transaction to build the callback updating cache
1276 1286
1277 1287 Lives on the repository to help extensions that might want to augment
1278 1288 this logic. For this purpose, the created transaction is passed to the
1279 1289 method.
1280 1290 """
1281 1291 # we must avoid cyclic reference between repo and transaction.
1282 1292 reporef = weakref.ref(self)
1283 1293 def updater(tr):
1284 1294 repo = reporef()
1285 1295 repo.updatecaches(tr)
1286 1296 return updater
1287 1297
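    # Editorial sketch, not part of this change: an extension augmenting
    # this logic would typically wrap the method (names hypothetical):
    #
    #     def _wrapupdater(orig, repo, newtransaction):
    #         updater = orig(repo, newtransaction)
    #         def extendedupdater(tr):
    #             updater(tr)
    #             # warm extension-specific caches here
    #         return extendedupdater
    #     extensions.wrapfunction(localrepo.localrepository,
    #                             '_buildcacheupdater', _wrapupdater)
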
1288 1298 @unfilteredmethod
1289 1299 def updatecaches(self, tr=None):
1290 1300 """warm appropriate caches
1291 1301
1292 1302 If this function is called after a transaction closed, the transaction
1293 1303 will be available in the 'tr' argument. This can be used to selectively
1294 1304 update caches relevant to the changes in that transaction.
1295 1305 """
1296 1306 if tr is not None and tr.hookargs.get('source') == 'strip':
1297 1307 # During strip, many caches are invalid but
1298 1308 # later call to `destroyed` will refresh them.
1299 1309 return
1300 1310
1301 1311 if tr is None or tr.changes['revs']:
1302 1312 # updating the unfiltered branchmap should refresh all the others,
1303 1313 self.ui.debug('updating the branch cache\n')
1304 1314 branchmap.updatecache(self.filtered('served'))
1305 1315
1306 1316 def invalidatecaches(self):
1307 1317
1308 1318 if '_tagscache' in vars(self):
1309 1319 # can't use delattr on proxy
1310 1320 del self.__dict__['_tagscache']
1311 1321
1312 1322 self.unfiltered()._branchcaches.clear()
1313 1323 self.invalidatevolatilesets()
1314 1324 self._sparsesignaturecache.clear()
1315 1325
1316 1326 def invalidatevolatilesets(self):
1317 1327 self.filteredrevcache.clear()
1318 1328 obsolete.clearobscaches(self)
1319 1329
1320 1330 def invalidatedirstate(self):
1321 1331 '''Invalidates the dirstate, causing the next call to dirstate
1322 1332 to check if it was modified since the last time it was read,
1323 1333 rereading it if it has been.
1324 1334
1325 1335 This is different from dirstate.invalidate() in that it doesn't always
1326 1336 reread the dirstate. Use dirstate.invalidate() if you want to
1327 1337 explicitly read the dirstate again (i.e. restoring it to a previous
1328 1338 known good state).'''
1329 1339 if hasunfilteredcache(self, 'dirstate'):
1330 1340 for k in self.dirstate._filecache:
1331 1341 try:
1332 1342 delattr(self.dirstate, k)
1333 1343 except AttributeError:
1334 1344 pass
1335 1345 delattr(self.unfiltered(), 'dirstate')
1336 1346
1337 1347 def invalidate(self, clearfilecache=False):
1338 1348 '''Invalidates both store and non-store parts other than dirstate
1339 1349
1340 1350 If a transaction is running, invalidation of store is omitted,
1341 1351 because discarding in-memory changes might cause inconsistency
1342 1352 (e.g. incomplete fncache causes unintentional failure, but
1343 1353 redundant one doesn't).
1344 1354 '''
1345 1355 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1346 1356 for k in list(self._filecache.keys()):
1347 1357 # dirstate is invalidated separately in invalidatedirstate()
1348 1358 if k == 'dirstate':
1349 1359 continue
1350 1360
1351 1361 if clearfilecache:
1352 1362 del self._filecache[k]
1353 1363 try:
1354 1364 delattr(unfiltered, k)
1355 1365 except AttributeError:
1356 1366 pass
1357 1367 self.invalidatecaches()
1358 1368 if not self.currenttransaction():
1359 1369 # TODO: Changing contents of store outside transaction
1360 1370 # causes inconsistency. We should make in-memory store
1361 1371 # changes detectable, and abort if changed.
1362 1372 self.store.invalidatecaches()
1363 1373
1364 1374 def invalidateall(self):
1365 1375 '''Fully invalidates both store and non-store parts, causing the
1366 1376 subsequent operation to reread any outside changes.'''
1367 1377 # extension should hook this to invalidate its caches
1368 1378 self.invalidate()
1369 1379 self.invalidatedirstate()
1370 1380
1371 1381 @unfilteredmethod
1372 1382 def _refreshfilecachestats(self, tr):
1373 1383 """Reload stats of cached files so that they are flagged as valid"""
1374 1384 for k, ce in self._filecache.items():
1375 1385 if k == 'dirstate' or k not in self.__dict__:
1376 1386 continue
1377 1387 ce.refresh()
1378 1388
1379 1389 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1380 1390 inheritchecker=None, parentenvvar=None):
1381 1391 parentlock = None
1382 1392 # the contents of parentenvvar are used by the underlying lock to
1383 1393 # determine whether it can be inherited
1384 1394 if parentenvvar is not None:
1385 1395 parentlock = encoding.environ.get(parentenvvar)
1386 1396 try:
1387 1397 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1388 1398 acquirefn=acquirefn, desc=desc,
1389 1399 inheritchecker=inheritchecker,
1390 1400 parentlock=parentlock)
1391 1401 except error.LockHeld as inst:
1392 1402 if not wait:
1393 1403 raise
1394 1404 # show more details for new-style locks
1395 1405 if ':' in inst.locker:
1396 1406 host, pid = inst.locker.split(":", 1)
1397 1407 self.ui.warn(
1398 1408 _("waiting for lock on %s held by process %r "
1399 1409 "on host %r\n") % (desc, pid, host))
1400 1410 else:
1401 1411 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1402 1412 (desc, inst.locker))
1403 1413 # default to 600 seconds timeout
1404 1414 l = lockmod.lock(vfs, lockname,
1405 1415 int(self.ui.config("ui", "timeout", "600")),
1406 1416 releasefn=releasefn, acquirefn=acquirefn,
1407 1417 desc=desc)
1408 1418 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1409 1419 return l
1410 1420
1411 1421 def _afterlock(self, callback):
1412 1422 """add a callback to be run when the repository is fully unlocked
1413 1423
1414 1424 The callback will be executed when the outermost lock is released
1415 1425 (with wlock being higher level than 'lock')."""
1416 1426 for ref in (self._wlockref, self._lockref):
1417 1427 l = ref and ref()
1418 1428 if l and l.held:
1419 1429 l.postrelease.append(callback)
1420 1430 break
1421 1431 else: # no lock has been found.
1422 1432 callback()
1423 1433
1424 1434 def lock(self, wait=True):
1425 1435 '''Lock the repository store (.hg/store) and return a weak reference
1426 1436 to the lock. Use this before modifying the store (e.g. committing or
1427 1437 stripping). If you are opening a transaction, get a lock as well.
1428 1438
1429 1439 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1430 1440 'wlock' first to avoid a dead-lock hazard.'''
1431 1441 l = self._currentlock(self._lockref)
1432 1442 if l is not None:
1433 1443 l.lock()
1434 1444 return l
1435 1445
1436 1446 l = self._lock(self.svfs, "lock", wait, None,
1437 1447 self.invalidate, _('repository %s') % self.origroot)
1438 1448 self._lockref = weakref.ref(l)
1439 1449 return l
1440 1450
1441 1451 def _wlockchecktransaction(self):
1442 1452 if self.currenttransaction() is not None:
1443 1453 raise error.LockInheritanceContractViolation(
1444 1454 'wlock cannot be inherited in the middle of a transaction')
1445 1455
1446 1456 def wlock(self, wait=True):
1447 1457 '''Lock the non-store parts of the repository (everything under
1448 1458 .hg except .hg/store) and return a weak reference to the lock.
1449 1459
1450 1460 Use this before modifying files in .hg.
1451 1461
1452 1462 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1453 1463 'wlock' first to avoid a dead-lock hazard.'''
1454 1464 l = self._wlockref and self._wlockref()
1455 1465 if l is not None and l.held:
1456 1466 l.lock()
1457 1467 return l
1458 1468
1459 1469 # We do not need to check for non-waiting lock acquisition. Such
1460 1470 # acquisition would not cause a dead-lock as it would just fail.
1461 1471 if wait and (self.ui.configbool('devel', 'all-warnings')
1462 1472 or self.ui.configbool('devel', 'check-locks')):
1463 1473 if self._currentlock(self._lockref) is not None:
1464 1474 self.ui.develwarn('"wlock" acquired after "lock"')
1465 1475
1466 1476 def unlock():
1467 1477 if self.dirstate.pendingparentchange():
1468 1478 self.dirstate.invalidate()
1469 1479 else:
1470 1480 self.dirstate.write(None)
1471 1481
1472 1482 self._filecache['dirstate'].refresh()
1473 1483
1474 1484 l = self._lock(self.vfs, "wlock", wait, unlock,
1475 1485 self.invalidatedirstate, _('working directory of %s') %
1476 1486 self.origroot,
1477 1487 inheritchecker=self._wlockchecktransaction,
1478 1488 parentenvvar='HG_WLOCK_LOCKER')
1479 1489 self._wlockref = weakref.ref(l)
1480 1490 return l
1481 1491
1482 1492 def _currentlock(self, lockref):
1483 1493 """Returns the lock if it's held, or None if it's not."""
1484 1494 if lockref is None:
1485 1495 return None
1486 1496 l = lockref()
1487 1497 if l is None or not l.held:
1488 1498 return None
1489 1499 return l
1490 1500
1491 1501 def currentwlock(self):
1492 1502 """Returns the wlock if it's held, or None if it's not."""
1493 1503 return self._currentlock(self._wlockref)
1494 1504
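    def _examplelockedwrite(self, desc):
        # Editorial sketch, not part of this change (hypothetical method):
        # the documented acquisition order, 'wlock' before 'lock', then a
        # transaction, assuming the context-manager support both locks and
        # transactions provide in this tree.
        with self.wlock(), self.lock(), self.transaction(desc):
            pass # modify store and working directory here
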
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (e.g. issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

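    # Illustrative note (not part of the original module): for a file
    # committed after "hg copy foo bar", fctx.renamed() returns a
    # ('foo', <filenode>) tuple, so _filecommit() records copy metadata
    # roughly like this (the copyrev value is a placeholder):
    #
    #     meta = {'copy': 'foo', 'copyrev': '1f0dee641bb7...'}
    #
    # and stores nullid as the first filelog parent, which is what marks
    # the new revision as a copy.
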
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already be stripped by the time the
            # hook runs, so check that the node still exists
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

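    # Illustrative sketch (not part of the original module): a minimal
    # caller of commit(), restricted to a single file via an exact matcher.
    # The file name, user and message are hypothetical.
    #
    #     match = matchmod.exact(repo.root, '', ['a.txt'])
    #     node = repo.commit(text='update a.txt', user='alice', match=match)
    #     if node is None:
    #         repo.ui.status('nothing changed\n')
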
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the phase boundary does not alter parent
                # changesets: if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase was 0, we don't need to retract
                # anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

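    # Illustrative sketch (not part of the original module): committing an
    # in-memory change through commitctx() via context.memctx. The file
    # name, content and user are hypothetical; signatures are assumed to
    # match the context module shipped with this release.
    #
    #     def filectxfn(repo, memctx, path):
    #         return context.memfilectx(repo, path, 'new content\n')
    #
    #     p1 = repo['.']
    #     mctx = context.memctx(repo, (p1.node(), nullid),
    #                           'in-memory commit', ['a.txt'], filectxfn,
    #                           user='alice')
    #     node = repo.commitctx(mctx)
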
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which
        status fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the
        wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

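    # Illustrative sketch (not part of the original module): an extension
    # registering a post-dirstate-status callback. The callback is
    # hypothetical; per the docstring above it must be re-registered before
    # each status run and must reach the dirstate via wctx.repo().dirstate.
    #
    #     def fixup(wctx, status):
    #         wctx.repo().ui.debug('%d files modified\n'
    #                              % len(status.modified))
    #     repo.addpostdsstatus(fixup)
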
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

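    # Illustrative note (not part of the original module): between() samples
    # ancestors at exponentially growing distances from each 'top', which is
    # how the legacy wire protocol narrowed down common history. On a linear
    # chain rev0..rev10, between([(rev10, rev0)]) returns the nodes at
    # distance 1, 2, 4 and 8 from rev10, i.e. rev9, rev8, rev6 and rev2.
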
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

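    # Illustrative sketch (not part of the original module): an extension's
    # reposetup() overriding checkpush() to veto pushes. The extension and
    # config names are hypothetical.
    #
    #     def reposetup(ui, repo):
    #         class vetorepo(repo.__class__):
    #             def checkpush(self, pushop):
    #                 super(vetorepo, self).checkpush(pushop)
    #                 if self.ui.configbool('myext', 'blockpush'):
    #                     raise error.Abort(_('pushing is disabled by myext'))
    #         repo.__class__ = vetorepo
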
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance whose hooks are called before pushing
        changesets; each hook receives a pushop carrying repo, remote and
        outgoing attributes.
        """
        return util.hooks()

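    # Illustrative sketch (not part of the original module): registering a
    # pre-push check. Each registered hook receives the pushop, whose
    # 'outgoing' attribute describes the changesets about to be pushed.
    # The extension name and threshold are hypothetical.
    #
    #     def checkoutgoing(pushop):
    #         if len(pushop.outgoing.missing) > 100:
    #             pushop.repo.ui.warn(_('pushing more than 100 changesets\n'))
    #     repo.prepushoutgoinghooks.add('myextension', checkoutgoing)
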
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

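    # Illustrative sketch (not part of the original module): moving a
    # bookmark through the pushkey protocol, as the server side does on
    # behalf of a pushing client. In the 'bookmarks' namespace old/new
    # values are hex nodes; 'newnode' here is a hypothetical binary node.
    #
    #     old = hex(repo._bookmarks['@'])
    #     if not repo.pushkey('bookmarks', '@', old, hex(newnode)):
    #         repo.ui.warn(_('bookmark move was vetoed by a hook\n'))
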
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression', 'zlib')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)

    return requirements
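
# Illustrative sketch (not part of the original module): an extension
# wrapping newreporequirements() to add its own requirement to newly
# created repositories, as the docstring above invites. The extension and
# requirement names are hypothetical.
#
#     from mercurial import extensions, localrepo
#
#     def _newreporequirements(orig, repo):
#         reqs = orig(repo)
#         reqs.add('exp-myextension')
#         return reqs
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'newreporequirements',
#                                 _newreporequirements)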