auditor: add simple comment about repo.auditor and al...
marmoute
r33254:4ea0b7a6 default
@@ -1,2111 +1,2113 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 )
24 24 from . import (
25 25 bookmarks,
26 26 branchmap,
27 27 bundle2,
28 28 changegroup,
29 29 changelog,
30 30 color,
31 31 context,
32 32 dirstate,
33 33 dirstateguard,
34 34 encoding,
35 35 error,
36 36 exchange,
37 37 extensions,
38 38 filelog,
39 39 hook,
40 40 lock as lockmod,
41 41 manifest,
42 42 match as matchmod,
43 43 merge as mergemod,
44 44 mergeutil,
45 45 namespaces,
46 46 obsolete,
47 47 pathutil,
48 48 peer,
49 49 phases,
50 50 pushkey,
51 51 pycompat,
52 52 repoview,
53 53 revset,
54 54 revsetlang,
55 55 scmutil,
56 56 store,
57 57 subrepo,
58 58 tags as tagsmod,
59 59 transaction,
60 60 txnutil,
61 61 util,
62 62 vfs as vfsmod,
63 63 )
64 64
65 65 release = lockmod.release
66 66 urlerr = util.urlerr
67 67 urlreq = util.urlreq
68 68
69 69 class _basefilecache(scmutil.filecache):
70 70 """All filecache usage on repo are done for logic that should be unfiltered
71 71 """
72 72 def __get__(self, repo, type=None):
73 73 if repo is None:
74 74 return self
75 75 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
76 76 def __set__(self, repo, value):
77 77 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
78 78 def __delete__(self, repo):
79 79 return super(_basefilecache, self).__delete__(repo.unfiltered())
80 80
81 81 class repofilecache(_basefilecache):
82 82 """filecache for files in .hg but outside of .hg/store"""
83 83 def join(self, obj, fname):
84 84 return obj.vfs.join(fname)
85 85
86 86 class storecache(_basefilecache):
87 87 """filecache for files in the store"""
88 88 def join(self, obj, fname):
89 89 return obj.sjoin(fname)
90 90
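# Illustration only: these decorators are used like properties on the repo
# class; the cached value is recomputed when the backing file changes on
# disk (see the real _bookmarks and changelog definitions further down):
#
#     @repofilecache('bookmarks', 'bookmarks.current')   # files in .hg/
#     def _bookmarks(self):
#         return bookmarks.bmstore(self)
#
#     @storecache('00changelog.i')                       # files in .hg/store
#     def changelog(self):
#         ...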
91 91 class unfilteredpropertycache(util.propertycache):
92 92 """propertycache that apply to unfiltered repo only"""
93 93
94 94 def __get__(self, repo, type=None):
95 95 unfi = repo.unfiltered()
96 96 if unfi is repo:
97 97 return super(unfilteredpropertycache, self).__get__(unfi)
98 98 return getattr(unfi, self.name)
99 99
100 100 class filteredpropertycache(util.propertycache):
101 101 """propertycache that must take filtering in account"""
102 102
103 103 def cachevalue(self, obj, value):
104 104 object.__setattr__(obj, self.name, value)
105 105
106 106
107 107 def hasunfilteredcache(repo, name):
108 108 """check if a repo has an unfilteredpropertycache value for <name>"""
109 109 return name in vars(repo.unfiltered())
110 110
111 111 def unfilteredmethod(orig):
112 112 """decorate method that always need to be run on unfiltered version"""
113 113 def wrapper(repo, *args, **kwargs):
114 114 return orig(repo.unfiltered(), *args, **kwargs)
115 115 return wrapper
116 116
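# Illustration only (hypothetical function, not part of this change): the
# decorator also works on any callable whose first argument is a repo; the
# body always sees the unfiltered repository.
@unfilteredmethod
def _examplecountallrevisions(repo):
    # counts hidden changesets too, regardless of the caller's filter
    return len(repo)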
117 117 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
118 118 'unbundle'}
119 119 legacycaps = moderncaps.union({'changegroupsubset'})
120 120
121 121 class localpeer(peer.peerrepository):
122 122 '''peer for a local repo; reflects only the most recent API'''
123 123
124 124 def __init__(self, repo, caps=None):
125 125 if caps is None:
126 126 caps = moderncaps.copy()
127 127 peer.peerrepository.__init__(self)
128 128 self._repo = repo.filtered('served')
129 129 self.ui = repo.ui
130 130 self._caps = repo._restrictcapabilities(caps)
131 131 self.requirements = repo.requirements
132 132 self.supportedformats = repo.supportedformats
133 133
134 134 def close(self):
135 135 self._repo.close()
136 136
137 137 def _capabilities(self):
138 138 return self._caps
139 139
140 140 def local(self):
141 141 return self._repo
142 142
143 143 def canpush(self):
144 144 return True
145 145
146 146 def url(self):
147 147 return self._repo.url()
148 148
149 149 def lookup(self, key):
150 150 return self._repo.lookup(key)
151 151
152 152 def branchmap(self):
153 153 return self._repo.branchmap()
154 154
155 155 def heads(self):
156 156 return self._repo.heads()
157 157
158 158 def known(self, nodes):
159 159 return self._repo.known(nodes)
160 160
161 161 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
162 162 **kwargs):
163 163 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
164 164 common=common, bundlecaps=bundlecaps,
165 165 **kwargs)
166 166 cb = util.chunkbuffer(chunks)
167 167
168 168 if exchange.bundle2requested(bundlecaps):
169 169 # When requesting a bundle2, getbundle returns a stream to make the
170 170 # wire level function happier. We need to build a proper object
171 171 # from it in local peer.
172 172 return bundle2.getunbundler(self.ui, cb)
173 173 else:
174 174 return changegroup.getunbundler('01', cb, None)
175 175
176 176 # TODO We might want to move the next two calls into legacypeer and add
177 177 # unbundle instead.
178 178
179 179 def unbundle(self, cg, heads, url):
180 180 """apply a bundle on a repo
181 181
182 182 This function handles the repo locking itself."""
183 183 try:
184 184 try:
185 185 cg = exchange.readbundle(self.ui, cg, None)
186 186 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
187 187 if util.safehasattr(ret, 'getchunks'):
188 188 # This is a bundle20 object, turn it into an unbundler.
189 189 # This little dance should be dropped eventually when the
190 190 # API is finally improved.
191 191 stream = util.chunkbuffer(ret.getchunks())
192 192 ret = bundle2.getunbundler(self.ui, stream)
193 193 return ret
194 194 except Exception as exc:
195 195 # If the exception contains output salvaged from a bundle2
196 196 # reply, we need to make sure it is printed before continuing
197 197 # to fail. So we build a bundle2 with such output and consume
198 198 # it directly.
199 199 #
200 200 # This is not very elegant but allows a "simple" solution for
201 201 # issue4594
202 202 output = getattr(exc, '_bundle2salvagedoutput', ())
203 203 if output:
204 204 bundler = bundle2.bundle20(self._repo.ui)
205 205 for out in output:
206 206 bundler.addpart(out)
207 207 stream = util.chunkbuffer(bundler.getchunks())
208 208 b = bundle2.getunbundler(self.ui, stream)
209 209 bundle2.processbundle(self._repo, b)
210 210 raise
211 211 except error.PushRaced as exc:
212 212 raise error.ResponseError(_('push failed:'), str(exc))
213 213
214 214 def lock(self):
215 215 return self._repo.lock()
216 216
217 217 def pushkey(self, namespace, key, old, new):
218 218 return self._repo.pushkey(namespace, key, old, new)
219 219
220 220 def listkeys(self, namespace):
221 221 return self._repo.listkeys(namespace)
222 222
223 223 def debugwireargs(self, one, two, three=None, four=None, five=None):
224 224 '''used to test argument passing over the wire'''
225 225 return "%s %s %s %s %s" % (one, two, three, four, five)
226 226
227 227 class locallegacypeer(localpeer):
228 228 '''peer extension which implements legacy methods too; used for tests with
229 229 restricted capabilities'''
230 230
231 231 def __init__(self, repo):
232 232 localpeer.__init__(self, repo, caps=legacycaps)
233 233
234 234 def branches(self, nodes):
235 235 return self._repo.branches(nodes)
236 236
237 237 def between(self, pairs):
238 238 return self._repo.between(pairs)
239 239
240 240 def changegroup(self, basenodes, source):
241 241 return changegroup.changegroup(self._repo, basenodes, source)
242 242
243 243 def changegroupsubset(self, bases, heads, source):
244 244 return changegroup.changegroupsubset(self._repo, bases, heads, source)
245 245
246 246 # Increment the sub-version when the revlog v2 format changes to lock out old
247 247 # clients.
248 248 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
249 249
250 250 class localrepository(object):
251 251
252 252 supportedformats = {
253 253 'revlogv1',
254 254 'generaldelta',
255 255 'treemanifest',
256 256 'manifestv2',
257 257 REVLOGV2_REQUIREMENT,
258 258 }
259 259 _basesupported = supportedformats | {
260 260 'store',
261 261 'fncache',
262 262 'shared',
263 263 'relshared',
264 264 'dotencode',
265 265 }
266 266 openerreqs = {
267 267 'revlogv1',
268 268 'generaldelta',
269 269 'treemanifest',
270 270 'manifestv2',
271 271 }
272 272
273 273 # a list of (ui, featureset) functions.
 274 274 # only functions defined in modules of enabled extensions are invoked
275 275 featuresetupfuncs = set()
276 276
277 277 def __init__(self, baseui, path, create=False):
278 278 self.requirements = set()
279 279 self.filtername = None
280 280 # wvfs: rooted at the repository root, used to access the working copy
281 281 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
282 282 # vfs: rooted at .hg, used to access repo files outside of .hg/store
283 283 self.vfs = None
284 284 # svfs: usually rooted at .hg/store, used to access repository history
285 285 # If this is a shared repository, this vfs may point to another
286 286 # repository's .hg/store directory.
287 287 self.svfs = None
288 288 self.root = self.wvfs.base
289 289 self.path = self.wvfs.join(".hg")
290 290 self.origroot = path
 291 # These auditors are not used by the vfs;
 292 # their only user at the time of writing this comment is basectx.match
291 293 self.auditor = pathutil.pathauditor(self.root, self._checknested)
292 294 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
293 295 realfs=False)
294 296 self.vfs = vfsmod.vfs(self.path)
295 297 self.baseui = baseui
296 298 self.ui = baseui.copy()
297 299 self.ui.copy = baseui.copy # prevent copying repo configuration
 298 300 # A list of callbacks to shape the phase if no data were found.
 299 301 # Callbacks are in the form: func(repo, roots) --> processed root.
 300 302 # This list is to be filled by extensions during repo setup
301 303 self._phasedefaults = []
302 304 try:
303 305 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
304 306 self._loadextensions()
305 307 except IOError:
306 308 pass
307 309
308 310 if self.featuresetupfuncs:
309 311 self.supported = set(self._basesupported) # use private copy
310 312 extmods = set(m.__name__ for n, m
311 313 in extensions.extensions(self.ui))
312 314 for setupfunc in self.featuresetupfuncs:
313 315 if setupfunc.__module__ in extmods:
314 316 setupfunc(self.ui, self.supported)
315 317 else:
316 318 self.supported = self._basesupported
317 319 color.setup(self.ui)
318 320
319 321 # Add compression engines.
320 322 for name in util.compengines:
321 323 engine = util.compengines[name]
322 324 if engine.revlogheader():
323 325 self.supported.add('exp-compression-%s' % name)
324 326
325 327 if not self.vfs.isdir():
326 328 if create:
327 329 self.requirements = newreporequirements(self)
328 330
329 331 if not self.wvfs.exists():
330 332 self.wvfs.makedirs()
331 333 self.vfs.makedir(notindexed=True)
332 334
333 335 if 'store' in self.requirements:
334 336 self.vfs.mkdir("store")
335 337
336 338 # create an invalid changelog
337 339 self.vfs.append(
338 340 "00changelog.i",
339 341 '\0\0\0\2' # represents revlogv2
340 342 ' dummy changelog to prevent using the old repo layout'
341 343 )
342 344 else:
343 345 raise error.RepoError(_("repository %s not found") % path)
344 346 elif create:
345 347 raise error.RepoError(_("repository %s already exists") % path)
346 348 else:
347 349 try:
348 350 self.requirements = scmutil.readrequires(
349 351 self.vfs, self.supported)
350 352 except IOError as inst:
351 353 if inst.errno != errno.ENOENT:
352 354 raise
353 355
354 356 self.sharedpath = self.path
355 357 try:
356 358 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
357 359 if 'relshared' in self.requirements:
358 360 sharedpath = self.vfs.join(sharedpath)
359 361 vfs = vfsmod.vfs(sharedpath, realpath=True)
360 362 s = vfs.base
361 363 if not vfs.exists():
362 364 raise error.RepoError(
363 365 _('.hg/sharedpath points to nonexistent directory %s') % s)
364 366 self.sharedpath = s
365 367 except IOError as inst:
366 368 if inst.errno != errno.ENOENT:
367 369 raise
368 370
369 371 self.store = store.store(
370 372 self.requirements, self.sharedpath, vfsmod.vfs)
371 373 self.spath = self.store.path
372 374 self.svfs = self.store.vfs
373 375 self.sjoin = self.store.join
374 376 self.vfs.createmode = self.store.createmode
375 377 self._applyopenerreqs()
376 378 if create:
377 379 self._writerequirements()
378 380
379 381 self._dirstatevalidatewarned = False
380 382
381 383 self._branchcaches = {}
382 384 self._revbranchcache = None
383 385 self.filterpats = {}
384 386 self._datafilters = {}
385 387 self._transref = self._lockref = self._wlockref = None
386 388
387 389 # A cache for various files under .hg/ that tracks file changes,
388 390 # (used by the filecache decorator)
389 391 #
390 392 # Maps a property name to its util.filecacheentry
391 393 self._filecache = {}
392 394
 393 395 # hold sets of revisions to be filtered
394 396 # should be cleared when something might have changed the filter value:
395 397 # - new changesets,
396 398 # - phase change,
397 399 # - new obsolescence marker,
398 400 # - working directory parent change,
399 401 # - bookmark changes
400 402 self.filteredrevcache = {}
401 403
402 404 # post-dirstate-status hooks
403 405 self._postdsstatus = []
404 406
405 407 # generic mapping between names and nodes
406 408 self.names = namespaces.namespaces()
407 409
408 410 def close(self):
409 411 self._writecaches()
410 412
411 413 def _loadextensions(self):
412 414 extensions.loadall(self.ui)
413 415
414 416 def _writecaches(self):
415 417 if self._revbranchcache:
416 418 self._revbranchcache.write()
417 419
418 420 def _restrictcapabilities(self, caps):
419 421 if self.ui.configbool('experimental', 'bundle2-advertise', True):
420 422 caps = set(caps)
421 423 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
422 424 caps.add('bundle2=' + urlreq.quote(capsblob))
423 425 return caps
424 426
425 427 def _applyopenerreqs(self):
426 428 self.svfs.options = dict((r, 1) for r in self.requirements
427 429 if r in self.openerreqs)
428 430 # experimental config: format.chunkcachesize
429 431 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
430 432 if chunkcachesize is not None:
431 433 self.svfs.options['chunkcachesize'] = chunkcachesize
432 434 # experimental config: format.maxchainlen
433 435 maxchainlen = self.ui.configint('format', 'maxchainlen')
434 436 if maxchainlen is not None:
435 437 self.svfs.options['maxchainlen'] = maxchainlen
436 438 # experimental config: format.manifestcachesize
437 439 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
438 440 if manifestcachesize is not None:
439 441 self.svfs.options['manifestcachesize'] = manifestcachesize
440 442 # experimental config: format.aggressivemergedeltas
441 443 aggressivemergedeltas = self.ui.configbool('format',
442 444 'aggressivemergedeltas')
443 445 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
444 446 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
445 447 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan', -1)
446 448 if 0 <= chainspan:
447 449 self.svfs.options['maxdeltachainspan'] = chainspan
448 450
449 451 for r in self.requirements:
450 452 if r.startswith('exp-compression-'):
451 453 self.svfs.options['compengine'] = r[len('exp-compression-'):]
452 454
453 455 # TODO move "revlogv2" to openerreqs once finalized.
454 456 if REVLOGV2_REQUIREMENT in self.requirements:
455 457 self.svfs.options['revlogv2'] = True
456 458
457 459 def _writerequirements(self):
458 460 scmutil.writerequires(self.vfs, self.requirements)
459 461
460 462 def _checknested(self, path):
461 463 """Determine if path is a legal nested repository."""
462 464 if not path.startswith(self.root):
463 465 return False
464 466 subpath = path[len(self.root) + 1:]
465 467 normsubpath = util.pconvert(subpath)
466 468
467 469 # XXX: Checking against the current working copy is wrong in
468 470 # the sense that it can reject things like
469 471 #
470 472 # $ hg cat -r 10 sub/x.txt
471 473 #
472 474 # if sub/ is no longer a subrepository in the working copy
473 475 # parent revision.
474 476 #
475 477 # However, it can of course also allow things that would have
476 478 # been rejected before, such as the above cat command if sub/
477 479 # is a subrepository now, but was a normal directory before.
478 480 # The old path auditor would have rejected by mistake since it
479 481 # panics when it sees sub/.hg/.
480 482 #
481 483 # All in all, checking against the working copy seems sensible
482 484 # since we want to prevent access to nested repositories on
483 485 # the filesystem *now*.
484 486 ctx = self[None]
485 487 parts = util.splitpath(subpath)
486 488 while parts:
487 489 prefix = '/'.join(parts)
488 490 if prefix in ctx.substate:
489 491 if prefix == normsubpath:
490 492 return True
491 493 else:
492 494 sub = ctx.sub(prefix)
493 495 return sub.checknested(subpath[len(prefix) + 1:])
494 496 else:
495 497 parts.pop()
496 498 return False
497 499
498 500 def peer(self):
499 501 return localpeer(self) # not cached to avoid reference cycle
500 502
501 503 def unfiltered(self):
502 504 """Return unfiltered version of the repository
503 505
504 506 Intended to be overwritten by filtered repo."""
505 507 return self
506 508
507 509 def filtered(self, name):
508 510 """Return a filtered version of a repository"""
509 511 # build a new class with the mixin and the current class
510 512 # (possibly subclass of the repo)
511 513 class filteredrepo(repoview.repoview, self.unfiltered().__class__):
512 514 pass
513 515 return filteredrepo(self, name)
514 516
515 517 @repofilecache('bookmarks', 'bookmarks.current')
516 518 def _bookmarks(self):
517 519 return bookmarks.bmstore(self)
518 520
519 521 @property
520 522 def _activebookmark(self):
521 523 return self._bookmarks.active
522 524
 523 525 # _phaserevs and _phasesets depend on changelog. What we need is to
 524 526 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
 525 527 # can't be easily expressed in the filecache mechanism.
526 528 @storecache('phaseroots', '00changelog.i')
527 529 def _phasecache(self):
528 530 return phases.phasecache(self, self._phasedefaults)
529 531
530 532 @storecache('obsstore')
531 533 def obsstore(self):
532 534 return obsolete.makestore(self.ui, self)
533 535
534 536 @storecache('00changelog.i')
535 537 def changelog(self):
536 538 return changelog.changelog(self.svfs,
537 539 trypending=txnutil.mayhavepending(self.root))
538 540
539 541 def _constructmanifest(self):
540 542 # This is a temporary function while we migrate from manifest to
541 543 # manifestlog. It allows bundlerepo and unionrepo to intercept the
542 544 # manifest creation.
543 545 return manifest.manifestrevlog(self.svfs)
544 546
545 547 @storecache('00manifest.i')
546 548 def manifestlog(self):
547 549 return manifest.manifestlog(self.svfs, self)
548 550
549 551 @repofilecache('dirstate')
550 552 def dirstate(self):
551 553 return dirstate.dirstate(self.vfs, self.ui, self.root,
552 554 self._dirstatevalidate)
553 555
554 556 def _dirstatevalidate(self, node):
555 557 try:
556 558 self.changelog.rev(node)
557 559 return node
558 560 except error.LookupError:
559 561 if not self._dirstatevalidatewarned:
560 562 self._dirstatevalidatewarned = True
561 563 self.ui.warn(_("warning: ignoring unknown"
562 564 " working parent %s!\n") % short(node))
563 565 return nullid
564 566
565 567 def __getitem__(self, changeid):
566 568 if changeid is None:
567 569 return context.workingctx(self)
568 570 if isinstance(changeid, slice):
569 571 # wdirrev isn't contiguous so the slice shouldn't include it
570 572 return [context.changectx(self, i)
571 573 for i in xrange(*changeid.indices(len(self)))
572 574 if i not in self.changelog.filteredrevs]
573 575 try:
574 576 return context.changectx(self, changeid)
575 577 except error.WdirUnsupported:
576 578 return context.workingctx(self)
577 579
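    # Illustration only (hypothetical method, not part of this change): the
    # indexing protocol above accepts several changeid types.
    def _exampleindexing(self):
        wctx = self[None]       # workingctx for the working directory
        tipctx = self['tip']    # changectx for the tip changeset
        first = self[0]         # changectx for revision 0
        some = self[0:3]        # list of changectx, filtered revs excluded
        return wctx, tipctx, first, some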
578 580 def __contains__(self, changeid):
579 581 """True if the given changeid exists
580 582
 581 583 error.LookupError is raised if an ambiguous node is specified.
582 584 """
583 585 try:
584 586 self[changeid]
585 587 return True
586 588 except error.RepoLookupError:
587 589 return False
588 590
589 591 def __nonzero__(self):
590 592 return True
591 593
592 594 __bool__ = __nonzero__
593 595
594 596 def __len__(self):
595 597 return len(self.changelog)
596 598
597 599 def __iter__(self):
598 600 return iter(self.changelog)
599 601
600 602 def revs(self, expr, *args):
601 603 '''Find revisions matching a revset.
602 604
603 605 The revset is specified as a string ``expr`` that may contain
604 606 %-formatting to escape certain types. See ``revsetlang.formatspec``.
605 607
606 608 Revset aliases from the configuration are not expanded. To expand
607 609 user aliases, consider calling ``scmutil.revrange()`` or
608 610 ``repo.anyrevs([expr], user=True)``.
609 611
610 612 Returns a revset.abstractsmartset, which is a list-like interface
611 613 that contains integer revisions.
612 614 '''
613 615 expr = revsetlang.formatspec(expr, *args)
614 616 m = revset.match(None, expr)
615 617 return m(self)
616 618
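    # Illustration only (hypothetical method): %-formatting escapes values
    # safely into the revset string.
    def _examplerevs(self):
        # integer revisions on the 'default' branch (name passed safely)
        return [r for r in self.revs('branch(%s)', 'default')]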
617 619 def set(self, expr, *args):
618 620 '''Find revisions matching a revset and emit changectx instances.
619 621
620 622 This is a convenience wrapper around ``revs()`` that iterates the
621 623 result and is a generator of changectx instances.
622 624
623 625 Revset aliases from the configuration are not expanded. To expand
624 626 user aliases, consider calling ``scmutil.revrange()``.
625 627 '''
626 628 for r in self.revs(expr, *args):
627 629 yield self[r]
628 630
629 631 def anyrevs(self, specs, user=False):
630 632 '''Find revisions matching one of the given revsets.
631 633
632 634 Revset aliases from the configuration are not expanded by default. To
633 635 expand user aliases, specify ``user=True``.
634 636 '''
635 637 if user:
636 638 m = revset.matchany(self.ui, specs, repo=self)
637 639 else:
638 640 m = revset.matchany(None, specs)
639 641 return m(self)
640 642
641 643 def url(self):
642 644 return 'file:' + self.root
643 645
644 646 def hook(self, name, throw=False, **args):
645 647 """Call a hook, passing this repo instance.
646 648
 647 649 This is a convenience method to aid invoking hooks. Extensions likely
648 650 won't call this unless they have registered a custom hook or are
649 651 replacing code that is expected to call a hook.
650 652 """
651 653 return hook.hook(self.ui, self, name, throw, **args)
652 654
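    # Illustration only (hypothetical method): extensions typically fire
    # their own hooks this way; keyword arguments are exposed to shell
    # hooks as HG_* environment variables ('myhook' is a made-up name).
    def _examplehook(self, node):
        return self.hook('myhook', throw=False, node=hex(node))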
653 655 @filteredpropertycache
654 656 def _tagscache(self):
655 657 '''Returns a tagscache object that contains various tags related
656 658 caches.'''
657 659
658 660 # This simplifies its cache management by having one decorated
659 661 # function (this one) and the rest simply fetch things from it.
660 662 class tagscache(object):
661 663 def __init__(self):
662 664 # These two define the set of tags for this repository. tags
663 665 # maps tag name to node; tagtypes maps tag name to 'global' or
664 666 # 'local'. (Global tags are defined by .hgtags across all
665 667 # heads, and local tags are defined in .hg/localtags.)
666 668 # They constitute the in-memory cache of tags.
667 669 self.tags = self.tagtypes = None
668 670
669 671 self.nodetagscache = self.tagslist = None
670 672
671 673 cache = tagscache()
672 674 cache.tags, cache.tagtypes = self._findtags()
673 675
674 676 return cache
675 677
676 678 def tags(self):
677 679 '''return a mapping of tag to node'''
678 680 t = {}
679 681 if self.changelog.filteredrevs:
680 682 tags, tt = self._findtags()
681 683 else:
682 684 tags = self._tagscache.tags
683 685 for k, v in tags.iteritems():
684 686 try:
685 687 # ignore tags to unknown nodes
686 688 self.changelog.rev(v)
687 689 t[k] = v
688 690 except (error.LookupError, ValueError):
689 691 pass
690 692 return t
691 693
692 694 def _findtags(self):
693 695 '''Do the hard work of finding tags. Return a pair of dicts
694 696 (tags, tagtypes) where tags maps tag name to node, and tagtypes
695 697 maps tag name to a string like \'global\' or \'local\'.
696 698 Subclasses or extensions are free to add their own tags, but
697 699 should be aware that the returned dicts will be retained for the
698 700 duration of the localrepo object.'''
699 701
700 702 # XXX what tagtype should subclasses/extensions use? Currently
701 703 # mq and bookmarks add tags, but do not set the tagtype at all.
702 704 # Should each extension invent its own tag type? Should there
703 705 # be one tagtype for all such "virtual" tags? Or is the status
704 706 # quo fine?
705 707
706 708
707 709 # map tag name to (node, hist)
708 710 alltags = tagsmod.findglobaltags(self.ui, self)
709 711 # map tag name to tag type
710 712 tagtypes = dict((tag, 'global') for tag in alltags)
711 713
712 714 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
713 715
714 716 # Build the return dicts. Have to re-encode tag names because
715 717 # the tags module always uses UTF-8 (in order not to lose info
716 718 # writing to the cache), but the rest of Mercurial wants them in
717 719 # local encoding.
718 720 tags = {}
719 721 for (name, (node, hist)) in alltags.iteritems():
720 722 if node != nullid:
721 723 tags[encoding.tolocal(name)] = node
722 724 tags['tip'] = self.changelog.tip()
723 725 tagtypes = dict([(encoding.tolocal(name), value)
724 726 for (name, value) in tagtypes.iteritems()])
725 727 return (tags, tagtypes)
726 728
727 729 def tagtype(self, tagname):
728 730 '''
729 731 return the type of the given tag. result can be:
730 732
731 733 'local' : a local tag
732 734 'global' : a global tag
733 735 None : tag does not exist
734 736 '''
735 737
736 738 return self._tagscache.tagtypes.get(tagname)
737 739
738 740 def tagslist(self):
739 741 '''return a list of tags ordered by revision'''
740 742 if not self._tagscache.tagslist:
741 743 l = []
742 744 for t, n in self.tags().iteritems():
743 745 l.append((self.changelog.rev(n), t, n))
744 746 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
745 747
746 748 return self._tagscache.tagslist
747 749
748 750 def nodetags(self, node):
749 751 '''return the tags associated with a node'''
750 752 if not self._tagscache.nodetagscache:
751 753 nodetagscache = {}
752 754 for t, n in self._tagscache.tags.iteritems():
753 755 nodetagscache.setdefault(n, []).append(t)
754 756 for tags in nodetagscache.itervalues():
755 757 tags.sort()
756 758 self._tagscache.nodetagscache = nodetagscache
757 759 return self._tagscache.nodetagscache.get(node, [])
758 760
759 761 def nodebookmarks(self, node):
760 762 """return the list of bookmarks pointing to the specified node"""
761 763 marks = []
762 764 for bookmark, n in self._bookmarks.iteritems():
763 765 if n == node:
764 766 marks.append(bookmark)
765 767 return sorted(marks)
766 768
767 769 def branchmap(self):
768 770 '''returns a dictionary {branch: [branchheads]} with branchheads
769 771 ordered by increasing revision number'''
770 772 branchmap.updatecache(self)
771 773 return self._branchcaches[self.filtername]
772 774
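    # Illustration only (hypothetical method): consuming the mapping
    # returned by branchmap() above.
    def _examplebranchheads(self):
        for branch, heads in self.branchmap().iteritems():
            # 'heads' is a list of binary node ids, ascending by revision
            yield branch, [hex(n) for n in heads]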
773 775 @unfilteredmethod
774 776 def revbranchcache(self):
775 777 if not self._revbranchcache:
776 778 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
777 779 return self._revbranchcache
778 780
779 781 def branchtip(self, branch, ignoremissing=False):
780 782 '''return the tip node for a given branch
781 783
782 784 If ignoremissing is True, then this method will not raise an error.
783 785 This is helpful for callers that only expect None for a missing branch
784 786 (e.g. namespace).
785 787
786 788 '''
787 789 try:
788 790 return self.branchmap().branchtip(branch)
789 791 except KeyError:
790 792 if not ignoremissing:
791 793 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
792 794 else:
793 795 pass
794 796
795 797 def lookup(self, key):
796 798 return self[key].node()
797 799
798 800 def lookupbranch(self, key, remote=None):
799 801 repo = remote or self
800 802 if key in repo.branchmap():
801 803 return key
802 804
803 805 repo = (remote and remote.local()) and remote or self
804 806 return repo[key].branch()
805 807
806 808 def known(self, nodes):
807 809 cl = self.changelog
808 810 nm = cl.nodemap
809 811 filtered = cl.filteredrevs
810 812 result = []
811 813 for n in nodes:
812 814 r = nm.get(n)
813 815 resp = not (r is None or r in filtered)
814 816 result.append(resp)
815 817 return result
816 818
817 819 def local(self):
818 820 return self
819 821
820 822 def publishing(self):
821 823 # it's safe (and desirable) to trust the publish flag unconditionally
822 824 # so that we don't finalize changes shared between users via ssh or nfs
823 825 return self.ui.configbool('phases', 'publish', True, untrusted=True)
824 826
825 827 def cancopy(self):
826 828 # so statichttprepo's override of local() works
827 829 if not self.local():
828 830 return False
829 831 if not self.publishing():
830 832 return True
831 833 # if publishing we can't copy if there is filtered content
832 834 return not self.filtered('visible').changelog.filteredrevs
833 835
834 836 def shared(self):
835 837 '''the type of shared repository (None if not shared)'''
836 838 if self.sharedpath != self.path:
837 839 return 'store'
838 840 return None
839 841
840 842 def wjoin(self, f, *insidef):
841 843 return self.vfs.reljoin(self.root, f, *insidef)
842 844
843 845 def file(self, f):
844 846 if f[0] == '/':
845 847 f = f[1:]
846 848 return filelog.filelog(self.svfs, f)
847 849
848 850 def changectx(self, changeid):
849 851 return self[changeid]
850 852
851 853 def setparents(self, p1, p2=nullid):
852 854 with self.dirstate.parentchange():
853 855 copies = self.dirstate.setparents(p1, p2)
854 856 pctx = self[p1]
855 857 if copies:
856 858 # Adjust copy records, the dirstate cannot do it, it
857 859 # requires access to parents manifests. Preserve them
858 860 # only for entries added to first parent.
859 861 for f in copies:
860 862 if f not in pctx and copies[f] in pctx:
861 863 self.dirstate.copy(copies[f], f)
862 864 if p2 == nullid:
863 865 for f, s in sorted(self.dirstate.copies().items()):
864 866 if f not in pctx and s not in pctx:
865 867 self.dirstate.copy(None, f)
866 868
867 869 def filectx(self, path, changeid=None, fileid=None):
868 870 """changeid can be a changeset revision, node, or tag.
869 871 fileid can be a file revision or node."""
870 872 return context.filectx(self, path, changeid, fileid)
871 873
872 874 def getcwd(self):
873 875 return self.dirstate.getcwd()
874 876
875 877 def pathto(self, f, cwd=None):
876 878 return self.dirstate.pathto(f, cwd)
877 879
878 880 def _loadfilter(self, filter):
879 881 if filter not in self.filterpats:
880 882 l = []
881 883 for pat, cmd in self.ui.configitems(filter):
882 884 if cmd == '!':
883 885 continue
884 886 mf = matchmod.match(self.root, '', [pat])
885 887 fn = None
886 888 params = cmd
887 889 for name, filterfn in self._datafilters.iteritems():
888 890 if cmd.startswith(name):
889 891 fn = filterfn
890 892 params = cmd[len(name):].lstrip()
891 893 break
892 894 if not fn:
893 895 fn = lambda s, c, **kwargs: util.filter(s, c)
894 896 # Wrap old filters not supporting keyword arguments
895 897 if not inspect.getargspec(fn)[2]:
896 898 oldfn = fn
897 899 fn = lambda s, c, **kwargs: oldfn(s, c)
898 900 l.append((mf, fn, params))
899 901 self.filterpats[filter] = l
900 902 return self.filterpats[filter]
901 903
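    # Illustration only: _loadfilter reads patterns from the [encode] and
    # [decode] config sections. A hypothetical hgrc mapping a pattern to a
    # shell filter (syntax as documented in 'hg help config') could be:
    #
    #     [encode]
    #     *.txt = tempfile: unix2dos -n INFILE OUTFILE
    #
    #     [decode]
    #     *.txt = dos2unix
    #
    # Filters registered via adddatafilter() are matched by command prefix
    # before falling back to util.filter shell pipes.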
902 904 def _filter(self, filterpats, filename, data):
903 905 for mf, fn, cmd in filterpats:
904 906 if mf(filename):
905 907 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
906 908 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
907 909 break
908 910
909 911 return data
910 912
911 913 @unfilteredpropertycache
912 914 def _encodefilterpats(self):
913 915 return self._loadfilter('encode')
914 916
915 917 @unfilteredpropertycache
916 918 def _decodefilterpats(self):
917 919 return self._loadfilter('decode')
918 920
919 921 def adddatafilter(self, name, filter):
920 922 self._datafilters[name] = filter
921 923
922 924 def wread(self, filename):
923 925 if self.wvfs.islink(filename):
924 926 data = self.wvfs.readlink(filename)
925 927 else:
926 928 data = self.wvfs.read(filename)
927 929 return self._filter(self._encodefilterpats, filename, data)
928 930
929 931 def wwrite(self, filename, data, flags, backgroundclose=False):
930 932 """write ``data`` into ``filename`` in the working directory
931 933
 932 934 This returns the length of the written (maybe decoded) data.
933 935 """
934 936 data = self._filter(self._decodefilterpats, filename, data)
935 937 if 'l' in flags:
936 938 self.wvfs.symlink(data, filename)
937 939 else:
938 940 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
939 941 if 'x' in flags:
940 942 self.wvfs.setflags(filename, False, True)
941 943 return len(data)
942 944
943 945 def wwritedata(self, filename, data):
944 946 return self._filter(self._decodefilterpats, filename, data)
945 947
946 948 def currenttransaction(self):
947 949 """return the current transaction or None if non exists"""
948 950 if self._transref:
949 951 tr = self._transref()
950 952 else:
951 953 tr = None
952 954
953 955 if tr and tr.running():
954 956 return tr
955 957 return None
956 958
957 959 def transaction(self, desc, report=None):
958 960 if (self.ui.configbool('devel', 'all-warnings')
959 961 or self.ui.configbool('devel', 'check-locks')):
960 962 if self._currentlock(self._lockref) is None:
961 963 raise error.ProgrammingError('transaction requires locking')
962 964 tr = self.currenttransaction()
963 965 if tr is not None:
964 966 return tr.nest()
965 967
966 968 # abort here if the journal already exists
967 969 if self.svfs.exists("journal"):
968 970 raise error.RepoError(
969 971 _("abandoned transaction found"),
970 972 hint=_("run 'hg recover' to clean up transaction"))
971 973
972 974 idbase = "%.40f#%f" % (random.random(), time.time())
973 975 ha = hex(hashlib.sha1(idbase).digest())
974 976 txnid = 'TXN:' + ha
975 977 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
976 978
977 979 self._writejournal(desc)
978 980 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
979 981 if report:
980 982 rp = report
981 983 else:
982 984 rp = self.ui.warn
983 985 vfsmap = {'plain': self.vfs} # root of .hg/
984 986 # we must avoid cyclic reference between repo and transaction.
985 987 reporef = weakref.ref(self)
986 988 # Code to track tag movement
987 989 #
988 990 # Since tags are all handled as file content, it is actually quite hard
 989 991 # to track these movements from a code perspective. So we fall back to
 990 992 # tracking at the repository level. One could envision tracking changes
 991 993 # to the '.hgtags' file through changegroup application, but that fails
 992 994 # to cope with cases where a transaction exposes new heads without a
 993 995 # changegroup being involved (eg: phase movement).
994 996 #
 995 997 # For now, we gate the feature behind a flag since it likely comes
 996 998 # with performance impacts. The current code runs more often than needed
 997 999 # and does not use caches as much as it could. The current focus is on
998 1000 # the behavior of the feature so we disable it by default. The flag
999 1001 # will be removed when we are happy with the performance impact.
1000 1002 #
1001 1003 # Once this feature is no longer experimental move the following
1002 1004 # documentation to the appropriate help section:
1003 1005 #
1004 1006 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1005 1007 # tags (new or changed or deleted tags). In addition the details of
1006 1008 # these changes are made available in a file at:
1007 1009 # ``REPOROOT/.hg/changes/tags.changes``.
1008 1010 # Make sure you check for HG_TAG_MOVED before reading that file as it
 1009 1011 # might exist from a previous transaction even if no tags were touched
 1010 1012 # in this one. Changes are recorded in a line-based format::
1011 1013 #
1012 1014 # <action> <hex-node> <tag-name>\n
1013 1015 #
 1014 1016 # Actions are defined as follows:
1015 1017 # "-R": tag is removed,
1016 1018 # "+A": tag is added,
1017 1019 # "-M": tag is moved (old value),
1018 1020 # "+M": tag is moved (new value),
1019 1021 tracktags = lambda x: None
1020 1022 # experimental config: experimental.hook-track-tags
1021 1023 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags',
1022 1024 False)
1023 1025 if desc != 'strip' and shouldtracktags:
1024 1026 oldheads = self.changelog.headrevs()
1025 1027 def tracktags(tr2):
1026 1028 repo = reporef()
1027 1029 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1028 1030 newheads = repo.changelog.headrevs()
1029 1031 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
 1030 1032 # note: we compare lists here.
 1031 1033 # As we do it only once, building a set would not be cheaper
1032 1034 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1033 1035 if changes:
1034 1036 tr2.hookargs['tag_moved'] = '1'
1035 1037 with repo.vfs('changes/tags.changes', 'w',
1036 1038 atomictemp=True) as changesfile:
 1037 1039 # note: we do not register the file with the transaction
 1038 1040 # because we need it to still exist when the transaction
 1039 1041 # is closed (for txnclose hooks)
1040 1042 tagsmod.writediff(changesfile, changes)
1041 1043 def validate(tr2):
1042 1044 """will run pre-closing hooks"""
1043 1045 # XXX the transaction API is a bit lacking here so we take a hacky
1044 1046 # path for now
1045 1047 #
 1046 1048 # We cannot add this as a "pending" hook since the 'tr.hookargs'
 1047 1049 # dict is copied before these run. In addition we need the data
 1048 1050 # available to in-memory hooks too.
1049 1051 #
1050 1052 # Moreover, we also need to make sure this runs before txnclose
1051 1053 # hooks and there is no "pending" mechanism that would execute
1052 1054 # logic only if hooks are about to run.
1053 1055 #
1054 1056 # Fixing this limitation of the transaction is also needed to track
1055 1057 # other families of changes (bookmarks, phases, obsolescence).
1056 1058 #
1057 1059 # This will have to be fixed before we remove the experimental
1058 1060 # gating.
1059 1061 tracktags(tr2)
1060 1062 reporef().hook('pretxnclose', throw=True,
1061 1063 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1062 1064 def releasefn(tr, success):
1063 1065 repo = reporef()
1064 1066 if success:
1065 1067 # this should be explicitly invoked here, because
1066 1068 # in-memory changes aren't written out at closing
1067 1069 # transaction, if tr.addfilegenerator (via
1068 1070 # dirstate.write or so) isn't invoked while
1069 1071 # transaction running
1070 1072 repo.dirstate.write(None)
1071 1073 else:
1072 1074 # discard all changes (including ones already written
1073 1075 # out) in this transaction
1074 1076 repo.dirstate.restorebackup(None, prefix='journal.')
1075 1077
1076 1078 repo.invalidate(clearfilecache=True)
1077 1079
1078 1080 tr = transaction.transaction(rp, self.svfs, vfsmap,
1079 1081 "journal",
1080 1082 "undo",
1081 1083 aftertrans(renames),
1082 1084 self.store.createmode,
1083 1085 validator=validate,
1084 1086 releasefn=releasefn)
1085 1087 tr.changes['revs'] = set()
1086 1088 tr.changes['obsmarkers'] = set()
1087 1089
1088 1090 tr.hookargs['txnid'] = txnid
 1089 1091 # note: writing the fncache only during finalize means that the file is
 1090 1092 # outdated when running hooks. As fncache is used for streaming clone,
 1091 1093 # this is not expected to break anything that happens during the hooks.
1092 1094 tr.addfinalize('flush-fncache', self.store.write)
1093 1095 def txnclosehook(tr2):
1094 1096 """To be run if transaction is successful, will schedule a hook run
1095 1097 """
1096 1098 # Don't reference tr2 in hook() so we don't hold a reference.
1097 1099 # This reduces memory consumption when there are multiple
1098 1100 # transactions per lock. This can likely go away if issue5045
1099 1101 # fixes the function accumulation.
1100 1102 hookargs = tr2.hookargs
1101 1103
1102 1104 def hook():
1103 1105 reporef().hook('txnclose', throw=False, txnname=desc,
1104 1106 **pycompat.strkwargs(hookargs))
1105 1107 reporef()._afterlock(hook)
1106 1108 tr.addfinalize('txnclose-hook', txnclosehook)
1107 1109 tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
1108 1110 def txnaborthook(tr2):
1109 1111 """To be run if transaction is aborted
1110 1112 """
1111 1113 reporef().hook('txnabort', throw=False, txnname=desc,
1112 1114 **tr2.hookargs)
1113 1115 tr.addabort('txnabort-hook', txnaborthook)
1114 1116 # avoid eager cache invalidation. in-memory data should be identical
1115 1117 # to stored data if transaction has no error.
1116 1118 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1117 1119 self._transref = weakref.ref(tr)
1118 1120 return tr
1119 1121
1120 1122 def _journalfiles(self):
1121 1123 return ((self.svfs, 'journal'),
1122 1124 (self.vfs, 'journal.dirstate'),
1123 1125 (self.vfs, 'journal.branch'),
1124 1126 (self.vfs, 'journal.desc'),
1125 1127 (self.vfs, 'journal.bookmarks'),
1126 1128 (self.svfs, 'journal.phaseroots'))
1127 1129
1128 1130 def undofiles(self):
1129 1131 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1130 1132
1131 1133 @unfilteredmethod
1132 1134 def _writejournal(self, desc):
1133 1135 self.dirstate.savebackup(None, prefix='journal.')
1134 1136 self.vfs.write("journal.branch",
1135 1137 encoding.fromlocal(self.dirstate.branch()))
1136 1138 self.vfs.write("journal.desc",
1137 1139 "%d\n%s\n" % (len(self), desc))
1138 1140 self.vfs.write("journal.bookmarks",
1139 1141 self.vfs.tryread("bookmarks"))
1140 1142 self.svfs.write("journal.phaseroots",
1141 1143 self.svfs.tryread("phaseroots"))
1142 1144
1143 1145 def recover(self):
1144 1146 with self.lock():
1145 1147 if self.svfs.exists("journal"):
1146 1148 self.ui.status(_("rolling back interrupted transaction\n"))
1147 1149 vfsmap = {'': self.svfs,
1148 1150 'plain': self.vfs,}
1149 1151 transaction.rollback(self.svfs, vfsmap, "journal",
1150 1152 self.ui.warn)
1151 1153 self.invalidate()
1152 1154 return True
1153 1155 else:
1154 1156 self.ui.warn(_("no interrupted transaction available\n"))
1155 1157 return False
1156 1158
1157 1159 def rollback(self, dryrun=False, force=False):
1158 1160 wlock = lock = dsguard = None
1159 1161 try:
1160 1162 wlock = self.wlock()
1161 1163 lock = self.lock()
1162 1164 if self.svfs.exists("undo"):
1163 1165 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1164 1166
1165 1167 return self._rollback(dryrun, force, dsguard)
1166 1168 else:
1167 1169 self.ui.warn(_("no rollback information available\n"))
1168 1170 return 1
1169 1171 finally:
1170 1172 release(dsguard, lock, wlock)
1171 1173
1172 1174 @unfilteredmethod # Until we get smarter cache management
1173 1175 def _rollback(self, dryrun, force, dsguard):
1174 1176 ui = self.ui
1175 1177 try:
1176 1178 args = self.vfs.read('undo.desc').splitlines()
1177 1179 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1178 1180 if len(args) >= 3:
1179 1181 detail = args[2]
1180 1182 oldtip = oldlen - 1
1181 1183
1182 1184 if detail and ui.verbose:
1183 1185 msg = (_('repository tip rolled back to revision %d'
1184 1186 ' (undo %s: %s)\n')
1185 1187 % (oldtip, desc, detail))
1186 1188 else:
1187 1189 msg = (_('repository tip rolled back to revision %d'
1188 1190 ' (undo %s)\n')
1189 1191 % (oldtip, desc))
1190 1192 except IOError:
1191 1193 msg = _('rolling back unknown transaction\n')
1192 1194 desc = None
1193 1195
1194 1196 if not force and self['.'] != self['tip'] and desc == 'commit':
1195 1197 raise error.Abort(
1196 1198 _('rollback of last commit while not checked out '
1197 1199 'may lose data'), hint=_('use -f to force'))
1198 1200
1199 1201 ui.status(msg)
1200 1202 if dryrun:
1201 1203 return 0
1202 1204
1203 1205 parents = self.dirstate.parents()
1204 1206 self.destroying()
1205 1207 vfsmap = {'plain': self.vfs, '': self.svfs}
1206 1208 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1207 1209 if self.vfs.exists('undo.bookmarks'):
1208 1210 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1209 1211 if self.svfs.exists('undo.phaseroots'):
1210 1212 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1211 1213 self.invalidate()
1212 1214
1213 1215 parentgone = (parents[0] not in self.changelog.nodemap or
1214 1216 parents[1] not in self.changelog.nodemap)
1215 1217 if parentgone:
1216 1218 # prevent dirstateguard from overwriting already restored one
1217 1219 dsguard.close()
1218 1220
1219 1221 self.dirstate.restorebackup(None, prefix='undo.')
1220 1222 try:
1221 1223 branch = self.vfs.read('undo.branch')
1222 1224 self.dirstate.setbranch(encoding.tolocal(branch))
1223 1225 except IOError:
1224 1226 ui.warn(_('named branch could not be reset: '
1225 1227 'current branch is still \'%s\'\n')
1226 1228 % self.dirstate.branch())
1227 1229
1228 1230 parents = tuple([p.rev() for p in self[None].parents()])
1229 1231 if len(parents) > 1:
1230 1232 ui.status(_('working directory now based on '
1231 1233 'revisions %d and %d\n') % parents)
1232 1234 else:
1233 1235 ui.status(_('working directory now based on '
1234 1236 'revision %d\n') % parents)
1235 1237 mergemod.mergestate.clean(self, self['.'].node())
1236 1238
1237 1239 # TODO: if we know which new heads may result from this rollback, pass
1238 1240 # them to destroy(), which will prevent the branchhead cache from being
1239 1241 # invalidated.
1240 1242 self.destroyed()
1241 1243 return 0
1242 1244
1243 1245 def _buildcacheupdater(self, newtransaction):
1244 1246 """called during transaction to build the callback updating cache
1245 1247
 1246 1248 Lives on the repository to help extensions that might want to augment
1247 1249 this logic. For this purpose, the created transaction is passed to the
1248 1250 method.
1249 1251 """
1250 1252 # we must avoid cyclic reference between repo and transaction.
1251 1253 reporef = weakref.ref(self)
1252 1254 def updater(tr):
1253 1255 repo = reporef()
1254 1256 repo.updatecaches(tr)
1255 1257 return updater
1256 1258
1257 1259 @unfilteredmethod
1258 1260 def updatecaches(self, tr=None):
1259 1261 """warm appropriate caches
1260 1262
 1261 1263 If this function is called after a transaction has closed, the
 1262 1264 transaction will be available in the 'tr' argument. This can be used
 1263 1265 to selectively update caches relevant to the changes in that transaction.
1264 1266 """
1265 1267 if tr is not None and tr.hookargs.get('source') == 'strip':
1266 1268 # During strip, many caches are invalid but
1267 1269 # later call to `destroyed` will refresh them.
1268 1270 return
1269 1271
1270 1272 if tr is None or tr.changes['revs']:
1271 1273 # updating the unfiltered branchmap should refresh all the others,
1272 1274 self.ui.debug('updating the branch cache\n')
1273 1275 branchmap.updatecache(self.filtered('served'))
1274 1276
1275 1277 def invalidatecaches(self):
1276 1278
1277 1279 if '_tagscache' in vars(self):
1278 1280 # can't use delattr on proxy
1279 1281 del self.__dict__['_tagscache']
1280 1282
1281 1283 self.unfiltered()._branchcaches.clear()
1282 1284 self.invalidatevolatilesets()
1283 1285
1284 1286 def invalidatevolatilesets(self):
1285 1287 self.filteredrevcache.clear()
1286 1288 obsolete.clearobscaches(self)
1287 1289
1288 1290 def invalidatedirstate(self):
1289 1291 '''Invalidates the dirstate, causing the next call to dirstate
1290 1292 to check if it was modified since the last time it was read,
1291 1293 rereading it if it has.
1292 1294
 1293 1295 This is different from dirstate.invalidate() in that it doesn't always
 1294 1296 reread the dirstate. Use dirstate.invalidate() if you want to
1295 1297 explicitly read the dirstate again (i.e. restoring it to a previous
1296 1298 known good state).'''
1297 1299 if hasunfilteredcache(self, 'dirstate'):
1298 1300 for k in self.dirstate._filecache:
1299 1301 try:
1300 1302 delattr(self.dirstate, k)
1301 1303 except AttributeError:
1302 1304 pass
1303 1305 delattr(self.unfiltered(), 'dirstate')
1304 1306
1305 1307 def invalidate(self, clearfilecache=False):
1306 1308 '''Invalidates both store and non-store parts other than dirstate
1307 1309
1308 1310 If a transaction is running, invalidation of store is omitted,
1309 1311 because discarding in-memory changes might cause inconsistency
1310 1312 (e.g. incomplete fncache causes unintentional failure, but
1311 1313 redundant one doesn't).
1312 1314 '''
1313 1315 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1314 1316 for k in list(self._filecache.keys()):
1315 1317 # dirstate is invalidated separately in invalidatedirstate()
1316 1318 if k == 'dirstate':
1317 1319 continue
1318 1320
1319 1321 if clearfilecache:
1320 1322 del self._filecache[k]
1321 1323 try:
1322 1324 delattr(unfiltered, k)
1323 1325 except AttributeError:
1324 1326 pass
1325 1327 self.invalidatecaches()
1326 1328 if not self.currenttransaction():
1327 1329 # TODO: Changing contents of store outside transaction
1328 1330 # causes inconsistency. We should make in-memory store
1329 1331 # changes detectable, and abort if changed.
1330 1332 self.store.invalidatecaches()
1331 1333
1332 1334 def invalidateall(self):
1333 1335 '''Fully invalidates both store and non-store parts, causing the
1334 1336 subsequent operation to reread any outside changes.'''
 1335 1337 # extensions should hook this to invalidate their caches
1336 1338 self.invalidate()
1337 1339 self.invalidatedirstate()
1338 1340
1339 1341 @unfilteredmethod
1340 1342 def _refreshfilecachestats(self, tr):
1341 1343 """Reload stats of cached files so that they are flagged as valid"""
1342 1344 for k, ce in self._filecache.items():
1343 1345 if k == 'dirstate' or k not in self.__dict__:
1344 1346 continue
1345 1347 ce.refresh()
1346 1348
1347 1349 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1348 1350 inheritchecker=None, parentenvvar=None):
1349 1351 parentlock = None
1350 1352 # the contents of parentenvvar are used by the underlying lock to
1351 1353 # determine whether it can be inherited
1352 1354 if parentenvvar is not None:
1353 1355 parentlock = encoding.environ.get(parentenvvar)
1354 1356 try:
1355 1357 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1356 1358 acquirefn=acquirefn, desc=desc,
1357 1359 inheritchecker=inheritchecker,
1358 1360 parentlock=parentlock)
1359 1361 except error.LockHeld as inst:
1360 1362 if not wait:
1361 1363 raise
1362 1364 # show more details for new-style locks
1363 1365 if ':' in inst.locker:
1364 1366 host, pid = inst.locker.split(":", 1)
1365 1367 self.ui.warn(
1366 1368 _("waiting for lock on %s held by process %r "
1367 1369 "on host %r\n") % (desc, pid, host))
1368 1370 else:
1369 1371 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1370 1372 (desc, inst.locker))
1371 1373 # default to 600 seconds timeout
1372 1374 l = lockmod.lock(vfs, lockname,
1373 1375 int(self.ui.config("ui", "timeout", "600")),
1374 1376 releasefn=releasefn, acquirefn=acquirefn,
1375 1377 desc=desc)
1376 1378 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1377 1379 return l
1378 1380
1379 1381 def _afterlock(self, callback):
1380 1382 """add a callback to be run when the repository is fully unlocked
1381 1383
1382 1384 The callback will be executed when the outermost lock is released
1383 1385 (with wlock being higher level than 'lock')."""
1384 1386 for ref in (self._wlockref, self._lockref):
1385 1387 l = ref and ref()
1386 1388 if l and l.held:
1387 1389 l.postrelease.append(callback)
1388 1390 break
 1389 1391 else: # no lock has been found.
1390 1392 callback()
1391 1393
1392 1394 def lock(self, wait=True):
1393 1395 '''Lock the repository store (.hg/store) and return a weak reference
1394 1396 to the lock. Use this before modifying the store (e.g. committing or
 1395 1397 stripping). If you are opening a transaction, get a lock as well.
1396 1398
 1397 1399 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
 1398 1400 'wlock' first to avoid a deadlock hazard.'''
1399 1401 l = self._currentlock(self._lockref)
1400 1402 if l is not None:
1401 1403 l.lock()
1402 1404 return l
1403 1405
1404 1406 l = self._lock(self.svfs, "lock", wait, None,
1405 1407 self.invalidate, _('repository %s') % self.origroot)
1406 1408 self._lockref = weakref.ref(l)
1407 1409 return l
1408 1410
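    # Illustration only (hypothetical usage): both the lock and a
    # transaction support the context manager protocol, so a store
    # mutation typically looks like ('some-operation' is a made-up desc):
    #
    #     with repo.lock():
    #         with repo.transaction('some-operation') as tr:
    #             ...  # modify the store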
1409 1411 def _wlockchecktransaction(self):
1410 1412 if self.currenttransaction() is not None:
1411 1413 raise error.LockInheritanceContractViolation(
1412 1414 'wlock cannot be inherited in the middle of a transaction')
1413 1415
1414 1416 def wlock(self, wait=True):
1415 1417 '''Lock the non-store parts of the repository (everything under
1416 1418 .hg except .hg/store) and return a weak reference to the lock.
1417 1419
1418 1420 Use this before modifying files in .hg.
1419 1421
 1420 1422 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
 1421 1423 'wlock' first to avoid a deadlock hazard.'''
1422 1424 l = self._wlockref and self._wlockref()
1423 1425 if l is not None and l.held:
1424 1426 l.lock()
1425 1427 return l
1426 1428
1427 1429 # We do not need to check for non-waiting lock acquisition. Such
 1428 1430 # acquisitions would not cause a deadlock as they would just fail.
1429 1431 if wait and (self.ui.configbool('devel', 'all-warnings')
1430 1432 or self.ui.configbool('devel', 'check-locks')):
1431 1433 if self._currentlock(self._lockref) is not None:
1432 1434 self.ui.develwarn('"wlock" acquired after "lock"')
1433 1435
1434 1436 def unlock():
1435 1437 if self.dirstate.pendingparentchange():
1436 1438 self.dirstate.invalidate()
1437 1439 else:
1438 1440 self.dirstate.write(None)
1439 1441
1440 1442 self._filecache['dirstate'].refresh()
1441 1443
1442 1444 l = self._lock(self.vfs, "wlock", wait, unlock,
1443 1445 self.invalidatedirstate, _('working directory of %s') %
1444 1446 self.origroot,
1445 1447 inheritchecker=self._wlockchecktransaction,
1446 1448 parentenvvar='HG_WLOCK_LOCKER')
1447 1449 self._wlockref = weakref.ref(l)
1448 1450 return l
1449 1451
1450 1452 def _currentlock(self, lockref):
1451 1453 """Returns the lock if it's held, or None if it's not."""
1452 1454 if lockref is None:
1453 1455 return None
1454 1456 l = lockref()
1455 1457 if l is None or not l.held:
1456 1458 return None
1457 1459 return l
1458 1460
1459 1461 def currentwlock(self):
1460 1462 """Returns the wlock if it's held, or None if it's not."""
1461 1463 return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit):
            # the temporary commit may have been stripped before the hook fires
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
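
    # Minimal usage sketch (message and username are illustrative): a caller
    # that has already staged working-directory changes might run:
    #
    #     node = repo.commit(text='fix frobnication', user='alice')
    #     if node is None:
    #         pass  # nothing changed; empty commits are refused by default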

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0, we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
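
    # Hedged sketch: commitctx() is also the landing point for in-memory
    # commits built from context.memctx; 'parents', 'files' and 'filectxfn'
    # below are illustrative placeholders, not values defined in this module:
    #
    #     mctx = context.memctx(repo, parents, 'message', files, filectxfn)
    #     node = repo.commitctx(mctx)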

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
        return self[node].walk(match)
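
    # Migration sketch for the deprecation above -- callers should walk a
    # context directly, e.g.:
    #
    #     for f in repo['.'].walk(match):
    #         pass  # process each matched file f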

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)
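
    # Illustrative callback sketch ('fixup' is a hypothetical name); the
    # (wctx, status) signature follows the contract documented above:
    #
    #     def fixup(wctx, status):
    #         pass  # runs with the wlock held, after status fixups
    #     repo.addpostdsstatus(fixup)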

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
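
    # Note: for each (top, bottom) pair, the loop above collects nodes along
    # the first-parent chain at exponentially growing distances from top
    # (1, 2, 4, ...), which is the sampling the legacy 'between' wire
    # protocol command expects.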

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass
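
    # Hedged override sketch using the usual reposetup subclassing pattern
    # ('myrepo' and the extra check are illustrative):
    #
    #     def reposetup(ui, repo):
    #         class myrepo(repo.__class__):
    #             def checkpush(self, pushop):
    #                 super(myrepo, self).checkpush(pushop)
    #                 # extra checks here; raise error.Abort to refuse the push
    #         repo.__class__ = myrepo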

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance whose registered hooks are called,
        before changesets are pushed, with a pushop carrying repo, remote
        and outgoing attributes.
        """
        return util.hooks()
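
    # Illustrative registration sketch ('myext' and 'checkoutgoing' are
    # hypothetical names); each registered hook later receives the pushop:
    #
    #     def checkoutgoing(pushop):
    #         pass  # inspect pushop.repo, pushop.remote, pushop.outgoing
    #     repo.prepushoutgoinghooks.add('myext', checkoutgoing)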

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret
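
    # Minimal usage sketch: bookmarks are one namespace exposed through this
    # interface; old and new values are hex node strings ('oldhex'/'newhex'
    # are illustrative placeholders):
    #
    #     repo.pushkey('bookmarks', 'mybook', oldhex, newhex)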

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to a same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression', 'zlib')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)

    return requirements
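
# Hedged sketch of the wrapping mentioned in the docstring above;
# 'exp-myfeature' is an illustrative requirement name:
#
#     from mercurial import extensions, localrepo
#     def wrapped(orig, repo):
#         requirements = orig(repo)
#         requirements.add('exp-myfeature')
#         return requirements
#     extensions.wrapfunction(localrepo, 'newreporequirements', wrapped)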