transaction: track new obsmarkers in the 'changes' mapping...
marmoute
r33248:a5cb2e44 default
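The hunk below seeds the transaction's 'changes' mapping with an empty
'obsmarkers' set, next to the existing 'revs' entry, giving code that adds
obsolescence markers during the transaction a place to record them. A
minimal, hypothetical consumer sketch (the helper name and the assumption
that the marker-writing path fills the set are illustrative; only the
initialization is part of this diff):

    def reportnewobsmarkers(ui, tr):
        # 'obsmarkers' mirrors tr.changes['revs']: whatever adds markers
        # inside this transaction is expected to record them here
        # (assumed; the population side is not shown in this hunk).
        markers = tr.changes.get('obsmarkers', set())
        if markers:
            ui.status('transaction added %d obsolescence markers\n'
                      % len(markers))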
@@ -1,2110 +1,2111
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 )
24 24 from . import (
25 25 bookmarks,
26 26 branchmap,
27 27 bundle2,
28 28 changegroup,
29 29 changelog,
30 30 color,
31 31 context,
32 32 dirstate,
33 33 dirstateguard,
34 34 encoding,
35 35 error,
36 36 exchange,
37 37 extensions,
38 38 filelog,
39 39 hook,
40 40 lock as lockmod,
41 41 manifest,
42 42 match as matchmod,
43 43 merge as mergemod,
44 44 mergeutil,
45 45 namespaces,
46 46 obsolete,
47 47 pathutil,
48 48 peer,
49 49 phases,
50 50 pushkey,
51 51 pycompat,
52 52 repoview,
53 53 revset,
54 54 revsetlang,
55 55 scmutil,
56 56 store,
57 57 subrepo,
58 58 tags as tagsmod,
59 59 transaction,
60 60 txnutil,
61 61 util,
62 62 vfs as vfsmod,
63 63 )
64 64
65 65 release = lockmod.release
66 66 urlerr = util.urlerr
67 67 urlreq = util.urlreq
68 68
69 69 class _basefilecache(scmutil.filecache):
70 70 """All filecache usage on repo are done for logic that should be unfiltered
71 71 """
72 72 def __get__(self, repo, type=None):
73 73 if repo is None:
74 74 return self
75 75 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
76 76 def __set__(self, repo, value):
77 77 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
78 78 def __delete__(self, repo):
79 79 return super(_basefilecache, self).__delete__(repo.unfiltered())
80 80
81 81 class repofilecache(_basefilecache):
82 82 """filecache for files in .hg but outside of .hg/store"""
83 83 def join(self, obj, fname):
84 84 return obj.vfs.join(fname)
85 85
86 86 class storecache(_basefilecache):
87 87 """filecache for files in the store"""
88 88 def join(self, obj, fname):
89 89 return obj.sjoin(fname)
90 90
91 91 class unfilteredpropertycache(util.propertycache):
92 92 """propertycache that apply to unfiltered repo only"""
93 93
94 94 def __get__(self, repo, type=None):
95 95 unfi = repo.unfiltered()
96 96 if unfi is repo:
97 97 return super(unfilteredpropertycache, self).__get__(unfi)
98 98 return getattr(unfi, self.name)
99 99
100 100 class filteredpropertycache(util.propertycache):
101 101 """propertycache that must take filtering in account"""
102 102
103 103 def cachevalue(self, obj, value):
104 104 object.__setattr__(obj, self.name, value)
105 105
106 106
107 107 def hasunfilteredcache(repo, name):
108 108 """check if a repo has an unfilteredpropertycache value for <name>"""
109 109 return name in vars(repo.unfiltered())
110 110
111 111 def unfilteredmethod(orig):
112 112 """decorate method that always need to be run on unfiltered version"""
113 113 def wrapper(repo, *args, **kwargs):
114 114 return orig(repo.unfiltered(), *args, **kwargs)
115 115 return wrapper
116 116
117 117 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
118 118 'unbundle'}
119 119 legacycaps = moderncaps.union({'changegroupsubset'})
120 120
121 121 class localpeer(peer.peerrepository):
122 122 '''peer for a local repo; reflects only the most recent API'''
123 123
124 124 def __init__(self, repo, caps=None):
125 125 if caps is None:
126 126 caps = moderncaps.copy()
127 127 peer.peerrepository.__init__(self)
128 128 self._repo = repo.filtered('served')
129 129 self.ui = repo.ui
130 130 self._caps = repo._restrictcapabilities(caps)
131 131 self.requirements = repo.requirements
132 132 self.supportedformats = repo.supportedformats
133 133
134 134 def close(self):
135 135 self._repo.close()
136 136
137 137 def _capabilities(self):
138 138 return self._caps
139 139
140 140 def local(self):
141 141 return self._repo
142 142
143 143 def canpush(self):
144 144 return True
145 145
146 146 def url(self):
147 147 return self._repo.url()
148 148
149 149 def lookup(self, key):
150 150 return self._repo.lookup(key)
151 151
152 152 def branchmap(self):
153 153 return self._repo.branchmap()
154 154
155 155 def heads(self):
156 156 return self._repo.heads()
157 157
158 158 def known(self, nodes):
159 159 return self._repo.known(nodes)
160 160
161 161 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
162 162 **kwargs):
163 163 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
164 164 common=common, bundlecaps=bundlecaps,
165 165 **kwargs)
166 166 cb = util.chunkbuffer(chunks)
167 167
168 168 if exchange.bundle2requested(bundlecaps):
169 169 # When requesting a bundle2, getbundle returns a stream to make the
170 170 # wire level function happier. We need to build a proper object
171 171 # from it in local peer.
172 172 return bundle2.getunbundler(self.ui, cb)
173 173 else:
174 174 return changegroup.getunbundler('01', cb, None)
175 175
176 176 # TODO We might want to move the next two calls into legacypeer and add
177 177 # unbundle instead.
178 178
179 179 def unbundle(self, cg, heads, url):
180 180 """apply a bundle on a repo
181 181
182 182 This function handles the repo locking itself."""
183 183 try:
184 184 try:
185 185 cg = exchange.readbundle(self.ui, cg, None)
186 186 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
187 187 if util.safehasattr(ret, 'getchunks'):
188 188 # This is a bundle20 object, turn it into an unbundler.
189 189 # This little dance should be dropped eventually when the
190 190 # API is finally improved.
191 191 stream = util.chunkbuffer(ret.getchunks())
192 192 ret = bundle2.getunbundler(self.ui, stream)
193 193 return ret
194 194 except Exception as exc:
195 195 # If the exception contains output salvaged from a bundle2
196 196 # reply, we need to make sure it is printed before continuing
197 197 # to fail. So we build a bundle2 with such output and consume
198 198 # it directly.
199 199 #
200 200 # This is not very elegant but allows a "simple" solution for
201 201 # issue4594
202 202 output = getattr(exc, '_bundle2salvagedoutput', ())
203 203 if output:
204 204 bundler = bundle2.bundle20(self._repo.ui)
205 205 for out in output:
206 206 bundler.addpart(out)
207 207 stream = util.chunkbuffer(bundler.getchunks())
208 208 b = bundle2.getunbundler(self.ui, stream)
209 209 bundle2.processbundle(self._repo, b)
210 210 raise
211 211 except error.PushRaced as exc:
212 212 raise error.ResponseError(_('push failed:'), str(exc))
213 213
214 214 def lock(self):
215 215 return self._repo.lock()
216 216
217 217 def pushkey(self, namespace, key, old, new):
218 218 return self._repo.pushkey(namespace, key, old, new)
219 219
220 220 def listkeys(self, namespace):
221 221 return self._repo.listkeys(namespace)
222 222
223 223 def debugwireargs(self, one, two, three=None, four=None, five=None):
224 224 '''used to test argument passing over the wire'''
225 225 return "%s %s %s %s %s" % (one, two, three, four, five)
226 226
227 227 class locallegacypeer(localpeer):
228 228 '''peer extension which implements legacy methods too; used for tests with
229 229 restricted capabilities'''
230 230
231 231 def __init__(self, repo):
232 232 localpeer.__init__(self, repo, caps=legacycaps)
233 233
234 234 def branches(self, nodes):
235 235 return self._repo.branches(nodes)
236 236
237 237 def between(self, pairs):
238 238 return self._repo.between(pairs)
239 239
240 240 def changegroup(self, basenodes, source):
241 241 return changegroup.changegroup(self._repo, basenodes, source)
242 242
243 243 def changegroupsubset(self, bases, heads, source):
244 244 return changegroup.changegroupsubset(self._repo, bases, heads, source)
245 245
246 246 # Increment the sub-version when the revlog v2 format changes to lock out old
247 247 # clients.
248 248 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
249 249
250 250 class localrepository(object):
251 251
252 252 supportedformats = {
253 253 'revlogv1',
254 254 'generaldelta',
255 255 'treemanifest',
256 256 'manifestv2',
257 257 REVLOGV2_REQUIREMENT,
258 258 }
259 259 _basesupported = supportedformats | {
260 260 'store',
261 261 'fncache',
262 262 'shared',
263 263 'relshared',
264 264 'dotencode',
265 265 }
266 266 openerreqs = {
267 267 'revlogv1',
268 268 'generaldelta',
269 269 'treemanifest',
270 270 'manifestv2',
271 271 }
272 272
273 273 # a list of (ui, featureset) functions.
274 274 # only functions defined in modules of enabled extensions are invoked
275 275 featuresetupfuncs = set()
276 276
277 277 def __init__(self, baseui, path, create=False):
278 278 self.requirements = set()
279 279 self.filtername = None
280 280 # wvfs: rooted at the repository root, used to access the working copy
281 281 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
282 282 # vfs: rooted at .hg, used to access repo files outside of .hg/store
283 283 self.vfs = None
284 284 # svfs: usually rooted at .hg/store, used to access repository history
285 285 # If this is a shared repository, this vfs may point to another
286 286 # repository's .hg/store directory.
287 287 self.svfs = None
288 288 self.root = self.wvfs.base
289 289 self.path = self.wvfs.join(".hg")
290 290 self.origroot = path
291 291 self.auditor = pathutil.pathauditor(self.root, self._checknested)
292 292 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
293 293 realfs=False)
294 294 self.vfs = vfsmod.vfs(self.path)
295 295 self.baseui = baseui
296 296 self.ui = baseui.copy()
297 297 self.ui.copy = baseui.copy # prevent copying repo configuration
298 298 # A list of callbacks to shape the phase if no data were found.
299 299 # Callbacks are in the form: func(repo, roots) --> processed root.
300 300 # This list is to be filled by extensions during repo setup
301 301 self._phasedefaults = []
302 302 try:
303 303 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
304 304 self._loadextensions()
305 305 except IOError:
306 306 pass
307 307
308 308 if self.featuresetupfuncs:
309 309 self.supported = set(self._basesupported) # use private copy
310 310 extmods = set(m.__name__ for n, m
311 311 in extensions.extensions(self.ui))
312 312 for setupfunc in self.featuresetupfuncs:
313 313 if setupfunc.__module__ in extmods:
314 314 setupfunc(self.ui, self.supported)
315 315 else:
316 316 self.supported = self._basesupported
317 317 color.setup(self.ui)
318 318
319 319 # Add compression engines.
320 320 for name in util.compengines:
321 321 engine = util.compengines[name]
322 322 if engine.revlogheader():
323 323 self.supported.add('exp-compression-%s' % name)
324 324
325 325 if not self.vfs.isdir():
326 326 if create:
327 327 self.requirements = newreporequirements(self)
328 328
329 329 if not self.wvfs.exists():
330 330 self.wvfs.makedirs()
331 331 self.vfs.makedir(notindexed=True)
332 332
333 333 if 'store' in self.requirements:
334 334 self.vfs.mkdir("store")
335 335
336 336 # create an invalid changelog
337 337 self.vfs.append(
338 338 "00changelog.i",
339 339 '\0\0\0\2' # represents revlogv2
340 340 ' dummy changelog to prevent using the old repo layout'
341 341 )
342 342 else:
343 343 raise error.RepoError(_("repository %s not found") % path)
344 344 elif create:
345 345 raise error.RepoError(_("repository %s already exists") % path)
346 346 else:
347 347 try:
348 348 self.requirements = scmutil.readrequires(
349 349 self.vfs, self.supported)
350 350 except IOError as inst:
351 351 if inst.errno != errno.ENOENT:
352 352 raise
353 353
354 354 self.sharedpath = self.path
355 355 try:
356 356 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
357 357 if 'relshared' in self.requirements:
358 358 sharedpath = self.vfs.join(sharedpath)
359 359 vfs = vfsmod.vfs(sharedpath, realpath=True)
360 360 s = vfs.base
361 361 if not vfs.exists():
362 362 raise error.RepoError(
363 363 _('.hg/sharedpath points to nonexistent directory %s') % s)
364 364 self.sharedpath = s
365 365 except IOError as inst:
366 366 if inst.errno != errno.ENOENT:
367 367 raise
368 368
369 369 self.store = store.store(
370 370 self.requirements, self.sharedpath, vfsmod.vfs)
371 371 self.spath = self.store.path
372 372 self.svfs = self.store.vfs
373 373 self.sjoin = self.store.join
374 374 self.vfs.createmode = self.store.createmode
375 375 self._applyopenerreqs()
376 376 if create:
377 377 self._writerequirements()
378 378
379 379 self._dirstatevalidatewarned = False
380 380
381 381 self._branchcaches = {}
382 382 self._revbranchcache = None
383 383 self.filterpats = {}
384 384 self._datafilters = {}
385 385 self._transref = self._lockref = self._wlockref = None
386 386
387 387 # A cache for various files under .hg/ that tracks file changes,
388 388 # (used by the filecache decorator)
389 389 #
390 390 # Maps a property name to its util.filecacheentry
391 391 self._filecache = {}
392 392
393 393 # hold sets of revisions to be filtered
394 394 # should be cleared when something might have changed the filter value:
395 395 # - new changesets,
396 396 # - phase change,
397 397 # - new obsolescence marker,
398 398 # - working directory parent change,
399 399 # - bookmark changes
400 400 self.filteredrevcache = {}
401 401
402 402 # post-dirstate-status hooks
403 403 self._postdsstatus = []
404 404
405 405 # generic mapping between names and nodes
406 406 self.names = namespaces.namespaces()
407 407
408 408 def close(self):
409 409 self._writecaches()
410 410
411 411 def _loadextensions(self):
412 412 extensions.loadall(self.ui)
413 413
414 414 def _writecaches(self):
415 415 if self._revbranchcache:
416 416 self._revbranchcache.write()
417 417
418 418 def _restrictcapabilities(self, caps):
419 419 if self.ui.configbool('experimental', 'bundle2-advertise', True):
420 420 caps = set(caps)
421 421 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
422 422 caps.add('bundle2=' + urlreq.quote(capsblob))
423 423 return caps
424 424
425 425 def _applyopenerreqs(self):
426 426 self.svfs.options = dict((r, 1) for r in self.requirements
427 427 if r in self.openerreqs)
428 428 # experimental config: format.chunkcachesize
429 429 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
430 430 if chunkcachesize is not None:
431 431 self.svfs.options['chunkcachesize'] = chunkcachesize
432 432 # experimental config: format.maxchainlen
433 433 maxchainlen = self.ui.configint('format', 'maxchainlen')
434 434 if maxchainlen is not None:
435 435 self.svfs.options['maxchainlen'] = maxchainlen
436 436 # experimental config: format.manifestcachesize
437 437 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
438 438 if manifestcachesize is not None:
439 439 self.svfs.options['manifestcachesize'] = manifestcachesize
440 440 # experimental config: format.aggressivemergedeltas
441 441 aggressivemergedeltas = self.ui.configbool('format',
442 442 'aggressivemergedeltas')
443 443 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
444 444 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
445 445 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan', -1)
446 446 if 0 <= chainspan:
447 447 self.svfs.options['maxdeltachainspan'] = chainspan
448 448
449 449 for r in self.requirements:
450 450 if r.startswith('exp-compression-'):
451 451 self.svfs.options['compengine'] = r[len('exp-compression-'):]
452 452
453 453 # TODO move "revlogv2" to openerreqs once finalized.
454 454 if REVLOGV2_REQUIREMENT in self.requirements:
455 455 self.svfs.options['revlogv2'] = True
456 456
457 457 def _writerequirements(self):
458 458 scmutil.writerequires(self.vfs, self.requirements)
459 459
460 460 def _checknested(self, path):
461 461 """Determine if path is a legal nested repository."""
462 462 if not path.startswith(self.root):
463 463 return False
464 464 subpath = path[len(self.root) + 1:]
465 465 normsubpath = util.pconvert(subpath)
466 466
467 467 # XXX: Checking against the current working copy is wrong in
468 468 # the sense that it can reject things like
469 469 #
470 470 # $ hg cat -r 10 sub/x.txt
471 471 #
472 472 # if sub/ is no longer a subrepository in the working copy
473 473 # parent revision.
474 474 #
475 475 # However, it can of course also allow things that would have
476 476 # been rejected before, such as the above cat command if sub/
477 477 # is a subrepository now, but was a normal directory before.
478 478 # The old path auditor would have rejected by mistake since it
479 479 # panics when it sees sub/.hg/.
480 480 #
481 481 # All in all, checking against the working copy seems sensible
482 482 # since we want to prevent access to nested repositories on
483 483 # the filesystem *now*.
484 484 ctx = self[None]
485 485 parts = util.splitpath(subpath)
486 486 while parts:
487 487 prefix = '/'.join(parts)
488 488 if prefix in ctx.substate:
489 489 if prefix == normsubpath:
490 490 return True
491 491 else:
492 492 sub = ctx.sub(prefix)
493 493 return sub.checknested(subpath[len(prefix) + 1:])
494 494 else:
495 495 parts.pop()
496 496 return False
497 497
498 498 def peer(self):
499 499 return localpeer(self) # not cached to avoid reference cycle
500 500
501 501 def unfiltered(self):
502 502 """Return unfiltered version of the repository
503 503
504 504 Intended to be overwritten by filtered repo."""
505 505 return self
506 506
507 507 def filtered(self, name):
508 508 """Return a filtered version of a repository"""
509 509 # build a new class with the mixin and the current class
510 510 # (possibly subclass of the repo)
511 511 class filteredrepo(repoview.repoview, self.unfiltered().__class__):
512 512 pass
513 513 return filteredrepo(self, name)
514 514
515 515 @repofilecache('bookmarks', 'bookmarks.current')
516 516 def _bookmarks(self):
517 517 return bookmarks.bmstore(self)
518 518
519 519 @property
520 520 def _activebookmark(self):
521 521 return self._bookmarks.active
522 522
523 523 # _phaserevs and _phasesets depend on changelog. what we need is to
524 524 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
525 525 # can't be easily expressed in filecache mechanism.
526 526 @storecache('phaseroots', '00changelog.i')
527 527 def _phasecache(self):
528 528 return phases.phasecache(self, self._phasedefaults)
529 529
530 530 @storecache('obsstore')
531 531 def obsstore(self):
532 532 return obsolete.makestore(self.ui, self)
533 533
534 534 @storecache('00changelog.i')
535 535 def changelog(self):
536 536 return changelog.changelog(self.svfs,
537 537 trypending=txnutil.mayhavepending(self.root))
538 538
539 539 def _constructmanifest(self):
540 540 # This is a temporary function while we migrate from manifest to
541 541 # manifestlog. It allows bundlerepo and unionrepo to intercept the
542 542 # manifest creation.
543 543 return manifest.manifestrevlog(self.svfs)
544 544
545 545 @storecache('00manifest.i')
546 546 def manifestlog(self):
547 547 return manifest.manifestlog(self.svfs, self)
548 548
549 549 @repofilecache('dirstate')
550 550 def dirstate(self):
551 551 return dirstate.dirstate(self.vfs, self.ui, self.root,
552 552 self._dirstatevalidate)
553 553
554 554 def _dirstatevalidate(self, node):
555 555 try:
556 556 self.changelog.rev(node)
557 557 return node
558 558 except error.LookupError:
559 559 if not self._dirstatevalidatewarned:
560 560 self._dirstatevalidatewarned = True
561 561 self.ui.warn(_("warning: ignoring unknown"
562 562 " working parent %s!\n") % short(node))
563 563 return nullid
564 564
565 565 def __getitem__(self, changeid):
566 566 if changeid is None:
567 567 return context.workingctx(self)
568 568 if isinstance(changeid, slice):
569 569 # wdirrev isn't contiguous so the slice shouldn't include it
570 570 return [context.changectx(self, i)
571 571 for i in xrange(*changeid.indices(len(self)))
572 572 if i not in self.changelog.filteredrevs]
573 573 try:
574 574 return context.changectx(self, changeid)
575 575 except error.WdirUnsupported:
576 576 return context.workingctx(self)
577 577
578 578 def __contains__(self, changeid):
579 579 """True if the given changeid exists
580 580
581 581 error.LookupError is raised if an ambiguous node is specified.
582 582 """
583 583 try:
584 584 self[changeid]
585 585 return True
586 586 except error.RepoLookupError:
587 587 return False
588 588
589 589 def __nonzero__(self):
590 590 return True
591 591
592 592 __bool__ = __nonzero__
593 593
594 594 def __len__(self):
595 595 return len(self.changelog)
596 596
597 597 def __iter__(self):
598 598 return iter(self.changelog)
599 599
600 600 def revs(self, expr, *args):
601 601 '''Find revisions matching a revset.
602 602
603 603 The revset is specified as a string ``expr`` that may contain
604 604 %-formatting to escape certain types. See ``revsetlang.formatspec``.
605 605
606 606 Revset aliases from the configuration are not expanded. To expand
607 607 user aliases, consider calling ``scmutil.revrange()`` or
608 608 ``repo.anyrevs([expr], user=True)``.
609 609
610 610 Returns a revset.abstractsmartset, which is a list-like interface
611 611 that contains integer revisions.
612 612 '''
613 613 expr = revsetlang.formatspec(expr, *args)
614 614 m = revset.match(None, expr)
615 615 return m(self)
616 616
617 617 def set(self, expr, *args):
618 618 '''Find revisions matching a revset and emit changectx instances.
619 619
620 620 This is a convenience wrapper around ``revs()`` that iterates the
621 621 result and is a generator of changectx instances.
622 622
623 623 Revset aliases from the configuration are not expanded. To expand
624 624 user aliases, consider calling ``scmutil.revrange()``.
625 625 '''
626 626 for r in self.revs(expr, *args):
627 627 yield self[r]
628 628
629 629 def anyrevs(self, specs, user=False):
630 630 '''Find revisions matching one of the given revsets.
631 631
632 632 Revset aliases from the configuration are not expanded by default. To
633 633 expand user aliases, specify ``user=True``.
634 634 '''
635 635 if user:
636 636 m = revset.matchany(self.ui, specs, repo=self)
637 637 else:
638 638 m = revset.matchany(None, specs)
639 639 return m(self)
640 640
641 641 def url(self):
642 642 return 'file:' + self.root
643 643
644 644 def hook(self, name, throw=False, **args):
645 645 """Call a hook, passing this repo instance.
646 646
647 647 This is a convenience method to aid invoking hooks. Extensions likely
648 648 won't call this unless they have registered a custom hook or are
649 649 replacing code that is expected to call a hook.
650 650 """
651 651 return hook.hook(self.ui, self, name, throw, **args)
652 652
653 653 @filteredpropertycache
654 654 def _tagscache(self):
655 655 '''Returns a tagscache object that contains various tags related
656 656 caches.'''
657 657
658 658 # This simplifies its cache management by having one decorated
659 659 # function (this one) and the rest simply fetch things from it.
660 660 class tagscache(object):
661 661 def __init__(self):
662 662 # These two define the set of tags for this repository. tags
663 663 # maps tag name to node; tagtypes maps tag name to 'global' or
664 664 # 'local'. (Global tags are defined by .hgtags across all
665 665 # heads, and local tags are defined in .hg/localtags.)
666 666 # They constitute the in-memory cache of tags.
667 667 self.tags = self.tagtypes = None
668 668
669 669 self.nodetagscache = self.tagslist = None
670 670
671 671 cache = tagscache()
672 672 cache.tags, cache.tagtypes = self._findtags()
673 673
674 674 return cache
675 675
676 676 def tags(self):
677 677 '''return a mapping of tag to node'''
678 678 t = {}
679 679 if self.changelog.filteredrevs:
680 680 tags, tt = self._findtags()
681 681 else:
682 682 tags = self._tagscache.tags
683 683 for k, v in tags.iteritems():
684 684 try:
685 685 # ignore tags to unknown nodes
686 686 self.changelog.rev(v)
687 687 t[k] = v
688 688 except (error.LookupError, ValueError):
689 689 pass
690 690 return t
691 691
692 692 def _findtags(self):
693 693 '''Do the hard work of finding tags. Return a pair of dicts
694 694 (tags, tagtypes) where tags maps tag name to node, and tagtypes
695 695 maps tag name to a string like \'global\' or \'local\'.
696 696 Subclasses or extensions are free to add their own tags, but
697 697 should be aware that the returned dicts will be retained for the
698 698 duration of the localrepo object.'''
699 699
700 700 # XXX what tagtype should subclasses/extensions use? Currently
701 701 # mq and bookmarks add tags, but do not set the tagtype at all.
702 702 # Should each extension invent its own tag type? Should there
703 703 # be one tagtype for all such "virtual" tags? Or is the status
704 704 # quo fine?
705 705
706 706
707 707 # map tag name to (node, hist)
708 708 alltags = tagsmod.findglobaltags(self.ui, self)
709 709 # map tag name to tag type
710 710 tagtypes = dict((tag, 'global') for tag in alltags)
711 711
712 712 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
713 713
714 714 # Build the return dicts. Have to re-encode tag names because
715 715 # the tags module always uses UTF-8 (in order not to lose info
716 716 # writing to the cache), but the rest of Mercurial wants them in
717 717 # local encoding.
718 718 tags = {}
719 719 for (name, (node, hist)) in alltags.iteritems():
720 720 if node != nullid:
721 721 tags[encoding.tolocal(name)] = node
722 722 tags['tip'] = self.changelog.tip()
723 723 tagtypes = dict([(encoding.tolocal(name), value)
724 724 for (name, value) in tagtypes.iteritems()])
725 725 return (tags, tagtypes)
726 726
727 727 def tagtype(self, tagname):
728 728 '''
729 729 return the type of the given tag. result can be:
730 730
731 731 'local' : a local tag
732 732 'global' : a global tag
733 733 None : tag does not exist
734 734 '''
735 735
736 736 return self._tagscache.tagtypes.get(tagname)
737 737
738 738 def tagslist(self):
739 739 '''return a list of tags ordered by revision'''
740 740 if not self._tagscache.tagslist:
741 741 l = []
742 742 for t, n in self.tags().iteritems():
743 743 l.append((self.changelog.rev(n), t, n))
744 744 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
745 745
746 746 return self._tagscache.tagslist
747 747
748 748 def nodetags(self, node):
749 749 '''return the tags associated with a node'''
750 750 if not self._tagscache.nodetagscache:
751 751 nodetagscache = {}
752 752 for t, n in self._tagscache.tags.iteritems():
753 753 nodetagscache.setdefault(n, []).append(t)
754 754 for tags in nodetagscache.itervalues():
755 755 tags.sort()
756 756 self._tagscache.nodetagscache = nodetagscache
757 757 return self._tagscache.nodetagscache.get(node, [])
758 758
759 759 def nodebookmarks(self, node):
760 760 """return the list of bookmarks pointing to the specified node"""
761 761 marks = []
762 762 for bookmark, n in self._bookmarks.iteritems():
763 763 if n == node:
764 764 marks.append(bookmark)
765 765 return sorted(marks)
766 766
767 767 def branchmap(self):
768 768 '''returns a dictionary {branch: [branchheads]} with branchheads
769 769 ordered by increasing revision number'''
770 770 branchmap.updatecache(self)
771 771 return self._branchcaches[self.filtername]
772 772
773 773 @unfilteredmethod
774 774 def revbranchcache(self):
775 775 if not self._revbranchcache:
776 776 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
777 777 return self._revbranchcache
778 778
779 779 def branchtip(self, branch, ignoremissing=False):
780 780 '''return the tip node for a given branch
781 781
782 782 If ignoremissing is True, then this method will not raise an error.
783 783 This is helpful for callers that only expect None for a missing branch
784 784 (e.g. namespace).
785 785
786 786 '''
787 787 try:
788 788 return self.branchmap().branchtip(branch)
789 789 except KeyError:
790 790 if not ignoremissing:
791 791 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
792 792 else:
793 793 pass
794 794
795 795 def lookup(self, key):
796 796 return self[key].node()
797 797
798 798 def lookupbranch(self, key, remote=None):
799 799 repo = remote or self
800 800 if key in repo.branchmap():
801 801 return key
802 802
803 803 repo = (remote and remote.local()) and remote or self
804 804 return repo[key].branch()
805 805
806 806 def known(self, nodes):
807 807 cl = self.changelog
808 808 nm = cl.nodemap
809 809 filtered = cl.filteredrevs
810 810 result = []
811 811 for n in nodes:
812 812 r = nm.get(n)
813 813 resp = not (r is None or r in filtered)
814 814 result.append(resp)
815 815 return result
816 816
817 817 def local(self):
818 818 return self
819 819
820 820 def publishing(self):
821 821 # it's safe (and desirable) to trust the publish flag unconditionally
822 822 # so that we don't finalize changes shared between users via ssh or nfs
823 823 return self.ui.configbool('phases', 'publish', True, untrusted=True)
824 824
825 825 def cancopy(self):
826 826 # so statichttprepo's override of local() works
827 827 if not self.local():
828 828 return False
829 829 if not self.publishing():
830 830 return True
831 831 # if publishing we can't copy if there is filtered content
832 832 return not self.filtered('visible').changelog.filteredrevs
833 833
834 834 def shared(self):
835 835 '''the type of shared repository (None if not shared)'''
836 836 if self.sharedpath != self.path:
837 837 return 'store'
838 838 return None
839 839
840 840 def wjoin(self, f, *insidef):
841 841 return self.vfs.reljoin(self.root, f, *insidef)
842 842
843 843 def file(self, f):
844 844 if f[0] == '/':
845 845 f = f[1:]
846 846 return filelog.filelog(self.svfs, f)
847 847
848 848 def changectx(self, changeid):
849 849 return self[changeid]
850 850
851 851 def setparents(self, p1, p2=nullid):
852 852 with self.dirstate.parentchange():
853 853 copies = self.dirstate.setparents(p1, p2)
854 854 pctx = self[p1]
855 855 if copies:
856 856 # Adjust copy records; the dirstate cannot do it, as it
857 857 # requires access to the parents' manifests. Preserve them
858 858 # only for entries added to the first parent.
859 859 for f in copies:
860 860 if f not in pctx and copies[f] in pctx:
861 861 self.dirstate.copy(copies[f], f)
862 862 if p2 == nullid:
863 863 for f, s in sorted(self.dirstate.copies().items()):
864 864 if f not in pctx and s not in pctx:
865 865 self.dirstate.copy(None, f)
866 866
867 867 def filectx(self, path, changeid=None, fileid=None):
868 868 """changeid can be a changeset revision, node, or tag.
869 869 fileid can be a file revision or node."""
870 870 return context.filectx(self, path, changeid, fileid)
871 871
872 872 def getcwd(self):
873 873 return self.dirstate.getcwd()
874 874
875 875 def pathto(self, f, cwd=None):
876 876 return self.dirstate.pathto(f, cwd)
877 877
878 878 def _loadfilter(self, filter):
879 879 if filter not in self.filterpats:
880 880 l = []
881 881 for pat, cmd in self.ui.configitems(filter):
882 882 if cmd == '!':
883 883 continue
884 884 mf = matchmod.match(self.root, '', [pat])
885 885 fn = None
886 886 params = cmd
887 887 for name, filterfn in self._datafilters.iteritems():
888 888 if cmd.startswith(name):
889 889 fn = filterfn
890 890 params = cmd[len(name):].lstrip()
891 891 break
892 892 if not fn:
893 893 fn = lambda s, c, **kwargs: util.filter(s, c)
894 894 # Wrap old filters not supporting keyword arguments
895 895 if not inspect.getargspec(fn)[2]:
896 896 oldfn = fn
897 897 fn = lambda s, c, **kwargs: oldfn(s, c)
898 898 l.append((mf, fn, params))
899 899 self.filterpats[filter] = l
900 900 return self.filterpats[filter]
901 901
902 902 def _filter(self, filterpats, filename, data):
903 903 for mf, fn, cmd in filterpats:
904 904 if mf(filename):
905 905 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
906 906 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
907 907 break
908 908
909 909 return data
910 910
911 911 @unfilteredpropertycache
912 912 def _encodefilterpats(self):
913 913 return self._loadfilter('encode')
914 914
915 915 @unfilteredpropertycache
916 916 def _decodefilterpats(self):
917 917 return self._loadfilter('decode')
918 918
919 919 def adddatafilter(self, name, filter):
920 920 self._datafilters[name] = filter
921 921
922 922 def wread(self, filename):
923 923 if self.wvfs.islink(filename):
924 924 data = self.wvfs.readlink(filename)
925 925 else:
926 926 data = self.wvfs.read(filename)
927 927 return self._filter(self._encodefilterpats, filename, data)
928 928
929 929 def wwrite(self, filename, data, flags, backgroundclose=False):
930 930 """write ``data`` into ``filename`` in the working directory
931 931
932 932 This returns the length of the written (possibly decoded) data.
933 933 """
934 934 data = self._filter(self._decodefilterpats, filename, data)
935 935 if 'l' in flags:
936 936 self.wvfs.symlink(data, filename)
937 937 else:
938 938 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
939 939 if 'x' in flags:
940 940 self.wvfs.setflags(filename, False, True)
941 941 return len(data)
942 942
943 943 def wwritedata(self, filename, data):
944 944 return self._filter(self._decodefilterpats, filename, data)
945 945
946 946 def currenttransaction(self):
947 947 """return the current transaction or None if non exists"""
948 948 if self._transref:
949 949 tr = self._transref()
950 950 else:
951 951 tr = None
952 952
953 953 if tr and tr.running():
954 954 return tr
955 955 return None
956 956
957 957 def transaction(self, desc, report=None):
958 958 if (self.ui.configbool('devel', 'all-warnings')
959 959 or self.ui.configbool('devel', 'check-locks')):
960 960 if self._currentlock(self._lockref) is None:
961 961 raise error.ProgrammingError('transaction requires locking')
962 962 tr = self.currenttransaction()
963 963 if tr is not None:
964 964 return tr.nest()
965 965
966 966 # abort here if the journal already exists
967 967 if self.svfs.exists("journal"):
968 968 raise error.RepoError(
969 969 _("abandoned transaction found"),
970 970 hint=_("run 'hg recover' to clean up transaction"))
971 971
972 972 idbase = "%.40f#%f" % (random.random(), time.time())
973 973 ha = hex(hashlib.sha1(idbase).digest())
974 974 txnid = 'TXN:' + ha
975 975 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
976 976
977 977 self._writejournal(desc)
978 978 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
979 979 if report:
980 980 rp = report
981 981 else:
982 982 rp = self.ui.warn
983 983 vfsmap = {'plain': self.vfs} # root of .hg/
984 984 # we must avoid cyclic reference between repo and transaction.
985 985 reporef = weakref.ref(self)
986 986 # Code to track tag movement
987 987 #
988 988 # Since tags are all handled as file content, it is actually quite hard
989 989 # to track these movements from a code perspective. So we fall back to
990 990 # tracking at the repository level. One could envision tracking changes
991 991 # to the '.hgtags' file through changegroup application, but that fails to
992 992 # cope with cases where a transaction exposes new heads without a changegroup
993 993 # being involved (e.g. phase movement).
994 994 #
995 995 # For now, we gate the feature behind a flag since this likely comes
996 996 # with performance impacts. The current code runs more often than needed
997 997 # and does not use caches as much as it could. The current focus is on
998 998 # the behavior of the feature so we disable it by default. The flag
999 999 # will be removed when we are happy with the performance impact.
1000 1000 #
1001 1001 # Once this feature is no longer experimental move the following
1002 1002 # documentation to the appropriate help section:
1003 1003 #
1004 1004 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1005 1005 # tags (new or changed or deleted tags). In addition the details of
1006 1006 # these changes are made available in a file at:
1007 1007 # ``REPOROOT/.hg/changes/tags.changes``.
1008 1008 # Make sure you check for HG_TAG_MOVED before reading that file as it
1009 1009 # might exist from a previous transaction even if no tags were touched
1010 1010 # in this one. Changes are recorded in a line-based format::
1011 1011 #
1012 1012 # <action> <hex-node> <tag-name>\n
1013 1013 #
1014 1014 # Actions are defined as follows:
1015 1015 # "-R": tag is removed,
1016 1016 # "+A": tag is added,
1017 1017 # "-M": tag is moved (old value),
1018 1018 # "+M": tag is moved (new value),
1019 1019 tracktags = lambda x: None
1020 1020 # experimental config: experimental.hook-track-tags
1021 1021 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags',
1022 1022 False)
1023 1023 if desc != 'strip' and shouldtracktags:
1024 1024 oldheads = self.changelog.headrevs()
1025 1025 def tracktags(tr2):
1026 1026 repo = reporef()
1027 1027 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1028 1028 newheads = repo.changelog.headrevs()
1029 1029 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1030 1030 # note: we compare lists here.
1031 1031 # As we do it only once, building a set would not be cheaper
1032 1032 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1033 1033 if changes:
1034 1034 tr2.hookargs['tag_moved'] = '1'
1035 1035 with repo.vfs('changes/tags.changes', 'w',
1036 1036 atomictemp=True) as changesfile:
1037 1037 # note: we do not register the file with the transaction
1038 1038 # because we need it to still exist when the transaction
1039 1039 # is closed (for txnclose hooks)
1040 1040 tagsmod.writediff(changesfile, changes)
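# Illustrative sketch (an assumption, not shown in this hunk): a txnclose
# hook gated on the HG_TAG_MOVED variable could consume the tags.changes
# file written by tracktags() above. Each line follows the documented
# "<action> <hex-node> <tag-name>" form, so such a hook could do roughly:
#
#     data = repo.vfs.read('changes/tags.changes')
#     for line in data.splitlines():
#         action, node, name = line.split(' ', 2)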
1041 1041 def validate(tr2):
1042 1042 """will run pre-closing hooks"""
1043 1043 # XXX the transaction API is a bit lacking here so we take a hacky
1044 1044 # path for now
1045 1045 #
1046 1046 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1047 1047 # dict is copied before these run. In addition we need the data
1048 1048 # available to in-memory hooks too.
1049 1049 #
1050 1050 # Moreover, we also need to make sure this runs before txnclose
1051 1051 # hooks and there is no "pending" mechanism that would execute
1052 1052 # logic only if hooks are about to run.
1053 1053 #
1054 1054 # Fixing this limitation of the transaction is also needed to track
1055 1055 # other families of changes (bookmarks, phases, obsolescence).
1056 1056 #
1057 1057 # This will have to be fixed before we remove the experimental
1058 1058 # gating.
1059 1059 tracktags(tr2)
1060 1060 reporef().hook('pretxnclose', throw=True,
1061 1061 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1062 1062 def releasefn(tr, success):
1063 1063 repo = reporef()
1064 1064 if success:
1065 1065 # this should be explicitly invoked here, because
1066 1066 # in-memory changes aren't written out when closing
1067 1067 # the transaction, if tr.addfilegenerator (via
1068 1068 # dirstate.write or so) isn't invoked while the
1069 1069 # transaction is running
1070 1070 repo.dirstate.write(None)
1071 1071 else:
1072 1072 # discard all changes (including ones already written
1073 1073 # out) in this transaction
1074 1074 repo.dirstate.restorebackup(None, prefix='journal.')
1075 1075
1076 1076 repo.invalidate(clearfilecache=True)
1077 1077
1078 1078 tr = transaction.transaction(rp, self.svfs, vfsmap,
1079 1079 "journal",
1080 1080 "undo",
1081 1081 aftertrans(renames),
1082 1082 self.store.createmode,
1083 1083 validator=validate,
1084 1084 releasefn=releasefn)
1085 1085 tr.changes['revs'] = set()
1086 tr.changes['obsmarkers'] = set()
1086 1087
1087 1088 tr.hookargs['txnid'] = txnid
1088 1089 # note: writing the fncache only during finalize means that the file is
1089 1090 # outdated when running hooks. As fncache is used for streaming clone,
1090 1091 # this is not expected to break anything that happens during the hooks.
1091 1092 tr.addfinalize('flush-fncache', self.store.write)
1092 1093 def txnclosehook(tr2):
1093 1094 """To be run if transaction is successful, will schedule a hook run
1094 1095 """
1095 1096 # Don't reference tr2 in hook() so we don't hold a reference.
1096 1097 # This reduces memory consumption when there are multiple
1097 1098 # transactions per lock. This can likely go away if issue5045
1098 1099 # fixes the function accumulation.
1099 1100 hookargs = tr2.hookargs
1100 1101
1101 1102 def hook():
1102 1103 reporef().hook('txnclose', throw=False, txnname=desc,
1103 1104 **pycompat.strkwargs(hookargs))
1104 1105 reporef()._afterlock(hook)
1105 1106 tr.addfinalize('txnclose-hook', txnclosehook)
1106 1107 tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
1107 1108 def txnaborthook(tr2):
1108 1109 """To be run if transaction is aborted
1109 1110 """
1110 1111 reporef().hook('txnabort', throw=False, txnname=desc,
1111 1112 **tr2.hookargs)
1112 1113 tr.addabort('txnabort-hook', txnaborthook)
1113 1114 # avoid eager cache invalidation. in-memory data should be identical
1114 1115 # to stored data if transaction has no error.
1115 1116 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1116 1117 self._transref = weakref.ref(tr)
1117 1118 return tr
1118 1119
1119 1120 def _journalfiles(self):
1120 1121 return ((self.svfs, 'journal'),
1121 1122 (self.vfs, 'journal.dirstate'),
1122 1123 (self.vfs, 'journal.branch'),
1123 1124 (self.vfs, 'journal.desc'),
1124 1125 (self.vfs, 'journal.bookmarks'),
1125 1126 (self.svfs, 'journal.phaseroots'))
1126 1127
1127 1128 def undofiles(self):
1128 1129 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1129 1130
1130 1131 @unfilteredmethod
1131 1132 def _writejournal(self, desc):
1132 1133 self.dirstate.savebackup(None, prefix='journal.')
1133 1134 self.vfs.write("journal.branch",
1134 1135 encoding.fromlocal(self.dirstate.branch()))
1135 1136 self.vfs.write("journal.desc",
1136 1137 "%d\n%s\n" % (len(self), desc))
1137 1138 self.vfs.write("journal.bookmarks",
1138 1139 self.vfs.tryread("bookmarks"))
1139 1140 self.svfs.write("journal.phaseroots",
1140 1141 self.svfs.tryread("phaseroots"))
1141 1142
1142 1143 def recover(self):
1143 1144 with self.lock():
1144 1145 if self.svfs.exists("journal"):
1145 1146 self.ui.status(_("rolling back interrupted transaction\n"))
1146 1147 vfsmap = {'': self.svfs,
1147 1148 'plain': self.vfs,}
1148 1149 transaction.rollback(self.svfs, vfsmap, "journal",
1149 1150 self.ui.warn)
1150 1151 self.invalidate()
1151 1152 return True
1152 1153 else:
1153 1154 self.ui.warn(_("no interrupted transaction available\n"))
1154 1155 return False
1155 1156
1156 1157 def rollback(self, dryrun=False, force=False):
1157 1158 wlock = lock = dsguard = None
1158 1159 try:
1159 1160 wlock = self.wlock()
1160 1161 lock = self.lock()
1161 1162 if self.svfs.exists("undo"):
1162 1163 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1163 1164
1164 1165 return self._rollback(dryrun, force, dsguard)
1165 1166 else:
1166 1167 self.ui.warn(_("no rollback information available\n"))
1167 1168 return 1
1168 1169 finally:
1169 1170 release(dsguard, lock, wlock)
1170 1171
1171 1172 @unfilteredmethod # Until we get smarter cache management
1172 1173 def _rollback(self, dryrun, force, dsguard):
1173 1174 ui = self.ui
1174 1175 try:
1175 1176 args = self.vfs.read('undo.desc').splitlines()
1176 1177 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1177 1178 if len(args) >= 3:
1178 1179 detail = args[2]
1179 1180 oldtip = oldlen - 1
1180 1181
1181 1182 if detail and ui.verbose:
1182 1183 msg = (_('repository tip rolled back to revision %d'
1183 1184 ' (undo %s: %s)\n')
1184 1185 % (oldtip, desc, detail))
1185 1186 else:
1186 1187 msg = (_('repository tip rolled back to revision %d'
1187 1188 ' (undo %s)\n')
1188 1189 % (oldtip, desc))
1189 1190 except IOError:
1190 1191 msg = _('rolling back unknown transaction\n')
1191 1192 desc = None
1192 1193
1193 1194 if not force and self['.'] != self['tip'] and desc == 'commit':
1194 1195 raise error.Abort(
1195 1196 _('rollback of last commit while not checked out '
1196 1197 'may lose data'), hint=_('use -f to force'))
1197 1198
1198 1199 ui.status(msg)
1199 1200 if dryrun:
1200 1201 return 0
1201 1202
1202 1203 parents = self.dirstate.parents()
1203 1204 self.destroying()
1204 1205 vfsmap = {'plain': self.vfs, '': self.svfs}
1205 1206 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1206 1207 if self.vfs.exists('undo.bookmarks'):
1207 1208 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1208 1209 if self.svfs.exists('undo.phaseroots'):
1209 1210 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1210 1211 self.invalidate()
1211 1212
1212 1213 parentgone = (parents[0] not in self.changelog.nodemap or
1213 1214 parents[1] not in self.changelog.nodemap)
1214 1215 if parentgone:
1215 1216 # prevent dirstateguard from overwriting already restored one
1216 1217 dsguard.close()
1217 1218
1218 1219 self.dirstate.restorebackup(None, prefix='undo.')
1219 1220 try:
1220 1221 branch = self.vfs.read('undo.branch')
1221 1222 self.dirstate.setbranch(encoding.tolocal(branch))
1222 1223 except IOError:
1223 1224 ui.warn(_('named branch could not be reset: '
1224 1225 'current branch is still \'%s\'\n')
1225 1226 % self.dirstate.branch())
1226 1227
1227 1228 parents = tuple([p.rev() for p in self[None].parents()])
1228 1229 if len(parents) > 1:
1229 1230 ui.status(_('working directory now based on '
1230 1231 'revisions %d and %d\n') % parents)
1231 1232 else:
1232 1233 ui.status(_('working directory now based on '
1233 1234 'revision %d\n') % parents)
1234 1235 mergemod.mergestate.clean(self, self['.'].node())
1235 1236
1236 1237 # TODO: if we know which new heads may result from this rollback, pass
1237 1238 # them to destroy(), which will prevent the branchhead cache from being
1238 1239 # invalidated.
1239 1240 self.destroyed()
1240 1241 return 0
1241 1242
1242 1243 def _buildcacheupdater(self, newtransaction):
1243 1244 """called during transaction to build the callback updating cache
1244 1245
1245 1246 Lives on the repository to help extensions that might want to augment
1246 1247 this logic. For this purpose, the created transaction is passed to the
1247 1248 method.
1248 1249 """
1249 1250 # we must avoid cyclic reference between repo and transaction.
1250 1251 reporef = weakref.ref(self)
1251 1252 def updater(tr):
1252 1253 repo = reporef()
1253 1254 repo.updatecaches(tr)
1254 1255 return updater
1255 1256
1256 1257 @unfilteredmethod
1257 1258 def updatecaches(self, tr=None):
1258 1259 """warm appropriate caches
1259 1260
1260 1261 If this function is called after a transaction has closed, the transaction
1261 1262 will be available in the 'tr' argument. This can be used to selectively
1262 1263 update caches relevant to the changes in that transaction.
1263 1264 """
1264 1265 if tr is not None and tr.hookargs.get('source') == 'strip':
1265 1266 # During strip, many caches are invalid but
1266 1267 # a later call to `destroyed` will refresh them.
1267 1268 return
1268 1269
1269 1270 if tr is None or tr.changes['revs']:
1270 1271 # updating the unfiltered branchmap should refresh all the others,
1271 1272 self.ui.debug('updating the branch cache\n')
1272 1273 branchmap.updatecache(self.filtered('served'))
1273 1274
1274 1275 def invalidatecaches(self):
1275 1276
1276 1277 if '_tagscache' in vars(self):
1277 1278 # can't use delattr on proxy
1278 1279 del self.__dict__['_tagscache']
1279 1280
1280 1281 self.unfiltered()._branchcaches.clear()
1281 1282 self.invalidatevolatilesets()
1282 1283
1283 1284 def invalidatevolatilesets(self):
1284 1285 self.filteredrevcache.clear()
1285 1286 obsolete.clearobscaches(self)
1286 1287
1287 1288 def invalidatedirstate(self):
1288 1289 '''Invalidates the dirstate, causing the next call to dirstate
1289 1290 to check if it was modified since the last time it was read,
1290 1291 rereading it if it has.
1291 1292
1292 1293 This is different from dirstate.invalidate() in that it doesn't always
1293 1294 reread the dirstate. Use dirstate.invalidate() if you want to
1294 1295 explicitly read the dirstate again (i.e. restoring it to a previous
1295 1296 known good state).'''
1296 1297 if hasunfilteredcache(self, 'dirstate'):
1297 1298 for k in self.dirstate._filecache:
1298 1299 try:
1299 1300 delattr(self.dirstate, k)
1300 1301 except AttributeError:
1301 1302 pass
1302 1303 delattr(self.unfiltered(), 'dirstate')
1303 1304
1304 1305 def invalidate(self, clearfilecache=False):
1305 1306 '''Invalidates both store and non-store parts other than dirstate
1306 1307
1307 1308 If a transaction is running, invalidation of store is omitted,
1308 1309 because discarding in-memory changes might cause inconsistency
1309 1310 (e.g. incomplete fncache causes unintentional failure, but
1310 1311 redundant one doesn't).
1311 1312 '''
1312 1313 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1313 1314 for k in list(self._filecache.keys()):
1314 1315 # dirstate is invalidated separately in invalidatedirstate()
1315 1316 if k == 'dirstate':
1316 1317 continue
1317 1318
1318 1319 if clearfilecache:
1319 1320 del self._filecache[k]
1320 1321 try:
1321 1322 delattr(unfiltered, k)
1322 1323 except AttributeError:
1323 1324 pass
1324 1325 self.invalidatecaches()
1325 1326 if not self.currenttransaction():
1326 1327 # TODO: Changing contents of store outside transaction
1327 1328 # causes inconsistency. We should make in-memory store
1328 1329 # changes detectable, and abort if changed.
1329 1330 self.store.invalidatecaches()
1330 1331
1331 1332 def invalidateall(self):
1332 1333 '''Fully invalidates both store and non-store parts, causing the
1333 1334 subsequent operation to reread any outside changes.'''
1334 1335 # extensions should hook this to invalidate their caches
1335 1336 self.invalidate()
1336 1337 self.invalidatedirstate()
1337 1338
1338 1339 @unfilteredmethod
1339 1340 def _refreshfilecachestats(self, tr):
1340 1341 """Reload stats of cached files so that they are flagged as valid"""
1341 1342 for k, ce in self._filecache.items():
1342 1343 if k == 'dirstate' or k not in self.__dict__:
1343 1344 continue
1344 1345 ce.refresh()
1345 1346
1346 1347 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1347 1348 inheritchecker=None, parentenvvar=None):
1348 1349 parentlock = None
1349 1350 # the contents of parentenvvar are used by the underlying lock to
1350 1351 # determine whether it can be inherited
1351 1352 if parentenvvar is not None:
1352 1353 parentlock = encoding.environ.get(parentenvvar)
1353 1354 try:
1354 1355 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1355 1356 acquirefn=acquirefn, desc=desc,
1356 1357 inheritchecker=inheritchecker,
1357 1358 parentlock=parentlock)
1358 1359 except error.LockHeld as inst:
1359 1360 if not wait:
1360 1361 raise
1361 1362 # show more details for new-style locks
1362 1363 if ':' in inst.locker:
1363 1364 host, pid = inst.locker.split(":", 1)
1364 1365 self.ui.warn(
1365 1366 _("waiting for lock on %s held by process %r "
1366 1367 "on host %r\n") % (desc, pid, host))
1367 1368 else:
1368 1369 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1369 1370 (desc, inst.locker))
1370 1371 # default to 600 seconds timeout
1371 1372 l = lockmod.lock(vfs, lockname,
1372 1373 int(self.ui.config("ui", "timeout", "600")),
1373 1374 releasefn=releasefn, acquirefn=acquirefn,
1374 1375 desc=desc)
1375 1376 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1376 1377 return l
1377 1378
1378 1379 def _afterlock(self, callback):
1379 1380 """add a callback to be run when the repository is fully unlocked
1380 1381
1381 1382 The callback will be executed when the outermost lock is released
1382 1383 (with wlock being higher level than 'lock')."""
1383 1384 for ref in (self._wlockref, self._lockref):
1384 1385 l = ref and ref()
1385 1386 if l and l.held:
1386 1387 l.postrelease.append(callback)
1387 1388 break
1388 1389 else: # no lock has been found.
1389 1390 callback()
1390 1391
1391 1392 def lock(self, wait=True):
1392 1393 '''Lock the repository store (.hg/store) and return a weak reference
1393 1394 to the lock. Use this before modifying the store (e.g. committing or
1394 1395 stripping). If you are opening a transaction, get a lock as well.
1395 1396
1396 1397 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1397 1398 'wlock' first to avoid a dead-lock hazard.'''
1398 1399 l = self._currentlock(self._lockref)
1399 1400 if l is not None:
1400 1401 l.lock()
1401 1402 return l
1402 1403
1403 1404 l = self._lock(self.svfs, "lock", wait, None,
1404 1405 self.invalidate, _('repository %s') % self.origroot)
1405 1406 self._lockref = weakref.ref(l)
1406 1407 return l
1407 1408
1408 1409 def _wlockchecktransaction(self):
1409 1410 if self.currenttransaction() is not None:
1410 1411 raise error.LockInheritanceContractViolation(
1411 1412 'wlock cannot be inherited in the middle of a transaction')
1412 1413
1413 1414 def wlock(self, wait=True):
1414 1415 '''Lock the non-store parts of the repository (everything under
1415 1416 .hg except .hg/store) and return a weak reference to the lock.
1416 1417
1417 1418 Use this before modifying files in .hg.
1418 1419
1419 1420 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1420 1421 'wlock' first to avoid a dead-lock hazard.'''
1421 1422 l = self._wlockref and self._wlockref()
1422 1423 if l is not None and l.held:
1423 1424 l.lock()
1424 1425 return l
1425 1426
1426 1427 # We do not need to check for non-waiting lock acquisition. Such
1427 1428 # acquisitions would not cause a dead-lock as they would just fail.
1428 1429 if wait and (self.ui.configbool('devel', 'all-warnings')
1429 1430 or self.ui.configbool('devel', 'check-locks')):
1430 1431 if self._currentlock(self._lockref) is not None:
1431 1432 self.ui.develwarn('"wlock" acquired after "lock"')
1432 1433
1433 1434 def unlock():
1434 1435 if self.dirstate.pendingparentchange():
1435 1436 self.dirstate.invalidate()
1436 1437 else:
1437 1438 self.dirstate.write(None)
1438 1439
1439 1440 self._filecache['dirstate'].refresh()
1440 1441
1441 1442 l = self._lock(self.vfs, "wlock", wait, unlock,
1442 1443 self.invalidatedirstate, _('working directory of %s') %
1443 1444 self.origroot,
1444 1445 inheritchecker=self._wlockchecktransaction,
1445 1446 parentenvvar='HG_WLOCK_LOCKER')
1446 1447 self._wlockref = weakref.ref(l)
1447 1448 return l
1448 1449
1449 1450 def _currentlock(self, lockref):
1450 1451 """Returns the lock if it's held, or None if it's not."""
1451 1452 if lockref is None:
1452 1453 return None
1453 1454 l = lockref()
1454 1455 if l is None or not l.held:
1455 1456 return None
1456 1457 return l
1457 1458
1458 1459 def currentwlock(self):
1459 1460 """Returns the wlock if it's held, or None if it's not."""
1460 1461 return self._currentlock(self._wlockref)
1461 1462
1462 1463 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1463 1464 """
1464 1465 commit an individual file as part of a larger transaction
1465 1466 """
1466 1467
1467 1468 fname = fctx.path()
1468 1469 fparent1 = manifest1.get(fname, nullid)
1469 1470 fparent2 = manifest2.get(fname, nullid)
1470 1471 if isinstance(fctx, context.filectx):
1471 1472 node = fctx.filenode()
1472 1473 if node in [fparent1, fparent2]:
1473 1474 self.ui.debug('reusing %s filelog entry\n' % fname)
1474 1475 if manifest1.flags(fname) != fctx.flags():
1475 1476 changelist.append(fname)
1476 1477 return node
1477 1478
1478 1479 flog = self.file(fname)
1479 1480 meta = {}
1480 1481 copy = fctx.renamed()
1481 1482 if copy and copy[0] != fname:
1482 1483 # Mark the new revision of this file as a copy of another
1483 1484 # file. This copy data will effectively act as a parent
1484 1485 # of this new revision. If this is a merge, the first
1485 1486 # parent will be the nullid (meaning "look up the copy data")
1486 1487 # and the second one will be the other parent. For example:
1487 1488 #
1488 1489 # 0 --- 1 --- 3 rev1 changes file foo
1489 1490 # \ / rev2 renames foo to bar and changes it
1490 1491 # \- 2 -/ rev3 should have bar with all changes and
1491 1492 # should record that bar descends from
1492 1493 # bar in rev2 and foo in rev1
1493 1494 #
1494 1495 # this allows this merge to succeed:
1495 1496 #
1496 1497 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1497 1498 # \ / merging rev3 and rev4 should use bar@rev2
1498 1499 # \- 2 --- 4 as the merge base
1499 1500 #
1500 1501
1501 1502 cfname = copy[0]
1502 1503 crev = manifest1.get(cfname)
1503 1504 newfparent = fparent2
1504 1505
1505 1506 if manifest2: # branch merge
1506 1507 if fparent2 == nullid or crev is None: # copied on remote side
1507 1508 if cfname in manifest2:
1508 1509 crev = manifest2[cfname]
1509 1510 newfparent = fparent1
1510 1511
1511 1512 # Here, we used to search backwards through history to try to find
1512 1513 # where the file copy came from if the source of a copy was not in
1513 1514 # the parent directory. However, this doesn't actually make sense to
1514 1515 # do (what does a copy from something not in your working copy even
1515 1516 # mean?) and it causes bugs (e.g., issue4476). Instead, we will warn
1516 1517 # the user that copy information was dropped, so if they didn't
1517 1518 # expect this outcome it can be fixed, but this is the correct
1518 1519 # behavior in this circumstance.
1519 1520
1520 1521 if crev:
1521 1522 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1522 1523 meta["copy"] = cfname
1523 1524 meta["copyrev"] = hex(crev)
1524 1525 fparent1, fparent2 = nullid, newfparent
1525 1526 else:
1526 1527 self.ui.warn(_("warning: can't find ancestor for '%s' "
1527 1528 "copied from '%s'!\n") % (fname, cfname))
1528 1529
1529 1530 elif fparent1 == nullid:
1530 1531 fparent1, fparent2 = fparent2, nullid
1531 1532 elif fparent2 != nullid:
1532 1533 # is one parent an ancestor of the other?
1533 1534 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1534 1535 if fparent1 in fparentancestors:
1535 1536 fparent1, fparent2 = fparent2, nullid
1536 1537 elif fparent2 in fparentancestors:
1537 1538 fparent2 = nullid
1538 1539
1539 1540 # is the file changed?
1540 1541 text = fctx.data()
1541 1542 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1542 1543 changelist.append(fname)
1543 1544 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1544 1545 # are just the flags changed during merge?
1545 1546 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1546 1547 changelist.append(fname)
1547 1548
1548 1549 return fparent1
1549 1550
1550 1551 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1551 1552 """check for commit arguments that aren't committable"""
1552 1553 if match.isexact() or match.prefix():
1553 1554 matched = set(status.modified + status.added + status.removed)
1554 1555
1555 1556 for f in match.files():
1556 1557 f = self.dirstate.normalize(f)
1557 1558 if f == '.' or f in matched or f in wctx.substate:
1558 1559 continue
1559 1560 if f in status.deleted:
1560 1561 fail(f, _('file not found!'))
1561 1562 if f in vdirs: # visited directory
1562 1563 d = f + '/'
1563 1564 for mf in matched:
1564 1565 if mf.startswith(d):
1565 1566 break
1566 1567 else:
1567 1568 fail(f, _("no match under directory!"))
1568 1569 elif f not in self.dirstate:
1569 1570 fail(f, _("file not tracked!"))
1570 1571
1571 1572 @unfilteredmethod
1572 1573 def commit(self, text="", user=None, date=None, match=None, force=False,
1573 1574 editor=False, extra=None):
1574 1575 """Add a new revision to current repository.
1575 1576
1576 1577 Revision information is gathered from the working directory,
1577 1578 match can be used to filter the committed files. If editor is
1578 1579 supplied, it is called to get a commit message.
1579 1580 """
1580 1581 if extra is None:
1581 1582 extra = {}
1582 1583
1583 1584 def fail(f, msg):
1584 1585 raise error.Abort('%s: %s' % (f, msg))
1585 1586
1586 1587 if not match:
1587 1588 match = matchmod.always(self.root, '')
1588 1589
1589 1590 if not force:
1590 1591 vdirs = []
1591 1592 match.explicitdir = vdirs.append
1592 1593 match.bad = fail
1593 1594
1594 1595 wlock = lock = tr = None
1595 1596 try:
1596 1597 wlock = self.wlock()
1597 1598 lock = self.lock() # for recent changelog (see issue4368)
1598 1599
1599 1600 wctx = self[None]
1600 1601 merge = len(wctx.parents()) > 1
1601 1602
1602 1603 if not force and merge and not match.always():
1603 1604 raise error.Abort(_('cannot partially commit a merge '
1604 1605 '(do not specify files or patterns)'))
1605 1606
1606 1607 status = self.status(match=match, clean=force)
1607 1608 if force:
1608 1609 status.modified.extend(status.clean) # mq may commit clean files
1609 1610
1610 1611 # check subrepos
1611 1612 subs = []
1612 1613 commitsubs = set()
1613 1614 newstate = wctx.substate.copy()
1614 1615 # only manage subrepos and .hgsubstate if .hgsub is present
1615 1616 if '.hgsub' in wctx:
1616 1617 # we'll decide whether to track this ourselves, thanks
1617 1618 for c in status.modified, status.added, status.removed:
1618 1619 if '.hgsubstate' in c:
1619 1620 c.remove('.hgsubstate')
1620 1621
1621 1622 # compare current state to last committed state
1622 1623 # build new substate based on last committed state
1623 1624 oldstate = wctx.p1().substate
1624 1625 for s in sorted(newstate.keys()):
1625 1626 if not match(s):
1626 1627 # ignore working copy, use old state if present
1627 1628 if s in oldstate:
1628 1629 newstate[s] = oldstate[s]
1629 1630 continue
1630 1631 if not force:
1631 1632 raise error.Abort(
1632 1633 _("commit with new subrepo %s excluded") % s)
1633 1634 dirtyreason = wctx.sub(s).dirtyreason(True)
1634 1635 if dirtyreason:
1635 1636 if not self.ui.configbool('ui', 'commitsubrepos'):
1636 1637 raise error.Abort(dirtyreason,
1637 1638 hint=_("use --subrepos for recursive commit"))
1638 1639 subs.append(s)
1639 1640 commitsubs.add(s)
1640 1641 else:
1641 1642 bs = wctx.sub(s).basestate()
1642 1643 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1643 1644 if oldstate.get(s, (None, None, None))[1] != bs:
1644 1645 subs.append(s)
1645 1646
1646 1647 # check for removed subrepos
1647 1648 for p in wctx.parents():
1648 1649 r = [s for s in p.substate if s not in newstate]
1649 1650 subs += [s for s in r if match(s)]
1650 1651 if subs:
1651 1652 if (not match('.hgsub') and
1652 1653 '.hgsub' in (wctx.modified() + wctx.added())):
1653 1654 raise error.Abort(
1654 1655 _("can't commit subrepos without .hgsub"))
1655 1656 status.modified.insert(0, '.hgsubstate')
1656 1657
1657 1658 elif '.hgsub' in status.removed:
1658 1659 # clean up .hgsubstate when .hgsub is removed
1659 1660 if ('.hgsubstate' in wctx and
1660 1661 '.hgsubstate' not in (status.modified + status.added +
1661 1662 status.removed)):
1662 1663 status.removed.insert(0, '.hgsubstate')
1663 1664
1664 1665 # make sure all explicit patterns are matched
1665 1666 if not force:
1666 1667 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1667 1668
1668 1669 cctx = context.workingcommitctx(self, status,
1669 1670 text, user, date, extra)
1670 1671
1671 1672 # internal config: ui.allowemptycommit
1672 1673 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1673 1674 or extra.get('close') or merge or cctx.files()
1674 1675 or self.ui.configbool('ui', 'allowemptycommit'))
1675 1676 if not allowemptycommit:
1676 1677 return None
1677 1678
1678 1679 if merge and cctx.deleted():
1679 1680 raise error.Abort(_("cannot commit merge with missing files"))
1680 1681
1681 1682 ms = mergemod.mergestate.read(self)
1682 1683 mergeutil.checkunresolved(ms)
1683 1684
1684 1685 if editor:
1685 1686 cctx._text = editor(self, cctx, subs)
1686 1687 edited = (text != cctx._text)
1687 1688
1688 1689 # Save commit message in case this transaction gets rolled back
1689 1690 # (e.g. by a pretxncommit hook). Leave the content alone on
1690 1691 # the assumption that the user will use the same editor again.
1691 1692 msgfn = self.savecommitmessage(cctx._text)
1692 1693
1693 1694 # commit subs and write new state
1694 1695 if subs:
1695 1696 for s in sorted(commitsubs):
1696 1697 sub = wctx.sub(s)
1697 1698 self.ui.status(_('committing subrepository %s\n') %
1698 1699 subrepo.subrelpath(sub))
1699 1700 sr = sub.commit(cctx._text, user, date)
1700 1701 newstate[s] = (newstate[s][0], sr)
1701 1702 subrepo.writestate(self, newstate)
1702 1703
1703 1704 p1, p2 = self.dirstate.parents()
1704 1705 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1705 1706 try:
1706 1707 self.hook("precommit", throw=True, parent1=hookp1,
1707 1708 parent2=hookp2)
1708 1709 tr = self.transaction('commit')
1709 1710 ret = self.commitctx(cctx, True)
1710 1711 except: # re-raises
1711 1712 if edited:
1712 1713 self.ui.write(
1713 1714 _('note: commit message saved in %s\n') % msgfn)
1714 1715 raise
1715 1716 # update bookmarks, dirstate and mergestate
1716 1717 bookmarks.update(self, [p1, p2], ret)
1717 1718 cctx.markcommitted(ret)
1718 1719 ms.reset()
1719 1720 tr.close()
1720 1721
1721 1722 finally:
1722 1723 lockmod.release(tr, lock, wlock)
1723 1724
1724 1725 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1725 1726 # hack for commands that use a temporary commit (e.g. histedit):
1726 1727 # the temporary commit got stripped before the hook was released
1727 1728 if self.changelog.hasnode(ret):
1728 1729 self.hook("commit", node=node, parent1=parent1,
1729 1730 parent2=parent2)
1730 1731 self._afterlock(commithook)
1731 1732 return ret
1732 1733
1733 1734 @unfilteredmethod
1734 1735 def commitctx(self, ctx, error=False):
1735 1736 """Add a new revision to current repository.
1736 1737 Revision information is passed via the context argument.
1737 1738 """
1738 1739
1739 1740 tr = None
1740 1741 p1, p2 = ctx.p1(), ctx.p2()
1741 1742 user = ctx.user()
1742 1743
1743 1744 lock = self.lock()
1744 1745 try:
1745 1746 tr = self.transaction("commit")
1746 1747 trp = weakref.proxy(tr)
1747 1748
1748 1749 if ctx.manifestnode():
1749 1750 # reuse an existing manifest revision
1750 1751 mn = ctx.manifestnode()
1751 1752 files = ctx.files()
1752 1753 elif ctx.files():
1753 1754 m1ctx = p1.manifestctx()
1754 1755 m2ctx = p2.manifestctx()
1755 1756 mctx = m1ctx.copy()
1756 1757
1757 1758 m = mctx.read()
1758 1759 m1 = m1ctx.read()
1759 1760 m2 = m2ctx.read()
1760 1761
1761 1762 # check in files
1762 1763 added = []
1763 1764 changed = []
1764 1765 removed = list(ctx.removed())
1765 1766 linkrev = len(self)
1766 1767 self.ui.note(_("committing files:\n"))
1767 1768 for f in sorted(ctx.modified() + ctx.added()):
1768 1769 self.ui.note(f + "\n")
1769 1770 try:
1770 1771 fctx = ctx[f]
1771 1772 if fctx is None:
1772 1773 removed.append(f)
1773 1774 else:
1774 1775 added.append(f)
1775 1776 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1776 1777 trp, changed)
1777 1778 m.setflag(f, fctx.flags())
1778 1779 except OSError as inst:
1779 1780 self.ui.warn(_("trouble committing %s!\n") % f)
1780 1781 raise
1781 1782 except IOError as inst:
1782 1783 errcode = getattr(inst, 'errno', errno.ENOENT)
1783 1784 if error or errcode and errcode != errno.ENOENT:
1784 1785 self.ui.warn(_("trouble committing %s!\n") % f)
1785 1786 raise
1786 1787
1787 1788 # update manifest
1788 1789 self.ui.note(_("committing manifest\n"))
1789 1790 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1790 1791 drop = [f for f in removed if f in m]
1791 1792 for f in drop:
1792 1793 del m[f]
1793 1794 mn = mctx.write(trp, linkrev,
1794 1795 p1.manifestnode(), p2.manifestnode(),
1795 1796 added, drop)
1796 1797 files = changed + removed
1797 1798 else:
1798 1799 mn = p1.manifestnode()
1799 1800 files = []
1800 1801
1801 1802 # update changelog
1802 1803 self.ui.note(_("committing changelog\n"))
1803 1804 self.changelog.delayupdate(tr)
1804 1805 n = self.changelog.add(mn, files, ctx.description(),
1805 1806 trp, p1.node(), p2.node(),
1806 1807 user, ctx.date(), ctx.extra().copy())
1807 1808 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1808 1809 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1809 1810 parent2=xp2)
1810 1811 # set the new commit in its proper phase
1811 1812 targetphase = subrepo.newcommitphase(self.ui, ctx)
1812 1813 if targetphase:
1813 1814 # retracting the boundary does not alter parent changesets.
1814 1815 # if a parent has a higher phase, the resulting phase will
1815 1816 # be compliant anyway
1816 1817 #
1817 1818 # if minimal phase was 0 we don't need to retract anything
1818 1819 phases.retractboundary(self, tr, targetphase, [n])
1819 1820 tr.close()
1820 1821 return n
1821 1822 finally:
1822 1823 if tr:
1823 1824 tr.release()
1824 1825 lock.release()
1825 1826
1826 1827 @unfilteredmethod
1827 1828 def destroying(self):
1828 1829 '''Inform the repository that nodes are about to be destroyed.
1829 1830 Intended for use by strip and rollback, so there's a common
1830 1831 place for anything that has to be done before destroying history.
1831 1832
1832 1833 This is mostly useful for saving state that is in memory and waiting
1833 1834 to be flushed when the current lock is released. Because a call to
1834 1835 destroyed is imminent, the repo will be invalidated, causing those
1835 1836 changes either to stay in memory (waiting for the next unlock) or to
1836 1837 vanish completely.
1837 1838 '''
1838 1839 # When using the same lock to commit and strip, the phasecache is left
1839 1840 # dirty after committing. Then when we strip, the repo is invalidated,
1840 1841 # causing those changes to disappear.
1841 1842 if '_phasecache' in vars(self):
1842 1843 self._phasecache.write()
1843 1844
1844 1845 @unfilteredmethod
1845 1846 def destroyed(self):
1846 1847 '''Inform the repository that nodes have been destroyed.
1847 1848 Intended for use by strip and rollback, so there's a common
1848 1849 place for anything that has to be done after destroying history.
1849 1850 '''
1850 1851 # When one tries to:
1851 1852 # 1) destroy nodes thus calling this method (e.g. strip)
1852 1853 # 2) use phasecache somewhere (e.g. commit)
1853 1854 #
1854 1855 # then 2) will fail because the phasecache contains nodes that were
1855 1856 # removed. We can either remove phasecache from the filecache,
1856 1857 # causing it to reload next time it is accessed, or simply filter
1857 1858 # the removed nodes now and write the updated cache.
1858 1859 self._phasecache.filterunknown(self)
1859 1860 self._phasecache.write()
1860 1861
1861 1862 # refresh all repository caches
1862 1863 self.updatecaches()
1863 1864
1864 1865 # Ensure the persistent tag cache is updated. Doing it now
1865 1866 # means that the tag cache only has to worry about destroyed
1866 1867 # heads immediately after a strip/rollback. That in turn
1867 1868 # guarantees that "cachetip == currenttip" (comparing both rev
1868 1869 # and node) always means no nodes have been added or destroyed.
1869 1870
1870 1871 # XXX this is suboptimal when qrefresh'ing: we strip the current
1871 1872 # head, refresh the tag cache, then immediately add a new head.
1872 1873 # But I think doing it this way is necessary for the "instant
1873 1874 # tag cache retrieval" case to work.
1874 1875 self.invalidate()
1875 1876
1876 1877 def walk(self, match, node=None):
1877 1878 '''
1878 1879 walk recursively through the directory tree or a given
1879 1880 changeset, finding all files matched by the match
1880 1881 function
1881 1882 '''
1882 1883 self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
1883 1884 return self[node].walk(match)
1884 1885
1885 1886 def status(self, node1='.', node2=None, match=None,
1886 1887 ignored=False, clean=False, unknown=False,
1887 1888 listsubrepos=False):
1888 1889 '''a convenience method that calls node1.status(node2)'''
1889 1890 return self[node1].status(node2, match, ignored, clean, unknown,
1890 1891 listsubrepos)
1891 1892
1892 1893 def addpostdsstatus(self, ps):
1893 1894 """Add a callback to run within the wlock, at the point at which status
1894 1895 fixups happen.
1895 1896
1896 1897 On status completion, callback(wctx, status) will be called with the
1897 1898 wlock held, unless the dirstate has changed from underneath or the wlock
1898 1899 couldn't be grabbed.
1899 1900
1900 1901 Callbacks should not capture and use a cached copy of the dirstate --
1901 1902 it might change in the meanwhile. Instead, they should access the
1902 1903 dirstate via wctx.repo().dirstate.
1903 1904
1904 1905 This list is emptied out after each status run -- extensions should
1905 1906 make sure they add to this list each time dirstate.status is called.
1906 1907 Extensions should also make sure they don't call this for statuses
1907 1908 that don't involve the dirstate.
1908 1909 """
1909 1910
1910 1911 # The list is located here for uniqueness reasons -- it is actually
1911 1912 # managed by the workingctx, but that isn't unique per-repo.
1912 1913 self._postdsstatus.append(ps)
1913 1914
1914 1915 def postdsstatus(self):
1915 1916 """Used by workingctx to get the list of post-dirstate-status hooks."""
1916 1917 return self._postdsstatus
1917 1918
1918 1919 def clearpostdsstatus(self):
1919 1920 """Used by workingctx to clear post-dirstate-status hooks."""
1920 1921 del self._postdsstatus[:]
1921 1922
1922 1923 def heads(self, start=None):
1923 1924 if start is None:
1924 1925 cl = self.changelog
1925 1926 headrevs = reversed(cl.headrevs())
1926 1927 return [cl.node(rev) for rev in headrevs]
1927 1928
1928 1929 heads = self.changelog.heads(start)
1929 1930 # sort the output in rev descending order
1930 1931 return sorted(heads, key=self.changelog.rev, reverse=True)
1931 1932
1932 1933 def branchheads(self, branch=None, start=None, closed=False):
1933 1934 '''return a (possibly filtered) list of heads for the given branch
1934 1935
1935 1936 Heads are returned in topological order, from newest to oldest.
1936 1937 If branch is None, use the dirstate branch.
1937 1938 If start is not None, return only heads reachable from start.
1938 1939 If closed is True, return heads that are marked as closed as well.
1939 1940 '''
1940 1941 if branch is None:
1941 1942 branch = self[None].branch()
1942 1943 branches = self.branchmap()
1943 1944 if branch not in branches:
1944 1945 return []
1945 1946 # the cache returns heads ordered lowest to highest
1946 1947 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1947 1948 if start is not None:
1948 1949 # filter out the heads that cannot be reached from startrev
1949 1950 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1950 1951 bheads = [h for h in bheads if h in fbheads]
1951 1952 return bheads
1952 1953
1953 1954 def branches(self, nodes):
1954 1955 if not nodes:
1955 1956 nodes = [self.changelog.tip()]
1956 1957 b = []
1957 1958 for n in nodes:
1958 1959 t = n
1959 1960 while True:
1960 1961 p = self.changelog.parents(n)
1961 1962 if p[1] != nullid or p[0] == nullid:
1962 1963 b.append((t, n, p[0], p[1]))
1963 1964 break
1964 1965 n = p[0]
1965 1966 return b
1966 1967
1967 1968 def between(self, pairs):
1968 1969 r = []
1969 1970
1970 1971 for top, bottom in pairs:
1971 1972 n, l, i = top, [], 0
1972 1973 f = 1
1973 1974
1974 1975 while n != bottom and n != nullid:
1975 1976 p = self.changelog.parents(n)[0]
1976 1977 if i == f:
1977 1978 l.append(n)
1978 1979 f = f * 2
1979 1980 n = p
1980 1981 i += 1
1981 1982
1982 1983 r.append(l)
1983 1984
1984 1985 return r
1985 1986
1986 1987 def checkpush(self, pushop):
1987 1988 """Extensions can override this function if additional checks have
1988 1989 to be performed before pushing, or call it if they override push
1989 1990 command.
1990 1991 """
1991 1992 pass
1992 1993
1993 1994 @unfilteredpropertycache
1994 1995 def prepushoutgoinghooks(self):
1995 1996 """Return a util.hooks object whose hooks are called with a pushop
1996 1997 (providing repo, remote and outgoing) before changesets are pushed.
1997 1998 """
1998 1999 return util.hooks()
1999 2000
2000 2001 def pushkey(self, namespace, key, old, new):
2001 2002 try:
2002 2003 tr = self.currenttransaction()
2003 2004 hookargs = {}
2004 2005 if tr is not None:
2005 2006 hookargs.update(tr.hookargs)
2006 2007 hookargs['namespace'] = namespace
2007 2008 hookargs['key'] = key
2008 2009 hookargs['old'] = old
2009 2010 hookargs['new'] = new
2010 2011 self.hook('prepushkey', throw=True, **hookargs)
2011 2012 except error.HookAbort as exc:
2012 2013 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2013 2014 if exc.hint:
2014 2015 self.ui.write_err(_("(%s)\n") % exc.hint)
2015 2016 return False
2016 2017 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2017 2018 ret = pushkey.push(self, namespace, key, old, new)
2018 2019 def runhook():
2019 2020 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2020 2021 ret=ret)
2021 2022 self._afterlock(runhook)
2022 2023 return ret
2023 2024
2024 2025 def listkeys(self, namespace):
2025 2026 self.hook('prelistkeys', throw=True, namespace=namespace)
2026 2027 self.ui.debug('listing keys for "%s"\n' % namespace)
2027 2028 values = pushkey.list(self, namespace)
2028 2029 self.hook('listkeys', namespace=namespace, values=values)
2029 2030 return values
2030 2031
2031 2032 def debugwireargs(self, one, two, three=None, four=None, five=None):
2032 2033 '''used to test argument passing over the wire'''
2033 2034 return "%s %s %s %s %s" % (one, two, three, four, five)
2034 2035
2035 2036 def savecommitmessage(self, text):
2036 2037 fp = self.vfs('last-message.txt', 'wb')
2037 2038 try:
2038 2039 fp.write(text)
2039 2040 finally:
2040 2041 fp.close()
2041 2042 return self.pathto(fp.name[len(self.root) + 1:])
2042 2043
2043 2044 # used to avoid circular references so destructors work
2044 2045 def aftertrans(files):
2045 2046 renamefiles = [tuple(t) for t in files]
2046 2047 def a():
2047 2048 for vfs, src, dest in renamefiles:
2048 2049 # if src and dest refer to the same file, vfs.rename is a no-op,
2049 2050 # leaving both src and dest on disk. delete dest to make sure
2050 2051 # the rename cannot be such a no-op.
2051 2052 vfs.tryunlink(dest)
2052 2053 try:
2053 2054 vfs.rename(src, dest)
2054 2055 except OSError: # journal file does not yet exist
2055 2056 pass
2056 2057 return a
2057 2058
2058 2059 def undoname(fn):
2059 2060 base, name = os.path.split(fn)
2060 2061 assert name.startswith('journal')
2061 2062 return os.path.join(base, name.replace('journal', 'undo', 1))
2062 2063
2063 2064 def instance(ui, path, create):
2064 2065 return localrepository(ui, util.urllocalpath(path), create)
2065 2066
2066 2067 def islocal(path):
2067 2068 return True
2068 2069
2069 2070 def newreporequirements(repo):
2070 2071 """Determine the set of requirements for a new local repository.
2071 2072
2072 2073 Extensions can wrap this function to specify custom requirements for
2073 2074 new repositories.
2074 2075 """
2075 2076 ui = repo.ui
2076 2077 requirements = {'revlogv1'}
2077 2078 if ui.configbool('format', 'usestore'):
2078 2079 requirements.add('store')
2079 2080 if ui.configbool('format', 'usefncache'):
2080 2081 requirements.add('fncache')
2081 2082 if ui.configbool('format', 'dotencode'):
2082 2083 requirements.add('dotencode')
2083 2084
2084 2085 compengine = ui.config('experimental', 'format.compression', 'zlib')
2085 2086 if compengine not in util.compengines:
2086 2087 raise error.Abort(_('compression engine %s defined by '
2087 2088 'experimental.format.compression not available') %
2088 2089 compengine,
2089 2090 hint=_('run "hg debuginstall" to list available '
2090 2091 'compression engines'))
2091 2092
2092 2093 # zlib is the historical default and doesn't need an explicit requirement.
2093 2094 if compengine != 'zlib':
2094 2095 requirements.add('exp-compression-%s' % compengine)
2095 2096
2096 2097 if scmutil.gdinitconfig(ui):
2097 2098 requirements.add('generaldelta')
2098 2099 if ui.configbool('experimental', 'treemanifest', False):
2099 2100 requirements.add('treemanifest')
2100 2101 if ui.configbool('experimental', 'manifestv2', False):
2101 2102 requirements.add('manifestv2')
2102 2103
2103 2104 revlogv2 = ui.config('experimental', 'revlogv2')
2104 2105 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2105 2106 requirements.remove('revlogv1')
2106 2107 # generaldelta is implied by revlogv2.
2107 2108 requirements.discard('generaldelta')
2108 2109 requirements.add(REVLOGV2_REQUIREMENT)
2109 2110
2110 2111 return requirements
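# For illustration, a hedged sketch of what the branches above produce rather
# than an exhaustive list: a repository created with store, fncache and
# dotencode enabled, generaldelta on, and the default zlib compression ends up
# with requirements == {'revlogv1', 'store', 'fncache', 'dotencode',
# 'generaldelta'}; picking another compression engine additionally adds an
# 'exp-compression-<engine>' entry, and the unstable revlogv2 setting swaps
# 'revlogv1' for the REVLOGV2_REQUIREMENT marker.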
@@ -1,1031 +1,1034
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 building new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides old and news changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "precursor" and possible
24 24 replacements are called "successors". Markers that used changeset X as
25 25 a precursor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
27 27 a successors are call "precursor markers of Y" because they hold
28 28 information about the precursors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
50 50 case. If two independent operations rewrite the same changeset A in to A' and
51 51 A'', we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53
54 54 (A, (B,)) and (A, (C,))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are stored in an append-only file stored in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
66 66 The header is followed by the markers. The marker format depends on the version. See
67 67 the comment associated with each format for details.
68 68
69 69 """
70 70 from __future__ import absolute_import
71 71
72 72 import errno
73 73 import struct
74 74
75 75 from .i18n import _
76 76 from . import (
77 77 error,
78 78 node,
79 79 obsutil,
80 80 phases,
81 81 policy,
82 82 util,
83 83 )
84 84
85 85 parsers = policy.importmod(r'parsers')
86 86
87 87 _pack = struct.pack
88 88 _unpack = struct.unpack
89 89 _calcsize = struct.calcsize
90 90 propertycache = util.propertycache
91 91
92 92 # the obsolete feature is not mature enough to be enabled by default.
93 93 # you have to rely on a third party extension to enable this.
94 94 _enabled = False
95 95
96 96 # Options for obsolescence
97 97 createmarkersopt = 'createmarkers'
98 98 allowunstableopt = 'allowunstable'
99 99 exchangeopt = 'exchange'
100 100
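# A hedged configuration sketch (illustrative, not authoritative
# documentation): repositories opt in through the 'experimental.evolution'
# list read by isenabled() below, for example in an hgrc:
#
#   [experimental]
#   evolution = createmarkers, allowunstable, exchange
#
# while 'evolution = all' turns on every obsolescence option at once.
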
101 101 def isenabled(repo, option):
102 102 """Returns True if the given repository has the given obsolete option
103 103 enabled.
104 104 """
105 105 result = set(repo.ui.configlist('experimental', 'evolution'))
106 106 if 'all' in result:
107 107 return True
108 108
109 109 # For migration purposes, temporarily return true if the config hasn't been
110 110 # set but _enabled is true.
111 111 if len(result) == 0 and _enabled:
112 112 return True
113 113
114 114 # createmarkers must be enabled if other options are enabled
115 115 if ((allowunstableopt in result or exchangeopt in result) and
116 116 not createmarkersopt in result):
117 117 raise error.Abort(_("'createmarkers' obsolete option must be enabled "
118 118 "if other obsolete options are enabled"))
119 119
120 120 return option in result
121 121
122 122 ### obsolescence marker flag
123 123
124 124 ## bumpedfix flag
125 125 #
126 126 # When a changeset A' succeeds a changeset A which became public, we call A'
127 127 # "bumped" because it's a successor of a public changeset
128 128 #
129 129 # o A' (bumped)
130 130 # |`:
131 131 # | o A
132 132 # |/
133 133 # o Z
134 134 #
135 135 # The way to solve this situation is to create a new changeset Ad as a child
136 136 # of A. This changeset has the same content as A'. So the diff from A to A'
137 137 # is the same as the diff from A to Ad. Ad is marked as a successor of A'
138 138 #
139 139 # o Ad
140 140 # |`:
141 141 # | x A'
142 142 # |'|
143 143 # o | A
144 144 # |/
145 145 # o Z
146 146 #
147 147 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
148 148 # as bumped too, we add the `bumpedfix` flag to the marker <A', (Ad,)>.
149 149 # This flag means that the successor expresses the changes between the public
150 150 # and bumped versions and fixes the situation, breaking the transitivity of
151 151 # "bumped" here.
152 152 bumpedfix = 1
153 153 usingsha256 = 2
154 154
155 155 ## Parsing and writing of version "0"
156 156 #
157 157 # The header is followed by the markers. Each marker is made of:
158 158 #
159 159 # - 1 uint8 : number of new changesets "N", can be zero.
160 160 #
161 161 # - 1 uint32: metadata size "M" in bytes.
162 162 #
163 163 # - 1 byte: a bit field. It is reserved for flags used in common
164 164 # obsolete marker operations, to avoid repeated decoding of metadata
165 165 # entries.
166 166 #
167 167 # - 20 bytes: obsoleted changeset identifier.
168 168 #
169 169 # - N*20 bytes: new changesets identifiers.
170 170 #
171 171 # - M bytes: metadata as a sequence of nul-terminated strings. Each
172 172 # string contains a key and a value, separated by a colon ':', without
173 173 # additional encoding. Keys cannot contain '\0' or ':' and values
174 174 # cannot contain '\0'.
175 175 _fm0version = 0
176 176 _fm0fixed = '>BIB20s'
177 177 _fm0node = '20s'
178 178 _fm0fsize = _calcsize(_fm0fixed)
179 179 _fm0fnodesize = _calcsize(_fm0node)
180 180
181 181 def _fm0readmarkers(data, off):
182 182 # Loop on markers
183 183 l = len(data)
184 184 while off + _fm0fsize <= l:
185 185 # read fixed part
186 186 cur = data[off:off + _fm0fsize]
187 187 off += _fm0fsize
188 188 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
189 189 # read replacement
190 190 sucs = ()
191 191 if numsuc:
192 192 s = (_fm0fnodesize * numsuc)
193 193 cur = data[off:off + s]
194 194 sucs = _unpack(_fm0node * numsuc, cur)
195 195 off += s
196 196 # read metadata
197 197 # (metadata will be decoded on demand)
198 198 metadata = data[off:off + mdsize]
199 199 if len(metadata) != mdsize:
200 200 raise error.Abort(_('parsing obsolete marker: metadata is too '
201 201 'short, %d bytes expected, got %d')
202 202 % (mdsize, len(metadata)))
203 203 off += mdsize
204 204 metadata = _fm0decodemeta(metadata)
205 205 try:
206 206 when, offset = metadata.pop('date', '0 0').split(' ')
207 207 date = float(when), int(offset)
208 208 except ValueError:
209 209 date = (0., 0)
210 210 parents = None
211 211 if 'p2' in metadata:
212 212 parents = (metadata.pop('p1', None), metadata.pop('p2', None))
213 213 elif 'p1' in metadata:
214 214 parents = (metadata.pop('p1', None),)
215 215 elif 'p0' in metadata:
216 216 parents = ()
217 217 if parents is not None:
218 218 try:
219 219 parents = tuple(node.bin(p) for p in parents)
220 220 # if parent content is not a nodeid, drop the data
221 221 for p in parents:
222 222 if len(p) != 20:
223 223 parents = None
224 224 break
225 225 except TypeError:
226 226 # if content cannot be translated to nodeid drop the data.
227 227 parents = None
228 228
229 229 metadata = tuple(sorted(metadata.iteritems()))
230 230
231 231 yield (pre, sucs, flags, metadata, date, parents)
232 232
233 233 def _fm0encodeonemarker(marker):
234 234 pre, sucs, flags, metadata, date, parents = marker
235 235 if flags & usingsha256:
236 236 raise error.Abort(_('cannot handle sha256 with old obsstore format'))
237 237 metadata = dict(metadata)
238 238 time, tz = date
239 239 metadata['date'] = '%r %i' % (time, tz)
240 240 if parents is not None:
241 241 if not parents:
242 242 # mark that we explicitly recorded no parents
243 243 metadata['p0'] = ''
244 244 for i, p in enumerate(parents, 1):
245 245 metadata['p%i' % i] = node.hex(p)
246 246 metadata = _fm0encodemeta(metadata)
247 247 numsuc = len(sucs)
248 248 format = _fm0fixed + (_fm0node * numsuc)
249 249 data = [numsuc, len(metadata), flags, pre]
250 250 data.extend(sucs)
251 251 return _pack(format, *data) + metadata
252 252
253 253 def _fm0encodemeta(meta):
254 254 """Return encoded metadata string to string mapping.
255 255
256 256 Assumes no ':' in keys and no '\0' in either keys or values."""
257 257 for key, value in meta.iteritems():
258 258 if ':' in key or '\0' in key:
259 259 raise ValueError("':' and '\0' are forbidden in metadata key")
260 260 if '\0' in value:
261 261 raise ValueError("'\0' is forbidden in metadata value")
262 262 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
263 263
264 264 def _fm0decodemeta(data):
265 265 """Return string to string dictionary from encoded version."""
266 266 d = {}
267 267 for l in data.split('\0'):
268 268 if l:
269 269 key, value = l.split(':')
270 270 d[key] = value
271 271 return d
272 272
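# A small round-trip sketch for the two helpers above (values are
# illustrative):
#
#   _fm0encodemeta({'user': 'alice', 'p1': '00' * 20})
#       == 'p1:' + '00' * 20 + '\x00' + 'user:alice'   # keys are sorted
#   _fm0decodemeta('p1:' + '00' * 20 + '\x00' + 'user:alice')
#       == {'p1': '00' * 20, 'user': 'alice'}
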
273 273 ## Parsing and writing of version "1"
274 274 #
275 275 # The header is followed by the markers. Each marker is made of:
276 276 #
277 277 # - uint32: total size of the marker (including this field)
278 278 #
279 279 # - float64: date in seconds since epoch
280 280 #
281 281 # - int16: timezone offset in minutes
282 282 #
283 283 # - uint16: a bit field. It is reserved for flags used in common
284 284 # obsolete marker operations, to avoid repeated decoding of metadata
285 285 # entries.
286 286 #
287 287 # - uint8: number of successors "N", can be zero.
288 288 #
289 289 # - uint8: number of parents "P", can be zero.
290 290 #
291 291 # 0: parents data stored but no parent,
292 292 # 1: one parent stored,
293 293 # 2: two parents stored,
294 294 # 3: no parent data stored
295 295 #
296 296 # - uint8: number of metadata entries M
297 297 #
298 298 # - 20 or 32 bytes: precursor changeset identifier.
299 299 #
300 300 # - N*(20 or 32) bytes: successor changeset identifiers.
301 301 #
302 302 # - P*(20 or 32) bytes: parents of the precursor changeset.
303 303 #
304 304 # - M*(uint8, uint8): size of all metadata entries (key and value)
305 305 #
306 306 # - remaining bytes: the metadata, each (key, value) pair after the other.
307 307 _fm1version = 1
308 308 _fm1fixed = '>IdhHBBB20s'
309 309 _fm1nodesha1 = '20s'
310 310 _fm1nodesha256 = '32s'
311 311 _fm1nodesha1size = _calcsize(_fm1nodesha1)
312 312 _fm1nodesha256size = _calcsize(_fm1nodesha256)
313 313 _fm1fsize = _calcsize(_fm1fixed)
314 314 _fm1parentnone = 3
315 315 _fm1parentshift = 14
316 316 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
317 317 _fm1metapair = 'BB'
318 318 _fm1metapairsize = _calcsize('BB')
319 319
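# A worked size example for the version-1 fixed part (a sketch based on the
# '>IdhHBBB20s' layout above): 4 + 8 + 2 + 2 + 1 + 1 + 1 + 20 = 39 bytes
# (_fm1fsize), followed by the successor and parent node ids (20 or 32 bytes
# each), the metadata size pairs and the metadata payload itself.
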
320 320 def _fm1purereadmarkers(data, off):
321 321 # make some global constants local for performance
322 322 noneflag = _fm1parentnone
323 323 sha2flag = usingsha256
324 324 sha1size = _fm1nodesha1size
325 325 sha2size = _fm1nodesha256size
326 326 sha1fmt = _fm1nodesha1
327 327 sha2fmt = _fm1nodesha256
328 328 metasize = _fm1metapairsize
329 329 metafmt = _fm1metapair
330 330 fsize = _fm1fsize
331 331 unpack = _unpack
332 332
333 333 # Loop on markers
334 334 stop = len(data) - _fm1fsize
335 335 ufixed = struct.Struct(_fm1fixed).unpack
336 336
337 337 while off <= stop:
338 338 # read fixed part
339 339 o1 = off + fsize
340 340 t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])
341 341
342 342 if flags & sha2flag:
343 343 # FIXME: prec was read as a SHA1, needs to be amended
344 344
345 345 # read 0 or more successors
346 346 if numsuc == 1:
347 347 o2 = o1 + sha2size
348 348 sucs = (data[o1:o2],)
349 349 else:
350 350 o2 = o1 + sha2size * numsuc
351 351 sucs = unpack(sha2fmt * numsuc, data[o1:o2])
352 352
353 353 # read parents
354 354 if numpar == noneflag:
355 355 o3 = o2
356 356 parents = None
357 357 elif numpar == 1:
358 358 o3 = o2 + sha2size
359 359 parents = (data[o2:o3],)
360 360 else:
361 361 o3 = o2 + sha2size * numpar
362 362 parents = unpack(sha2fmt * numpar, data[o2:o3])
363 363 else:
364 364 # read 0 or more successors
365 365 if numsuc == 1:
366 366 o2 = o1 + sha1size
367 367 sucs = (data[o1:o2],)
368 368 else:
369 369 o2 = o1 + sha1size * numsuc
370 370 sucs = unpack(sha1fmt * numsuc, data[o1:o2])
371 371
372 372 # read parents
373 373 if numpar == noneflag:
374 374 o3 = o2
375 375 parents = None
376 376 elif numpar == 1:
377 377 o3 = o2 + sha1size
378 378 parents = (data[o2:o3],)
379 379 else:
380 380 o3 = o2 + sha1size * numpar
381 381 parents = unpack(sha1fmt * numpar, data[o2:o3])
382 382
383 383 # read metadata
384 384 off = o3 + metasize * nummeta
385 385 metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
386 386 metadata = []
387 387 for idx in xrange(0, len(metapairsize), 2):
388 388 o1 = off + metapairsize[idx]
389 389 o2 = o1 + metapairsize[idx + 1]
390 390 metadata.append((data[off:o1], data[o1:o2]))
391 391 off = o2
392 392
393 393 yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
394 394
395 395 def _fm1encodeonemarker(marker):
396 396 pre, sucs, flags, metadata, date, parents = marker
397 397 # determine node size
398 398 _fm1node = _fm1nodesha1
399 399 if flags & usingsha256:
400 400 _fm1node = _fm1nodesha256
401 401 numsuc = len(sucs)
402 402 numextranodes = numsuc
403 403 if parents is None:
404 404 numpar = _fm1parentnone
405 405 else:
406 406 numpar = len(parents)
407 407 numextranodes += numpar
408 408 formatnodes = _fm1node * numextranodes
409 409 formatmeta = _fm1metapair * len(metadata)
410 410 format = _fm1fixed + formatnodes + formatmeta
411 411 # tz is stored in minutes so we divide by 60
412 412 tz = date[1]//60
413 413 data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
414 414 data.extend(sucs)
415 415 if parents is not None:
416 416 data.extend(parents)
417 417 totalsize = _calcsize(format)
418 418 for key, value in metadata:
419 419 lk = len(key)
420 420 lv = len(value)
421 421 data.append(lk)
422 422 data.append(lv)
423 423 totalsize += lk + lv
424 424 data[0] = totalsize
425 425 data = [_pack(format, *data)]
426 426 for key, value in metadata:
427 427 data.append(key)
428 428 data.append(value)
429 429 return ''.join(data)
430 430
431 431 def _fm1readmarkers(data, off):
432 432 native = getattr(parsers, 'fm1readmarkers', None)
433 433 if not native:
434 434 return _fm1purereadmarkers(data, off)
435 435 stop = len(data) - _fm1fsize
436 436 return native(data, off, stop)
437 437
438 438 # mapping to read/write various marker formats
439 439 # <version> -> (decoder, encoder)
440 440 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
441 441 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
442 442
443 443 def _readmarkerversion(data):
444 444 return _unpack('>B', data[0:1])[0]
445 445
446 446 @util.nogc
447 447 def _readmarkers(data):
448 448 """Read and enumerate markers from raw data"""
449 449 diskversion = _readmarkerversion(data)
450 450 off = 1
451 451 if diskversion not in formats:
452 452 msg = _('parsing obsolete marker: unknown version %r') % diskversion
453 453 raise error.UnknownVersion(msg, version=diskversion)
454 454 return diskversion, formats[diskversion][0](data, off)
455 455
456 456 def encodeheader(version=_fm0version):
457 457 return _pack('>B', version)
458 458
459 459 def encodemarkers(markers, addheader=False, version=_fm0version):
460 460 # Kept separate from flushmarkers(), it will be reused for
461 461 # markers exchange.
462 462 encodeone = formats[version][1]
463 463 if addheader:
464 464 yield encodeheader(version)
465 465 for marker in markers:
466 466 yield encodeone(marker)
467 467
468 468 @util.nogc
469 469 def _addsuccessors(successors, markers):
470 470 for mark in markers:
471 471 successors.setdefault(mark[0], set()).add(mark)
472 472
473 473 @util.nogc
474 474 def _addprecursors(precursors, markers):
475 475 for mark in markers:
476 476 for suc in mark[1]:
477 477 precursors.setdefault(suc, set()).add(mark)
478 478
479 479 @util.nogc
480 480 def _addchildren(children, markers):
481 481 for mark in markers:
482 482 parents = mark[5]
483 483 if parents is not None:
484 484 for p in parents:
485 485 children.setdefault(p, set()).add(mark)
486 486
487 487 def _checkinvalidmarkers(markers):
488 488 """search for markers with invalid data and raise an error if needed
489 489 
490 490 Exists as a separate function to allow the evolve extension to implement
491 491 more subtle handling.
492 492 """
493 493 for mark in markers:
494 494 if node.nullid in mark[1]:
495 495 raise error.Abort(_('bad obsolescence marker detected: '
496 496 'invalid successors nullid'))
497 497
498 498 class obsstore(object):
499 499 """Store obsolete markers
500 500
501 501 Markers can be accessed with three mappings:
502 502 - precursors[x] -> set(markers on precursors edges of x)
503 503 - successors[x] -> set(markers on successors edges of x)
504 504 - children[x] -> set(markers on precursors edges of children(x))
505 505 """
506 506
507 507 fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
508 508 # prec: nodeid, precursor changesets
509 509 # succs: tuple of nodeid, successor changesets (0-N length)
510 510 # flag: integer, flag field carrying modifier for the markers (see doc)
511 511 # meta: binary blob, encoded metadata dictionary
512 512 # date: (float, int) tuple, date of marker creation
513 513 # parents: (tuple of nodeid) or None, parents of precursors
514 514 # None is used when no data has been recorded
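#
# A hedged example of one such in-memory marker tuple, with dummy values and
# metadata shown in the decoded (key, value) pair form used by create():
#
#   (b'\x11' * 20,            # prec: the obsoleted changeset
#    (b'\x22' * 20,),         # succs: a single successor
#    0,                       # flag
#    (('user', 'alice'),),    # meta
#    (1500000000.0, 0),       # date: (timestamp, timezone offset)
#    None)                    # parents: not recorded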
515 515
516 516 def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
517 517 # caches for various obsolescence related results
518 518 self.caches = {}
519 519 self.svfs = svfs
520 520 self._defaultformat = defaultformat
521 521 self._readonly = readonly
522 522
523 523 def __iter__(self):
524 524 return iter(self._all)
525 525
526 526 def __len__(self):
527 527 return len(self._all)
528 528
529 529 def __nonzero__(self):
530 530 if not self._cached('_all'):
531 531 try:
532 532 return self.svfs.stat('obsstore').st_size > 1
533 533 except OSError as inst:
534 534 if inst.errno != errno.ENOENT:
535 535 raise
536 536 # just build an empty _all list if no obsstore exists, which
537 537 # avoids further stat() syscalls
538 538 pass
539 539 return bool(self._all)
540 540
541 541 __bool__ = __nonzero__
542 542
543 543 @property
544 544 def readonly(self):
545 545 """True if marker creation is disabled
546 546
547 547 Remove me in the future when obsolete markers are always on."""
548 548 return self._readonly
549 549
550 550 def create(self, transaction, prec, succs=(), flag=0, parents=None,
551 551 date=None, metadata=None, ui=None):
552 552 """obsolete: add a new obsolete marker
553 553
554 554 * ensure it is hashable
555 555 * check mandatory metadata
556 556 * encode metadata
557 557 
558 558 If you are a human writing code that creates markers, you want to use
559 559 the `createmarkers` function in this module instead.
560 560 
561 561 return True if a new marker has been added, False if the marker
562 562 already existed (no op).
563 563 """
564 564 if metadata is None:
565 565 metadata = {}
566 566 if date is None:
567 567 if 'date' in metadata:
568 568 # as a courtesy for out-of-tree extensions
569 569 date = util.parsedate(metadata.pop('date'))
570 570 elif ui is not None:
571 571 date = ui.configdate('devel', 'default-date')
572 572 if date is None:
573 573 date = util.makedate()
574 574 else:
575 575 date = util.makedate()
576 576 if len(prec) != 20:
577 577 raise ValueError(prec)
578 578 for succ in succs:
579 579 if len(succ) != 20:
580 580 raise ValueError(succ)
581 581 if prec in succs:
582 582 raise ValueError(_('in-marker cycle with %s') % node.hex(prec))
583 583
584 584 metadata = tuple(sorted(metadata.iteritems()))
585 585
586 586 marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
587 587 return bool(self.add(transaction, [marker]))
588 588
589 589 def add(self, transaction, markers):
590 590 """Add new markers to the store
591 591
592 592 Takes care of filtering out duplicates.
593 593 Returns the number of new markers."""
594 594 if self._readonly:
595 595 raise error.Abort(_('creating obsolete markers is not enabled on '
596 596 'this repo'))
597 597 known = set()
598 598 getsuccessors = self.successors.get
599 599 new = []
600 600 for m in markers:
601 601 if m not in getsuccessors(m[0], ()) and m not in known:
602 602 known.add(m)
603 603 new.append(m)
604 604 if new:
605 605 f = self.svfs('obsstore', 'ab')
606 606 try:
607 607 offset = f.tell()
608 608 transaction.add('obsstore', offset)
609 609 # offset == 0: new file - add the version header
610 610 for bytes in encodemarkers(new, offset == 0, self._version):
611 611 f.write(bytes)
612 612 finally:
613 613 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
614 614 # call 'filecacheentry.refresh()' here
615 615 f.close()
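# track the new markers in the transaction's 'changes' mapping
# (skipped when no 'obsmarkers' entry was registered on the transaction)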
616 addedmarkers = transaction.changes.get('obsmarkers')
617 if addedmarkers is not None:
618 addedmarkers.update(new)
616 619 self._addmarkers(new)
617 620 # new markers *may* have changed several sets. invalidate the cache.
618 621 self.caches.clear()
619 622 # records the number of new markers for the transaction hooks
620 623 previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
621 624 transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
622 625 return len(new)
623 626
624 627 def mergemarkers(self, transaction, data):
625 628 """merge a binary stream of markers inside the obsstore
626 629
627 630 Returns the number of new markers added."""
628 631 version, markers = _readmarkers(data)
629 632 return self.add(transaction, markers)
630 633
631 634 @propertycache
632 635 def _data(self):
633 636 return self.svfs.tryread('obsstore')
634 637
635 638 @propertycache
636 639 def _version(self):
637 640 if len(self._data) >= 1:
638 641 return _readmarkerversion(self._data)
639 642 else:
640 643 return self._defaultformat
641 644
642 645 @propertycache
643 646 def _all(self):
644 647 data = self._data
645 648 if not data:
646 649 return []
647 650 self._version, markers = _readmarkers(data)
648 651 markers = list(markers)
649 652 _checkinvalidmarkers(markers)
650 653 return markers
651 654
652 655 @propertycache
653 656 def successors(self):
654 657 successors = {}
655 658 _addsuccessors(successors, self._all)
656 659 return successors
657 660
658 661 @propertycache
659 662 def precursors(self):
660 663 precursors = {}
661 664 _addprecursors(precursors, self._all)
662 665 return precursors
663 666
664 667 @propertycache
665 668 def children(self):
666 669 children = {}
667 670 _addchildren(children, self._all)
668 671 return children
669 672
670 673 def _cached(self, attr):
671 674 return attr in self.__dict__
672 675
673 676 def _addmarkers(self, markers):
674 677 markers = list(markers) # to allow repeated iteration
675 678 self._all.extend(markers)
676 679 if self._cached('successors'):
677 680 _addsuccessors(self.successors, markers)
678 681 if self._cached('precursors'):
679 682 _addprecursors(self.precursors, markers)
680 683 if self._cached('children'):
681 684 _addchildren(self.children, markers)
682 685 _checkinvalidmarkers(markers)
683 686
684 687 def relevantmarkers(self, nodes):
685 688 """return a set of all obsolescence markers relevant to a set of nodes.
686 689
687 690 "relevant" to a set of nodes means:
688 691 
689 692 - markers that use one of these changesets as a successor
690 693 - prune markers of direct children of these changesets
691 694 - recursive application of the two rules on precursors of these markers
692 695 
693 696 It is a set, so you cannot rely on ordering."""
694 697
695 698 pendingnodes = set(nodes)
696 699 seenmarkers = set()
697 700 seennodes = set(pendingnodes)
698 701 precursorsmarkers = self.precursors
699 702 succsmarkers = self.successors
700 703 children = self.children
701 704 while pendingnodes:
702 705 direct = set()
703 706 for current in pendingnodes:
704 707 direct.update(precursorsmarkers.get(current, ()))
705 708 pruned = [m for m in children.get(current, ()) if not m[1]]
706 709 direct.update(pruned)
707 710 pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
708 711 direct.update(pruned)
709 712 direct -= seenmarkers
710 713 pendingnodes = set([m[0] for m in direct])
711 714 seenmarkers |= direct
712 715 pendingnodes -= seennodes
713 716 seennodes |= pendingnodes
714 717 return seenmarkers
715 718
716 719 def makestore(ui, repo):
717 720 """Create an obsstore instance from a repo."""
718 721 # read default format for new obsstore.
719 722 # developer config: format.obsstore-version
720 723 defaultformat = ui.configint('format', 'obsstore-version')
721 724 # rely on obsstore class default when possible.
722 725 kwargs = {}
723 726 if defaultformat is not None:
724 727 kwargs['defaultformat'] = defaultformat
725 728 readonly = not isenabled(repo, createmarkersopt)
726 729 store = obsstore(repo.svfs, readonly=readonly, **kwargs)
727 730 if store and readonly:
728 731 ui.warn(_('obsolete feature not enabled but %i markers found!\n')
729 732 % len(list(store)))
730 733 return store
731 734
732 735 def commonversion(versions):
733 736 """Return the newest version listed in both versions and our local formats.
734 737
735 738 Returns None if no common version exists.
736 739 """
737 740 versions.sort(reverse=True)
738 741 # search for the highest version known on both sides
739 742 for v in versions:
740 743 if v in formats:
741 744 return v
742 745 return None
743 746
744 747 # arbitrarily picked to fit into the 8K limit from the HTTP server
745 748 # you have to take into account:
746 749 # - the version header
747 750 # - the base85 encoding
748 751 _maxpayload = 5300
749 752
750 753 def _pushkeyescape(markers):
751 754 """encode markers into a dict suitable for pushkey exchange
752 755
753 756 - binary data is base85 encoded
754 757 - split into chunks smaller than 5300 bytes"""
755 758 keys = {}
756 759 parts = []
757 760 currentlen = _maxpayload * 2 # ensure we create a new part
758 761 for marker in markers:
759 762 nextdata = _fm0encodeonemarker(marker)
760 763 if (len(nextdata) + currentlen > _maxpayload):
761 764 currentpart = []
762 765 currentlen = 0
763 766 parts.append(currentpart)
764 767 currentpart.append(nextdata)
765 768 currentlen += len(nextdata)
766 769 for idx, part in enumerate(reversed(parts)):
767 770 data = ''.join([_pack('>B', _fm0version)] + part)
768 771 keys['dump%i' % idx] = util.b85encode(data)
769 772 return keys
770 773
771 774 def listmarkers(repo):
772 775 """List markers over pushkey"""
773 776 if not repo.obsstore:
774 777 return {}
775 778 return _pushkeyescape(sorted(repo.obsstore))
776 779
777 780 def pushmarker(repo, key, old, new):
778 781 """Push markers over pushkey"""
779 782 if not key.startswith('dump'):
780 783 repo.ui.warn(_('unknown key: %r') % key)
781 784 return False
782 785 if old:
783 786 repo.ui.warn(_('unexpected old value for %r') % key)
784 787 return False
785 788 data = util.b85decode(new)
786 789 lock = repo.lock()
787 790 try:
788 791 tr = repo.transaction('pushkey: obsolete markers')
789 792 try:
790 793 repo.obsstore.mergemarkers(tr, data)
791 794 repo.invalidatevolatilesets()
792 795 tr.close()
793 796 return True
794 797 finally:
795 798 tr.release()
796 799 finally:
797 800 lock.release()
798 801
799 802 # keep compatibility for the 4.3 cycle
800 803 def allprecursors(obsstore, nodes, ignoreflags=0):
801 804 movemsg = 'obsolete.allprecursors moved to obsutil.allprecursors'
802 805 util.nouideprecwarn(movemsg, '4.3')
803 806 return obsutil.allprecursors(obsstore, nodes, ignoreflags)
804 807
805 808 def allsuccessors(obsstore, nodes, ignoreflags=0):
806 809 movemsg = 'obsolete.allsuccessors moved to obsutil.allsuccessors'
807 810 util.nouideprecwarn(movemsg, '4.3')
808 811 return obsutil.allsuccessors(obsstore, nodes, ignoreflags)
809 812
810 813 def marker(repo, data):
811 814 movemsg = 'obsolete.marker moved to obsutil.marker'
812 815 repo.ui.deprecwarn(movemsg, '4.3')
813 816 return obsutil.marker(repo, data)
814 817
815 818 def getmarkers(repo, nodes=None, exclusive=False):
816 819 movemsg = 'obsolete.getmarkers moved to obsutil.getmarkers'
817 820 repo.ui.deprecwarn(movemsg, '4.3')
818 821 return obsutil.getmarkers(repo, nodes=nodes, exclusive=exclusive)
819 822
820 823 def exclusivemarkers(repo, nodes):
821 824 movemsg = 'obsolete.exclusivemarkers moved to obsutil.exclusivemarkers'
822 825 repo.ui.deprecwarn(movemsg, '4.3')
823 826 return obsutil.exclusivemarkers(repo, nodes)
824 827
825 828 def foreground(repo, nodes):
826 829 movemsg = 'obsolete.foreground moved to obsutil.foreground'
827 830 repo.ui.deprecwarn(movemsg, '4.3')
828 831 return obsutil.foreground(repo, nodes)
829 832
830 833 def successorssets(repo, initialnode, cache=None):
831 834 movemsg = 'obsolete.successorssets moved to obsutil.successorssets'
832 835 repo.ui.deprecwarn(movemsg, '4.3')
833 836 return obsutil.successorssets(repo, initialnode, cache=cache)
834 837
835 838 # mapping of 'set-name' -> <function to compute this set>
836 839 cachefuncs = {}
837 840 def cachefor(name):
838 841 """Decorator to register a function as computing the cache for a set"""
839 842 def decorator(func):
840 843 if name in cachefuncs:
841 844 msg = "duplicated registration for volatileset '%s' (existing: %r)"
842 845 raise error.ProgrammingError(msg % (name, cachefuncs[name]))
843 846 cachefuncs[name] = func
844 847 return func
845 848 return decorator
846 849
847 850 def getrevs(repo, name):
848 851 """Return the set of revision that belong to the <name> set
849 852
850 853 Such access may compute the set and cache it for future use"""
851 854 repo = repo.unfiltered()
852 855 if not repo.obsstore:
853 856 return frozenset()
854 857 if name not in repo.obsstore.caches:
855 858 repo.obsstore.caches[name] = cachefuncs[name](repo)
856 859 return repo.obsstore.caches[name]
857 860
858 861 # To keep things simple we need to invalidate the obsolescence cache when:
859 862 #
860 863 # - a new changeset is added
861 864 # - the public phase is changed
862 865 # - obsolescence markers are added
863 866 # - strip is used on a repo
864 867 def clearobscaches(repo):
865 868 """Remove all obsolescence related caches from a repo
866 869 
867 870 This removes all caches in the obsstore if the obsstore already exists
868 871 on the repo.
869 872 
870 873 (We could be smarter here given the exact event that triggered the cache
871 874 clearing)"""
872 875 # only clear the caches if there is obsstore data in this repo
873 876 if 'obsstore' in repo._filecache:
874 877 repo.obsstore.caches.clear()
875 878
876 879 def _mutablerevs(repo):
877 880 """the set of mutable revisions in the repository"""
878 881 return repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
879 882
880 883 @cachefor('obsolete')
881 884 def _computeobsoleteset(repo):
882 885 """the set of obsolete revisions"""
883 886 getnode = repo.changelog.node
884 887 notpublic = _mutablerevs(repo)
885 888 isobs = repo.obsstore.successors.__contains__
886 889 obs = set(r for r in notpublic if isobs(getnode(r)))
887 890 return obs
888 891
889 892 @cachefor('unstable')
890 893 def _computeunstableset(repo):
891 894 """the set of non obsolete revisions with obsolete parents"""
892 895 pfunc = repo.changelog.parentrevs
893 896 mutable = _mutablerevs(repo)
894 897 obsolete = getrevs(repo, 'obsolete')
895 898 others = mutable - obsolete
896 899 unstable = set()
897 900 for r in sorted(others):
898 901 # A rev is unstable if one of its parents is obsolete or unstable;
899 902 # this works since we traverse in increasing rev order
900 903 for p in pfunc(r):
901 904 if p in obsolete or p in unstable:
902 905 unstable.add(r)
903 906 break
904 907 return unstable
905 908
906 909 @cachefor('suspended')
907 910 def _computesuspendedset(repo):
908 911 """the set of obsolete revisions with non-obsolete descendants"""
909 912 suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
910 913 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
911 914
912 915 @cachefor('extinct')
913 916 def _computeextinctset(repo):
914 917 """the set of obsolete parents without non obsolete descendants"""
915 918 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
916 919
917 920
918 921 @cachefor('bumped')
919 922 def _computebumpedset(repo):
920 923 """the set of revs trying to obsolete public revisions"""
921 924 bumped = set()
922 925 # util function (avoid attribute lookup in the loop)
923 926 phase = repo._phasecache.phase # would be faster to grab the full list
924 927 public = phases.public
925 928 cl = repo.changelog
926 929 torev = cl.nodemap.get
927 930 for ctx in repo.set('(not public()) and (not obsolete())'):
928 931 rev = ctx.rev()
929 932 # We only evaluate mutable, non-obsolete revisions
930 933 node = ctx.node()
931 934 # (future) A cache of precursors may be worth it if splits are very common
932 935 for pnode in obsutil.allprecursors(repo.obsstore, [node],
933 936 ignoreflags=bumpedfix):
934 937 prev = torev(pnode) # unfiltered! but so is phasecache
935 938 if (prev is not None) and (phase(repo, prev) <= public):
936 939 # we have a public precursor
937 940 bumped.add(rev)
938 941 break # Next draft!
939 942 return bumped
940 943
941 944 @cachefor('divergent')
942 945 def _computedivergentset(repo):
943 946 """the set of rev that compete to be the final successors of some revision.
944 947 """
945 948 divergent = set()
946 949 obsstore = repo.obsstore
947 950 newermap = {}
948 951 for ctx in repo.set('(not public()) - obsolete()'):
949 952 mark = obsstore.precursors.get(ctx.node(), ())
950 953 toprocess = set(mark)
951 954 seen = set()
952 955 while toprocess:
953 956 prec = toprocess.pop()[0]
954 957 if prec in seen:
955 958 continue # guard against precursor cycles that would hang us
956 959 seen.add(prec)
957 960 if prec not in newermap:
958 961 obsutil.successorssets(repo, prec, newermap)
959 962 newer = [n for n in newermap[prec] if n]
960 963 if len(newer) > 1:
961 964 divergent.add(ctx.rev())
962 965 break
963 966 toprocess.update(obsstore.precursors.get(prec, ()))
964 967 return divergent
965 968
966 969
967 970 def createmarkers(repo, relations, flag=0, date=None, metadata=None,
968 971 operation=None):
969 972 """Add obsolete markers between changesets in a repo
970 973
971 974 <relations> must be an iterable of (<old>, (<new>, ...)[, {metadata}])
972 975 tuples. `old` and `news` are changectx objects. metadata is an optional
973 976 dictionary containing metadata for this marker only. It is merged with the
974 977 global metadata specified through the `metadata` argument of this function.
975 978
976 979 Trying to obsolete a public changeset will raise an exception.
977 980
978 981 The current user and date are used unless specified otherwise in the
979 982 metadata argument.
980 983
981 984 This function operates within a transaction of its own, but does
982 985 not take any lock on the repo.
983 986 """
984 987 # prepare metadata
985 988 if metadata is None:
986 989 metadata = {}
987 990 if 'user' not in metadata:
988 991 metadata['user'] = repo.ui.username()
989 992 useoperation = repo.ui.configbool('experimental',
990 993 'evolution.track-operation',
991 994 False)
992 995 if useoperation and operation:
993 996 metadata['operation'] = operation
994 997 tr = repo.transaction('add-obsolescence-marker')
995 998 try:
996 999 markerargs = []
997 1000 for rel in relations:
998 1001 prec = rel[0]
999 1002 sucs = rel[1]
1000 1003 localmetadata = metadata.copy()
1001 1004 if 2 < len(rel):
1002 1005 localmetadata.update(rel[2])
1003 1006
1004 1007 if not prec.mutable():
1005 1008 raise error.Abort(_("cannot obsolete public changeset: %s")
1006 1009 % prec,
1007 1010 hint="see 'hg help phases' for details")
1008 1011 nprec = prec.node()
1009 1012 nsucs = tuple(s.node() for s in sucs)
1010 1013 npare = None
1011 1014 if not nsucs:
1012 1015 npare = tuple(p.node() for p in prec.parents())
1013 1016 if nprec in nsucs:
1014 1017 raise error.Abort(_("changeset %s cannot obsolete itself")
1015 1018 % prec)
1016 1019
1017 1020 # Creating the marker causes the hidden cache to become invalid,
1018 1021 # which causes recomputation when we ask for prec.parents() above,
1019 1022 # resulting in n^2 behavior. So let's prepare all of the args
1020 1023 # first, then create the markers.
1021 1024 markerargs.append((nprec, nsucs, npare, localmetadata))
1022 1025
1023 1026 for args in markerargs:
1024 1027 nprec, nsucs, npare, localmetadata = args
1025 1028 repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
1026 1029 date=date, metadata=localmetadata,
1027 1030 ui=repo.ui)
1028 1031 repo.filteredrevcache.clear()
1029 1032 tr.close()
1030 1033 finally:
1031 1034 tr.release()
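For context, a rough usage sketch of createmarkers as defined above. It assumes this module is mercurial.obsolete, that the caller already holds the repo lock (the function opens its own transaction but, as its docstring says, takes no lock), and that `repo`, `old`, and `new` are hypothetical stand-ins for a repository and two changectx objects obtained elsewhere:

    from mercurial import obsolete

    # record that 'old' was rewritten into 'new'; per-relation metadata is
    # merged with the global metadata argument
    obsolete.createmarkers(repo, [(old, (new,))], operation='amend')

    # an empty successor tuple marks 'old' as pruned; the marker then
    # records old's parents instead of successors
    obsolete.createmarkers(repo, [(old, ())])

As the function body shows, the 'operation' value is only recorded when the experimental evolution.track-operation config knob is enabled.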