# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

r21164 | """Obsolete marker handling | ||
Pierre-Yves.David@ens-lyon.org
|
r17070 | |||
An obsolete marker maps an old changeset to a list of new | ||||
changesets. If the list of new changesets is empty, the old changeset | ||||
is said to be "killed". Otherwise, the old changeset is being | ||||
"replaced" by the new changesets. | ||||
Obsolete markers can be used to record and distribute changeset graph | ||||
Martin Geisler
|
r21164 | transformations performed by history rewrite operations, and help | ||
building new tools to reconcile conflicting rewrite actions. To | ||||
facilitate conflict resolution, markers include various annotations | ||||
Pierre-Yves.David@ens-lyon.org
|
r17070 | besides old and news changeset identifiers, such as creation date or | ||
author name. | ||||
Boris Feld
|
r33700 | The old obsoleted changeset is called a "predecessor" and possible | ||
Martin Geisler
|
r21164 | replacements are called "successors". Markers that used changeset X as | ||
Boris Feld
|
r33700 | a predecessor are called "successor markers of X" because they hold | ||
Martin Geisler
|
r21164 | information about the successors of X. Markers that use changeset Y as | ||
Boris Feld
|
r33700 | a successors are call "predecessor markers of Y" because they hold | ||
information about the predecessors of Y. | ||||
Pierre-Yves David
|
r17776 | |||
Pierre-Yves David
|
r17775 | Examples: | ||
Martin Geisler
|
r21164 | - When changeset A is replaced by changeset A', one marker is stored: | ||
Pierre-Yves David
|
r17775 | |||
Martin Geisler
|
r21166 | (A, (A',)) | ||
Pierre-Yves David
|
r17775 | |||
Martin Geisler
|
r21164 | - When changesets A and B are folded into a new changeset C, two markers are | ||
Pierre-Yves David
|
r17775 | stored: | ||
(A, (C,)) and (B, (C,)) | ||||
Martin Geisler
|
r21164 | - When changeset A is simply "pruned" from the graph, a marker is created: | ||
Pierre-Yves David
|
r17775 | |||
(A, ()) | ||||
liscju
|
r29894 | - When changeset A is split into B and C, a single marker is used: | ||
Pierre-Yves David
|
r17775 | |||
liscju
|
r29894 | (A, (B, C)) | ||
Pierre-Yves David
|
r17775 | |||
Martin Geisler
|
r21164 | We use a single marker to distinguish the "split" case from the "divergence" | ||
case. If two independent operations rewrite the same changeset A in to A' and | ||||
A'', we have an error case: divergent rewriting. We can detect it because | ||||
Pierre-Yves David
|
r17775 | two markers will be created independently: | ||
(A, (B,)) and (A, (C,)) | ||||
Pierre-Yves.David@ens-lyon.org
|
r17070 | |||
Format | ||||
------ | ||||
Markers are stored in an append-only file stored in | ||||
'.hg/store/obsstore'. | ||||
The file starts with a version header: | ||||
- 1 unsigned byte: version number, starting at zero. | ||||
Pierre-Yves David
|
r22612 | The header is followed by the markers. Marker format depend of the version. See | ||
comment associated with each format for details. | ||||
Martin Geisler
|
r21164 | |||
Pierre-Yves.David@ens-lyon.org
|
r17070 | """ | ||

from __future__ import absolute_import

import errno
import struct

from .i18n import _
from . import (
    error,
    node,
    obsutil,
    phases,
    policy,
    util,
)

parsers = policy.importmod(r'parsers')

_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# Options for obsolescence
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'

def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    result = set(repo.ui.configlist('experimental', 'stabilization'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't
    # been set but _enabled is true.
    if len(result) == 0 and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        not createmarkersopt in result):
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return option in result
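
# Illustrative note (not from the original file): the option names above map
# to the 'experimental.stabilization' config list read by isenabled(), e.g. a
# hypothetical hgrc enabling marker creation and exchange only:
#
#     [experimental]
#     stabilization = createmarkers, exchange
#
# The special value 'all' (or the legacy _enabled flag) turns every option on.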

### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it is a successor of a public changeset.
#
# o  A' (bumped)
# |`:
# | o  A
# |/
# o  Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'.
#
# o  Ad
# |`:
# | x  A'
# |'|
# o |  A
# |/
# o  Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker <A', (Ad,)>.
# This flag means that the successors express the changes between the public
# and bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
usingsha256 = 2

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)
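
# A rough sketch (not part of the original module) of what a single version-0
# marker looks like on disk, following the fields described above.  The node
# values are placeholders; real markers use 20-byte binary changeset ids:
#
#     prec = b'\x01' * 20          # obsoleted changeset
#     succ = b'\x02' * 20          # its single successor
#     meta = b'user:alice'         # encoded as in _fm0encodemeta()
#     raw = _pack(_fm0fixed + _fm0node,
#                 1,               # N: one successor
#                 len(meta),       # M: metadata size in bytes
#                 0,               # flags bit field
#                 prec, succ) + meta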

def _fm0readmarkers(data, off, stop):
    # Loop on markers
    while off < stop:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)

def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents, 1):
            metadata['p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata
|
r22848 | def _fm0encodemeta(meta): | ||
"""Return encoded metadata string to string mapping. | ||||
Assume no ':' in key and no '\0' in both key and value.""" | ||||
for key, value in meta.iteritems(): | ||||
if ':' in key or '\0' in key: | ||||
raise ValueError("':' and '\0' are forbidden in metadata key'") | ||||
if '\0' in value: | ||||
raise ValueError("':' is forbidden in metadata value'") | ||||
return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)]) | ||||
def _fm0decodemeta(data): | ||||
"""Return string to string dictionary from encoded version.""" | ||||
d = {} | ||||
for l in data.split('\0'): | ||||
if l: | ||||
key, value = l.split(':') | ||||
d[key] = value | ||||
return d | ||||
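
# Illustrative round trip (not from the original file) for the two helpers
# above; the key/value pairs are made-up examples:
#
#     _fm0encodemeta({'user': 'alice', 'note': 'amend'})
#     -> 'note:amend\x00user:alice'
#     _fm0decodemeta('note:amend\x00user:alice')
#     -> {'note': 'amend', 'user': 'alice'}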

## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: predecessor changeset identifier.
#
# - N*(20 or 32) bytes: successors changesets identifiers.
#
# - P*(20 or 32) bytes: parents of the predecessors changesets.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize(_fm1metapair)
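
# A quick sketch (not part of the original module) of the fixed part of a
# version-1 marker, unpacked with the _fm1fixed format described above:
#
#     t, secs, tz, flags, numsuc, numpar, nummeta, prec = \
#         _unpack(_fm1fixed, data[off:off + _fm1fsize])
#
# 't' is the total marker size, so the next marker starts at off + t; the
# successor/parent nodes, metadata entry sizes and metadata bytes follow.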

def _fm1purereadmarkers(data, off, stop):
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    ufixed = struct.Struct(_fm1fixed).unpack

    while off < stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)

def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        if lk > 255:
            msg = ('obsstore metadata key cannot be longer than 255 bytes'
                   ' (key "%s" is %u bytes)') % (key, lk)
            raise error.ProgrammingError(msg)
        if lv > 255:
            msg = ('obsstore metadata value cannot be longer than 255 bytes'
                   ' (value "%s" for key "%s" is %u bytes)') % (value, key, lv)
            raise error.ProgrammingError(msg)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)

def _fm1readmarkers(data, off, stop):
    native = getattr(parsers, 'fm1readmarkers', None)
    if not native:
        return _fm1purereadmarkers(data, off, stop)
    return native(data, off, stop)

# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}

def _readmarkerversion(data):
    return _unpack('>B', data[0:1])[0]

@util.nogc
def _readmarkers(data, off=None, stop=None):
    """Read and enumerate markers from raw data"""
    diskversion = _readmarkerversion(data)
    if not off:
        off = 1  # skip 1 byte version number
    if stop is None:
        stop = len(data)
    if diskversion not in formats:
        msg = _('parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    return diskversion, formats[diskversion][0](data, off, stop)

def encodeheader(version=_fm0version):
    return _pack('>B', version)

def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for marker in markers:
        yield encodeone(marker)

@util.nogc
def _addsuccessors(successors, markers):
    for mark in markers:
        successors.setdefault(mark[0], set()).add(mark)

def _addprecursors(*args, **kwargs):
    msg = ("'obsolete._addprecursors' is deprecated, "
           "use 'obsolete._addpredecessors'")
    util.nouideprecwarn(msg, '4.4')
    return _addpredecessors(*args, **kwargs)

@util.nogc
def _addpredecessors(predecessors, markers):
    for mark in markers:
        for suc in mark[1]:
            predecessors.setdefault(suc, set()).add(mark)

@util.nogc
def _addchildren(children, markers):
    for mark in markers:
        parents = mark[5]
        if parents is not None:
            for p in parents:
                children.setdefault(p, set()).add(mark)

def _checkinvalidmarkers(markers):
    """search for markers with invalid data and raise an error if needed

    Exists as a separate function to allow the evolve extension a more
    subtle handling.
    """
    for mark in markers:
        if node.nullid in mark[1]:
            raise error.Abort(_('bad obsolescence marker detected: '
                                'invalid successors nullid'))

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - predecessors[x] -> set(markers on predecessors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on predecessors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec:    nodeid, predecessors changesets
    # succs:   tuple of nodeid, successor changesets (0-N length)
    # flag:    integer, flag field carrying modifier for the markers (see doc)
    # meta:    binary blob, encoded metadata dictionary
    # date:    (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of predecessors
    #          None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related cache
        self.caches = {}
        self.svfs = svfs
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        if not self._cached('_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating markers you want to use the
        `createmarkers` function in this module instead.

        Return True if a new marker has been added, False if the marker
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = util.makedate()
            else:
                date = util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering out duplicates.
        Return the number of new markers."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                data = b''.join(encodemarkers(new, offset == 0, self._version))
                f.write(data)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            addedmarkers = transaction.changes.get('obsmarkers')
            if addedmarkers is not None:
                addedmarkers.update(new)
            self._addmarkers(new, data)
            # new markers *may* have changed several sets. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        return self.svfs.tryread('obsstore')

    @propertycache
    def _version(self):
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @property
    def precursors(self):
        msg = ("'obsstore.precursors' is deprecated, "
               "use 'obsstore.predecessors'")
        util.nouideprecwarn(msg, '4.4')
        return self.predecessors

    @propertycache
    def predecessors(self):
        predecessors = {}
        _addpredecessors(predecessors, self._all)
        return predecessors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        return attr in self.__dict__

    def _addmarkers(self, markers, rawdata):
        markers = list(markers)  # to allow repeated iteration
        self._data = self._data + rawdata
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('predecessors'):
            _addpredecessors(self.predecessors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use this changeset as successor
        - prune markers of direct children of this changeset
        - recursive application of the above rules on predecessors of these
          markers

        It is a set so you cannot rely on order."""
        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.predecessors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers

def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    defaultformat = ui.configint('format', 'obsstore-version')
    # rely on obsstore class default when possible.
    kwargs = {}
    if defaultformat is not None:
        kwargs['defaultformat'] = defaultformat
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **kwargs)
    if store and readonly:
        ui.warn(_('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
    return store

def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None
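
# Illustrative example (not from the original file): with the two formats
# registered above, commonversion() picks the highest shared version, e.g.
#
#     commonversion([2, 1, 0]) -> 1
#     commonversion([2, 3])    -> None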

# arbitrarily picked to fit into 8K limit from HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(data)
    return keys
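
# Sketch of the resulting mapping (not from the original file); each value is
# a base85-encoded chunk of version-0 markers, with chunks numbered in
# reverse creation order:
#
#     {'dump0': '<base85 data>', 'dump1': '<base85 data>', ...}
#
# listmarkers() below returns exactly this mapping over pushkey.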

def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(sorted(repo.obsstore))

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return False
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return False
    data = util.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            repo.invalidatevolatilesets()
            tr.close()
            return True
        finally:
            tr.release()
    finally:
        lock.release()

# keep compatibility for the 4.3 cycle
def allprecursors(obsstore, nodes, ignoreflags=0):
    movemsg = 'obsolete.allprecursors moved to obsutil.allprecursors'
    util.nouideprecwarn(movemsg, '4.3')
    return obsutil.allprecursors(obsstore, nodes, ignoreflags)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    movemsg = 'obsolete.allsuccessors moved to obsutil.allsuccessors'
    util.nouideprecwarn(movemsg, '4.3')
    return obsutil.allsuccessors(obsstore, nodes, ignoreflags)

def marker(repo, data):
    movemsg = 'obsolete.marker moved to obsutil.marker'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.marker(repo, data)

def getmarkers(repo, nodes=None, exclusive=False):
    movemsg = 'obsolete.getmarkers moved to obsutil.getmarkers'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.getmarkers(repo, nodes=nodes, exclusive=exclusive)

def exclusivemarkers(repo, nodes):
    movemsg = 'obsolete.exclusivemarkers moved to obsutil.exclusivemarkers'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.exclusivemarkers(repo, nodes)

def foreground(repo, nodes):
    movemsg = 'obsolete.foreground moved to obsutil.foreground'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.foreground(repo, nodes)

def successorssets(repo, initialnode, cache=None):
    movemsg = 'obsolete.successorssets moved to obsutil.successorssets'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.successorssets(repo, initialnode, cache=cache)

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}

def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        if name in cachefuncs:
            msg = "duplicated registration for volatileset '%s' (existing: %r)"
            raise error.ProgrammingError(msg % (name, cachefuncs[name]))
        cachefuncs[name] = func
        return func
    return decorator

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]

# To be simple we need to invalidate the obsolescence cache when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence-related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists on
    the repo.

    (We could be smarter here given the exact event that triggered the cache
    clearing)"""
    # only clear caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

def _mutablerevs(repo):
    """the set of mutable revisions in the repository"""
    return repo._phasecache.getrevset(repo, (phases.draft, phases.secret))

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    getnode = repo.changelog.node
    notpublic = _mutablerevs(repo)
    isobs = repo.obsstore.successors.__contains__
    obs = set(r for r in notpublic if isobs(getnode(r)))
    return obs

@cachefor('unstable')
def _computeunstableset(repo):
    msg = ("'unstable' volatile set is deprecated, "
           "use 'orphan'")
    repo.ui.deprecwarn(msg, '4.4')
    return _computeorphanset(repo)

@cachefor('orphan')
def _computeorphanset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    pfunc = repo.changelog.parentrevs
    mutable = _mutablerevs(repo)
    obsolete = getrevs(repo, 'obsolete')
    others = mutable - obsolete
    unstable = set()
    for r in sorted(others):
        # A rev is unstable if one of its parents is obsolete or unstable
        # this works since we traverse following growing rev order
        for p in pfunc(r):
            if p in obsolete or p in unstable:
                unstable.add(r)
                break
    return unstable

def _computesuspendedset(repo): | ||||
"""the set of obsolete parents with non obsolete descendants""" | ||||
Boris Feld
|
r33772 | suspended = repo.changelog.ancestors(getrevs(repo, 'orphan')) | ||
Pierre-Yves David
|
r18276 | return set(r for r in getrevs(repo, 'obsolete') if r in suspended) | ||
Pierre-Yves David
|
r17469 | |||
@cachefor('extinct') | ||||
def _computeextinctset(repo): | ||||
"""the set of obsolete parents without non obsolete descendants""" | ||||
Pierre-Yves David
|
r18277 | return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended') | ||
Pierre-Yves David
|
r17474 | |||
Pierre-Yves David
|
r17828 | @cachefor('bumped') | ||
def _computebumpedset(repo): | ||||
Boris Feld
|
r33774 | msg = ("'bumped' volatile set is deprecated, " | ||
"use 'phasedivergent'") | ||||
repo.ui.deprecwarn(msg, '4.4') | ||||
return _computephasedivergentset(repo) | ||||
@cachefor('phasedivergent') | ||||
def _computephasedivergentset(repo): | ||||
Pierre-Yves David
|
r17828 | """the set of revs trying to obsolete public revisions""" | ||
Pierre-Yves David
|
r20207 | bumped = set() | ||
Mads Kiilerich
|
r21024 | # util function (avoid attribute lookup in the loop) | ||
Pierre-Yves David
|
r20207 | phase = repo._phasecache.phase # would be faster to grab the full list | ||
public = phases.public | ||||
cl = repo.changelog | ||||
torev = cl.nodemap.get | ||||
Laurent Charignon
|
r24927 | for ctx in repo.set('(not public()) and (not obsolete())'): | ||
rev = ctx.rev() | ||||
Pierre-Yves David
|
r20207 | # We only evaluate mutable, non-obsolete revision | ||
Laurent Charignon
|
r24927 | node = ctx.node() | ||
Boris Feld
|
r33700 | # (future) A cache of predecessors may worth if split is very common | ||
Boris Feld
|
r33701 | for pnode in obsutil.allpredecessors(repo.obsstore, [node], | ||
Laurent Charignon
|
r24927 | ignoreflags=bumpedfix): | ||
prev = torev(pnode) # unfiltered! but so is phasecache | ||||
if (prev is not None) and (phase(repo, prev) <= public): | ||||
Boris Feld
|
r33700 | # we have a public predecessor | ||
Laurent Charignon
|
r24927 | bumped.add(rev) | ||
break # Next draft! | ||||
Pierre-Yves David
|
r20207 | return bumped | ||
Pierre-Yves David
|
r17828 | |||
Pierre-Yves David
|
@cachefor('divergent')
def _computedivergentset(repo):
    msg = ("'divergent' volatile set is deprecated, "
           "use 'contentdivergent'")
    repo.ui.deprecwarn(msg, '4.4')
    return _computecontentdivergentset(repo)

@cachefor('contentdivergent')
def _computecontentdivergentset(repo):
    """the set of revs that compete to be the final successors of some revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.predecessors.get(ctx.node(), ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue  # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                obsutil.successorssets(repo, prec, cache=newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.predecessors.get(prec, ()))
    return divergent

|
r32327 | def createmarkers(repo, relations, flag=0, date=None, metadata=None, | ||
operation=None): | ||||
Pierre-Yves David
|
r17474 | """Add obsolete markers between changesets in a repo | ||
Pierre-Yves David
|
r20517 | <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}]) | ||
Mads Kiilerich
|
r21024 | tuple. `old` and `news` are changectx. metadata is an optional dictionary | ||
Pierre-Yves David
|
r20517 | containing metadata for this marker only. It is merged with the global | ||
metadata specified through the `metadata` argument of this function, | ||||
Pierre-Yves David
|
r17474 | |||
Trying to obsolete a public changeset will raise an exception. | ||||
Current user and date are used except if specified otherwise in the | ||||
metadata attribute. | ||||
This function operates within a transaction of its own, but does | ||||
not take any lock on the repo. | ||||
""" | ||||
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        develuser = repo.ui.config('devel', 'user.obsmarker')
        if develuser:
            metadata['user'] = develuser
        else:
            metadata['user'] = repo.ui.username()

    # Operation metadata handling
    useoperation = repo.ui.configbool('experimental',
                                      'stabilization.track-operation')
    if useoperation and operation:
        metadata['operation'] = operation

    # Effect flag metadata handling
    saveeffectflag = repo.ui.configbool('experimental',
                                        'effect-flags',
                                        False)

    tr = repo.transaction('add-obsolescence-marker')
    try:
        markerargs = []
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise error.Abort(_("cannot obsolete public changeset: %s")
                                  % prec,
                                  hint="see 'hg help phases' for details")
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise error.Abort(_("changeset %s cannot obsolete itself")
                                  % prec)

            # Effect flag can be different by relation
            if saveeffectflag:
                # The effect flag is saved in a versioned field name for
                # future evolution
                effectflag = obsutil.geteffectflag(rel)
                localmetadata[obsutil.EFFECTFLAGFIELD] = "%d" % effectflag

            # Creating the marker causes the hidden cache to become invalid,
            # which causes recomputation when we ask for prec.parents() above.
            # Resulting in n^2 behavior. So let's prepare all of the args
            # first, then create the markers.
            markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata,
                                 ui=repo.ui)
            repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()