obsolete: raise richer exception on unknown version...
marmoute
r32591:19df975e default
mercurial/error.py
@@ -1,260 +1,268 @@
# error.py - Mercurial exceptions
#
# Copyright 2005-2008 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Mercurial exceptions.

This allows us to catch exceptions at higher levels without forcing
imports.
"""

from __future__ import absolute_import

# Do not import anything here, please

class Hint(object):
    """Mix-in to provide a hint of an error

    This should come first in the inheritance list to consume a hint and
    pass remaining arguments to the exception class.
    """
    def __init__(self, *args, **kw):
        self.hint = kw.pop(r'hint', None)
        super(Hint, self).__init__(*args, **kw)

class RevlogError(Hint, Exception):
    pass

class FilteredIndexError(IndexError):
    pass

class LookupError(RevlogError, KeyError):
    def __init__(self, name, index, message):
        self.name = name
        self.index = index
        # this can't be called 'message' because at least some installs of
        # Python 2.6+ complain about the 'message' property being deprecated
        self.lookupmessage = message
        if isinstance(name, str) and len(name) == 20:
            from .node import short
            name = short(name)
        RevlogError.__init__(self, '%s@%s: %s' % (index, name, message))

    def __str__(self):
        return RevlogError.__str__(self)

class FilteredLookupError(LookupError):
    pass

class ManifestLookupError(LookupError):
    pass

class CommandError(Exception):
    """Exception raised on errors in parsing the command line."""

class InterventionRequired(Hint, Exception):
    """Exception raised when a command requires human intervention."""

class Abort(Hint, Exception):
    """Raised if a command needs to print an error and exit."""

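# Illustration (not part of error.py): a minimal sketch of how the Hint
# mix-in cooperates with Abort. The 'hint' keyword is consumed before the
# base Exception sees the arguments, so str(err) stays clean while
# err.hint carries the advice for the UI layer.
def _demo_hint():
    try:
        raise Abort('push failed', hint='try "hg pull" first')
    except Abort as err:
        return str(err), err.hint  # ('push failed', 'try "hg pull" first')
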
class HookLoadError(Abort):
    """raised when loading a hook fails, aborting an operation

    Exists to allow more specialized catching."""

class HookAbort(Abort):
    """raised when a validation hook fails, aborting an operation

    Exists to allow more specialized catching."""

class ConfigError(Abort):
    """Exception raised when parsing config files"""

class UpdateAbort(Abort):
    """Raised when an update is aborted for a destination issue"""

class MergeDestAbort(Abort):
    """Raised when an update is aborted for destination issues"""

class NoMergeDestAbort(MergeDestAbort):
    """Raised when an update is aborted because there is nothing to merge"""

class ManyMergeDestAbort(MergeDestAbort):
    """Raised when an update is aborted because the destination is ambiguous"""

class ResponseExpected(Abort):
    """Raised when an EOF is received for a prompt"""
    def __init__(self):
        from .i18n import _
        Abort.__init__(self, _('response expected'))

class OutOfBandError(Hint, Exception):
    """Exception raised when a remote repo reports failure"""

class ParseError(Hint, Exception):
    """Raised when parsing config files and {rev,file}sets (msg[, pos])"""

class UnknownIdentifier(ParseError):
    """Exception raised when a {rev,file}set references an unknown identifier"""

    def __init__(self, function, symbols):
        from .i18n import _
        ParseError.__init__(self, _("unknown identifier: %s") % function)
        self.function = function
        self.symbols = symbols

class RepoError(Hint, Exception):
    pass

class RepoLookupError(RepoError):
    pass

class FilteredRepoLookupError(RepoLookupError):
    pass

class CapabilityError(RepoError):
    pass

class RequirementError(RepoError):
    """Exception raised if .hg/requires has an unknown entry."""

class StdioError(IOError):
    """Raised if I/O to stdout or stderr fails"""

    def __init__(self, err):
        IOError.__init__(self, err.errno, err.strerror)

class UnsupportedMergeRecords(Abort):
    def __init__(self, recordtypes):
        from .i18n import _
        self.recordtypes = sorted(recordtypes)
        s = ' '.join(self.recordtypes)
        Abort.__init__(
            self, _('unsupported merge state records: %s') % s,
            hint=_('see https://mercurial-scm.org/wiki/MergeStateRecords for '
                   'more information'))

+class UnknownVersion(Abort):
+    """generic exception for aborting from an encounter with an unknown version
+    """
+
+    def __init__(self, msg, hint=None, version=None):
+        self.version = version
+        super(UnknownVersion, self).__init__(msg, hint=hint)
+
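# Illustration (not part of error.py): UnknownVersion records the offending
# version on the exception, so callers can branch on the number itself
# instead of parsing the abort message. A sketch, assuming an 'exc' caught
# from a reader such as obsolete._readmarkers() below:
def _demo_unknownversion(exc):
    if exc.version is not None:
        return 'data uses unsupported version %r' % exc.version
    return str(exc)
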
class LockError(IOError):
    def __init__(self, errno, strerror, filename, desc):
        IOError.__init__(self, errno, strerror, filename)
        self.desc = desc

class LockHeld(LockError):
    def __init__(self, errno, filename, desc, locker):
        LockError.__init__(self, errno, 'Lock held', filename, desc)
        self.locker = locker

class LockUnavailable(LockError):
    pass

# LockError is for errors while acquiring the lock -- this is unrelated
class LockInheritanceContractViolation(RuntimeError):
    pass

class ResponseError(Exception):
    """Raised to print an error with part of output and exit."""

class UnknownCommand(Exception):
    """Exception raised if command is not in the command table."""

class AmbiguousCommand(Exception):
    """Exception raised if command shortcut matches more than one command."""

# derived from KeyboardInterrupt to simplify some breakout code
class SignalInterrupt(KeyboardInterrupt):
    """Exception raised on SIGTERM and SIGHUP."""

class SignatureError(Exception):
    pass

class PushRaced(RuntimeError):
    """An exception raised during unbundling that indicates a push race"""

class ProgrammingError(Hint, RuntimeError):
    """Raised if a mercurial (core or extension) developer made a mistake"""

class WdirUnsupported(Exception):
    """An exception which is raised when 'wdir()' is not supported"""

# bundle2 related errors
class BundleValueError(ValueError):
    """error raised when bundle2 cannot be processed"""

class BundleUnknownFeatureError(BundleValueError):
    def __init__(self, parttype=None, params=(), values=()):
        self.parttype = parttype
        self.params = params
        self.values = values
        if self.parttype is None:
            msg = 'Stream Parameter'
        else:
            msg = parttype
        entries = self.params
        if self.params and self.values:
            assert len(self.params) == len(self.values)
            entries = []
            for idx, par in enumerate(self.params):
                val = self.values[idx]
                if val is None:
                    entries.append(val)
                else:
                    entries.append("%s=%r" % (par, val))
        if entries:
            msg = '%s - %s' % (msg, ', '.join(entries))
        ValueError.__init__(self, msg)

class ReadOnlyPartError(RuntimeError):
    """error raised when code tries to alter a part being generated"""

class PushkeyFailed(Abort):
    """error raised when a pushkey part failed to update a value"""

    def __init__(self, partid, namespace=None, key=None, new=None, old=None,
                 ret=None):
        self.partid = partid
        self.namespace = namespace
        self.key = key
        self.new = new
        self.old = old
        self.ret = ret
        # no i18n expected to be processed into a better message
        Abort.__init__(self, 'failed to update value for "%s/%s"'
                       % (namespace, key))

class CensoredNodeError(RevlogError):
    """error raised when content verification fails on a censored node

    Also contains the tombstone data substituted for the uncensored data.
    """

    def __init__(self, filename, node, tombstone):
        from .node import short
        RevlogError.__init__(self, '%s:%s' % (filename, short(node)))
        self.tombstone = tombstone

class CensoredBaseError(RevlogError):
    """error raised when a delta is rejected because its base is censored

    A delta based on a censored revision must be formed as single patch
    operation which replaces the entire base with new content. This ensures
    the delta may be applied by clones which have not censored the base.
    """

class InvalidBundleSpecification(Exception):
    """error raised when a bundle specification is invalid.

    This is used for syntax errors as opposed to support errors.
    """

class UnsupportedBundleSpecification(Exception):
    """error raised when a bundle specification is not supported."""

class CorruptedState(Exception):
    """error raised when a command is not able to read its state from file"""

class PeerTransportError(Abort):
    """Transport-level I/O error when communicating with a peer repo."""
mercurial/obsolete.py
@@ -1,1301 +1,1301 @@
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
build new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides the old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "precursor" and possible
replacements are called "successors". Markers that use changeset X as
a precursor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "precursor markers of Y" because they hold
information about the precursors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

  (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. Marker format depends on the version.
See the comment associated with each format for details.

"""
from __future__ import absolute_import

import errno
import struct

from .i18n import _
from . import (
    error,
    node,
    phases,
    policy,
    util,
)

parsers = policy.importmod(r'parsers')

_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# Options for obsolescence
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'

def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    result = set(repo.ui.configlist('experimental', 'evolution'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't been
    # set but _enabled is true.
    if len(result) == 0 and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        not createmarkersopt in result):
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return option in result

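# Illustration (not part of obsolete.py): isenabled() reads the
# 'experimental.evolution' config list, so the feature is switched on from
# an hgrc such as:
#
#     [experimental]
#     evolution = createmarkers, exchange
#
# 'all' enables every option; per the check above, 'allowunstable' or
# 'exchange' without 'createmarkers' raises error.Abort.
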
### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it's a successor of a public changeset
#
#     o    A' (bumped)
#     |`:
#     | o  A
#     |/
#     o    Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
#     o   Ad
#     |`:
#     | x A'
#     |'|
#     o | A
#     |/
#     o Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker, <A', (Ad,)>.
# This flag means that the successors express the changes between the public
# and bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
usingsha256 = 2

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)

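# Illustration (not part of obsolete.py): the v0 fixed part is packed
# big-endian with no padding, so its size follows directly from the format
# string: uint8 + uint32 + uint8 + 20-byte node.
def _demo_fm0size():
    assert _calcsize('>BIB20s') == 1 + 4 + 1 + 20 == 26
    assert _fm0fsize == 26  # each marker is 26 + N*20 + M bytes
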
def _fm0readmarkers(data, off):
    # Loop on markers
    l = len(data)
    while off + _fm0fsize <= l:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)

def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents, 1):
            metadata['p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata keys")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata values")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            key, value = l.split(':')
            d[key] = value
    return d

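# Illustration (not part of obsolete.py): the v0 metadata encoding is a
# '\0'-separated list of 'key:value' strings with sorted keys, so any dict
# whose keys avoid ':' and '\0' round-trips:
def _demo_fm0meta():
    meta = {'user': 'alice', 'date': '0 0'}
    blob = _fm0encodemeta(meta)   # 'date:0 0\x00user:alice'
    assert _fm0decodemeta(blob) == meta
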
## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: precursor changeset identifier.
#
# - N*(20 or 32) bytes: successor changeset identifiers.
#
# - P*(20 or 32) bytes: parents of the precursor changeset.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize('BB')

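# Illustration (not part of obsolete.py): the v1 fixed part is likewise one
# big-endian struct; the variable-length nodes and metadata described above
# follow these 39 bytes.
def _demo_fm1size():
    assert _calcsize('>IdhHBBB20s') == 4 + 8 + 2 + 2 + 1 + 1 + 1 + 20 == 39
    assert _fm1fsize == 39
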
def _fm1purereadmarkers(data, off):
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    stop = len(data) - _fm1fsize
    ufixed = struct.Struct(_fm1fixed).unpack

    while off <= stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)

def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)

def _fm1readmarkers(data, off):
    native = getattr(parsers, 'fm1readmarkers', None)
    if not native:
        return _fm1purereadmarkers(data, off)
    stop = len(data) - _fm1fsize
    return native(data, off, stop)

# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}

@util.nogc
def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    off = 0
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion not in formats:
-        raise error.Abort(_('parsing obsolete marker: unknown version %r')
-                          % diskversion)
+        msg = _('parsing obsolete marker: unknown version %r') % diskversion
+        raise error.UnknownVersion(msg, version=diskversion)
    return diskversion, formats[diskversion][0](data, off)

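# Illustration (not part of obsolete.py): the point of this change. A stream
# whose version byte is not in `formats` now raises the richer exception,
# letting callers report the exact on-disk version:
def _demo_unknown_obsstore_version():
    data = _pack('>B', 42)  # header only, bogus version 42
    try:
        _readmarkers(data)
    except error.UnknownVersion as exc:
        assert exc.version == 42
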
453 def encodemarkers(markers, addheader=False, version=_fm0version):
453 def encodemarkers(markers, addheader=False, version=_fm0version):
454 # Kept separate from flushmarkers(), it will be reused for
454 # Kept separate from flushmarkers(), it will be reused for
455 # markers exchange.
455 # markers exchange.
456 encodeone = formats[version][1]
456 encodeone = formats[version][1]
457 if addheader:
457 if addheader:
458 yield _pack('>B', version)
458 yield _pack('>B', version)
459 for marker in markers:
459 for marker in markers:
460 yield encodeone(marker)
460 yield encodeone(marker)
461
461
462
462
463 class marker(object):
463 class marker(object):
464 """Wrap obsolete marker raw data"""
464 """Wrap obsolete marker raw data"""
465
465
466 def __init__(self, repo, data):
466 def __init__(self, repo, data):
467 # the repo argument will be used to create changectx in later version
467 # the repo argument will be used to create changectx in later version
468 self._repo = repo
468 self._repo = repo
469 self._data = data
469 self._data = data
470 self._decodedmeta = None
470 self._decodedmeta = None
471
471
472 def __hash__(self):
472 def __hash__(self):
473 return hash(self._data)
473 return hash(self._data)
474
474
475 def __eq__(self, other):
475 def __eq__(self, other):
476 if type(other) != type(self):
476 if type(other) != type(self):
477 return False
477 return False
478 return self._data == other._data
478 return self._data == other._data
479
479
480 def precnode(self):
480 def precnode(self):
481 """Precursor changeset node identifier"""
481 """Precursor changeset node identifier"""
482 return self._data[0]
482 return self._data[0]
483
483
484 def succnodes(self):
484 def succnodes(self):
485 """List of successor changesets node identifiers"""
485 """List of successor changesets node identifiers"""
486 return self._data[1]
486 return self._data[1]
487
487
488 def parentnodes(self):
488 def parentnodes(self):
489 """Parents of the precursors (None if not recorded)"""
489 """Parents of the precursors (None if not recorded)"""
490 return self._data[5]
490 return self._data[5]
491
491
492 def metadata(self):
492 def metadata(self):
493 """Decoded metadata dictionary"""
493 """Decoded metadata dictionary"""
494 return dict(self._data[3])
494 return dict(self._data[3])
495
495
496 def date(self):
496 def date(self):
497 """Creation date as (unixtime, offset)"""
497 """Creation date as (unixtime, offset)"""
498 return self._data[4]
498 return self._data[4]
499
499
500 def flags(self):
500 def flags(self):
501 """The flags field of the marker"""
501 """The flags field of the marker"""
502 return self._data[2]
502 return self._data[2]
503
503
504 @util.nogc
504 @util.nogc
505 def _addsuccessors(successors, markers):
505 def _addsuccessors(successors, markers):
506 for mark in markers:
506 for mark in markers:
507 successors.setdefault(mark[0], set()).add(mark)
507 successors.setdefault(mark[0], set()).add(mark)
508
508
509 @util.nogc
509 @util.nogc
510 def _addprecursors(precursors, markers):
510 def _addprecursors(precursors, markers):
511 for mark in markers:
511 for mark in markers:
512 for suc in mark[1]:
512 for suc in mark[1]:
513 precursors.setdefault(suc, set()).add(mark)
513 precursors.setdefault(suc, set()).add(mark)
514
514
515 @util.nogc
515 @util.nogc
516 def _addchildren(children, markers):
516 def _addchildren(children, markers):
517 for mark in markers:
517 for mark in markers:
518 parents = mark[5]
518 parents = mark[5]
519 if parents is not None:
519 if parents is not None:
520 for p in parents:
520 for p in parents:
521 children.setdefault(p, set()).add(mark)
521 children.setdefault(p, set()).add(mark)
522
522
523 def _checkinvalidmarkers(markers):
523 def _checkinvalidmarkers(markers):
524 """search for marker with invalid data and raise error if needed
524 """search for marker with invalid data and raise error if needed
525
525
526 Exist as a separated function to allow the evolve extension for a more
526 Exist as a separated function to allow the evolve extension for a more
527 subtle handling.
527 subtle handling.
528 """
528 """
529 for mark in markers:
529 for mark in markers:
530 if node.nullid in mark[1]:
530 if node.nullid in mark[1]:
531 raise error.Abort(_('bad obsolescence marker detected: '
531 raise error.Abort(_('bad obsolescence marker detected: '
532 'invalid successors nullid'))
532 'invalid successors nullid'))
533
533
534 class obsstore(object):
534 class obsstore(object):
535 """Store obsolete markers
535 """Store obsolete markers
536
536
537 Markers can be accessed with two mappings:
537 Markers can be accessed with two mappings:
538 - precursors[x] -> set(markers on precursors edges of x)
538 - precursors[x] -> set(markers on precursors edges of x)
539 - successors[x] -> set(markers on successors edges of x)
539 - successors[x] -> set(markers on successors edges of x)
540 - children[x] -> set(markers on precursors edges of children(x)
540 - children[x] -> set(markers on precursors edges of children(x)
541 """
541 """
542
542
543 fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
543 fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
544 # prec: nodeid, precursor changesets
544 # prec: nodeid, precursor changesets
545 # succs: tuple of nodeid, successor changesets (0-N length)
545 # succs: tuple of nodeid, successor changesets (0-N length)
546 # flag: integer, flag field carrying modifier for the markers (see doc)
546 # flag: integer, flag field carrying modifier for the markers (see doc)
547 # meta: binary blob, encoded metadata dictionary
547 # meta: binary blob, encoded metadata dictionary
548 # date: (float, int) tuple, date of marker creation
548 # date: (float, int) tuple, date of marker creation
549 # parents: (tuple of nodeid) or None, parents of precursors
549 # parents: (tuple of nodeid) or None, parents of precursors
550 # None is used when no data has been recorded
550 # None is used when no data has been recorded
551
551
552 def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
552 def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
553 # caches for various obsolescence related cache
553 # caches for various obsolescence related cache
554 self.caches = {}
554 self.caches = {}
555 self.svfs = svfs
555 self.svfs = svfs
556 self._version = defaultformat
556 self._version = defaultformat
557 self._readonly = readonly
557 self._readonly = readonly
558
558
559 def __iter__(self):
559 def __iter__(self):
560 return iter(self._all)
560 return iter(self._all)
561
561
562 def __len__(self):
562 def __len__(self):
563 return len(self._all)
563 return len(self._all)
564
564
565 def __nonzero__(self):
565 def __nonzero__(self):
566 if not self._cached('_all'):
566 if not self._cached('_all'):
567 try:
567 try:
568 return self.svfs.stat('obsstore').st_size > 1
568 return self.svfs.stat('obsstore').st_size > 1
569 except OSError as inst:
569 except OSError as inst:
570 if inst.errno != errno.ENOENT:
570 if inst.errno != errno.ENOENT:
571 raise
571 raise
572 # just build an empty _all list if no obsstore exists, which
572 # just build an empty _all list if no obsstore exists, which
573 # avoids further stat() syscalls
573 # avoids further stat() syscalls
574 pass
574 pass
575 return bool(self._all)
575 return bool(self._all)
576
576
577 __bool__ = __nonzero__
577 __bool__ = __nonzero__
578
578
579 @property
579 @property
580 def readonly(self):
580 def readonly(self):
581 """True if marker creation is disabled
581 """True if marker creation is disabled
582
582
583 Remove me in the future when obsolete marker is always on."""
583 Remove me in the future when obsolete marker is always on."""
584 return self._readonly
584 return self._readonly
585
585
586 def create(self, transaction, prec, succs=(), flag=0, parents=None,
586 def create(self, transaction, prec, succs=(), flag=0, parents=None,
587 date=None, metadata=None, ui=None):
587 date=None, metadata=None, ui=None):
588 """obsolete: add a new obsolete marker
588 """obsolete: add a new obsolete marker
589
589
590 * ensuring it is hashable
590 * ensuring it is hashable
591 * check mandatory metadata
591 * check mandatory metadata
592 * encode metadata
592 * encode metadata
593
593
594 If you are a human writing code creating marker you want to use the
594 If you are a human writing code creating marker you want to use the
595 `createmarkers` function in this module instead.
595 `createmarkers` function in this module instead.
596
596
597 return True if a new marker have been added, False if the markers
597 return True if a new marker have been added, False if the markers
598 already existed (no op).
598 already existed (no op).
599 """
599 """
600 if metadata is None:
600 if metadata is None:
601 metadata = {}
601 metadata = {}
602 if date is None:
602 if date is None:
603 if 'date' in metadata:
603 if 'date' in metadata:
604 # as a courtesy for out-of-tree extensions
604 # as a courtesy for out-of-tree extensions
605 date = util.parsedate(metadata.pop('date'))
605 date = util.parsedate(metadata.pop('date'))
606 elif ui is not None:
606 elif ui is not None:
607 date = ui.configdate('devel', 'default-date')
607 date = ui.configdate('devel', 'default-date')
608 if date is None:
608 if date is None:
609 date = util.makedate()
609 date = util.makedate()
610 else:
610 else:
611 date = util.makedate()
611 date = util.makedate()
612 if len(prec) != 20:
612 if len(prec) != 20:
613 raise ValueError(prec)
613 raise ValueError(prec)
614 for succ in succs:
614 for succ in succs:
615 if len(succ) != 20:
615 if len(succ) != 20:
616 raise ValueError(succ)
616 raise ValueError(succ)
617 if prec in succs:
617 if prec in succs:
618 raise ValueError(_('in-marker cycle with %s') % node.hex(prec))
618 raise ValueError(_('in-marker cycle with %s') % node.hex(prec))
619
619
620 metadata = tuple(sorted(metadata.iteritems()))
620 metadata = tuple(sorted(metadata.iteritems()))
621
621
622 marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
622 marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
623 return bool(self.add(transaction, [marker]))
623 return bool(self.add(transaction, [marker]))
624
624
    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicates.
        Return the number of new markers."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in encodemarkers(new, offset == 0, self._version):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._addmarkers(new)
            # new markers may have changed several sets; invalidate the caches
            self.caches.clear()
        # record the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

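    # Illustrative note (marker names below are made up): because the hook
    # argument is read back and incremented above, several add() calls within
    # one transaction accumulate rather than overwrite each other:
    #
    #   store.add(tr, [m1, m2])   # hookargs['new_obsmarkers'] == '2'
    #   store.add(tr, [m2, m3])   # m2 is a duplicate -> '3', not '4'
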
    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _all(self):
        data = self.svfs.tryread('obsstore')
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def precursors(self):
        precursors = {}
        _addprecursors(precursors, self._all)
        return precursors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        return attr in self.__dict__

    def _addmarkers(self, markers):
        markers = list(markers)  # to allow repeated iteration
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('precursors'):
            _addprecursors(self.precursors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes

        "relevant" to a set of nodes means:

        - markers that use one of these nodes as successor
        - prune markers of direct children of these nodes
        - recursive application of the two rules on precursors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers

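# Hedged, illustrative sketch (not part of the module API): the fixed-point
# walk used by relevantmarkers() above, run over a plain precursors mapping.
# The single-letter nodes and the reduced (precursor, successors) marker
# shape are fake stand-ins for 20-byte nodes and full marker tuples.
def _demorelevantwalk():
    precursors = {
        'c': [('b', ('c',))],  # c came from b
        'b': [('a', ('b',))],  # b came from a
    }
    pending, seen = set(['c']), set()
    while pending:
        direct = set()
        for n in pending:
            direct.update(precursors.get(n, ()))
        direct -= seen
        seen |= direct
        pending = set(m[0] for m in direct)
    return seen  # both markers: c's history back through b to a
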
def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for the highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None

# arbitrarily picked to fit into the 8K limit imposed by HTTP servers
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300
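# (Illustrative arithmetic, not from the original source: base85 expands
# every 4 bytes of binary data into 5 ASCII characters, so a 5300-byte
# payload encodes to roughly 5300 * 5 / 4 = 6625 characters, leaving room
# for the version header and pushkey overhead under the 8K limit.)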

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split into chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(data)
    return keys

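# Hedged, self-contained sketch (not part of the module API): the greedy
# chunking used by _pushkeyescape() above, isolated so it can be read on its
# own. Blobs are packed into the current part until adding one more would
# exceed the budget, at which point a new part is started.
def _demochunking(blobs, budget=5300):
    parts = []
    currentpart, currentlen = [], budget * 2  # force a first part
    for blob in blobs:
        if len(blob) + currentlen > budget:
            currentpart, currentlen = [], 0
            parts.append(currentpart)
        currentpart.append(blob)
        currentlen += len(blob)
    return parts  # e.g. 3 blobs of 2000 bytes -> [[b1, b2], [b3]]
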
def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(sorted(repo.obsstore))

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = util.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            repo.invalidatevolatilesets()
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()

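# Illustrative note on the pushkey roundtrip implied above (key names are the
# ones generated by _pushkeyescape; values are abbreviated placeholders):
#
#   listmarkers(repo)  ->  {'dump0': '<base85 data>', 'dump1': ...}
#   pushmarker(repo, 'dump0', '', '<base85 data>')  ->  1 on success
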
def getmarkers(repo, nodes=None):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    if nodes is None:
        rawmarkers = repo.obsstore
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for markerdata in rawmarkers:
        yield marker(repo, markerdata)

def relevantmarkers(repo, node):
    """all obsolete markers relevant to some revision"""
    for markerdata in repo.obsstore.relevantmarkers(node):
        yield marker(repo, markerdata)


def precursormarkers(ctx):
    """obsolete marker marking this changeset as a successor"""
    for data in ctx.repo().obsstore.precursors.get(ctx.node(), ()):
        yield marker(ctx.repo(), data)

def successormarkers(ctx):
    """obsolete marker making this changeset obsolete"""
    for data in ctx.repo().obsstore.successors.get(ctx.node(), ()):
        yield marker(ctx.repo(), data)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore markers flagged with the specified flag
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)

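# Hedged, illustrative sketch (not part of the module API): allsuccessors()
# only reads `obsstore.successors`, so a stub object with that mapping is
# enough to show the traversal. The fake marker below uses short strings for
# nodes and only the (precursor, successors, flag) fields the loop touches;
# real markers are longer tuples built around 20-byte nodes.
def _demoallsuccessors():
    class stubstore(object):
        # one marker recording that 'a' was split into 'b' and 'c'
        successors = {'a': [('a', ('b', 'c'), 0)]}
    return sorted(allsuccessors(stubstore(), ['a']))  # -> ['a', 'b', 'c']
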
def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursor of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""

    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            # ignore markers flagged with the specified flag
            if mark[2] & ignoreflags:
                continue
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)

def foreground(repo, nodes):
    """return all nodes in the "foreground" of other nodes

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relations. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that obsolescence cycles may produce odd results in complex
    situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)


def successorssets(repo, initialnode, cache=None):
    """Return set of all latest successors of initial nodes

    The successors set of a changeset A is the group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. The successors set contains non-obsolete changesets only.

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will contain itself only, i.e. [(A,)].

    Finally, successors unknown locally are considered to be pruned (obsoleted
    without any successors).

    The optional `cache` parameter is a dictionary that may contain precomputed
    successors sets. It is meant to reuse the computation of a previous call to
    `successorssets` when multiple calls are made at the same time. The cache
    dictionary is updated in place. The caller is responsible for its life
    span. Code that makes multiple calls to `successorssets` *must* use this
    cache mechanism or suffer terrible performance.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # elements added to "toproceed" must be added here too
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list acts as our call stack. Every node we search
    # successors sets for is stacked there.
    #
    # The `stackedset` is the set version of this stack used to check if a
    # node is already stacked. This check is used to detect cycles and prevent
    # infinite loops.
    #
    # successors sets of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors
    # sets for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors set of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid last successor.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successor.
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            # For each direct successors of CURRENT, we check whether its
            # successors sets are known. If they are not, we stack the
            # unknown node and proceed to the next iteration of the while
            # loop. (case 3)
            #
            # During this step, we may detect obsolescence cycles: a node
            # with unknown successors sets but already in the call stack.
            # In such a situation, we arbitrarily set the successors sets of
            # the node to nothing (node pruned) to break the cycle.
            #
            # If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   a single successor is a standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" nodes.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # Cartesian product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicate entries in a successors
                                    # set; the first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse()  # put small successors set first
                cache[current] = final
    return cache[initialnode]

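# Hedged, illustrative sketch (not part of the module API): a runnable toy
# version of the recursive definition sketched in the comments above, valid
# only for small, acyclic marker graphs. Nodes are single letters instead of
# 20-byte ids, and the pruning, deduplication, subset removal and cycle
# breaking performed by the real function are deliberately omitted.
def _demosuccessorssets():
    # map node -> list of markers, each marker reduced to its successor tuple
    succmarkers = {
        'a': [('b', 'c')],      # a was split into b and c
        'b': [('d',), ('e',)],  # b was divergently rewritten as d or e
    }

    def sets(x):
        if x not in succmarkers:
            return [(x,)]  # not obsolete: it is its own successors set
        result = []
        for marksuccs in succmarkers[x]:
            ss = [()]  # Cartesian product accumulator, as in the real code
            for suc in marksuccs:
                ss = [p + s for p in ss for s in sets(suc)]
            result.extend(ss)
        return result

    return sets('a')  # -> [('d', 'c'), ('e', 'c')]
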
# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return decorator

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]

# To be simple we need to invalidate obsolescence cache when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists on
    the repo.

    (We could be smarter here given the exact event that triggers the cache
    clearing)"""
    # only clear the cache if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    obs = set()
    getnode = repo.changelog.node
    notpublic = repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
    for r in notpublic:
        if getnode(r) in repo.obsstore.successors:
            obs.add(r)
    return obs

@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non-obsolete revisions with obsolete parents"""
    revs = [(ctx.rev(), ctx) for ctx in
            repo.set('(not public()) and (not obsolete())')]
    revs.sort(key=lambda x: x[0])
    unstable = set()
    for rev, ctx in revs:
        # A rev is unstable if one of its parents is obsolete or unstable.
        # This works since we traverse in increasing rev order.
        if any((x.obsolete() or (x.rev() in unstable))
               for x in ctx.parents()):
            unstable.add(rev)
    return unstable

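# Illustrative note (revision numbers below are made up): because revisions
# are visited in increasing rev order, instability propagates down a chain in
# a single pass. If rev 2's parent rev 1 is obsolete, rev 2 is added to
# `unstable` before its child rev 3 is examined, so rev 3 is caught by the
# `x.rev() in unstable` branch without a second traversal.
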
@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non-obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non-obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')


@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase  # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    for ctx in repo.set('(not public()) and (not obsolete())'):
        rev = ctx.rev()
        # We only evaluate mutable, non-obsolete revisions
        node = ctx.node()
        # (future) A cache of precursors may be worthwhile if splits are very
        # common
        for pnode in allprecursors(repo.obsstore, [node],
                                   ignoreflags=bumpedfix):
            prev = torev(pnode)  # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public precursor
                bumped.add(rev)
                break  # Next draft!
    return bumped

@cachefor('divergent')
def _computedivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision."""
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue  # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent


def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                  operation=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[, {metadata}])
    tuples. `old` and `news` are changectx. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    useoperation = repo.ui.configbool('experimental',
                                      'evolution.track-operation',
                                      False)
    if useoperation and operation:
        metadata['operation'] = operation
    tr = repo.transaction('add-obsolescence-marker')
    try:
        markerargs = []
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise error.Abort(_("cannot obsolete public changeset: %s")
                                  % prec,
                                  hint="see 'hg help phases' for details")
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise error.Abort(_("changeset %s cannot obsolete itself")
                                  % prec)

            # Creating the marker causes the hidden cache to become invalid,
            # which causes recomputation when we ask for prec.parents() above.
            # Resulting in n^2 behavior. So let's prepare all of the args
            # first, then create the markers.
            markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata,
                                 ui=repo.ui)
        repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
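
# Hedged usage sketch (the names `old`, `new` and the 'note' metadata key are
# illustrative, not from this module): recording that changectx `old` was
# rewritten into changectx `new`. The caller is expected to hold the repo
# lock, since createmarkers() opens its own transaction but takes no lock.
#
#   l = repo.lock()
#   try:
#       createmarkers(repo, [(old, (new,), {'note': 'amended'})],
#                     operation='amend')
#   finally:
#       l.release()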