obsolete: raise richer exception on unknown version...
marmoute
r32591:19df975e default
@@ -1,260 +1,268
1 1 # error.py - Mercurial exceptions
2 2 #
3 3 # Copyright 2005-2008 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Mercurial exceptions.
9 9
10 10 This allows us to catch exceptions at higher levels without forcing
11 11 imports.
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 # Do not import anything here, please
17 17
18 18 class Hint(object):
19 19 """Mix-in to provide a hint of an error
20 20
21 21 This should come first in the inheritance list to consume a hint and
22 22 pass remaining arguments to the exception class.
23 23 """
24 24 def __init__(self, *args, **kw):
25 25 self.hint = kw.pop(r'hint', None)
26 26 super(Hint, self).__init__(*args, **kw)
27 27
28 28 class RevlogError(Hint, Exception):
29 29 pass
30 30
31 31 class FilteredIndexError(IndexError):
32 32 pass
33 33
34 34 class LookupError(RevlogError, KeyError):
35 35 def __init__(self, name, index, message):
36 36 self.name = name
37 37 self.index = index
38 38 # this can't be called 'message' because at least some installs of
39 39 # Python 2.6+ complain about the 'message' property being deprecated
40 40 self.lookupmessage = message
41 41 if isinstance(name, str) and len(name) == 20:
42 42 from .node import short
43 43 name = short(name)
44 44 RevlogError.__init__(self, '%s@%s: %s' % (index, name, message))
45 45
46 46 def __str__(self):
47 47 return RevlogError.__str__(self)
48 48
49 49 class FilteredLookupError(LookupError):
50 50 pass
51 51
52 52 class ManifestLookupError(LookupError):
53 53 pass
54 54
55 55 class CommandError(Exception):
56 56 """Exception raised on errors in parsing the command line."""
57 57
58 58 class InterventionRequired(Hint, Exception):
59 59 """Exception raised when a command requires human intervention."""
60 60
61 61 class Abort(Hint, Exception):
62 62 """Raised if a command needs to print an error and exit."""
63 63
64 64 class HookLoadError(Abort):
65 65 """raised when loading a hook fails, aborting an operation
66 66
67 67 Exists to allow more specialized catching."""
68 68
69 69 class HookAbort(Abort):
70 70 """raised when a validation hook fails, aborting an operation
71 71
72 72 Exists to allow more specialized catching."""
73 73
74 74 class ConfigError(Abort):
75 75 """Exception raised when parsing config files"""
76 76
77 77 class UpdateAbort(Abort):
78 78 """Raised when an update is aborted for destination issue"""
79 79
80 80 class MergeDestAbort(Abort):
81 81 """Raised when an update is aborted for destination issues"""
82 82
83 83 class NoMergeDestAbort(MergeDestAbort):
84 84 """Raised when an update is aborted because there is nothing to merge"""
85 85
86 86 class ManyMergeDestAbort(MergeDestAbort):
87 87 """Raised when an update is aborted because destination is ambiguous"""
88 88
89 89 class ResponseExpected(Abort):
90 90 """Raised when an EOF is received for a prompt"""
91 91 def __init__(self):
92 92 from .i18n import _
93 93 Abort.__init__(self, _('response expected'))
94 94
95 95 class OutOfBandError(Hint, Exception):
96 96 """Exception raised when a remote repo reports failure"""
97 97
98 98 class ParseError(Hint, Exception):
99 99 """Raised when parsing config files and {rev,file}sets (msg[, pos])"""
100 100
101 101 class UnknownIdentifier(ParseError):
102 102 """Exception raised when a {rev,file}set references an unknown identifier"""
103 103
104 104 def __init__(self, function, symbols):
105 105 from .i18n import _
106 106 ParseError.__init__(self, _("unknown identifier: %s") % function)
107 107 self.function = function
108 108 self.symbols = symbols
109 109
110 110 class RepoError(Hint, Exception):
111 111 pass
112 112
113 113 class RepoLookupError(RepoError):
114 114 pass
115 115
116 116 class FilteredRepoLookupError(RepoLookupError):
117 117 pass
118 118
119 119 class CapabilityError(RepoError):
120 120 pass
121 121
122 122 class RequirementError(RepoError):
123 123 """Exception raised if .hg/requires has an unknown entry."""
124 124
125 125 class StdioError(IOError):
126 126 """Raised if I/O to stdout or stderr fails"""
127 127
128 128 def __init__(self, err):
129 129 IOError.__init__(self, err.errno, err.strerror)
130 130
131 131 class UnsupportedMergeRecords(Abort):
132 132 def __init__(self, recordtypes):
133 133 from .i18n import _
134 134 self.recordtypes = sorted(recordtypes)
135 135 s = ' '.join(self.recordtypes)
136 136 Abort.__init__(
137 137 self, _('unsupported merge state records: %s') % s,
138 138 hint=_('see https://mercurial-scm.org/wiki/MergeStateRecords for '
139 139 'more information'))
140 140
141 class UnknownVersion(Abort):
142 """generic exception for aborting from an encounter with an unknown version
143 """
144
145 def __init__(self, msg, hint=None, version=None):
146 self.version = version
147 super(UnknownVersion, self).__init__(msg, hint=hint)
148
141 149 class LockError(IOError):
142 150 def __init__(self, errno, strerror, filename, desc):
143 151 IOError.__init__(self, errno, strerror, filename)
144 152 self.desc = desc
145 153
146 154 class LockHeld(LockError):
147 155 def __init__(self, errno, filename, desc, locker):
148 156 LockError.__init__(self, errno, 'Lock held', filename, desc)
149 157 self.locker = locker
150 158
151 159 class LockUnavailable(LockError):
152 160 pass
153 161
154 162 # LockError is for errors while acquiring the lock -- this is unrelated
155 163 class LockInheritanceContractViolation(RuntimeError):
156 164 pass
157 165
158 166 class ResponseError(Exception):
159 167 """Raised to print an error with part of output and exit."""
160 168
161 169 class UnknownCommand(Exception):
162 170 """Exception raised if command is not in the command table."""
163 171
164 172 class AmbiguousCommand(Exception):
165 173 """Exception raised if command shortcut matches more than one command."""
166 174
167 175 # derived from KeyboardInterrupt to simplify some breakout code
168 176 class SignalInterrupt(KeyboardInterrupt):
169 177 """Exception raised on SIGTERM and SIGHUP."""
170 178
171 179 class SignatureError(Exception):
172 180 pass
173 181
174 182 class PushRaced(RuntimeError):
175 183 """An exception raised during unbundling that indicate a push race"""
176 184
177 185 class ProgrammingError(Hint, RuntimeError):
178 186 """Raised if a mercurial (core or extension) developer made a mistake"""
179 187
180 188 class WdirUnsupported(Exception):
181 189 """An exception which is raised when 'wdir()' is not supported"""
182 190
183 191 # bundle2 related errors
184 192 class BundleValueError(ValueError):
185 193 """error raised when bundle2 cannot be processed"""
186 194
187 195 class BundleUnknownFeatureError(BundleValueError):
188 196 def __init__(self, parttype=None, params=(), values=()):
189 197 self.parttype = parttype
190 198 self.params = params
191 199 self.values = values
192 200 if self.parttype is None:
193 201 msg = 'Stream Parameter'
194 202 else:
195 203 msg = parttype
196 204 entries = self.params
197 205 if self.params and self.values:
198 206 assert len(self.params) == len(self.values)
199 207 entries = []
200 208 for idx, par in enumerate(self.params):
201 209 val = self.values[idx]
202 210 if val is None:
203 211 entries.append(par)
204 212 else:
205 213 entries.append("%s=%r" % (par, val))
206 214 if entries:
207 215 msg = '%s - %s' % (msg, ', '.join(entries))
208 216 ValueError.__init__(self, msg)
209 217
210 218 class ReadOnlyPartError(RuntimeError):
211 219 """error raised when code tries to alter a part being generated"""
212 220
213 221 class PushkeyFailed(Abort):
214 222 """error raised when a pushkey part failed to update a value"""
215 223
216 224 def __init__(self, partid, namespace=None, key=None, new=None, old=None,
217 225 ret=None):
218 226 self.partid = partid
219 227 self.namespace = namespace
220 228 self.key = key
221 229 self.new = new
222 230 self.old = old
223 231 self.ret = ret
224 232 # no i18n; expected to be processed into a better message
225 233 Abort.__init__(self, 'failed to update value for "%s/%s"'
226 234 % (namespace, key))
227 235
228 236 class CensoredNodeError(RevlogError):
229 237 """error raised when content verification fails on a censored node
230 238
231 239 Also contains the tombstone data substituted for the uncensored data.
232 240 """
233 241
234 242 def __init__(self, filename, node, tombstone):
235 243 from .node import short
236 244 RevlogError.__init__(self, '%s:%s' % (filename, short(node)))
237 245 self.tombstone = tombstone
238 246
239 247 class CensoredBaseError(RevlogError):
240 248 """error raised when a delta is rejected because its base is censored
241 249
242 250 A delta based on a censored revision must be formed as a single patch
243 251 operation which replaces the entire base with new content. This ensures
244 252 the delta may be applied by clones which have not censored the base.
245 253 """
246 254
247 255 class InvalidBundleSpecification(Exception):
248 256 """error raised when a bundle specification is invalid.
249 257
250 258 This is used for syntax errors as opposed to support errors.
251 259 """
252 260
253 261 class UnsupportedBundleSpecification(Exception):
254 262 """error raised when a bundle specification is not supported."""
255 263
256 264 class CorruptedState(Exception):
257 265 """error raised when a command is not able to read its state from file"""
258 266
259 267 class PeerTransportError(Abort):
260 268 """Transport-level I/O error when communicating with a peer repo."""
@@ -1,1301 +1,1301
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 build new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "precursor" and possible
24 24 replacements are called "successors". Markers that use changeset X as
25 25 a precursor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
27 27 a successor are called "precursor markers of Y" because they hold
28 28 information about the precursors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
50 50 case. If two independent operations rewrite the same changeset A into A' and
51 51 A'', we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53
54 54 (A, (B,)) and (A, (C,))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are stored in an append-only file stored in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
66 66 The header is followed by the markers. Marker format depends on the version.
67 67 See the comment associated with each format for details.
68 68
69 69 """
70 70 from __future__ import absolute_import
71 71
72 72 import errno
73 73 import struct
74 74
75 75 from .i18n import _
76 76 from . import (
77 77 error,
78 78 node,
79 79 phases,
80 80 policy,
81 81 util,
82 82 )
83 83
84 84 parsers = policy.importmod(r'parsers')
85 85
86 86 _pack = struct.pack
87 87 _unpack = struct.unpack
88 88 _calcsize = struct.calcsize
89 89 propertycache = util.propertycache
90 90
91 91 # the obsolete feature is not mature enough to be enabled by default.
92 92 # you have to rely on a third party extension to enable this.
93 93 _enabled = False
94 94
95 95 # Options for obsolescence
96 96 createmarkersopt = 'createmarkers'
97 97 allowunstableopt = 'allowunstable'
98 98 exchangeopt = 'exchange'
99 99
100 100 def isenabled(repo, option):
101 101 """Returns True if the given repository has the given obsolete option
102 102 enabled.
103 103 """
104 104 result = set(repo.ui.configlist('experimental', 'evolution'))
105 105 if 'all' in result:
106 106 return True
107 107
108 108 # For migration purposes, temporarily return true if the config hasn't been
109 109 # set but _enabled is true.
110 110 if len(result) == 0 and _enabled:
111 111 return True
112 112
113 113 # createmarkers must be enabled if other options are enabled
114 114 if ((allowunstableopt in result or exchangeopt in result) and
115 115 not createmarkersopt in result):
116 116 raise error.Abort(_("'createmarkers' obsolete option must be enabled "
117 117 "if other obsolete options are enabled"))
118 118
119 119 return option in result
120 120
121 121 ### obsolescence marker flag
122 122
123 123 ## bumpedfix flag
124 124 #
125 125 # When a changeset A' succeeds a changeset A which became public, we call A'
126 126 # "bumped" because it's a successor of a public changeset
127 127 #
128 128 # o A' (bumped)
129 129 # |`:
130 130 # | o A
131 131 # |/
132 132 # o Z
133 133 #
134 134 # The way to solve this situation is to create a new changeset Ad as a child
135 135 # of A. This changeset has the same content as A'. So the diff from A to A'
136 136 # is the same as the diff from A to Ad. Ad is marked as a successor of A'
137 137 #
138 138 # o Ad
139 139 # |`:
140 140 # | x A'
141 141 # |'|
142 142 # o | A
143 143 # |/
144 144 # o Z
145 145 #
146 146 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
147 147 # as bumped too, we add the `bumpedfix` flag to the marker <A', (Ad,)>.
148 148 # This flag means that the successors express the changes between the public and
149 149 # bumped version and fix the situation, breaking the transitivity of
150 150 # "bumped" here.
151 151 bumpedfix = 1
152 152 usingsha256 = 2
153 153
154 154 ## Parsing and writing of version "0"
155 155 #
156 156 # The header is followed by the markers. Each marker is made of:
157 157 #
158 158 # - 1 uint8 : number of new changesets "N", can be zero.
159 159 #
160 160 # - 1 uint32: metadata size "M" in bytes.
161 161 #
162 162 # - 1 byte: a bit field. It is reserved for flags used in common
163 163 # obsolete marker operations, to avoid repeated decoding of metadata
164 164 # entries.
165 165 #
166 166 # - 20 bytes: obsoleted changeset identifier.
167 167 #
168 168 # - N*20 bytes: new changesets identifiers.
169 169 #
170 170 # - M bytes: metadata as a sequence of nul-terminated strings. Each
171 171 # string contains a key and a value, separated by a colon ':', without
172 172 # additional encoding. Keys cannot contain '\0' or ':' and values
173 173 # cannot contain '\0'.
174 174 _fm0version = 0
175 175 _fm0fixed = '>BIB20s'
176 176 _fm0node = '20s'
177 177 _fm0fsize = _calcsize(_fm0fixed)
178 178 _fm0fnodesize = _calcsize(_fm0node)
179 179
180 180 def _fm0readmarkers(data, off):
181 181 # Loop on markers
182 182 l = len(data)
183 183 while off + _fm0fsize <= l:
184 184 # read fixed part
185 185 cur = data[off:off + _fm0fsize]
186 186 off += _fm0fsize
187 187 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
188 188 # read replacement
189 189 sucs = ()
190 190 if numsuc:
191 191 s = (_fm0fnodesize * numsuc)
192 192 cur = data[off:off + s]
193 193 sucs = _unpack(_fm0node * numsuc, cur)
194 194 off += s
195 195 # read metadata
196 196 # (metadata will be decoded on demand)
197 197 metadata = data[off:off + mdsize]
198 198 if len(metadata) != mdsize:
199 199 raise error.Abort(_('parsing obsolete marker: metadata is too '
200 200 'short, %d bytes expected, got %d')
201 201 % (mdsize, len(metadata)))
202 202 off += mdsize
203 203 metadata = _fm0decodemeta(metadata)
204 204 try:
205 205 when, offset = metadata.pop('date', '0 0').split(' ')
206 206 date = float(when), int(offset)
207 207 except ValueError:
208 208 date = (0., 0)
209 209 parents = None
210 210 if 'p2' in metadata:
211 211 parents = (metadata.pop('p1', None), metadata.pop('p2', None))
212 212 elif 'p1' in metadata:
213 213 parents = (metadata.pop('p1', None),)
214 214 elif 'p0' in metadata:
215 215 parents = ()
216 216 if parents is not None:
217 217 try:
218 218 parents = tuple(node.bin(p) for p in parents)
219 219 # if parent content is not a nodeid, drop the data
220 220 for p in parents:
221 221 if len(p) != 20:
222 222 parents = None
223 223 break
224 224 except TypeError:
225 225 # if content cannot be translated to nodeid drop the data.
226 226 parents = None
227 227
228 228 metadata = tuple(sorted(metadata.iteritems()))
229 229
230 230 yield (pre, sucs, flags, metadata, date, parents)
231 231
232 232 def _fm0encodeonemarker(marker):
233 233 pre, sucs, flags, metadata, date, parents = marker
234 234 if flags & usingsha256:
235 235 raise error.Abort(_('cannot handle sha256 with old obsstore format'))
236 236 metadata = dict(metadata)
237 237 time, tz = date
238 238 metadata['date'] = '%r %i' % (time, tz)
239 239 if parents is not None:
240 240 if not parents:
241 241 # mark that we explicitly recorded no parents
242 242 metadata['p0'] = ''
243 243 for i, p in enumerate(parents, 1):
244 244 metadata['p%i' % i] = node.hex(p)
245 245 metadata = _fm0encodemeta(metadata)
246 246 numsuc = len(sucs)
247 247 format = _fm0fixed + (_fm0node * numsuc)
248 248 data = [numsuc, len(metadata), flags, pre]
249 249 data.extend(sucs)
250 250 return _pack(format, *data) + metadata
251 251
252 252 def _fm0encodemeta(meta):
253 253 """Return encoded metadata string to string mapping.
254 254
255 255 Assumes no ':' in keys and no '\0' in keys or values."""
256 256 for key, value in meta.iteritems():
257 257 if ':' in key or '\0' in key:
258 258 raise ValueError("':' and '\0' are forbidden in metadata keys")
259 259 if '\0' in value:
260 260 raise ValueError("'\0' is forbidden in metadata values")
261 261 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
262 262
263 263 def _fm0decodemeta(data):
264 264 """Return string to string dictionary from encoded version."""
265 265 d = {}
266 266 for l in data.split('\0'):
267 267 if l:
268 268 key, value = l.split(':')
269 269 d[key] = value
270 270 return d
271 271
272 272 ## Parsing and writing of version "1"
273 273 #
274 274 # The header is followed by the markers. Each marker is made of:
275 275 #
276 276 # - uint32: total size of the marker (including this field)
277 277 #
278 278 # - float64: date in seconds since epoch
279 279 #
280 280 # - int16: timezone offset in minutes
281 281 #
282 282 # - uint16: a bit field. It is reserved for flags used in common
283 283 # obsolete marker operations, to avoid repeated decoding of metadata
284 284 # entries.
285 285 #
286 286 # - uint8: number of successors "N", can be zero.
287 287 #
288 288 # - uint8: number of parents "P", can be zero.
289 289 #
290 290 # 0: parents data stored but no parent,
291 291 # 1: one parent stored,
292 292 # 2: two parents stored,
293 293 # 3: no parent data stored
294 294 #
295 295 # - uint8: number of metadata entries M
296 296 #
297 297 # - 20 or 32 bytes: precursor changeset identifier.
298 298 #
299 299 # - N*(20 or 32) bytes: successors changesets identifiers.
300 300 #
301 301 # - P*(20 or 32) bytes: parents of the precursor changeset.
302 302 #
303 303 # - M*(uint8, uint8): size of all metadata entries (key and value)
304 304 #
305 305 # - remaining bytes: the metadata, each (key, value) pair after the other.
306 306 _fm1version = 1
307 307 _fm1fixed = '>IdhHBBB20s'
308 308 _fm1nodesha1 = '20s'
309 309 _fm1nodesha256 = '32s'
310 310 _fm1nodesha1size = _calcsize(_fm1nodesha1)
311 311 _fm1nodesha256size = _calcsize(_fm1nodesha256)
312 312 _fm1fsize = _calcsize(_fm1fixed)
313 313 _fm1parentnone = 3
314 314 _fm1parentshift = 14
315 315 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
316 316 _fm1metapair = 'BB'
317 317 _fm1metapairsize = _calcsize('BB')
318 318
319 319 def _fm1purereadmarkers(data, off):
320 320 # make some global constants local for performance
321 321 noneflag = _fm1parentnone
322 322 sha2flag = usingsha256
323 323 sha1size = _fm1nodesha1size
324 324 sha2size = _fm1nodesha256size
325 325 sha1fmt = _fm1nodesha1
326 326 sha2fmt = _fm1nodesha256
327 327 metasize = _fm1metapairsize
328 328 metafmt = _fm1metapair
329 329 fsize = _fm1fsize
330 330 unpack = _unpack
331 331
332 332 # Loop on markers
333 333 stop = len(data) - _fm1fsize
334 334 ufixed = struct.Struct(_fm1fixed).unpack
335 335
336 336 while off <= stop:
337 337 # read fixed part
338 338 o1 = off + fsize
339 339 t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])
340 340
341 341 if flags & sha2flag:
342 342 # FIXME: prec was read as a SHA1, needs to be amended
343 343
344 344 # read 0 or more successors
345 345 if numsuc == 1:
346 346 o2 = o1 + sha2size
347 347 sucs = (data[o1:o2],)
348 348 else:
349 349 o2 = o1 + sha2size * numsuc
350 350 sucs = unpack(sha2fmt * numsuc, data[o1:o2])
351 351
352 352 # read parents
353 353 if numpar == noneflag:
354 354 o3 = o2
355 355 parents = None
356 356 elif numpar == 1:
357 357 o3 = o2 + sha2size
358 358 parents = (data[o2:o3],)
359 359 else:
360 360 o3 = o2 + sha2size * numpar
361 361 parents = unpack(sha2fmt * numpar, data[o2:o3])
362 362 else:
363 363 # read 0 or more successors
364 364 if numsuc == 1:
365 365 o2 = o1 + sha1size
366 366 sucs = (data[o1:o2],)
367 367 else:
368 368 o2 = o1 + sha1size * numsuc
369 369 sucs = unpack(sha1fmt * numsuc, data[o1:o2])
370 370
371 371 # read parents
372 372 if numpar == noneflag:
373 373 o3 = o2
374 374 parents = None
375 375 elif numpar == 1:
376 376 o3 = o2 + sha1size
377 377 parents = (data[o2:o3],)
378 378 else:
379 379 o3 = o2 + sha1size * numpar
380 380 parents = unpack(sha1fmt * numpar, data[o2:o3])
381 381
382 382 # read metadata
383 383 off = o3 + metasize * nummeta
384 384 metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
385 385 metadata = []
386 386 for idx in xrange(0, len(metapairsize), 2):
387 387 o1 = off + metapairsize[idx]
388 388 o2 = o1 + metapairsize[idx + 1]
389 389 metadata.append((data[off:o1], data[o1:o2]))
390 390 off = o2
391 391
392 392 yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
393 393
394 394 def _fm1encodeonemarker(marker):
395 395 pre, sucs, flags, metadata, date, parents = marker
396 396 # determine node size
397 397 _fm1node = _fm1nodesha1
398 398 if flags & usingsha256:
399 399 _fm1node = _fm1nodesha256
400 400 numsuc = len(sucs)
401 401 numextranodes = numsuc
402 402 if parents is None:
403 403 numpar = _fm1parentnone
404 404 else:
405 405 numpar = len(parents)
406 406 numextranodes += numpar
407 407 formatnodes = _fm1node * numextranodes
408 408 formatmeta = _fm1metapair * len(metadata)
409 409 format = _fm1fixed + formatnodes + formatmeta
410 410 # tz is stored in minutes so we divide by 60
411 411 tz = date[1]//60
412 412 data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
413 413 data.extend(sucs)
414 414 if parents is not None:
415 415 data.extend(parents)
416 416 totalsize = _calcsize(format)
417 417 for key, value in metadata:
418 418 lk = len(key)
419 419 lv = len(value)
420 420 data.append(lk)
421 421 data.append(lv)
422 422 totalsize += lk + lv
423 423 data[0] = totalsize
424 424 data = [_pack(format, *data)]
425 425 for key, value in metadata:
426 426 data.append(key)
427 427 data.append(value)
428 428 return ''.join(data)
429 429
430 430 def _fm1readmarkers(data, off):
431 431 native = getattr(parsers, 'fm1readmarkers', None)
432 432 if not native:
433 433 return _fm1purereadmarkers(data, off)
434 434 stop = len(data) - _fm1fsize
435 435 return native(data, off, stop)
436 436
437 437 # mapping to read/write various marker formats
438 438 # <version> -> (decoder, encoder)
439 439 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
440 440 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
441 441
442 442 @util.nogc
443 443 def _readmarkers(data):
444 444 """Read and enumerate markers from raw data"""
445 445 off = 0
446 446 diskversion = _unpack('>B', data[off:off + 1])[0]
447 447 off += 1
448 448 if diskversion not in formats:
449 raise error.Abort(_('parsing obsolete marker: unknown version %r')
450 % diskversion)
449 msg = _('parsing obsolete marker: unknown version %r') % diskversion
450 raise error.UnknownVersion(msg, version=diskversion)
451 451 return diskversion, formats[diskversion][0](data, off)
452 452
453 453 def encodemarkers(markers, addheader=False, version=_fm0version):
454 454 # Kept separate from flushmarkers(), it will be reused for
455 455 # markers exchange.
456 456 encodeone = formats[version][1]
457 457 if addheader:
458 458 yield _pack('>B', version)
459 459 for marker in markers:
460 460 yield encodeone(marker)
461 461
462 462
463 463 class marker(object):
464 464 """Wrap obsolete marker raw data"""
465 465
466 466 def __init__(self, repo, data):
467 467 # the repo argument will be used to create changectx in a later version
468 468 self._repo = repo
469 469 self._data = data
470 470 self._decodedmeta = None
471 471
472 472 def __hash__(self):
473 473 return hash(self._data)
474 474
475 475 def __eq__(self, other):
476 476 if type(other) != type(self):
477 477 return False
478 478 return self._data == other._data
479 479
480 480 def precnode(self):
481 481 """Precursor changeset node identifier"""
482 482 return self._data[0]
483 483
484 484 def succnodes(self):
485 485 """List of successor changesets node identifiers"""
486 486 return self._data[1]
487 487
488 488 def parentnodes(self):
489 489 """Parents of the precursors (None if not recorded)"""
490 490 return self._data[5]
491 491
492 492 def metadata(self):
493 493 """Decoded metadata dictionary"""
494 494 return dict(self._data[3])
495 495
496 496 def date(self):
497 497 """Creation date as (unixtime, offset)"""
498 498 return self._data[4]
499 499
500 500 def flags(self):
501 501 """The flags field of the marker"""
502 502 return self._data[2]
503 503
504 504 @util.nogc
505 505 def _addsuccessors(successors, markers):
506 506 for mark in markers:
507 507 successors.setdefault(mark[0], set()).add(mark)
508 508
509 509 @util.nogc
510 510 def _addprecursors(precursors, markers):
511 511 for mark in markers:
512 512 for suc in mark[1]:
513 513 precursors.setdefault(suc, set()).add(mark)
514 514
515 515 @util.nogc
516 516 def _addchildren(children, markers):
517 517 for mark in markers:
518 518 parents = mark[5]
519 519 if parents is not None:
520 520 for p in parents:
521 521 children.setdefault(p, set()).add(mark)
522 522
523 523 def _checkinvalidmarkers(markers):
524 524 """search for marker with invalid data and raise error if needed
525 525
526 526 Exists as a separate function to allow more subtle handling in the
527 527 evolve extension.
528 528 """
529 529 for mark in markers:
530 530 if node.nullid in mark[1]:
531 531 raise error.Abort(_('bad obsolescence marker detected: '
532 532 'invalid successors nullid'))
533 533
534 534 class obsstore(object):
535 535 """Store obsolete markers
536 536
537 537 Markers can be accessed with three mappings:
538 538 - precursors[x] -> set(markers on precursors edges of x)
539 539 - successors[x] -> set(markers on successors edges of x)
540 540 - children[x] -> set(markers on precursors edges of children(x))
541 541 """
542 542
543 543 fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
544 544 # prec: nodeid, precursor changeset
545 545 # succs: tuple of nodeid, successor changesets (0-N length)
546 546 # flag: integer, flag field carrying modifier for the markers (see doc)
547 547 # meta: binary blob, encoded metadata dictionary
548 548 # date: (float, int) tuple, date of marker creation
549 549 # parents: (tuple of nodeid) or None, parents of precursors
550 550 # None is used when no data has been recorded
551 551
552 552 def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
553 553 # caches for various obsolescence-related computations
554 554 self.caches = {}
555 555 self.svfs = svfs
556 556 self._version = defaultformat
557 557 self._readonly = readonly
558 558
559 559 def __iter__(self):
560 560 return iter(self._all)
561 561
562 562 def __len__(self):
563 563 return len(self._all)
564 564
565 565 def __nonzero__(self):
566 566 if not self._cached('_all'):
567 567 try:
568 568 return self.svfs.stat('obsstore').st_size > 1
569 569 except OSError as inst:
570 570 if inst.errno != errno.ENOENT:
571 571 raise
572 572 # just build an empty _all list if no obsstore exists, which
573 573 # avoids further stat() syscalls
574 574 pass
575 575 return bool(self._all)
576 576
577 577 __bool__ = __nonzero__
578 578
579 579 @property
580 580 def readonly(self):
581 581 """True if marker creation is disabled
582 582
583 583 Remove me in the future when obsolete markers are always on."""
584 584 return self._readonly
585 585
586 586 def create(self, transaction, prec, succs=(), flag=0, parents=None,
587 587 date=None, metadata=None, ui=None):
588 588 """obsolete: add a new obsolete marker
589 589
590 590 * ensure it is hashable
591 591 * check mandatory metadata
592 592 * encode metadata
593 593
594 594 If you are a human writing code that creates markers, you want to use
595 595 the `createmarkers` function in this module instead.
596 596
597 597 Return True if a new marker has been added, False if the marker
598 598 already existed (no op).
599 599 """
600 600 if metadata is None:
601 601 metadata = {}
602 602 if date is None:
603 603 if 'date' in metadata:
604 604 # as a courtesy for out-of-tree extensions
605 605 date = util.parsedate(metadata.pop('date'))
606 606 elif ui is not None:
607 607 date = ui.configdate('devel', 'default-date')
608 608 if date is None:
609 609 date = util.makedate()
610 610 else:
611 611 date = util.makedate()
612 612 if len(prec) != 20:
613 613 raise ValueError(prec)
614 614 for succ in succs:
615 615 if len(succ) != 20:
616 616 raise ValueError(succ)
617 617 if prec in succs:
618 618 raise ValueError(_('in-marker cycle with %s') % node.hex(prec))
619 619
620 620 metadata = tuple(sorted(metadata.iteritems()))
621 621
622 622 marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
623 623 return bool(self.add(transaction, [marker]))
624 624
625 625 def add(self, transaction, markers):
626 626 """Add new markers to the store
627 627
628 628 Takes care of filtering duplicates.
629 629 Returns the number of new markers."""
630 630 if self._readonly:
631 631 raise error.Abort(_('creating obsolete markers is not enabled on '
632 632 'this repo'))
633 633 known = set(self._all)
634 634 new = []
635 635 for m in markers:
636 636 if m not in known:
637 637 known.add(m)
638 638 new.append(m)
639 639 if new:
640 640 f = self.svfs('obsstore', 'ab')
641 641 try:
642 642 offset = f.tell()
643 643 transaction.add('obsstore', offset)
644 644 # offset == 0: new file - add the version header
645 645 for bytes in encodemarkers(new, offset == 0, self._version):
646 646 f.write(bytes)
647 647 finally:
648 648 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
649 649 # call 'filecacheentry.refresh()' here
650 650 f.close()
651 651 self._addmarkers(new)
652 652 # new markers *may* have changed several sets. invalidate the cache.
653 653 self.caches.clear()
654 654 # records the number of new markers for the transaction hooks
655 655 previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
656 656 transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
657 657 return len(new)
658 658
659 659 def mergemarkers(self, transaction, data):
660 660 """merge a binary stream of markers inside the obsstore
661 661
662 662 Returns the number of new markers added."""
663 663 version, markers = _readmarkers(data)
664 664 return self.add(transaction, markers)
665 665
666 666 @propertycache
667 667 def _all(self):
668 668 data = self.svfs.tryread('obsstore')
669 669 if not data:
670 670 return []
671 671 self._version, markers = _readmarkers(data)
672 672 markers = list(markers)
673 673 _checkinvalidmarkers(markers)
674 674 return markers
675 675
676 676 @propertycache
677 677 def successors(self):
678 678 successors = {}
679 679 _addsuccessors(successors, self._all)
680 680 return successors
681 681
682 682 @propertycache
683 683 def precursors(self):
684 684 precursors = {}
685 685 _addprecursors(precursors, self._all)
686 686 return precursors
687 687
688 688 @propertycache
689 689 def children(self):
690 690 children = {}
691 691 _addchildren(children, self._all)
692 692 return children
693 693
694 694 def _cached(self, attr):
695 695 return attr in self.__dict__
696 696
697 697 def _addmarkers(self, markers):
698 698 markers = list(markers) # to allow repeated iteration
699 699 self._all.extend(markers)
700 700 if self._cached('successors'):
701 701 _addsuccessors(self.successors, markers)
702 702 if self._cached('precursors'):
703 703 _addprecursors(self.precursors, markers)
704 704 if self._cached('children'):
705 705 _addchildren(self.children, markers)
706 706 _checkinvalidmarkers(markers)
707 707
708 708 def relevantmarkers(self, nodes):
709 709 """return a set of all obsolescence markers relevant to a set of nodes.
710 710
711 711 "relevant" to a set of nodes mean:
712 712
713 713 - markers that use this changeset as a successor
714 714 - prune markers of direct children of this changeset
715 715 - recursive application of the two rules on precursors of these markers
716 716
717 717 It is a set so you cannot rely on order."""
718 718
719 719 pendingnodes = set(nodes)
720 720 seenmarkers = set()
721 721 seennodes = set(pendingnodes)
722 722 precursorsmarkers = self.precursors
723 723 succsmarkers = self.successors
724 724 children = self.children
725 725 while pendingnodes:
726 726 direct = set()
727 727 for current in pendingnodes:
728 728 direct.update(precursorsmarkers.get(current, ()))
729 729 pruned = [m for m in children.get(current, ()) if not m[1]]
730 730 direct.update(pruned)
731 731 pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
732 732 direct.update(pruned)
733 733 direct -= seenmarkers
734 734 pendingnodes = set([m[0] for m in direct])
735 735 seenmarkers |= direct
736 736 pendingnodes -= seennodes
737 737 seennodes |= pendingnodes
738 738 return seenmarkers
739 739
740 740 def commonversion(versions):
741 741 """Return the newest version listed in both versions and our local formats.
742 742
743 743 Returns None if no common version exists.
744 744 """
745 745 versions.sort(reverse=True)
746 746 # search for the highest version known on both sides
747 747 for v in versions:
748 748 if v in formats:
749 749 return v
750 750 return None
751 751
752 752 # arbitrarily picked to fit into the 8K limit from the HTTP server
753 753 # you have to take into account:
754 754 # - the version header
755 755 # - the base85 encoding
756 756 _maxpayload = 5300
757 757
758 758 def _pushkeyescape(markers):
759 759 """encode markers into a dict suitable for pushkey exchange
760 760
761 761 - binary data is base85 encoded
762 762 - split into chunks smaller than 5300 bytes"""
763 763 keys = {}
764 764 parts = []
765 765 currentlen = _maxpayload * 2 # ensure we create a new part
766 766 for marker in markers:
767 767 nextdata = _fm0encodeonemarker(marker)
768 768 if (len(nextdata) + currentlen > _maxpayload):
769 769 currentpart = []
770 770 currentlen = 0
771 771 parts.append(currentpart)
772 772 currentpart.append(nextdata)
773 773 currentlen += len(nextdata)
774 774 for idx, part in enumerate(reversed(parts)):
775 775 data = ''.join([_pack('>B', _fm0version)] + part)
776 776 keys['dump%i' % idx] = util.b85encode(data)
777 777 return keys
778 778
779 779 def listmarkers(repo):
780 780 """List markers over pushkey"""
781 781 if not repo.obsstore:
782 782 return {}
783 783 return _pushkeyescape(sorted(repo.obsstore))
784 784
785 785 def pushmarker(repo, key, old, new):
786 786 """Push markers over pushkey"""
787 787 if not key.startswith('dump'):
788 788 repo.ui.warn(_('unknown key: %r') % key)
789 789 return 0
790 790 if old:
791 791 repo.ui.warn(_('unexpected old value for %r') % key)
792 792 return 0
793 793 data = util.b85decode(new)
794 794 lock = repo.lock()
795 795 try:
796 796 tr = repo.transaction('pushkey: obsolete markers')
797 797 try:
798 798 repo.obsstore.mergemarkers(tr, data)
799 799 repo.invalidatevolatilesets()
800 800 tr.close()
801 801 return 1
802 802 finally:
803 803 tr.release()
804 804 finally:
805 805 lock.release()
806 806
807 807 def getmarkers(repo, nodes=None):
808 808 """returns markers known in a repository
809 809
810 810 If <nodes> is specified, only markers "relevant" to those nodes are
811 811 returned"""
812 812 if nodes is None:
813 813 rawmarkers = repo.obsstore
814 814 else:
815 815 rawmarkers = repo.obsstore.relevantmarkers(nodes)
816 816
817 817 for markerdata in rawmarkers:
818 818 yield marker(repo, markerdata)
819 819
820 820 def relevantmarkers(repo, node):
821 821 """all obsolete markers relevant to some revision"""
822 822 for markerdata in repo.obsstore.relevantmarkers(node):
823 823 yield marker(repo, markerdata)
824 824
825 825
826 826 def precursormarkers(ctx):
827 827 """obsolete marker marking this changeset as a successors"""
828 828 for data in ctx.repo().obsstore.precursors.get(ctx.node(), ()):
829 829 yield marker(ctx.repo(), data)
830 830
831 831 def successormarkers(ctx):
832 832 """obsolete marker making this changeset obsolete"""
833 833 for data in ctx.repo().obsstore.successors.get(ctx.node(), ()):
834 834 yield marker(ctx.repo(), data)
835 835
836 836 def allsuccessors(obsstore, nodes, ignoreflags=0):
837 837 """Yield node for every successor of <nodes>.
838 838
839 839 Some successors may be unknown locally.
840 840
841 841 This is a linear yield unsuited to detecting split changesets. It includes
842 842 initial nodes too."""
843 843 remaining = set(nodes)
844 844 seen = set(remaining)
845 845 while remaining:
846 846 current = remaining.pop()
847 847 yield current
848 848 for mark in obsstore.successors.get(current, ()):
849 849 # ignore marker flagged with specified flag
850 850 if mark[2] & ignoreflags:
851 851 continue
852 852 for suc in mark[1]:
853 853 if suc not in seen:
854 854 seen.add(suc)
855 855 remaining.add(suc)
856 856
857 857 def allprecursors(obsstore, nodes, ignoreflags=0):
858 858 """Yield node for every precursors of <nodes>.
859 859
860 860 Some precursors may be unknown locally.
861 861
862 862 This is a linear yield unsuited to detecting folded changesets. It includes
863 863 initial nodes too."""
864 864
865 865 remaining = set(nodes)
866 866 seen = set(remaining)
867 867 while remaining:
868 868 current = remaining.pop()
869 869 yield current
870 870 for mark in obsstore.precursors.get(current, ()):
871 871 # ignore marker flagged with specified flag
872 872 if mark[2] & ignoreflags:
873 873 continue
874 874 suc = mark[0]
875 875 if suc not in seen:
876 876 seen.add(suc)
877 877 remaining.add(suc)
878 878
879 879 def foreground(repo, nodes):
880 880 """return all nodes in the "foreground" of other node
881 881
882 882 The foreground of a revision is anything reachable using parent -> children
883 883 or precursor -> successor relation. It is very similar to "descendant" but
884 884 augmented with obsolescence information.
885 885
886 886 Beware that obsolescence cycles may arise in complex situations.
887 887 """
888 888 repo = repo.unfiltered()
889 889 foreground = set(repo.set('%ln::', nodes))
890 890 if repo.obsstore:
891 891 # We only need this complicated logic if there is obsolescence
892 892 # XXX will probably deserve an optimised revset.
893 893 nm = repo.changelog.nodemap
894 894 plen = -1
895 895 # compute the whole set of successors or descendants
896 896 while len(foreground) != plen:
897 897 plen = len(foreground)
898 898 succs = set(c.node() for c in foreground)
899 899 mutable = [c.node() for c in foreground if c.mutable()]
900 900 succs.update(allsuccessors(repo.obsstore, mutable))
901 901 known = (n for n in succs if n in nm)
902 902 foreground = set(repo.set('%ln::', known))
903 903 return set(c.node() for c in foreground)
904 904
905 905
906 906 def successorssets(repo, initialnode, cache=None):
907 907 """Return set of all latest successors of initial nodes
908 908
909 909 The successors set of a changeset A are the group of revisions that succeed
910 910 A. It succeeds A as a consistent whole, each revision being only a partial
911 911 replacement. The successors set contains non-obsolete changesets only.
912 912
913 913 This function returns the full list of successor sets which is why it
914 914 returns a list of tuples and not just a single tuple. Each tuple is a valid
915 915 successors set. Note that (A,) may be a valid successors set for changeset A
916 916 (see below).
917 917
918 918 In most cases, a changeset A will have a single element (e.g. the changeset
919 919 A is replaced by A') in its successors set. Though, it is also common for a
920 920 changeset A to have no elements in its successor set (e.g. the changeset
921 921 has been pruned). Therefore, the returned list of successors sets will be
922 922 [(A',)] or [], respectively.
923 923
924 924 When a changeset A is split into A' and B', however, it will result in a
925 925 successors set containing more than a single element, i.e. [(A',B')].
926 926 Divergent changesets will result in multiple successors sets, i.e. [(A',),
927 927 (A'')].
928 928
929 929 If a changeset A is not obsolete, then it will conceptually have no
930 930 successors set. To distinguish this from a pruned changeset, the successor
931 931 set will contain itself only, i.e. [(A,)].
932 932
933 933 Finally, successors unknown locally are considered to be pruned (obsoleted
934 934 without any successors).
935 935
936 936 The optional `cache` parameter is a dictionary that may contain precomputed
937 937 successors sets. It is meant to reuse the computation of a previous call to
938 938 `successorssets` when multiple calls are made at the same time. The cache
939 939 dictionary is updated in place. The caller is responsible for its life
940 940 span. Code that makes multiple calls to `successorssets` *must* use this
941 941 cache mechanism or suffer terrible performance.
942 942 """
943 943
944 944 succmarkers = repo.obsstore.successors
945 945
946 946 # Stack of nodes we search successors sets for
947 947 toproceed = [initialnode]
948 948 # set version of above list for fast loop detection
949 949 # element added to "toproceed" must be added here
950 950 stackedset = set(toproceed)
951 951 if cache is None:
952 952 cache = {}
953 953
954 954 # This while loop is the flattened version of a recursive search for
955 955 # successors sets
956 956 #
957 957 # def successorssets(x):
958 958 # successors = directsuccessors(x)
959 959 # ss = [[]]
960 960 # for succ in directsuccessors(x):
961 961 # # product as in itertools cartesian product
962 962 # ss = product(ss, successorssets(succ))
963 963 # return ss
964 964 #
965 965 # But we can not use plain recursive calls here:
966 966 # - that would blow the python call stack
967 967 # - obsolescence markers may have cycles, we need to handle them.
968 968 #
969 969 # The `toproceed` list acts as our call stack. Every node we search
970 970 # successors set for are stacked there.
971 971 #
972 972 # The `stackedset` is the set version of this stack, used to check if a
973 973 # node is already stacked. This check is used to detect cycles and prevent
974 974 # infinite loops.
975 975 #
976 976 # successors sets of all nodes are stored in the `cache` dictionary.
977 977 #
978 978 # After this while loop ends we use the cache to return the successors sets
979 979 # for the node requested by the caller.
980 980 while toproceed:
981 981 # Every iteration tries to compute the successors sets of the topmost
982 982 # node of the stack: CURRENT.
983 983 #
984 984 # There are four possible outcomes:
985 985 #
986 986 # 1) We already know the successors sets of CURRENT:
987 987 # -> mission accomplished, pop it from the stack.
988 988 # 2) Node is not obsolete:
989 989 # -> the node is its own successors sets. Add it to the cache.
990 990 # 3) We do not know successors set of direct successors of CURRENT:
991 991 # -> We add those successors to the stack.
992 992 # 4) We know successors sets of all direct successors of CURRENT:
993 993 # -> We can compute CURRENT successors set and add it to the
994 994 # cache.
995 995 #
996 996 current = toproceed[-1]
997 997 if current in cache:
998 998 # case (1): We already know the successors sets
999 999 stackedset.remove(toproceed.pop())
1000 1000 elif current not in succmarkers:
1001 1001 # case (2): The node is not obsolete.
1002 1002 if current in repo:
1003 1003 # We have a valid last successor.
1004 1004 cache[current] = [(current,)]
1005 1005 else:
1006 1006 # Final obsolete version is unknown locally.
1007 1007 # Do not count that as a valid successor
1008 1008 cache[current] = []
1009 1009 else:
1010 1010 # cases (3) and (4)
1011 1011 #
1012 1012 # We proceed in two phases. Phase 1 aims to distinguish case (3)
1013 1013 # from case (4):
1014 1014 #
1015 1015 # For each direct successor of CURRENT, we check whether its
1016 1016 # successors sets are known. If they are not, we stack the
1017 1017 # unknown node and proceed to the next iteration of the while
1018 1018 # loop. (case 3)
1019 1019 #
1020 1020 # During this step, we may detect obsolescence cycles: a node
1021 1021 # with unknown successors sets but already in the call stack.
1022 1022 # In such a situation, we arbitrarily set the successors sets of
1023 1023 # the node to nothing (node pruned) to break the cycle.
1024 1024 #
1025 1025 # If no break was encountered we proceed to phase 2.
1026 1026 #
1027 1027 # Phase 2 computes successors sets of CURRENT (case 4); see details
1028 1028 # in phase 2 itself.
1029 1029 #
1030 1030 # Note the two levels of iteration in each phase.
1031 1031 # - The first one handles obsolescence markers using CURRENT as
1032 1032 # precursor (successors markers of CURRENT).
1033 1033 #
1034 1034 # Having multiple entries here means divergence.
1035 1035 #
1036 1036 # - The second one handles successors defined in each marker.
1037 1037 #
1038 1038 # Having none means pruned node, multiple successors means split,
1039 1039 # a single successor is a standard replacement.
1040 1040 #
1041 1041 for mark in sorted(succmarkers[current]):
1042 1042 for suc in mark[1]:
1043 1043 if suc not in cache:
1044 1044 if suc in stackedset:
1045 1045 # cycle breaking
1046 1046 cache[suc] = []
1047 1047 else:
1048 1048 # case (3) If we have not computed successors sets
1049 1049 # of one of those successors we add it to the
1050 1050 # `toproceed` stack and stop all work for this
1051 1051 # iteration.
1052 1052 toproceed.append(suc)
1053 1053 stackedset.add(suc)
1054 1054 break
1055 1055 else:
1056 1056 continue
1057 1057 break
1058 1058 else:
1059 1059 # case (4): we know all successors sets of all direct
1060 1060 # successors
1061 1061 #
1062 1062 # Successors set contributed by each marker depends on the
1063 1063 # successors sets of all its "successors" nodes.
1064 1064 #
1065 1065 # Each different marker is a divergence in the obsolescence
1066 1066 # history. It contributes successors sets distinct from other
1067 1067 # markers.
1068 1068 #
1069 1069 # Within a marker, a successor may have divergent successors
1070 1070 # sets. In such a case, the marker will contribute multiple
1071 1071 # divergent successors sets. If multiple successors have
1072 1072 # divergent successors sets, a Cartesian product is used.
1073 1073 #
1074 1074 # At the end we post-process successors sets to remove
1075 1075 # duplicated entries and successors sets that are strict subsets of
1076 1076 # another one.
1077 1077 succssets = []
1078 1078 for mark in sorted(succmarkers[current]):
1079 1079 # successors sets contributed by this marker
1080 1080 markss = [[]]
1081 1081 for suc in mark[1]:
1082 1082 # cartesian product with previous successors
1083 1083 productresult = []
1084 1084 for prefix in markss:
1085 1085 for suffix in cache[suc]:
1086 1086 newss = list(prefix)
1087 1087 for part in suffix:
1088 1088 # do not duplicate entries in successors set,
1089 1089 # first entry wins.
1090 1090 if part not in newss:
1091 1091 newss.append(part)
1092 1092 productresult.append(newss)
1093 1093 markss = productresult
1094 1094 succssets.extend(markss)
1095 1095 # remove duplicates and subsets
1096 1096 seen = []
1097 1097 final = []
1098 1098 candidate = sorted(((set(s), s) for s in succssets if s),
1099 1099 key=lambda x: len(x[1]), reverse=True)
1100 1100 for setversion, listversion in candidate:
1101 1101 for seenset in seen:
1102 1102 if setversion.issubset(seenset):
1103 1103 break
1104 1104 else:
1105 1105 final.append(listversion)
1106 1106 seen.append(setversion)
1107 1107 final.reverse() # put small successors set first
1108 1108 cache[current] = final
1109 1109 return cache[initialnode]
1110 1110
1111 1111 # mapping of 'set-name' -> <function to compute this set>
1112 1112 cachefuncs = {}
1113 1113 def cachefor(name):
1114 1114 """Decorator to register a function as computing the cache for a set"""
1115 1115 def decorator(func):
1116 1116 assert name not in cachefuncs
1117 1117 cachefuncs[name] = func
1118 1118 return func
1119 1119 return decorator
1120 1120
1121 1121 def getrevs(repo, name):
1122 1122 """Return the set of revision that belong to the <name> set
1123 1123
1124 1124 Such access may compute the set and cache it for future use"""
1125 1125 repo = repo.unfiltered()
1126 1126 if not repo.obsstore:
1127 1127 return frozenset()
1128 1128 if name not in repo.obsstore.caches:
1129 1129 repo.obsstore.caches[name] = cachefuncs[name](repo)
1130 1130 return repo.obsstore.caches[name]
1131 1131
1132 1132 # To keep things simple we need to invalidate the obsolescence cache when:
1133 1133 #
1134 1134 # - a new changeset is added
1135 1135 # - public phase is changed
1136 1136 # - obsolescence markers are added
1137 1137 # - strip is used on a repo
1138 1138 def clearobscaches(repo):
1139 1139 """Remove all obsolescence related cache from a repo
1140 1140
1141 1141 This removes all caches in the obsstore if the obsstore already exists on
1142 1142 the repo.
1143 1143
1144 1144 (We could be smarter here given the exact event that triggers the cache
1145 1145 clearing)"""
1146 1146 # only clear caches if there is obsstore data in this repo
1147 1147 if 'obsstore' in repo._filecache:
1148 1148 repo.obsstore.caches.clear()
1149 1149
1150 1150 @cachefor('obsolete')
1151 1151 def _computeobsoleteset(repo):
1152 1152 """the set of obsolete revisions"""
1153 1153 obs = set()
1154 1154 getnode = repo.changelog.node
1155 1155 notpublic = repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
1156 1156 for r in notpublic:
1157 1157 if getnode(r) in repo.obsstore.successors:
1158 1158 obs.add(r)
1159 1159 return obs
1160 1160
1161 1161 @cachefor('unstable')
1162 1162 def _computeunstableset(repo):
1163 1163 """the set of non obsolete revisions with obsolete parents"""
1164 1164 revs = [(ctx.rev(), ctx) for ctx in
1165 1165 repo.set('(not public()) and (not obsolete())')]
1166 1166 revs.sort(key=lambda x:x[0])
1167 1167 unstable = set()
1168 1168 for rev, ctx in revs:
1169 1169 # A rev is unstable if one of its parents is obsolete or unstable;
1170 1170 # this works since we traverse in growing rev order
1171 1171 if any((x.obsolete() or (x.rev() in unstable))
1172 1172 for x in ctx.parents()):
1173 1173 unstable.add(rev)
1174 1174 return unstable
1175 1175
1176 1176 @cachefor('suspended')
1177 1177 def _computesuspendedset(repo):
1178 1178 """the set of obsolete parents with non obsolete descendants"""
1179 1179 suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
1180 1180 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
1181 1181
1182 1182 @cachefor('extinct')
1183 1183 def _computeextinctset(repo):
1184 1184 """the set of obsolete parents without non obsolete descendants"""
1185 1185 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
1186 1186
1187 1187
1188 1188 @cachefor('bumped')
1189 1189 def _computebumpedset(repo):
1190 1190 """the set of revs trying to obsolete public revisions"""
1191 1191 bumped = set()
1192 1192 # util function (avoid attribute lookup in the loop)
1193 1193 phase = repo._phasecache.phase # would be faster to grab the full list
1194 1194 public = phases.public
1195 1195 cl = repo.changelog
1196 1196 torev = cl.nodemap.get
1197 1197 for ctx in repo.set('(not public()) and (not obsolete())'):
1198 1198 rev = ctx.rev()
1199 1199 # We only evaluate mutable, non-obsolete revisions
1200 1200 node = ctx.node()
1201 1201 # (future) A cache of precursors may be worthwhile if splits are very common
1202 1202 for pnode in allprecursors(repo.obsstore, [node],
1203 1203 ignoreflags=bumpedfix):
1204 1204 prev = torev(pnode) # unfiltered! but so is phasecache
1205 1205 if (prev is not None) and (phase(repo, prev) <= public):
1206 1206 # we have a public precursor
1207 1207 bumped.add(rev)
1208 1208 break # Next draft!
1209 1209 return bumped
1210 1210
1211 1211 @cachefor('divergent')
1212 1212 def _computedivergentset(repo):
1213 1213 """the set of rev that compete to be the final successors of some revision.
1214 1214 """
1215 1215 divergent = set()
1216 1216 obsstore = repo.obsstore
1217 1217 newermap = {}
1218 1218 for ctx in repo.set('(not public()) - obsolete()'):
1219 1219 mark = obsstore.precursors.get(ctx.node(), ())
1220 1220 toprocess = set(mark)
1221 1221 seen = set()
1222 1222 while toprocess:
1223 1223 prec = toprocess.pop()[0]
1224 1224 if prec in seen:
1225 1225 continue # emergency cycle hanging prevention
1226 1226 seen.add(prec)
1227 1227 if prec not in newermap:
1228 1228 successorssets(repo, prec, newermap)
1229 1229 newer = [n for n in newermap[prec] if n]
1230 1230 if len(newer) > 1:
1231 1231 divergent.add(ctx.rev())
1232 1232 break
1233 1233 toprocess.update(obsstore.precursors.get(prec, ()))
1234 1234 return divergent
1235 1235
1236 1236
1237 1237 def createmarkers(repo, relations, flag=0, date=None, metadata=None,
1238 1238 operation=None):
1239 1239 """Add obsolete markers between changesets in a repo
1240 1240
1241 1241 <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
1242 1242 tuples. `old` and `news` are changectx objects. metadata is an optional
1243 1243 dictionary with metadata for this marker only; it is merged with the
1244 1244 global metadata specified through the `metadata` argument of this function.
1245 1245
1246 1246 Trying to obsolete a public changeset will raise an exception.
1247 1247
1248 1248 The current user and date are used unless specified otherwise in the
1249 1249 metadata attribute.
1250 1250
1251 1251 This function operates within a transaction of its own, but does
1252 1252 not take any lock on the repo.
1253 1253 """
1254 1254 # prepare metadata
1255 1255 if metadata is None:
1256 1256 metadata = {}
1257 1257 if 'user' not in metadata:
1258 1258 metadata['user'] = repo.ui.username()
1259 1259 useoperation = repo.ui.configbool('experimental',
1260 1260 'evolution.track-operation',
1261 1261 False)
1262 1262 if useoperation and operation:
1263 1263 metadata['operation'] = operation
1264 1264 tr = repo.transaction('add-obsolescence-marker')
1265 1265 try:
1266 1266 markerargs = []
1267 1267 for rel in relations:
1268 1268 prec = rel[0]
1269 1269 sucs = rel[1]
1270 1270 localmetadata = metadata.copy()
1271 1271 if 2 < len(rel):
1272 1272 localmetadata.update(rel[2])
1273 1273
1274 1274 if not prec.mutable():
1275 1275 raise error.Abort(_("cannot obsolete public changeset: %s")
1276 1276 % prec,
1277 1277 hint="see 'hg help phases' for details")
1278 1278 nprec = prec.node()
1279 1279 nsucs = tuple(s.node() for s in sucs)
1280 1280 npare = None
1281 1281 if not nsucs:
1282 1282 npare = tuple(p.node() for p in prec.parents())
1283 1283 if nprec in nsucs:
1284 1284 raise error.Abort(_("changeset %s cannot obsolete itself")
1285 1285 % prec)
1286 1286
1287 1287 # Creating the marker causes the hidden cache to become invalid,
1288 1288 # which causes recomputation when we ask for prec.parents() above,
1289 1289 # resulting in n^2 behavior. So let's prepare all of the args
1290 1290 # first, then create the markers.
1291 1291 markerargs.append((nprec, nsucs, npare, localmetadata))
1292 1292
1293 1293 for args in markerargs:
1294 1294 nprec, nsucs, npare, localmetadata = args
1295 1295 repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
1296 1296 date=date, metadata=localmetadata,
1297 1297 ui=repo.ui)
1298 1298 repo.filteredrevcache.clear()
1299 1299 tr.close()
1300 1300 finally:
1301 1301 tr.release()
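For context on how version numbers flow through this module: `commonversion` picks the newest marker format shared between a peer's advertised list and the local `formats` table, and `encodemarkers` writes the matching one-byte header that `_readmarkers` checks before raising the new `error.UnknownVersion`. A small round-trip sketch, assuming an illustrative `remoteversions` list:

    from mercurial import obsolete

    # Versions a hypothetical peer might advertise; obsolete.formats maps
    # each locally supported version to its (decoder, encoder) pair.
    remoteversions = [0, 1, 2]
    version = obsolete.commonversion(remoteversions)
    if version is None:
        print('no common obsolescence marker format')
    else:
        # encodemarkers yields the version header, then one blob per marker;
        # _readmarkers accepts the result because the header byte is known.
        data = ''.join(obsolete.encodemarkers([], addheader=True,
                                              version=version))
        assert obsolete._readmarkers(data)[0] == version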