obsolete: move the 'isenabled' function at the top of the file...
marmoute
r32333:566cfe9c default
@@ -1,1287 +1,1287 @@
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "precursor" and possible
replacements are called "successors". Markers that use changeset X as
a precursor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "precursor markers of Y" because they hold
information about the precursors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

  (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. The marker format depends on the
version. See the comment associated with each format for details.

"""
from __future__ import absolute_import

import errno
import struct

from .i18n import _
from . import (
    error,
    node,
    parsers,
    phases,
    util,
)

_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# Options for obsolescence
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'

def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    result = set(repo.ui.configlist('experimental', 'evolution'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't
    # been set but _enabled is true.
    if len(result) == 0 and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        createmarkersopt not in result):
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return option in result
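
# Usage sketch (illustrative, not part of the original file): callers gate
# marker creation on the relevant option, e.g.
#
#     if isenabled(repo, createmarkersopt):
#         repo.obsstore.create(tr, prec, succs)
#
# where 'tr', 'prec' and 'succs' are assumed to be a running transaction, a
# precursor node and a tuple of successor nodes.
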
### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it is a successor of a public changeset
#
#     o A' (bumped)
#     |`:
#     | o A
#     |/
#     o Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
#     o Ad
#     |`:
#     | x A'
#     |'|
#     o | A
#     |/
#     o Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker, <A', (Ad,)>.
# This flag means that the successors express the changes between the public
# and bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
usingsha256 = 2

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)
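
# Illustration (assumed values, not from the original file): the fixed part
# above ('>BIB20s') is 26 bytes. A pruning marker for a node 'n' * 20 with no
# successors and no metadata would pack as:
#
#     _pack(_fm0fixed, 0, 0, 0, 'n' * 20)    # numsuc=0, mdsize=0, flags=0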

def _fm0readmarkers(data, off):
    # Loop on markers
    l = len(data)
    while off + _fm0fsize <= l:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)

def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents, 1):
            metadata['p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata key")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata value")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            key, value = l.split(':')
            d[key] = value
    return d
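
# Round-trip sketch (values assumed, doctest-style illustration only):
#
#     >>> _fm0decodemeta(_fm0encodemeta({'user': 'alice'}))
#     {'user': 'alice'}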

## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: precursor changeset identifier.
#
# - N*(20 or 32) bytes: successor changeset identifiers.
#
# - P*(20 or 32) bytes: parents of the precursor changeset.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize('BB')

def _fm1purereadmarkers(data, off):
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    stop = len(data) - _fm1fsize
    ufixed = struct.Struct(_fm1fixed).unpack

    while off <= stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)

def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)

def _fm1readmarkers(data, off):
    native = getattr(parsers, 'fm1readmarkers', None)
    if not native:
        return _fm1purereadmarkers(data, off)
    stop = len(data) - _fm1fsize
    return native(data, off, stop)

# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}

@util.nogc
def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    off = 0
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion not in formats:
        raise error.Abort(_('parsing obsolete marker: unknown version %r')
                          % diskversion)
    return diskversion, formats[diskversion][0](data, off)

def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield _pack('>B', version)
    for marker in markers:
        yield encodeone(marker)
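
# Round-trip sketch (marker tuple values assumed for illustration):
#
#     m = ('n' * 20, (), 0, (), (0.0, 0), None)
#     data = ''.join(encodemarkers([m], addheader=True))
#     version, markers = _readmarkers(data)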


class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the precursors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]
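
# Example sketch (hypothetical 'repo' variable): iterating wrapped markers via
# the getmarkers() helper defined later in this module:
#
#     for m in getmarkers(repo):
#         m.precnode(), m.succnodes(), m.metadata().get('user')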

@util.nogc
def _addsuccessors(successors, markers):
    for mark in markers:
        successors.setdefault(mark[0], set()).add(mark)

@util.nogc
def _addprecursors(precursors, markers):
    for mark in markers:
        for suc in mark[1]:
            precursors.setdefault(suc, set()).add(mark)

@util.nogc
def _addchildren(children, markers):
    for mark in markers:
        parents = mark[5]
        if parents is not None:
            for p in parents:
                children.setdefault(p, set()).add(mark)

def _checkinvalidmarkers(markers):
    """search for markers with invalid data and raise error if needed

    Exists as a separate function to allow the evolve extension a more
    subtle handling.
    """
    for mark in markers:
        if node.nullid in mark[1]:
            raise error.Abort(_('bad obsolescence marker detected: '
                                'invalid successors nullid'))

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x]   -> set(markers on precursors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec:    nodeid, precursor changesets
    # succs:   tuple of nodeid, successor changesets (0-N length)
    # flag:    integer, flag field carrying modifier for the markers (see doc)
    # meta:    binary blob, encoded metadata dictionary
    # date:    (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    #          None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related data
        self.caches = {}
        self.svfs = svfs
        self._version = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        if not self._cached('_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
                pass
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating markers you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker has been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            else:
                date = util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicates.
        Return the number of new markers."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in encodemarkers(new, offset == 0, self._version):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._addmarkers(new)
            # new markers *may* have changed several sets. invalidate the
            # cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _all(self):
        data = self.svfs.tryread('obsstore')
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def precursors(self):
        precursors = {}
        _addprecursors(precursors, self._all)
        return precursors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        return attr in self.__dict__

    def _addmarkers(self, markers):
        markers = list(markers) # to allow repeated iteration
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('precursors'):
            _addprecursors(self.precursors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use this changeset as successor
        - prune markers of direct children on this changeset
        - recursive application of the two rules on precursors of these markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers

def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for the highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None
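
# Usage sketch (version lists assumed): with local formats {0, 1},
# commonversion([0, 2]) returns 0 and commonversion([2, 3]) returns None.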

# arbitrarily picked to fit into the 8K limit from HTTP servers
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2 # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(data)
    return keys
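
# Resulting shape sketch (key names from the code above, payload assumed):
#
#     {'dump0': <base85 chunk>, 'dump1': <base85 chunk>, ...}
#
# each chunk's raw payload is kept roughly under _maxpayload bytes before
# base85 encoding.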

def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(sorted(repo.obsstore))

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = util.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()

def getmarkers(repo, nodes=None):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    if nodes is None:
        rawmarkers = repo.obsstore
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for markerdata in rawmarkers:
        yield marker(repo, markerdata)

def relevantmarkers(repo, node):
    """all obsolete markers relevant to some revision"""
    for markerdata in repo.obsstore.relevantmarkers(node):
        yield marker(repo, markerdata)


def precursormarkers(ctx):
    """obsolete marker marking this changeset as a successor"""
    for data in ctx.repo().obsstore.precursors.get(ctx.node(), ()):
        yield marker(ctx.repo(), data)

def successormarkers(ctx):
    """obsolete marker making this changeset obsolete"""
    for data in ctx.repo().obsstore.successors.get(ctx.node(), ()):
        yield marker(ctx.repo(), data)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)
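
# Usage sketch ('somenode' is a hypothetical 20-byte node id): walk every
# known rewrite of a node while skipping bumpedfix markers:
#
#     for n in allsuccessors(repo.obsstore, [somenode], ignoreflags=bumpedfix):
#         pass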

def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursor of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""

    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)

def foreground(repo, nodes):
    """return all nodes in the "foreground" of other nodes

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycles may result in complex situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)
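
# Usage sketch (hypothetical call site): everything reachable from the working
# directory parent through descendants or successors:
#
#     fg = foreground(repo, [repo['.'].node()])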
873
894
874
895
875 def successorssets(repo, initialnode, cache=None):
896 def successorssets(repo, initialnode, cache=None):
876 """Return set of all latest successors of initial nodes
897 """Return set of all latest successors of initial nodes
877
898
878 The successors set of a changeset A are the group of revisions that succeed
899 The successors set of a changeset A are the group of revisions that succeed
879 A. It succeeds A as a consistent whole, each revision being only a partial
900 A. It succeeds A as a consistent whole, each revision being only a partial
880 replacement. The successors set contains non-obsolete changesets only.
901 replacement. The successors set contains non-obsolete changesets only.
881
902
882 This function returns the full list of successor sets which is why it
903 This function returns the full list of successor sets which is why it
883 returns a list of tuples and not just a single tuple. Each tuple is a valid
904 returns a list of tuples and not just a single tuple. Each tuple is a valid
884 successors set. Note that (A,) may be a valid successors set for changeset A
905 successors set. Note that (A,) may be a valid successors set for changeset A
885 (see below).
906 (see below).
886
907
887 In most cases, a changeset A will have a single element (e.g. the changeset
908 In most cases, a changeset A will have a single element (e.g. the changeset
888 A is replaced by A') in its successors set. Though, it is also common for a
909 A is replaced by A') in its successors set. Though, it is also common for a
889 changeset A to have no elements in its successor set (e.g. the changeset
910 changeset A to have no elements in its successor set (e.g. the changeset
890 has been pruned). Therefore, the returned list of successors sets will be
911 has been pruned). Therefore, the returned list of successors sets will be
891 [(A',)] or [], respectively.
912 [(A',)] or [], respectively.
892
913
893 When a changeset A is split into A' and B', however, it will result in a
914 When a changeset A is split into A' and B', however, it will result in a
894 successors set containing more than a single element, i.e. [(A',B')].
915 successors set containing more than a single element, i.e. [(A',B')].
895 Divergent changesets will result in multiple successors sets, i.e. [(A',),
916 Divergent changesets will result in multiple successors sets, i.e. [(A',),
896 (A'')].
917 (A'')].
897
918
898 If a changeset A is not obsolete, then it will conceptually have no
919 If a changeset A is not obsolete, then it will conceptually have no
899 successors set. To distinguish this from a pruned changeset, the successor
920 successors set. To distinguish this from a pruned changeset, the successor
900 set will contain itself only, i.e. [(A,)].
921 set will contain itself only, i.e. [(A,)].
901
922
902 Finally, successors unknown locally are considered to be pruned (obsoleted
923 Finally, successors unknown locally are considered to be pruned (obsoleted
903 without any successors).
924 without any successors).
904
925
905 The optional `cache` parameter is a dictionary that may contain precomputed
926 The optional `cache` parameter is a dictionary that may contain precomputed
906 successors sets. It is meant to reuse the computation of a previous call to
927 successors sets. It is meant to reuse the computation of a previous call to
907 `successorssets` when multiple calls are made at the same time. The cache
928 `successorssets` when multiple calls are made at the same time. The cache
908 dictionary is updated in place. The caller is responsible for its life
929 dictionary is updated in place. The caller is responsible for its life
909 span. Code that makes multiple calls to `successorssets` *must* use this
930 span. Code that makes multiple calls to `successorssets` *must* use this
910 cache mechanism or suffer terrible performance.
931 cache mechanism or suffer terrible performance.
911 """
932 """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of the above list for fast loop detection
    # elements added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in successors:
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we cannot use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list acts as our call stack. Every node whose
    # successors set we search for is stacked there.
    #
    # The `stackedset` is a set version of this stack used to check if a
    # node is already stacked. This check is used to detect cycles and
    # prevent infinite loops.
    #
    # The successors sets of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors
    # sets for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the
        # topmost node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors set. Add it to the cache.
        # 3) We do not know the successors sets of some direct successors
        #    of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know the successors sets of all direct successors of
        #    CURRENT:
        #    -> We can compute the successors set of CURRENT and add it to
        #       the cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid set of last successors.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successor.
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            # For each direct successor of CURRENT, we check whether its
            # successors sets are known. If they are not, we stack the
            # unknown node and proceed to the next iteration of the while
            # loop. (case 3)
            #
            # During this step, we may detect obsolescence cycles: a node
            # with unknown successors sets but already in the call stack.
            # In such a situation, we arbitrarily set the successors sets
            # of the node to nothing (node pruned) to break the cycle.
            #
            # If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes the successors sets of CURRENT (case 4); see
            # details in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   a precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means a pruned node, multiple successors mean a
            #   split, and a single successor is a standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors
                            # sets of one of those successors we add it to
                            # the `toproceed` stack and stop all work for
                            # this iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # The successors set contributed by each marker depends on
                # the successors sets of all its "successors" nodes.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from
                # other markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
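                #
                # A worked sketch (hypothetical nodes): suppose one marker
                # rewrites CURRENT into (B, C), with cache[B] = [(B',)] and
                # cache[C] = [(C1,), (C2,)]. The Cartesian product below
                # then yields two successors sets for that marker:
                # [(B', C1), (B', C2)].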
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # Cartesian product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicate entries in a
                                    # successors set; first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse() # put small successors sets first
                cache[current] = final
    return cache[initialnode]

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return decorator
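# A minimal registration sketch (hypothetical set name, not part of this
# module):
#
#     @cachefor('frobbed')
#     def _computefrobbedset(repo):
#         return set()  # compute and return the revisions in the set
#
# getrevs(repo, 'frobbed') would then compute the set once and serve it
# from repo.obsstore.caches afterwards.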

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]
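# Typical lookups use the names registered below, e.g.:
#
#     getrevs(repo, 'obsolete')   # revisions with successor markers
#     getrevs(repo, 'unstable')   # non-obsolete revisions on obsolete ones
#
# Both return an empty frozenset when the repo has no obsstore data.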

# To keep things simple we need to invalidate the obsolescence cache when:
#
# - a new changeset is added
# - a public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence-related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists
    on the repo.

    (We could be smarter here given the exact event that triggered the
    cache clearing)"""
    # only clear caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    obs = set()
    getnode = repo.changelog.node
    notpublic = repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
    for r in notpublic:
        if getnode(r) in repo.obsstore.successors:
            obs.add(r)
    return obs

@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    revs = [(ctx.rev(), ctx) for ctx in
            repo.set('(not public()) and (not obsolete())')]
    revs.sort(key=lambda x: x[0])
    unstable = set()
    for rev, ctx in revs:
        # A rev is unstable if one of its parents is obsolete or unstable
        # this works since we traverse in increasing rev order
        if any((x.obsolete() or (x.rev() in unstable))
               for x in ctx.parents()):
            unstable.add(rev)
    return unstable
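# For instance (hypothetical history): if draft changeset B sits on top of
# obsolete changeset A, B is unstable; a child of B would be unstable too,
# which is why the loop above propagates instability in increasing rev
# order.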

@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete revisions with non-obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete revisions without non-obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')


@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    for ctx in repo.set('(not public()) and (not obsolete())'):
        rev = ctx.rev()
        # We only evaluate mutable, non-obsolete revisions
        node = ctx.node()
        # (future) A cache of precursors may be worth it if splits are very
        # common
        for pnode in allprecursors(repo.obsstore, [node],
                                   ignoreflags=bumpedfix):
            prev = torev(pnode) # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public precursor
                bumped.add(rev)
                break # Next draft!
    return bumped
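# In other words (hypothetical history): if public changeset P is rewritten
# into draft changeset P', P cannot actually be obsoleted, so P' is flagged
# as "bumped" and needs special resolution (hence the bumpedfix flag above).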

@cachefor('divergent')
def _computedivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent
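# Example (hypothetical history): if changeset X is amended into Y on one
# machine and independently into Z on another, both Y and Z claim to
# replace X, so both end up in the 'divergent' set. Note how `newermap` is
# threaded through `successorssets` as the shared cache described in its
# docstring.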


def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                  operation=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuples. `old` and each `new` are changectx objects. metadata is an
    optional dictionary containing metadata for this marker only. It is
    merged with the global metadata specified through the `metadata`
    argument of this function.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    if operation:
        metadata['operation'] = operation
    tr = repo.transaction('add-obsolescence-marker')
    try:
        markerargs = []
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise error.Abort(_("cannot obsolete public changeset: %s")
                                  % prec,
                                  hint="see 'hg help phases' for details")
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise error.Abort(_("changeset %s cannot obsolete itself")
                                  % prec)

            # Creating the marker causes the hidden cache to become
            # invalid, which causes recomputation when we ask for
            # prec.parents() above. Resulting in n^2 behavior. So let's
            # prepare all of the args first, then create the markers.
            markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata)
        repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
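# A minimal calling sketch (hypothetical changectxs `old` and `new`, not
# part of this module):
#
#     createmarkers(repo, [(old, (new,))], operation='amend')
#     createmarkers(repo, [(old, ())])  # prune `old` entirely
#
# Each relation may carry its own metadata dict as an optional third
# element, merged over the global `metadata` argument.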

def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    result = set(repo.ui.configlist('experimental', 'evolution'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't
    # been set but _enabled is true.
    if len(result) == 0 and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        createmarkersopt not in result):
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return option in result
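# Usage sketch (option constants such as createmarkersopt are assumed to be
# defined elsewhere in this module):
#
#     if isenabled(repo, createmarkersopt):
#         createmarkers(repo, relations)
#
# Options are read from the `experimental.evolution` config list; the
# special value 'all' enables everything.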