obsolete: replace references to 'sopener' with 'svfs'
Siddharth Agarwal
r25669:8d948618 default
@@ -1,1253 +1,1253 b''
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
build new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "precursor" and possible
replacements are called "successors". Markers that use changeset X as
a precursor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "precursor markers of Y" because they hold
information about the precursors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

  (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. The marker format depends on the
version. See the comment associated with each format for details.

"""
import struct
import util, base85, node, parsers
import phases
from i18n import _

_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on third party extensions to enable this.
_enabled = False

# Options for obsolescence
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'

### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it's a successor of a public changeset
#
# o A' (bumped)
# |`:
# | o A
# |/
# o Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
# o Ad
# |`:
# | x A'
# |'|
# o | A
# |/
# o Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker, <A', (Ad,)>.
# This flag means that the successors express the changes between the public
# and bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
usingsha256 = 2

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changeset identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)
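
# Editor's note: an illustrative sketch, not part of obsolete.py. It shows how
# the version-0 layout documented above maps onto the struct formats, assuming
# a dummy precursor node and a single successor:
#
#   fixed = _pack(_fm0fixed, 1, 0, 0, 'p' * 20)  # numsuc=1, mdsize=0, flags=0
#   raw = fixed + _pack(_fm0node, 's' * 20)      # then the N successor nodes
#   assert len(raw) == _fm0fsize + _fm0fnodesize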

def _fm0readmarkers(data, off):
    # Loop on markers
    l = len(data)
    while off + _fm0fsize <= l:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise util.Abort(_('parsing obsolete marker: metadata is too '
                               'short, %d bytes expected, got %d')
                             % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)

def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise util.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents):
            metadata['p%i' % (i + 1)] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata keys")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata values")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            key, value = l.split(':')
            d[key] = value
    return d
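
# Editor's note: an illustrative sketch, not part of obsolete.py. For
# well-formed metadata the two helpers above are inverses of each other:
#
#   meta = {'user': 'alice', 'note': 'amended'}
#   assert _fm0decodemeta(_fm0encodemeta(meta)) == meta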

## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: precursor changeset identifier.
#
# - N*(20 or 32) bytes: successor changeset identifiers.
#
# - P*(20 or 32) bytes: parents of the precursor changesets.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize('BB')
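
# Editor's note: an illustrative sketch, not part of obsolete.py. Decoding the
# fixed part of a hand-built version-1 marker with the format above; a parent
# count of 3 (_fm1parentnone) means "no parent data stored":
#
#   raw = _pack(_fm1fixed, 0, 0.0, 0, 0, 0, _fm1parentnone, 0, 'p' * 20)
#   fields = _unpack(_fm1fixed, raw)
#   assert fields[5] == _fm1parentnone  # numpar: no parent data stored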

def _fm1purereadmarkers(data, off):
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    stop = len(data) - _fm1fsize
    ufixed = struct.Struct(_fm1fixed).unpack

    while off <= stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)

def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)

def _fm1readmarkers(data, off):
    native = getattr(parsers, 'fm1readmarkers', None)
    if not native:
        return _fm1purereadmarkers(data, off)
    stop = len(data) - _fm1fsize
    return native(data, off, stop)

# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}

@util.nogc
def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    off = 0
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion not in formats:
        raise util.Abort(_('parsing obsolete marker: unknown version %r')
                         % diskversion)
    return diskversion, formats[diskversion][0](data, off)
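
# Editor's note: an illustrative sketch, not part of obsolete.py. Reading a
# raw obsstore blob (version byte included) with the helper above, assuming
# `data` holds the file content:
#
#   version, markers = _readmarkers(data)
#   for prec, sucs, flags, meta, date, parents in markers:
#       pass  # each marker is a 6-tuple in the `fields` order defined below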

def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield _pack('>B', version)
    for marker in markers:
        yield encodeone(marker)


class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changeset node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the precursors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]

@util.nogc
def _addsuccessors(successors, markers):
    for mark in markers:
        successors.setdefault(mark[0], set()).add(mark)

@util.nogc
def _addprecursors(precursors, markers):
    for mark in markers:
        for suc in mark[1]:
            precursors.setdefault(suc, set()).add(mark)

@util.nogc
def _addchildren(children, markers):
    for mark in markers:
        parents = mark[5]
        if parents is not None:
            for p in parents:
                children.setdefault(p, set()).add(mark)

def _checkinvalidmarkers(markers):
    """search for markers with invalid data and raise an error if needed

    Exists as a separate function to allow the evolve extension to do more
    subtle handling.
    """
    for mark in markers:
        if node.nullid in mark[1]:
            raise util.Abort(_('bad obsolescence marker detected: '
                               'invalid successors nullid'))

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x]   -> set(markers on precursors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec:    nodeid, precursor changesets
    # succs:   tuple of nodeid, successor changesets (0-N length)
    # flag:    integer, flag field carrying modifier for the markers (see doc)
    # meta:    binary blob, encoded metadata dictionary
    # date:    (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    #          None is used when no data has been recorded

-    def __init__(self, sopener, defaultformat=_fm1version, readonly=False):
+    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related data
        self.caches = {}
        self._all = []
-        self.sopener = sopener
+        self.svfs = svfs
-        data = sopener.tryread('obsstore')
+        data = svfs.tryread('obsstore')
        self._version = defaultformat
        self._readonly = readonly
        if data:
            self._version, markers = _readmarkers(data)
            self._addmarkers(markers)

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        return bool(self._all)

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating markers you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker has been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            else:
                date = util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicates.
        Return the number of new markers."""
        if self._readonly:
            raise util.Abort('creating obsolete markers is not enabled on this '
                             'repo')
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
-            f = self.sopener('obsstore', 'ab')
+            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in encodemarkers(new, offset == 0, self._version):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._addmarkers(new)
            # new markers *may* have changed several sets. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def precursors(self):
        precursors = {}
        _addprecursors(precursors, self._all)
        return precursors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        return attr in self.__dict__

    def _addmarkers(self, markers):
        markers = list(markers) # to allow repeated iteration
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('precursors'):
            _addprecursors(self.precursors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use this changeset as a successor
        - prune markers of direct children of this changeset
        - recursive application of the two rules on precursors of these markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers

def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for the highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None
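
# Editor's note: an illustrative sketch, not part of obsolete.py. With only
# formats 0 and 1 known locally, the highest version shared with a peer wins:
#
#   assert commonversion([2, 1, 0]) == 1
#   assert commonversion([5, 4]) is None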

# arbitrarily picked to fit into the 8K limit from the HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = base85.b85encode(data)
    return keys
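
# Editor's note: an illustrative sketch, not part of obsolete.py. Each chunk
# is a standalone version-0 payload keyed 'dump0', 'dump1', ..., so a receiver
# can hand every decoded value to mergemarkers(), assuming `repo`, `tr` and
# `markers` exist:
#
#   for key, value in sorted(_pushkeyescape(markers).items()):
#       repo.obsstore.mergemarkers(tr, base85.b85decode(value))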
716
716
717 def listmarkers(repo):
717 def listmarkers(repo):
718 """List markers over pushkey"""
718 """List markers over pushkey"""
719 if not repo.obsstore:
719 if not repo.obsstore:
720 return {}
720 return {}
721 return _pushkeyescape(sorted(repo.obsstore))
721 return _pushkeyescape(sorted(repo.obsstore))
722
722
723 def pushmarker(repo, key, old, new):
723 def pushmarker(repo, key, old, new):
724 """Push markers over pushkey"""
724 """Push markers over pushkey"""
725 if not key.startswith('dump'):
725 if not key.startswith('dump'):
726 repo.ui.warn(_('unknown key: %r') % key)
726 repo.ui.warn(_('unknown key: %r') % key)
727 return 0
727 return 0
728 if old:
728 if old:
729 repo.ui.warn(_('unexpected old value for %r') % key)
729 repo.ui.warn(_('unexpected old value for %r') % key)
730 return 0
730 return 0
731 data = base85.b85decode(new)
731 data = base85.b85decode(new)
732 lock = repo.lock()
732 lock = repo.lock()
733 try:
733 try:
734 tr = repo.transaction('pushkey: obsolete markers')
734 tr = repo.transaction('pushkey: obsolete markers')
735 try:
735 try:
736 repo.obsstore.mergemarkers(tr, data)
736 repo.obsstore.mergemarkers(tr, data)
737 tr.close()
737 tr.close()
738 return 1
738 return 1
739 finally:
739 finally:
740 tr.release()
740 tr.release()
741 finally:
741 finally:
742 lock.release()
742 lock.release()
743
743
744 def getmarkers(repo, nodes=None):
744 def getmarkers(repo, nodes=None):
745 """returns markers known in a repository
745 """returns markers known in a repository
746
746
747 If <nodes> is specified, only markers "relevant" to those nodes are are
747 If <nodes> is specified, only markers "relevant" to those nodes are are
748 returned"""
748 returned"""
749 if nodes is None:
749 if nodes is None:
750 rawmarkers = repo.obsstore
750 rawmarkers = repo.obsstore
751 else:
751 else:
752 rawmarkers = repo.obsstore.relevantmarkers(nodes)
752 rawmarkers = repo.obsstore.relevantmarkers(nodes)
753
753
754 for markerdata in rawmarkers:
754 for markerdata in rawmarkers:
755 yield marker(repo, markerdata)
755 yield marker(repo, markerdata)
756
756
757 def relevantmarkers(repo, node):
757 def relevantmarkers(repo, node):
758 """all obsolete markers relevant to some revision"""
758 """all obsolete markers relevant to some revision"""
759 for markerdata in repo.obsstore.relevantmarkers(node):
759 for markerdata in repo.obsstore.relevantmarkers(node):
760 yield marker(repo, markerdata)
760 yield marker(repo, markerdata)
761
761
762
762
763 def precursormarkers(ctx):
763 def precursormarkers(ctx):
764 """obsolete marker marking this changeset as a successors"""
764 """obsolete marker marking this changeset as a successors"""
765 for data in ctx.repo().obsstore.precursors.get(ctx.node(), ()):
765 for data in ctx.repo().obsstore.precursors.get(ctx.node(), ()):
766 yield marker(ctx.repo(), data)
766 yield marker(ctx.repo(), data)
767
767
768 def successormarkers(ctx):
768 def successormarkers(ctx):
769 """obsolete marker making this changeset obsolete"""
769 """obsolete marker making this changeset obsolete"""
770 for data in ctx.repo().obsstore.successors.get(ctx.node(), ()):
770 for data in ctx.repo().obsstore.successors.get(ctx.node(), ()):
771 yield marker(ctx.repo(), data)
771 yield marker(ctx.repo(), data)
772
772
773 def allsuccessors(obsstore, nodes, ignoreflags=0):
773 def allsuccessors(obsstore, nodes, ignoreflags=0):
774 """Yield node for every successor of <nodes>.
774 """Yield node for every successor of <nodes>.
775
775
776 Some successors may be unknown locally.
776 Some successors may be unknown locally.
777
777
778 This is a linear yield unsuited to detecting split changesets. It includes
778 This is a linear yield unsuited to detecting split changesets. It includes
779 initial nodes too."""
779 initial nodes too."""
780 remaining = set(nodes)
780 remaining = set(nodes)
781 seen = set(remaining)
781 seen = set(remaining)
782 while remaining:
782 while remaining:
783 current = remaining.pop()
783 current = remaining.pop()
784 yield current
784 yield current
785 for mark in obsstore.successors.get(current, ()):
785 for mark in obsstore.successors.get(current, ()):
786 # ignore marker flagged with specified flag
786 # ignore marker flagged with specified flag
787 if mark[2] & ignoreflags:
787 if mark[2] & ignoreflags:
788 continue
788 continue
789 for suc in mark[1]:
789 for suc in mark[1]:
790 if suc not in seen:
790 if suc not in seen:
791 seen.add(suc)
791 seen.add(suc)
792 remaining.add(suc)
792 remaining.add(suc)
793
793
794 def allprecursors(obsstore, nodes, ignoreflags=0):
794 def allprecursors(obsstore, nodes, ignoreflags=0):
795 """Yield node for every precursors of <nodes>.
795 """Yield node for every precursors of <nodes>.
796
796
797 Some precursors may be unknown locally.
797 Some precursors may be unknown locally.
798
798
799 This is a linear yield unsuited to detecting folded changesets. It includes
799 This is a linear yield unsuited to detecting folded changesets. It includes
800 initial nodes too."""
800 initial nodes too."""
801
801
802 remaining = set(nodes)
802 remaining = set(nodes)
803 seen = set(remaining)
803 seen = set(remaining)
804 while remaining:
804 while remaining:
805 current = remaining.pop()
805 current = remaining.pop()
806 yield current
806 yield current
807 for mark in obsstore.precursors.get(current, ()):
807 for mark in obsstore.precursors.get(current, ()):
808 # ignore marker flagged with specified flag
808 # ignore marker flagged with specified flag
809 if mark[2] & ignoreflags:
809 if mark[2] & ignoreflags:
810 continue
810 continue
811 suc = mark[0]
811 suc = mark[0]
812 if suc not in seen:
812 if suc not in seen:
813 seen.add(suc)
813 seen.add(suc)
814 remaining.add(suc)
814 remaining.add(suc)
815
815
816 def foreground(repo, nodes):
816 def foreground(repo, nodes):
817 """return all nodes in the "foreground" of other node
817 """return all nodes in the "foreground" of other node
818
818
819 The foreground of a revision is anything reachable using parent -> children
819 The foreground of a revision is anything reachable using parent -> children
820 or precursor -> successor relation. It is very similar to "descendant" but
820 or precursor -> successor relation. It is very similar to "descendant" but
821 augmented with obsolescence information.
821 augmented with obsolescence information.
822
822
823 Beware that possible obsolescence cycle may result if complex situation.
823 Beware that possible obsolescence cycle may result if complex situation.
824 """
824 """
825 repo = repo.unfiltered()
825 repo = repo.unfiltered()
826 foreground = set(repo.set('%ln::', nodes))
826 foreground = set(repo.set('%ln::', nodes))
827 if repo.obsstore:
827 if repo.obsstore:
828 # We only need this complicated logic if there is obsolescence
828 # We only need this complicated logic if there is obsolescence
829 # XXX will probably deserve an optimised revset.
829 # XXX will probably deserve an optimised revset.
830 nm = repo.changelog.nodemap
830 nm = repo.changelog.nodemap
831 plen = -1
831 plen = -1
832 # compute the whole set of successors or descendants
832 # compute the whole set of successors or descendants
833 while len(foreground) != plen:
833 while len(foreground) != plen:
834 plen = len(foreground)
834 plen = len(foreground)
835 succs = set(c.node() for c in foreground)
835 succs = set(c.node() for c in foreground)
836 mutable = [c.node() for c in foreground if c.mutable()]
836 mutable = [c.node() for c in foreground if c.mutable()]
837 succs.update(allsuccessors(repo.obsstore, mutable))
837 succs.update(allsuccessors(repo.obsstore, mutable))
838 known = (n for n in succs if n in nm)
838 known = (n for n in succs if n in nm)
839 foreground = set(repo.set('%ln::', known))
839 foreground = set(repo.set('%ln::', known))
840 return set(c.node() for c in foreground)
840 return set(c.node() for c in foreground)
841
841
842
842
843 def successorssets(repo, initialnode, cache=None):
843 def successorssets(repo, initialnode, cache=None):
844 """Return all set of successors of initial nodes
844 """Return all set of successors of initial nodes
845
845
846 The successors set of a changeset A are a group of revisions that succeed
846 The successors set of a changeset A are a group of revisions that succeed
847 A. It succeeds A as a consistent whole, each revision being only a partial
847 A. It succeeds A as a consistent whole, each revision being only a partial
848 replacement. The successors set contains non-obsolete changesets only.
848 replacement. The successors set contains non-obsolete changesets only.
849
849
850 This function returns the full list of successor sets which is why it
850 This function returns the full list of successor sets which is why it
851 returns a list of tuples and not just a single tuple. Each tuple is a valid
851 returns a list of tuples and not just a single tuple. Each tuple is a valid
852 successors set. Not that (A,) may be a valid successors set for changeset A
852 successors set. Not that (A,) may be a valid successors set for changeset A
853 (see below).
853 (see below).
854
854
855 In most cases, a changeset A will have a single element (e.g. the changeset
855 In most cases, a changeset A will have a single element (e.g. the changeset
856 A is replaced by A') in its successors set. Though, it is also common for a
856 A is replaced by A') in its successors set. Though, it is also common for a
857 changeset A to have no elements in its successor set (e.g. the changeset
857 changeset A to have no elements in its successor set (e.g. the changeset
858 has been pruned). Therefore, the returned list of successors sets will be
858 has been pruned). Therefore, the returned list of successors sets will be
859 [(A',)] or [], respectively.
859 [(A',)] or [], respectively.
860
860
861 When a changeset A is split into A' and B', however, it will result in a
861 When a changeset A is split into A' and B', however, it will result in a
862 successors set containing more than a single element, i.e. [(A',B')].
862 successors set containing more than a single element, i.e. [(A',B')].
863 Divergent changesets will result in multiple successors sets, i.e. [(A',),
863 Divergent changesets will result in multiple successors sets, i.e. [(A',),
864 (A'')].
864 (A'')].
865
865
866 If a changeset A is not obsolete, then it will conceptually have no
866 If a changeset A is not obsolete, then it will conceptually have no
867 successors set. To distinguish this from a pruned changeset, the successor
867 successors set. To distinguish this from a pruned changeset, the successor
868 set will only contain itself, i.e. [(A,)].
868 set will only contain itself, i.e. [(A,)].
869
869
870 Finally, successors unknown locally are considered to be pruned (obsoleted
870 Finally, successors unknown locally are considered to be pruned (obsoleted
871 without any successors).
871 without any successors).
872
872
873 The optional `cache` parameter is a dictionary that may contain precomputed
873 The optional `cache` parameter is a dictionary that may contain precomputed
874 successors sets. It is meant to reuse the computation of a previous call to
874 successors sets. It is meant to reuse the computation of a previous call to
875 `successorssets` when multiple calls are made at the same time. The cache
875 `successorssets` when multiple calls are made at the same time. The cache
876 dictionary is updated in place. The caller is responsible for its live
876 dictionary is updated in place. The caller is responsible for its live
877 spawn. Code that makes multiple calls to `successorssets` *must* use this
877 spawn. Code that makes multiple calls to `successorssets` *must* use this
878 cache mechanism or suffer terrible performances.
878 cache mechanism or suffer terrible performances.
879
879
880 """
880 """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of the above list for fast loop detection
    # elements added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}
    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in successors:
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we cannot use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list acts as our call stack. Every node we search
    # successors sets for is stacked there.
    #
    # The `stackedset` is a set version of this stack used to check if a node
    # is already stacked. This check is used to detect cycles and prevent
    # infinite loops.
    #
    # The successors sets of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors
    # sets for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors set. Add it to the cache.
        # 3) We do not know the successors sets of the direct successors of
        #    CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know the successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT's successors sets and add them to the
        #       cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid last successor.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successor.
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            # For each direct successor of CURRENT, we check whether its
            # successors sets are known. If they are not, we stack the
            # unknown node and proceed to the next iteration of the while
            # loop. (case 3)
            #
            # During this step, we may detect obsolescence cycles: a node
            # with unknown successors sets but already in the call stack.
            # In such a situation, we arbitrarily set the successors sets of
            # the node to nothing (node pruned) to break the cycle.
            #
            # If no break was encountered, we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see
            # details in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means a pruned node; multiple successors mean a
            #   split; a single successor is a standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # The successors set contributed by each marker depends on the
                # successors sets of all its "successors" nodes.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
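                #
                # A worked sketch of the product step (hypothetical nodes, not
                # taken from any real store): suppose CURRENT has one marker
                # with successors (B, C), where cache[B] == [(B',)] and
                # cache[C] == [(C1,), (C2,)]. That marker then contributes the
                # successors sets [(B', C1), (B', C2)].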
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # Cartesian product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicate entries in a successors
                                    # set; the first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicates and subsets
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse() # put small successors sets first
                cache[current] = final
    return cache[initialnode]

def _knownrevs(repo, nodes):
    """yield the revision numbers of the known nodes passed as parameters

    Unknown revisions are silently ignored."""
    torev = repo.changelog.nodemap.get
    for n in nodes:
        rev = torev(n)
        if rev is not None:
            yield rev

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return decorator

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such an access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]
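
# A usage sketch: the sets registered with @cachefor below are computed
# lazily on first access and then served from the obsstore cache, e.g.
#
#     if rev in getrevs(repo, 'unstable'):
#         pass  # rev has an obsolete (or unstable) parent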

# To be simple we need to invalidate obsolescence caches when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence-related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists on
    the repo.

    (We could be smarter here given the exact event that triggers the cache
    clearing)"""
    # only clear the cache if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    obs = set()
    getrev = repo.changelog.nodemap.get
    getphase = repo._phasecache.phase
    for n in repo.obsstore.successors:
        rev = getrev(n)
        # only count revisions known locally and not public (a non-zero
        # phase means the revision is mutable)
        if rev is not None and getphase(repo, rev):
            obs.add(rev)
    return obs

@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non-obsolete revisions with obsolete parents"""
    revs = [(ctx.rev(), ctx) for ctx in
            repo.set('(not public()) and (not obsolete())')]
    revs.sort(key=lambda x: x[0])
    unstable = set()
    for rev, ctx in revs:
        # A rev is unstable if one of its parents is obsolete or unstable;
        # this works since we traverse in increasing rev order
        if any((x.obsolete() or (x.rev() in unstable))
               for x in ctx.parents()):
            unstable.add(rev)
    return unstable

@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete changesets with non-obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete changesets without non-obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')


@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    for ctx in repo.set('(not public()) and (not obsolete())'):
        rev = ctx.rev()
        # We only evaluate mutable, non-obsolete revisions
        node = ctx.node()
        # (future) A cache of precursors may be worth it if splits are very
        # common
        for pnode in allprecursors(repo.obsstore, [node],
                                   ignoreflags=bumpedfix):
            prev = torev(pnode) # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public precursor
                bumped.add(rev)
                break # Next draft!
    return bumped

@cachefor('divergent')
def _computedivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent
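
# Illustrative scenario (hypothetical changesets): if A was rewritten into A'
# in one repository and independently into A'' in another, pulling both
# markers gives A two non-empty successors sets, so A' and A'' both end up in
# the divergent set.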


def createmarkers(repo, relations, flag=0, date=None, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuples. `old` and `news` are changectx objects. metadata is an optional
    dictionary containing metadata for this marker only. It is merged with
    the global metadata specified through the `metadata` argument of this
    function.

    Trying to obsolete a public changeset will raise an exception.

    The current user and date are used unless specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    tr = repo.transaction('add-obsolescence-marker')
    try:
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise util.Abort("cannot obsolete public changeset: %s"
                                 % prec,
                                 hint='see "hg help phases" for details')
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise util.Abort("changeset %s cannot obsolete itself" % prec)
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata)
            repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()

def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    result = set(repo.ui.configlist('experimental', 'evolution'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't
    # been set but _enabled is true.
    if len(result) == 0 and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        createmarkersopt not in result):
        raise util.Abort(_("'createmarkers' obsolete option must be enabled "
                           "if other obsolete options are enabled"))

    return option in result
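
# A configuration sketch: the options tested above come from the
# [experimental] section of the configuration, e.g.
#
#     [experimental]
#     evolution = createmarkers, exchange
#
# with which isenabled(repo, exchangeopt) would return True.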