obsolete: make _computeobsoleteset much faster...
Laurent Charignon - r27784:432242f4 default

# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

9 """Obsolete marker handling
9 """Obsolete marker handling
10
10
11 An obsolete marker maps an old changeset to a list of new
11 An obsolete marker maps an old changeset to a list of new
12 changesets. If the list of new changesets is empty, the old changeset
12 changesets. If the list of new changesets is empty, the old changeset
13 is said to be "killed". Otherwise, the old changeset is being
13 is said to be "killed". Otherwise, the old changeset is being
14 "replaced" by the new changesets.
14 "replaced" by the new changesets.
15
15
16 Obsolete markers can be used to record and distribute changeset graph
16 Obsolete markers can be used to record and distribute changeset graph
17 transformations performed by history rewrite operations, and help
17 transformations performed by history rewrite operations, and help
18 building new tools to reconcile conflicting rewrite actions. To
18 building new tools to reconcile conflicting rewrite actions. To
19 facilitate conflict resolution, markers include various annotations
19 facilitate conflict resolution, markers include various annotations
20 besides old and news changeset identifiers, such as creation date or
20 besides old and news changeset identifiers, such as creation date or
21 author name.
21 author name.
22
22
23 The old obsoleted changeset is called a "precursor" and possible
23 The old obsoleted changeset is called a "precursor" and possible
24 replacements are called "successors". Markers that used changeset X as
24 replacements are called "successors". Markers that used changeset X as
25 a precursor are called "successor markers of X" because they hold
25 a precursor are called "successor markers of X" because they hold
26 information about the successors of X. Markers that use changeset Y as
26 information about the successors of X. Markers that use changeset Y as
27 a successors are call "precursor markers of Y" because they hold
27 a successors are call "precursor markers of Y" because they hold
28 information about the precursors of Y.
28 information about the precursors of Y.
29
29
30 Examples:
30 Examples:
31
31
32 - When changeset A is replaced by changeset A', one marker is stored:
32 - When changeset A is replaced by changeset A', one marker is stored:
33
33
34 (A, (A',))
34 (A, (A',))
35
35
36 - When changesets A and B are folded into a new changeset C, two markers are
36 - When changesets A and B are folded into a new changeset C, two markers are
37 stored:
37 stored:
38
38
39 (A, (C,)) and (B, (C,))
39 (A, (C,)) and (B, (C,))
40
40
41 - When changeset A is simply "pruned" from the graph, a marker is created:
41 - When changeset A is simply "pruned" from the graph, a marker is created:
42
42
43 (A, ())
43 (A, ())
44
44
45 - When changeset A is split into B and C, a single marker are used:
45 - When changeset A is split into B and C, a single marker are used:
46
46
47 (A, (C, C))
47 (A, (C, C))
48
48
49 We use a single marker to distinguish the "split" case from the "divergence"
49 We use a single marker to distinguish the "split" case from the "divergence"
50 case. If two independent operations rewrite the same changeset A in to A' and
50 case. If two independent operations rewrite the same changeset A in to A' and
51 A'', we have an error case: divergent rewriting. We can detect it because
51 A'', we have an error case: divergent rewriting. We can detect it because
52 two markers will be created independently:
52 two markers will be created independently:
53
53
54 (A, (B,)) and (A, (C,))
54 (A, (B,)) and (A, (C,))
55
55
56 Format
56 Format
57 ------
57 ------
58
58
59 Markers are stored in an append-only file stored in
59 Markers are stored in an append-only file stored in
60 '.hg/store/obsstore'.
60 '.hg/store/obsstore'.
61
61
62 The file starts with a version header:
62 The file starts with a version header:
63
63
64 - 1 unsigned byte: version number, starting at zero.
64 - 1 unsigned byte: version number, starting at zero.
65
65
66 The header is followed by the markers. Marker format depend of the version. See
66 The header is followed by the markers. Marker format depend of the version. See
67 comment associated with each format for details.
67 comment associated with each format for details.
68
68
69 """
69 """
from __future__ import absolute_import

import errno
import struct

from .i18n import _
from . import (
    base85,
    error,
    node,
    parsers,
    phases,
    util,
)

_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third-party extension to enable this.
_enabled = False

# Options for obsolescence
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'

### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it is a successor of a public changeset.
#
# o  A' (bumped)
# |`:
# | o  A
# |/
# o  Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'.
#
# o  Ad
# |`:
# | x  A'
# |'|
# o |  A
# |/
# o  Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker <A', (Ad,)>.
# This flag means that the successor expresses the changes between the public
# and bumped version and fixes the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
usingsha256 = 2

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)
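# For reference, the fixed part above ('>BIB20s') is 1 + 4 + 1 + 20 = 26
# bytes per marker; the N successor nodes and M metadata bytes described in
# the layout comment follow it.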

def _fm0readmarkers(data, off):
    # Loop on markers
    l = len(data)
    while off + _fm0fsize <= l:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)

def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents):
            metadata['p%i' % (i + 1)] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in keys and no '\0' in either keys or values."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata keys")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata values")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            # values may contain ':', so only split on the first one
            key, value = l.split(':', 1)
            d[key] = value
    return d
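
# A quick illustration of the metadata encoding above (sketch, not run as a
# doctest):
#
#   _fm0encodemeta({'user': 'alice', 'note': 'fold'})
#   -> 'note:fold\x00user:alice'          # keys come out sorted
#   _fm0decodemeta('note:fold\x00user:alice')
#   -> {'note': 'fold', 'user': 'alice'}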

## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: precursor changeset identifier.
#
# - N*(20 or 32) bytes: successor changeset identifiers.
#
# - P*(20 or 32) bytes: parents of the precursor changeset.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize('BB')
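# For reference, the fixed part ('>IdhHBBB20s') unpacks as
# 4 + 8 + 2 + 2 + 1 + 1 + 1 + 20 = 39 bytes per marker, before the
# variable-length successors, parents and metadata sections.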

def _fm1purereadmarkers(data, off):
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    stop = len(data) - _fm1fsize
    ufixed = struct.Struct(_fm1fixed).unpack

    while off <= stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)

def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)

def _fm1readmarkers(data, off):
    native = getattr(parsers, 'fm1readmarkers', None)
    if not native:
        return _fm1purereadmarkers(data, off)
    stop = len(data) - _fm1fsize
    return native(data, off, stop)

# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}

@util.nogc
def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    off = 0
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion not in formats:
        raise error.Abort(_('parsing obsolete marker: unknown version %r')
                          % diskversion)
    return diskversion, formats[diskversion][0](data, off)

def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield _pack('>B', version)
    for marker in markers:
        yield encodeone(marker)
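
# A small sketch of how the helpers above fit together (illustrative only;
# the node id is made up):
#
#   prune = ('\x11' * 20, (), 0, (), (0.0, 0), None)
#   blob = ''.join(encodemarkers([prune], addheader=True))
#   version, markers = _readmarkers(blob)
#   assert version == _fm0version and list(markers) == [prune]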


class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in a later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changeset node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the precursor (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]

@util.nogc
def _addsuccessors(successors, markers):
    for mark in markers:
        successors.setdefault(mark[0], set()).add(mark)

@util.nogc
def _addprecursors(precursors, markers):
    for mark in markers:
        for suc in mark[1]:
            precursors.setdefault(suc, set()).add(mark)

@util.nogc
def _addchildren(children, markers):
    for mark in markers:
        parents = mark[5]
        if parents is not None:
            for p in parents:
                children.setdefault(p, set()).add(mark)

def _checkinvalidmarkers(markers):
    """search for markers with invalid data and raise an error if needed

    Exists as a separate function to allow the evolve extension a more
    subtle handling.
    """
    for mark in markers:
        if node.nullid in mark[1]:
            raise error.Abort(_('bad obsolescence marker detected: '
                                'invalid successors nullid'))

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with several mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on precursors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec: nodeid, precursor changesets
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    #          None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related data
        self.caches = {}
        self.svfs = svfs
        self._version = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        if not self._cached('_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
                pass
        return bool(self._all)

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code that creates markers you want to use
        the `createmarkers` function in this module instead.

        return True if a new marker has been added, False if the marker
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            else:
                date = util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Takes care of filtering out duplicates.
        Returns the number of new markers."""
        if self._readonly:
            raise error.Abort('creating obsolete markers is not enabled on '
                              'this repo')
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in encodemarkers(new, offset == 0, self._version):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._addmarkers(new)
            # new markers *may* have changed several sets. invalidate the caches.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _all(self):
        data = self.svfs.tryread('obsstore')
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def precursors(self):
        precursors = {}
        _addprecursors(precursors, self._all)
        return precursors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        return attr in self.__dict__

    def _addmarkers(self, markers):
        markers = list(markers) # to allow repeated iteration
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('precursors'):
            _addprecursors(self.precursors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use any of these nodes as a successor
        - prune markers of direct children of these nodes
        - recursive application of the two rules on precursors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers

def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for the highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None
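
# With the `formats` table above this gives, for example:
#
#   commonversion([2, 1]) == 1      # version 2 is unknown locally
#   commonversion([2]) is None      # no common version at all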

# arbitrarily picked to fit into 8K limit from HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = base85.b85encode(data)
    return keys
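
# For instance, markers whose version-0 encodings add up to a bit more than
# _maxpayload bytes come back as two entries, roughly
# {'dump0': <base85 chunk>, 'dump1': <base85 chunk>}, each chunk carrying
# its own version header.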

def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(sorted(repo.obsstore))

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()

def getmarkers(repo, nodes=None):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    if nodes is None:
        rawmarkers = repo.obsstore
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for markerdata in rawmarkers:
        yield marker(repo, markerdata)
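
# Typical consumption (sketch, assuming an existing `repo` object):
#
#   for m in getmarkers(repo):
#       print node.short(m.precnode()), '->', map(node.short, m.succnodes())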

def relevantmarkers(repo, node):
    """all obsolete markers relevant to some revision"""
    for markerdata in repo.obsstore.relevantmarkers(node):
        yield marker(repo, markerdata)


def precursormarkers(ctx):
    """obsolete markers marking this changeset as a successor"""
    for data in ctx.repo().obsstore.precursors.get(ctx.node(), ()):
        yield marker(ctx.repo(), data)

def successormarkers(ctx):
    """obsolete markers making this changeset obsolete"""
    for data in ctx.repo().obsstore.successors.get(ctx.node(), ()):
        yield marker(ctx.repo(), data)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)
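
# For example, with markers (A, (B,)) and (B, (C,)) in the store, iterating
# allsuccessors(obsstore, [A]) yields A, B and C (in no guaranteed order).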

def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursor of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""

    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)

def foreground(repo, nodes):
    """return all nodes in the "foreground" of other nodes

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycles may result in complex situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)


def successorssets(repo, initialnode, cache=None):
    """Return set of all latest successors of initial nodes

    The successors set of a changeset A is the group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. The successors set contains non-obsolete changesets only.

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will contain itself only, i.e. [(A,)].

    Finally, successors unknown locally are considered to be pruned (obsoleted
    without any successors).

    The optional `cache` parameter is a dictionary that may contain precomputed
    successors sets. It is meant to reuse the computation of a previous call to
    `successorssets` when multiple calls are made at the same time. The cache
    dictionary is updated in place. The caller is responsible for its life
    span. Code that makes multiple calls to `successorssets` *must* use this
    cache mechanism or suffer terrible performance.
    """
911
911
912 succmarkers = repo.obsstore.successors
912 succmarkers = repo.obsstore.successors
913
913
914 # Stack of nodes we search successors sets for
914 # Stack of nodes we search successors sets for
915 toproceed = [initialnode]
915 toproceed = [initialnode]
916 # set version of above list for fast loop detection
916 # set version of above list for fast loop detection
917 # element added to "toproceed" must be added here
917 # element added to "toproceed" must be added here
918 stackedset = set(toproceed)
918 stackedset = set(toproceed)
919 if cache is None:
919 if cache is None:
920 cache = {}
920 cache = {}
921
921
922 # This while loop is the flattened version of a recursive search for
922 # This while loop is the flattened version of a recursive search for
923 # successors sets
923 # successors sets
924 #
924 #
925 # def successorssets(x):
925 # def successorssets(x):
926 # successors = directsuccessors(x)
926 # successors = directsuccessors(x)
927 # ss = [[]]
927 # ss = [[]]
928 # for succ in directsuccessors(x):
928 # for succ in directsuccessors(x):
929 # # product as in itertools cartesian product
929 # # product as in itertools cartesian product
930 # ss = product(ss, successorssets(succ))
930 # ss = product(ss, successorssets(succ))
931 # return ss
931 # return ss
932 #
932 #
933 # But we can not use plain recursive calls here:
933 # But we can not use plain recursive calls here:
934 # - that would blow the python call stack
934 # - that would blow the python call stack
935 # - obsolescence markers may have cycles, we need to handle them.
935 # - obsolescence markers may have cycles, we need to handle them.
936 #
936 #
937 # The `toproceed` list act as our call stack. Every node we search
937 # The `toproceed` list act as our call stack. Every node we search
938 # successors set for are stacked there.
938 # successors set for are stacked there.
939 #
939 #
940 # The `stackedset` is set version of this stack used to check if a node is
940 # The `stackedset` is set version of this stack used to check if a node is
941 # already stacked. This check is used to detect cycles and prevent infinite
941 # already stacked. This check is used to detect cycles and prevent infinite
942 # loop.
942 # loop.
943 #
943 #
944 # successors set of all nodes are stored in the `cache` dictionary.
944 # successors set of all nodes are stored in the `cache` dictionary.
945 #
945 #
946 # After this while loop ends we use the cache to return the successors sets
946 # After this while loop ends we use the cache to return the successors sets
947 # for the node requested by the caller.
947 # for the node requested by the caller.
948 while toproceed:
948 while toproceed:
949 # Every iteration tries to compute the successors sets of the topmost
949 # Every iteration tries to compute the successors sets of the topmost
950 # node of the stack: CURRENT.
950 # node of the stack: CURRENT.
951 #
951 #
952 # There are four possible outcomes:
952 # There are four possible outcomes:
953 #
953 #
954 # 1) We already know the successors sets of CURRENT:
954 # 1) We already know the successors sets of CURRENT:
955 # -> mission accomplished, pop it from the stack.
955 # -> mission accomplished, pop it from the stack.
956 # 2) Node is not obsolete:
956 # 2) Node is not obsolete:
957 # -> the node is its own successors sets. Add it to the cache.
957 # -> the node is its own successors sets. Add it to the cache.
958 # 3) We do not know successors set of direct successors of CURRENT:
958 # 3) We do not know successors set of direct successors of CURRENT:
959 # -> We add those successors to the stack.
959 # -> We add those successors to the stack.
960 # 4) We know successors sets of all direct successors of CURRENT:
960 # 4) We know successors sets of all direct successors of CURRENT:
961 # -> We can compute CURRENT successors set and add it to the
961 # -> We can compute CURRENT successors set and add it to the
962 # cache.
962 # cache.
963 #
963 #
964 current = toproceed[-1]
964 current = toproceed[-1]
965 if current in cache:
965 if current in cache:
966 # case (1): We already know the successors sets
966 # case (1): We already know the successors sets
967 stackedset.remove(toproceed.pop())
967 stackedset.remove(toproceed.pop())
968 elif current not in succmarkers:
968 elif current not in succmarkers:
969 # case (2): The node is not obsolete.
969 # case (2): The node is not obsolete.
970 if current in repo:
970 if current in repo:
971 # We have a valid last successors.
971 # We have a valid last successors.
972 cache[current] = [(current,)]
972 cache[current] = [(current,)]
973 else:
973 else:
974 # Final obsolete version is unknown locally.
974 # Final obsolete version is unknown locally.
975 # Do not count that as a valid successors
975 # Do not count that as a valid successors
976 cache[current] = []
976 cache[current] = []
977 else:
977 else:
978 # cases (3) and (4)
978 # cases (3) and (4)
979 #
979 #
980 # We proceed in two phases. Phase 1 aims to distinguish case (3)
980 # We proceed in two phases. Phase 1 aims to distinguish case (3)
981 # from case (4):
981 # from case (4):
982 #
982 #
983 # For each direct successors of CURRENT, we check whether its
983 # For each direct successors of CURRENT, we check whether its
984 # successors sets are known. If they are not, we stack the
984 # successors sets are known. If they are not, we stack the
985 # unknown node and proceed to the next iteration of the while
985 # unknown node and proceed to the next iteration of the while
986 # loop. (case 3)
986 # loop. (case 3)
987 #
987 #
988 # During this step, we may detect obsolescence cycles: a node
988 # During this step, we may detect obsolescence cycles: a node
989 # with unknown successors sets but already in the call stack.
989 # with unknown successors sets but already in the call stack.
990 # In such a situation, we arbitrary set the successors sets of
990 # In such a situation, we arbitrary set the successors sets of
991 # the node to nothing (node pruned) to break the cycle.
991 # the node to nothing (node pruned) to break the cycle.
992 #
992 #
993 # If no break was encountered we proceed to phase 2.
993 # If no break was encountered we proceed to phase 2.
994 #
994 #
995 # Phase 2 computes successors sets of CURRENT (case 4); see details
995 # Phase 2 computes successors sets of CURRENT (case 4); see details
996 # in phase 2 itself.
996 # in phase 2 itself.
997 #
997 #
998 # Note the two levels of iteration in each phase.
998 # Note the two levels of iteration in each phase.
999 # - The first one handles obsolescence markers using CURRENT as
999 # - The first one handles obsolescence markers using CURRENT as
1000 # precursor (successors markers of CURRENT).
1000 # precursor (successors markers of CURRENT).
1001 #
1001 #
1002 # Having multiple entry here means divergence.
1002 # Having multiple entry here means divergence.
1003 #
1003 #
1004 # - The second one handles successors defined in each marker.
1004 # - The second one handles successors defined in each marker.
1005 #
1005 #
1006 # Having none means pruned node, multiple successors means split,
1006 # Having none means pruned node, multiple successors means split,
1007 # single successors are standard replacement.
1007 # single successors are standard replacement.
1008 #
1008 #
1009 for mark in sorted(succmarkers[current]):
1009 for mark in sorted(succmarkers[current]):
1010 for suc in mark[1]:
1010 for suc in mark[1]:
1011 if suc not in cache:
1011 if suc not in cache:
1012 if suc in stackedset:
1012 if suc in stackedset:
1013 # cycle breaking
1013 # cycle breaking
1014 cache[suc] = []
1014 cache[suc] = []
1015 else:
1015 else:
1016 # case (3) If we have not computed successors sets
1016 # case (3) If we have not computed successors sets
1017 # of one of those successors we add it to the
1017 # of one of those successors we add it to the
1018 # `toproceed` stack and stop all work for this
1018 # `toproceed` stack and stop all work for this
1019 # iteration.
1019 # iteration.
1020 toproceed.append(suc)
1020 toproceed.append(suc)
1021 stackedset.add(suc)
1021 stackedset.add(suc)
1022 break
1022 break
1023 else:
1023 else:
1024 continue
1024 continue
1025 break
1025 break
1026 else:
1026 else:
1027 # case (4): we know all successors sets of all direct
1027 # case (4): we know all successors sets of all direct
1028 # successors
1028 # successors
1029 #
1029 #
1030 # Successors set contributed by each marker depends on the
1030 # Successors set contributed by each marker depends on the
1031 # successors sets of all its "successors" node.
1031 # successors sets of all its "successors" node.
1032 #
1032 #
1033 # Each different marker is a divergence in the obsolescence
1033 # Each different marker is a divergence in the obsolescence
1034 # history. It contributes successors sets distinct from other
1034 # history. It contributes successors sets distinct from other
1035 # markers.
1035 # markers.
1036 #
1036 #
1037 # Within a marker, a successor may have divergent successors
1037 # Within a marker, a successor may have divergent successors
1038 # sets. In such a case, the marker will contribute multiple
1038 # sets. In such a case, the marker will contribute multiple
1039 # divergent successors sets. If multiple successors have
1039 # divergent successors sets. If multiple successors have
1040 # divergent successors sets, a Cartesian product is used.
1040 # divergent successors sets, a Cartesian product is used.
1041 #
1041 #
1042 # At the end we post-process successors sets to remove
1042 # At the end we post-process successors sets to remove
1043 # duplicated entry and successors set that are strict subset of
1043 # duplicated entry and successors set that are strict subset of
1044 # another one.
1044 # another one.
1045 succssets = []
1045 succssets = []
1046 for mark in sorted(succmarkers[current]):
1046 for mark in sorted(succmarkers[current]):
1047 # successors sets contributed by this marker
1047 # successors sets contributed by this marker
1048 markss = [[]]
1048 markss = [[]]
1049 for suc in mark[1]:
1049 for suc in mark[1]:
1050 # cardinal product with previous successors
1050 # cardinal product with previous successors
1051 productresult = []
1051 productresult = []
1052 for prefix in markss:
1052 for prefix in markss:
1053 for suffix in cache[suc]:
1053 for suffix in cache[suc]:
1054 newss = list(prefix)
1054 newss = list(prefix)
1055 for part in suffix:
1055 for part in suffix:
1056 # do not duplicated entry in successors set
1056 # do not duplicated entry in successors set
1057 # first entry wins.
1057 # first entry wins.
1058 if part not in newss:
1058 if part not in newss:
1059 newss.append(part)
1059 newss.append(part)
1060 productresult.append(newss)
1060 productresult.append(newss)
1061 markss = productresult
1061 markss = productresult
1062 succssets.extend(markss)
1062 succssets.extend(markss)
1063 # remove duplicated and subset
1063 # remove duplicated and subset
1064 seen = []
1064 seen = []
1065 final = []
1065 final = []
1066 candidate = sorted(((set(s), s) for s in succssets if s),
1066 candidate = sorted(((set(s), s) for s in succssets if s),
1067 key=lambda x: len(x[1]), reverse=True)
1067 key=lambda x: len(x[1]), reverse=True)
1068 for setversion, listversion in candidate:
1068 for setversion, listversion in candidate:
1069 for seenset in seen:
1069 for seenset in seen:
1070 if setversion.issubset(seenset):
1070 if setversion.issubset(seenset):
1071 break
1071 break
1072 else:
1072 else:
1073 final.append(listversion)
1073 final.append(listversion)
1074 seen.append(setversion)
1074 seen.append(setversion)
1075 final.reverse() # put small successors set first
1075 final.reverse() # put small successors set first
1076 cache[current] = final
1076 cache[current] = final
1077 return cache[initialnode]
1077 return cache[initialnode]
1078
1078
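# Illustrative sketch (not part of the original module): sharing one `cache`
# dictionary across calls, as the docstring above requires when computing
# successors sets for many nodes. `repo` and `nodes` (an iterable of binary
# node ids) are assumed to come from a real repository; `process` is a
# hypothetical callback.
#
#     cache = {}
#     for node in nodes:
#         for succset in successorssets(repo, node, cache):
#             # each `succset` is a tuple of binary node ids; an empty
#             # list of sets means `node` was pruned
#             process(succset)
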
# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return decorator

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]

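# Illustrative sketch (not part of the original module): how a new cached set
# would be registered and queried. The 'example' set name and its body are
# hypothetical; the real sets are registered below with @cachefor.
#
#     @cachefor('example')
#     def _computeexampleset(repo):
#         """the set of revisions with some property (hypothetical)"""
#         return set(repo.revs('not public()'))
#
#     revs = getrevs(repo, 'example')   # computed once, then served from
#     revs = getrevs(repo, 'example')   # repo.obsstore.caches['example']
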
# To be simple we need to invalidate obsolescence caches when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence-related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists
    on the repo.

    (We could be smarter here given the exact event that triggered the cache
    clearing)"""
    # only clear caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

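# Note on _computeobsoleteset below: markers cannot be created on public
# changesets (createmarkers aborts on them), so obsolete revisions are
# always non-public. Iterating over "not public()" and probing the
# successors map therefore scales with the number of mutable revisions
# rather than with the number of markers, which is what makes this
# computation much faster than scanning every marker precursor.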
@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    obs = set()
    getnode = repo.changelog.node
    notpublic = repo.revs("not public()")
    for r in notpublic:
        if getnode(r) in repo.obsstore.successors:
            obs.add(r)
    return obs

@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    revs = [(ctx.rev(), ctx) for ctx in
            repo.set('(not public()) and (not obsolete())')]
    revs.sort(key=lambda x: x[0])
    unstable = set()
    for rev, ctx in revs:
        # A rev is unstable if one of its parents is obsolete or unstable;
        # this works since we traverse in growing rev order
        if any((x.obsolete() or (x.rev() in unstable))
               for x in ctx.parents()):
            unstable.add(rev)
    return unstable

@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')


@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    for ctx in repo.set('(not public()) and (not obsolete())'):
        rev = ctx.rev()
        # We only evaluate mutable, non-obsolete revisions
        node = ctx.node()
        # (future) A cache of precursors may be worth it if split is very
        # common
        for pnode in allprecursors(repo.obsstore, [node],
                                   ignoreflags=bumpedfix):
            prev = torev(pnode) # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public precursor
                bumped.add(rev)
                break # Next draft!
    return bumped

@cachefor('divergent')
def _computedivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent

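# Illustrative note (not part of the original module): divergence arises when
# independent markers rewrite the same precursor, e.g. the two markers
# (A, (B,)) and (A, (C,)) make both B and C divergent, since each competes
# to be A's final successor.
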
def createmarkers(repo, relations, flag=0, date=None, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuples. `old` and `news` are changectx objects. metadata is an optional
    dictionary containing metadata for this marker only. It is merged with
    the global metadata specified through the `metadata` argument of this
    function.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    tr = repo.transaction('add-obsolescence-marker')
    try:
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise error.Abort("cannot obsolete public changeset: %s"
                                  % prec,
                                  hint='see "hg help phases" for details')
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise error.Abort("changeset %s cannot obsolete itself" % prec)
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata)
            repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()

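# Illustrative sketch (not part of the original module): recording that the
# changeset `old` was rewritten into `new`, with one per-marker metadata
# entry. `old` and `new` are assumed to be changectx objects from a real
# repository (e.g. repo['.']), and the 'note' key is hypothetical.
#
#     createmarkers(repo, [(old, (new,), {'note': 'amended'})],
#                   metadata={'user': 'editor <editor@example.com>'})
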
def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    result = set(repo.ui.configlist('experimental', 'evolution'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't
    # been set but _enabled is true.
    if len(result) == 0 and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        createmarkersopt not in result):
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return option in result
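
# Illustrative sketch (not part of the original module): guarding a code path
# on the 'createmarkers' option via the createmarkersopt constant defined
# earlier in this module.
#
#     if isenabled(repo, createmarkersopt):
#         createmarkers(repo, [(old, (new,))])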