obsolete: extract helpers from _load()...
Martin von Zweigbergk
r24044:e0e6dd80 default
@@ -1,1217 +1,1230 @@
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

9 """Obsolete marker handling
9 """Obsolete marker handling
10
10
11 An obsolete marker maps an old changeset to a list of new
11 An obsolete marker maps an old changeset to a list of new
12 changesets. If the list of new changesets is empty, the old changeset
12 changesets. If the list of new changesets is empty, the old changeset
13 is said to be "killed". Otherwise, the old changeset is being
13 is said to be "killed". Otherwise, the old changeset is being
14 "replaced" by the new changesets.
14 "replaced" by the new changesets.
15
15
16 Obsolete markers can be used to record and distribute changeset graph
16 Obsolete markers can be used to record and distribute changeset graph
17 transformations performed by history rewrite operations, and help
17 transformations performed by history rewrite operations, and help
18 building new tools to reconcile conflicting rewrite actions. To
18 building new tools to reconcile conflicting rewrite actions. To
19 facilitate conflict resolution, markers include various annotations
19 facilitate conflict resolution, markers include various annotations
20 besides old and news changeset identifiers, such as creation date or
20 besides old and news changeset identifiers, such as creation date or
21 author name.
21 author name.
22
22
23 The old obsoleted changeset is called a "precursor" and possible
23 The old obsoleted changeset is called a "precursor" and possible
24 replacements are called "successors". Markers that used changeset X as
24 replacements are called "successors". Markers that used changeset X as
25 a precursor are called "successor markers of X" because they hold
25 a precursor are called "successor markers of X" because they hold
26 information about the successors of X. Markers that use changeset Y as
26 information about the successors of X. Markers that use changeset Y as
27 a successors are call "precursor markers of Y" because they hold
27 a successors are call "precursor markers of Y" because they hold
28 information about the precursors of Y.
28 information about the precursors of Y.
29
29
30 Examples:
30 Examples:
31
31
32 - When changeset A is replaced by changeset A', one marker is stored:
32 - When changeset A is replaced by changeset A', one marker is stored:
33
33
34 (A, (A',))
34 (A, (A',))
35
35
36 - When changesets A and B are folded into a new changeset C, two markers are
36 - When changesets A and B are folded into a new changeset C, two markers are
37 stored:
37 stored:
38
38
39 (A, (C,)) and (B, (C,))
39 (A, (C,)) and (B, (C,))
40
40
41 - When changeset A is simply "pruned" from the graph, a marker is created:
41 - When changeset A is simply "pruned" from the graph, a marker is created:
42
42
43 (A, ())
43 (A, ())
44
44
45 - When changeset A is split into B and C, a single marker are used:
45 - When changeset A is split into B and C, a single marker are used:
46
46
47 (A, (C, C))
47 (A, (C, C))
48
48
49 We use a single marker to distinguish the "split" case from the "divergence"
49 We use a single marker to distinguish the "split" case from the "divergence"
50 case. If two independent operations rewrite the same changeset A in to A' and
50 case. If two independent operations rewrite the same changeset A in to A' and
51 A'', we have an error case: divergent rewriting. We can detect it because
51 A'', we have an error case: divergent rewriting. We can detect it because
52 two markers will be created independently:
52 two markers will be created independently:
53
53
54 (A, (B,)) and (A, (C,))
54 (A, (B,)) and (A, (C,))
55
55
56 Format
56 Format
57 ------
57 ------
58
58
59 Markers are stored in an append-only file stored in
59 Markers are stored in an append-only file stored in
60 '.hg/store/obsstore'.
60 '.hg/store/obsstore'.
61
61
62 The file starts with a version header:
62 The file starts with a version header:
63
63
64 - 1 unsigned byte: version number, starting at zero.
64 - 1 unsigned byte: version number, starting at zero.
65
65
66 The header is followed by the markers. Marker format depend of the version. See
66 The header is followed by the markers. Marker format depend of the version. See
67 comment associated with each format for details.
67 comment associated with each format for details.
68
68
69 """
69 """
import struct
import util, base85, node, parsers
import phases
from i18n import _

_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize

_SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# Options for obsolescence
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'

### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it's a successor of a public changeset
#
#     o A' (bumped)
#     |`:
#     | o A
#     |/
#     o Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
#     o Ad
#     |`:
#     | x A'
#     |'|
#     o | A
#     |/
#     o Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
# This flag means that the successors express the changes between the public
# and bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
usingsha256 = 2

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changeset identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)
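
# Illustrative sketch (not part of the original module): packing a version-0
# marker by hand, assuming one successor, flags 0, empty metadata, and
# hypothetical placeholder node ids. This mirrors the fixed part that
# _fm0encodeonemarker() below produces:
#
#   pre = '\x11' * 20     # hypothetical precursor id
#   suc = '\x22' * 20     # hypothetical successor id
#   raw = _pack(_fm0fixed + _fm0node, 1, 0, 0, pre, suc)
#   assert len(raw) == _fm0fsize + _fm0fnodesize   # 26 + 20 bytes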

def _fm0readmarkers(data, off):
    # Loop on markers
    l = len(data)
    while off + _fm0fsize <= l:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise util.Abort(_('parsing obsolete marker: metadata is too '
                               'short, %d bytes expected, got %d')
                             % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)

def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise util.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents):
            metadata['p%i' % (i + 1)] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in either key or value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata keys")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata values")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            key, value = l.split(':')
            d[key] = value
    return d
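
# Illustrative round trip (not part of the original module), with
# hypothetical metadata values:
#
#   meta = {'user': 'alice', 'note': 'amended'}
#   encoded = _fm0encodemeta(meta)        # 'note:amended\0user:alice'
#   assert _fm0decodemeta(encoded) == meta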

## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: precursor changeset identifier.
#
# - N*(20 or 32) bytes: successor changeset identifiers.
#
# - P*(20 or 32) bytes: parents of the precursor changeset.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize('BB')
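
# Illustrative size check (not part of the original module): the fixed part
# '>IdhHBBB20s' packs to 4 + 8 + 2 + 2 + 1 + 1 + 1 + 20 = 39 bytes, so
# _fm1fsize == 39. A parent count equal to _fm1parentnone (3) means "no
# parent data stored", per the format comment above.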

def _fm1purereadmarkers(data, off):
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    stop = len(data) - _fm1fsize
    ufixed = util.unpacker(_fm1fixed)

    while off <= stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)

def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)

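# Illustrative sanity check (not part of the original module): for a
# hypothetical marker with no successors, no metadata and no recorded
# parents, the encoded form is exactly the fixed part, and its first
# uint32 is its own length:
#
#   m = ('\x11' * 20, (), 0, (), (0.0, 0), None)
#   raw = _fm1encodeonemarker(m)
#   assert len(raw) == _fm1fsize
#   assert _unpack('>I', raw[:4])[0] == len(raw)
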
def _fm1readmarkers(data, off):
    native = getattr(parsers, 'fm1readmarkers', None)
    if not native:
        return _fm1purereadmarkers(data, off)
    stop = len(data) - _fm1fsize
    return native(data, off, stop)

# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}

@util.nogc
def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    off = 0
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion not in formats:
        raise util.Abort(_('parsing obsolete marker: unknown version %r')
                         % diskversion)
    return diskversion, formats[diskversion][0](data, off)

def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield _pack('>B', version)
    for marker in markers:
        yield encodeone(marker)
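
# Illustrative round trip (not part of the original module), reusing the
# hypothetical marker tuple m from the sketch above:
#
#   data = ''.join(encodemarkers([m], addheader=True, version=_fm0version))
#   version, read = _readmarkers(data)
#   assert version == _fm0version and list(read) == [m]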


class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in a later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the precursors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]

+@util.nogc
+def _addsuccessors(successors, markers):
+    for mark in markers:
+        successors.setdefault(mark[0], set()).add(mark)
+
+@util.nogc
+def _addprecursors(precursors, markers):
+    for mark in markers:
+        for suc in mark[1]:
+            precursors.setdefault(suc, set()).add(mark)
+
+@util.nogc
+def _addchildren(children, markers):
+    for mark in markers:
+        parents = mark[5]
+        if parents is not None:
+            for p in parents:
+                children.setdefault(p, set()).add(mark)
+
def _checkinvalidmarkers(obsstore):
    """search for markers with invalid data and raise an error if needed

    Exists as a separate function to allow the evolve extension a more
    subtle handling.
    """
    if node.nullid in obsstore.precursors:
        raise util.Abort(_('bad obsolescence marker detected: '
                           'invalid successors nullid'))

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with several mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x]   -> set(markers on precursors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec:    nodeid, precursor changesets
    # succs:   tuple of nodeid, successor changesets (0-N length)
    # flag:    integer, flag field carrying modifier for the markers (see doc)
    # meta:    binary blob, encoded metadata dictionary
    # date:    (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    #          None is used when no data has been recorded

    def __init__(self, sopener, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related data
        self.caches = {}
        self._all = []
        self.precursors = {}
        self.successors = {}
        self.children = {}
        self.sopener = sopener
        data = sopener.tryread('obsstore')
        self._version = defaultformat
        self._readonly = readonly
        if data:
            self._version, markers = _readmarkers(data)
            self._load(markers)

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        return bool(self._all)

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating markers you want to use the
        `createmarkers` function in this module instead.

        Return True if a new marker has been added, False if the marker
        already existed (no-op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            else:
                date = util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicates.
        Return the number of new markers."""
        if self._readonly:
            raise util.Abort('creating obsolete markers is not enabled on this '
                             'repo')
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.sopener('obsstore', 'ab')
            try:
                # Whether the file's current position is at the beginning or at
                # the end after opening a file for appending is implementation
                # defined. So we must seek to the end before calling tell(),
                # or we may get a zero offset for non-zero sized files on
                # some platforms (issue3543).
                f.seek(0, _SEEK_END)
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in encodemarkers(new, offset == 0, self._version):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._load(new)
            # new markers *may* have changed several sets. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

-    @util.nogc
     def _load(self, markers):
-        for mark in markers:
-            self._all.append(mark)
-            pre, sucs = mark[:2]
-            self.successors.setdefault(pre, set()).add(mark)
-            for suc in sucs:
-                self.precursors.setdefault(suc, set()).add(mark)
-            parents = mark[5]
-            if parents is not None:
-                for p in parents:
-                    self.children.setdefault(p, set()).add(mark)
+        markers = list(markers) # to allow repeated iteration
+        self._all.extend(markers)
+        _addsuccessors(self.successors, markers)
+        _addprecursors(self.precursors, markers)
+        _addchildren(self.children, markers)
         _checkinvalidmarkers(self)
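
# Illustrative view (not part of the original module) of what the extracted
# helpers build, with hypothetical 20-byte placeholder ids:
#
#   successors, precursors, children = {}, {}, {}
#   m = ('\x11' * 20, ('\x22' * 20,), 0, (), (0.0, 0), ('\x33' * 20,))
#   _addsuccessors(successors, [m])    # keyed by the precursor m[0]
#   _addprecursors(precursors, [m])    # keyed by each successor in m[1]
#   _addchildren(children, [m])        # keyed by each parent in m[5]
#   assert set(successors) == set(['\x11' * 20])
#   assert set(precursors) == set(['\x22' * 20])
#   assert set(children) == set(['\x33' * 20])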

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use this changeset as a successor
        - prune markers of direct children of this changeset
        - recursive application of the two rules on precursors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers

def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None
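
# Illustrative behavior (not part of the original module): with local
# formats {0, 1} as defined above, the highest shared version wins:
#
#   commonversion([2, 1, 0])   # -> 1
#   commonversion([2])         # -> None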

# arbitrarily picked to fit into 8K limit from HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = base85.b85encode(data)
    return keys

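# Illustrative result shape (not part of the original module): markers are
# encoded in the version-0 format, split into parts under _maxpayload bytes,
# then base85-encoded under keys 'dump0', 'dump1', ... With the single
# hypothetical marker m from the sketches above:
#
#   keys = _pushkeyescape([m])
#   assert keys.keys() == ['dump0']
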
def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(repo.obsstore)

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()

def getmarkers(repo, nodes=None):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    if nodes is None:
        rawmarkers = repo.obsstore
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for markerdata in rawmarkers:
        yield marker(repo, markerdata)

def relevantmarkers(repo, node):
    """all obsolete markers relevant to some revision"""
    for markerdata in repo.obsstore.relevantmarkers(node):
        yield marker(repo, markerdata)


def precursormarkers(ctx):
    """obsolete marker marking this changeset as a successor"""
    for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def successormarkers(ctx):
    """obsolete marker making this changeset obsolete"""
    for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore markers flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)

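# Illustrative traversal (not part of the original module) over a minimal
# stand-in for an obsstore, using short strings instead of real 20-byte ids
# and assuming a chain 'A' -> 'B' -> 'C':
#
#   class fakestore(object):
#       successors = {'A': set([('A', ('B',), 0, (), (0.0, 0), None)]),
#                     'B': set([('B', ('C',), 0, (), (0.0, 0), None)])}
#   assert sorted(allsuccessors(fakestore(), ['A'])) == ['A', 'B', 'C']
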
def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursor of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""

    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            # ignore markers flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)

def foreground(repo, nodes):
    """return all nodes in the "foreground" of other nodes

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that obsolescence cycles may result in complex situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)


815 def successorssets(repo, initialnode, cache=None):
828 def successorssets(repo, initialnode, cache=None):
816 """Return all set of successors of initial nodes
829 """Return all set of successors of initial nodes
817
830
818 The successors set of a changeset A are a group of revisions that succeed
831 The successors set of a changeset A are a group of revisions that succeed
819 A. It succeeds A as a consistent whole, each revision being only a partial
832 A. It succeeds A as a consistent whole, each revision being only a partial
820 replacement. The successors set contains non-obsolete changesets only.
833 replacement. The successors set contains non-obsolete changesets only.
821
834
822 This function returns the full list of successor sets which is why it
835 This function returns the full list of successor sets which is why it
823 returns a list of tuples and not just a single tuple. Each tuple is a valid
836 returns a list of tuples and not just a single tuple. Each tuple is a valid
824 successors set. Not that (A,) may be a valid successors set for changeset A
837 successors set. Not that (A,) may be a valid successors set for changeset A
825 (see below).
838 (see below).
826
839
827 In most cases, a changeset A will have a single element (e.g. the changeset
840 In most cases, a changeset A will have a single element (e.g. the changeset
828 A is replaced by A') in its successors set. Though, it is also common for a
841 A is replaced by A') in its successors set. Though, it is also common for a
829 changeset A to have no elements in its successor set (e.g. the changeset
842 changeset A to have no elements in its successor set (e.g. the changeset
830 has been pruned). Therefore, the returned list of successors sets will be
843 has been pruned). Therefore, the returned list of successors sets will be
831 [(A',)] or [], respectively.
844 [(A',)] or [], respectively.
832
845
833 When a changeset A is split into A' and B', however, it will result in a
846 When a changeset A is split into A' and B', however, it will result in a
834 successors set containing more than a single element, i.e. [(A',B')].
847 successors set containing more than a single element, i.e. [(A',B')].
835 Divergent changesets will result in multiple successors sets, i.e. [(A',),
848 Divergent changesets will result in multiple successors sets, i.e. [(A',),
836 (A'')].
849 (A'')].
837
850
838 If a changeset A is not obsolete, then it will conceptually have no
851 If a changeset A is not obsolete, then it will conceptually have no
839 successors set. To distinguish this from a pruned changeset, the successor
852 successors set. To distinguish this from a pruned changeset, the successor
840 set will only contain itself, i.e. [(A,)].
853 set will only contain itself, i.e. [(A,)].
841
854
842 Finally, successors unknown locally are considered to be pruned (obsoleted
855 Finally, successors unknown locally are considered to be pruned (obsoleted
843 without any successors).
856 without any successors).
844
857
845 The optional `cache` parameter is a dictionary that may contain precomputed
858 The optional `cache` parameter is a dictionary that may contain precomputed
846 successors sets. It is meant to reuse the computation of a previous call to
859 successors sets. It is meant to reuse the computation of a previous call to
847 `successorssets` when multiple calls are made at the same time. The cache
860 `successorssets` when multiple calls are made at the same time. The cache
848 dictionary is updated in place. The caller is responsible for its live
861 dictionary is updated in place. The caller is responsible for its live
849 spawn. Code that makes multiple calls to `successorssets` *must* use this
862 spawn. Code that makes multiple calls to `successorssets` *must* use this
850 cache mechanism or suffer terrible performances.
863 cache mechanism or suffer terrible performances.
851
864
852 """
865 """
853
866
854 succmarkers = repo.obsstore.successors
867 succmarkers = repo.obsstore.successors
855
868
856 # Stack of nodes we search successors sets for
869 # Stack of nodes we search successors sets for
857 toproceed = [initialnode]
870 toproceed = [initialnode]
858 # set version of above list for fast loop detection
871 # set version of above list for fast loop detection
859 # element added to "toproceed" must be added here
872 # element added to "toproceed" must be added here
860 stackedset = set(toproceed)
873 stackedset = set(toproceed)
861 if cache is None:
874 if cache is None:
862 cache = {}
875 cache = {}
863
876
864 # This while loop is the flattened version of a recursive search for
877 # This while loop is the flattened version of a recursive search for
865 # successors sets
878 # successors sets
866 #
879 #
867 # def successorssets(x):
880 # def successorssets(x):
868 # successors = directsuccessors(x)
881 # successors = directsuccessors(x)
869 # ss = [[]]
882 # ss = [[]]
870 # for succ in directsuccessors(x):
883 # for succ in directsuccessors(x):
871 # # product as in itertools cartesian product
884 # # product as in itertools cartesian product
872 # ss = product(ss, successorssets(succ))
885 # ss = product(ss, successorssets(succ))
873 # return ss
886 # return ss
874 #
887 #
875 # But we can not use plain recursive calls here:
888 # But we can not use plain recursive calls here:
876 # - that would blow the python call stack
889 # - that would blow the python call stack
877 # - obsolescence markers may have cycles, we need to handle them.
890 # - obsolescence markers may have cycles, we need to handle them.
878 #
891 #
879 # The `toproceed` list act as our call stack. Every node we search
892 # The `toproceed` list act as our call stack. Every node we search
880 # successors set for are stacked there.
893 # successors set for are stacked there.
881 #
894 #
882 # The `stackedset` is set version of this stack used to check if a node is
895 # The `stackedset` is set version of this stack used to check if a node is
883 # already stacked. This check is used to detect cycles and prevent infinite
896 # already stacked. This check is used to detect cycles and prevent infinite
884 # loop.
897 # loop.
885 #
898 #
886 # successors set of all nodes are stored in the `cache` dictionary.
899 # successors set of all nodes are stored in the `cache` dictionary.
887 #
900 #
888 # After this while loop ends we use the cache to return the successors sets
901 # After this while loop ends we use the cache to return the successors sets
889 # for the node requested by the caller.
902 # for the node requested by the caller.
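    #
    # A hedged illustration of the cycle handling (marker contents assumed):
    # given markers (A, (B,)) and (B, (A,)), computing A stacks B, and
    # computing B finds A already in `stackedset`; cache[A] is then forced
    # to [] (treated as pruned), which lets both computations terminate
    # instead of recursing forever.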
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors set. Add it to the cache.
        # 3) We do not know the successors sets of direct successors of
        #    CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know the successors sets of all direct successors of CURRENT:
        #    -> We can compute the CURRENT successors sets and add them to
        #       the cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid last successor.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successor.
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successor of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrarily set the successors sets
            #     of the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes the successors sets of CURRENT (case 4); see
            # details in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   a precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means a pruned node, multiple successors means a
            #   split, and a single successor is a standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed the successors
                            # sets of one of those successors we add it to
                            # the `toproceed` stack and stop all work for
                            # this iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know the successors sets of all direct
                # successors
                #
                # The successors set contributed by each marker depends on
                # the successors sets of all its "successors" nodes.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # Cartesian product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicate entries in a
                                    # successors set; the first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
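                # A worked illustration (marker contents assumed): with two
                # markers (A, (B,)) and (A, (C, D)) for CURRENT == A, and
                # with cache[B] = [(B,)], cache[C] = [(C,)] and
                # cache[D] = [(D1,), (D2,)], the loop above yields
                # succssets = [[B], [C, D1], [C, D2]]: one set from the
                # first marker and two divergent sets from the second.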
                # remove duplicates and subsets
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse() # put small successors sets first
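                # Sketch of the subset pruning above (values assumed): from
                # succssets = [[B], [B, C]], candidates are visited largest
                # first, so [B, C] is kept while [B], a strict subset of an
                # already-seen set, is dropped; final ends up as [[B, C]].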
                cache[current] = final
    return cache[initialnode]

def _knownrevs(repo, nodes):
    """yield revision numbers of known nodes passed in parameters

    Unknown revisions are silently ignored."""
    torev = repo.changelog.nodemap.get
    for n in nodes:
        rev = torev(n)
        if rev is not None:
            yield rev

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return decorator
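
# A registration sketch (hedged; 'frozen' and _computefrozenset are made-up
# names used only for illustration, not part of this module):
#
#     @cachefor('frozen')
#     def _computefrozenset(repo):
#         return set()
#
#     getrevs(repo, 'frozen')  # computes the set once, then reuses the cache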

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]

# To be simple we need to invalidate the obsolescence cache when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence-related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists on
    the repo.

    (We could be smarter here given the exact event that triggers the cache
    clearing)"""
    # only clear the cache if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    obs = set()
    getrev = repo.changelog.nodemap.get
    getphase = repo._phasecache.phase
    for n in repo.obsstore.successors:
        rev = getrev(n)
        if rev is not None and getphase(repo, rev):
            obs.add(rev)
    return obs

@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non-obsolete revisions with obsolete parents"""
    # revset is not efficient enough here
    # we do (obsolete()::) - obsolete() by hand
    obs = getrevs(repo, 'obsolete')
    if not obs:
        return set()
    cl = repo.changelog
    return set(r for r in cl.descendants(obs) if r not in obs)

@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete changesets with non-obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete changesets without non-obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')

@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    obs = getrevs(repo, 'obsolete')
    for rev in repo:
        # We only evaluate mutable, non-obsolete revisions
        if (public < phase(repo, rev)) and (rev not in obs):
            node = cl.node(rev)
            # (future) A cache of precursors may be worth it if splits are
            # very common
            for pnode in allprecursors(repo.obsstore, [node],
                                       ignoreflags=bumpedfix):
                prev = torev(pnode) # unfiltered! but so is phasecache
                if (prev is not None) and (phase(repo, prev) <= public):
                    # we have a public precursor
                    bumped.add(rev)
                    break # Next draft!
    return bumped

@cachefor('divergent')
def _computedivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        while toprocess:
            prec = toprocess.pop()[0]
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent

def createmarkers(repo, relations, flag=0, date=None, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuples. `old` and `news` are changectx. `metadata` is an optional
    dictionary containing metadata for this marker only. It is merged with
    the global metadata specified through the `metadata` argument of this
    function.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
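
    A usage sketch (hedged; `repo`, `old` and `new` changectx are assumed to
    be supplied by the caller, with `old` mutable; the 'note' key is only an
    example of per-marker metadata):

        createmarkers(repo, [(old, (new,), {'note': 'amended'})],
                      metadata={'user': 'alice <alice@example.com>'})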
1166 """
1179 """
1167 # prepare metadata
1180 # prepare metadata
1168 if metadata is None:
1181 if metadata is None:
1169 metadata = {}
1182 metadata = {}
1170 if 'user' not in metadata:
1183 if 'user' not in metadata:
1171 metadata['user'] = repo.ui.username()
1184 metadata['user'] = repo.ui.username()
1172 tr = repo.transaction('add-obsolescence-marker')
1185 tr = repo.transaction('add-obsolescence-marker')
1173 try:
1186 try:
1174 for rel in relations:
1187 for rel in relations:
1175 prec = rel[0]
1188 prec = rel[0]
1176 sucs = rel[1]
1189 sucs = rel[1]
1177 localmetadata = metadata.copy()
1190 localmetadata = metadata.copy()
1178 if 2 < len(rel):
1191 if 2 < len(rel):
1179 localmetadata.update(rel[2])
1192 localmetadata.update(rel[2])
1180
1193
1181 if not prec.mutable():
1194 if not prec.mutable():
1182 raise util.Abort("cannot obsolete immutable changeset: %s"
1195 raise util.Abort("cannot obsolete immutable changeset: %s"
1183 % prec)
1196 % prec)
1184 nprec = prec.node()
1197 nprec = prec.node()
1185 nsucs = tuple(s.node() for s in sucs)
1198 nsucs = tuple(s.node() for s in sucs)
1186 npare = None
1199 npare = None
1187 if not nsucs:
1200 if not nsucs:
1188 npare = tuple(p.node() for p in prec.parents())
1201 npare = tuple(p.node() for p in prec.parents())
1189 if nprec in nsucs:
1202 if nprec in nsucs:
1190 raise util.Abort("changeset %s cannot obsolete itself" % prec)
1203 raise util.Abort("changeset %s cannot obsolete itself" % prec)
1191 repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
1204 repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
1192 date=date, metadata=localmetadata)
1205 date=date, metadata=localmetadata)
1193 repo.filteredrevcache.clear()
1206 repo.filteredrevcache.clear()
1194 tr.close()
1207 tr.close()
1195 finally:
1208 finally:
1196 tr.release()
1209 tr.release()
1197
1210
1198 def isenabled(repo, option):
1211 def isenabled(repo, option):
1199 """Returns True if the given repository has the given obsolete option
1212 """Returns True if the given repository has the given obsolete option
1200 enabled.
1213 enabled.
1201 """
1214 """
    result = set(repo.ui.configlist('experimental', 'evolution'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't
    # been set but _enabled is true.
    if len(result) == 0 and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        createmarkersopt not in result):
        raise util.Abort(_("'createmarkers' obsolete option must be enabled "
                           "if other obsolete options are enabled"))

    return option in result