##// END OF EJS Templates
obsolete: use 2 argument form of enumerate()...
Gregory Szorc -
r32278:7c3ef55d default
parent child Browse files
Show More
@@ -1,1284 +1,1284 b''
1 # obsolete.py - obsolete markers handling
1 # obsolete.py - obsolete markers handling
2 #
2 #
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 # Logilab SA <contact@logilab.fr>
4 # Logilab SA <contact@logilab.fr>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 """Obsolete marker handling
9 """Obsolete marker handling
10
10
11 An obsolete marker maps an old changeset to a list of new
11 An obsolete marker maps an old changeset to a list of new
12 changesets. If the list of new changesets is empty, the old changeset
12 changesets. If the list of new changesets is empty, the old changeset
13 is said to be "killed". Otherwise, the old changeset is being
13 is said to be "killed". Otherwise, the old changeset is being
14 "replaced" by the new changesets.
14 "replaced" by the new changesets.
15
15
16 Obsolete markers can be used to record and distribute changeset graph
16 Obsolete markers can be used to record and distribute changeset graph
17 transformations performed by history rewrite operations, and help
17 transformations performed by history rewrite operations, and help
18 building new tools to reconcile conflicting rewrite actions. To
18 building new tools to reconcile conflicting rewrite actions. To
19 facilitate conflict resolution, markers include various annotations
19 facilitate conflict resolution, markers include various annotations
20 besides old and new changeset identifiers, such as creation date or
20 besides old and new changeset identifiers, such as creation date or
21 author name.
21 author name.
22
22
23 The old obsoleted changeset is called a "precursor" and possible
23 The old obsoleted changeset is called a "precursor" and possible
24 replacements are called "successors". Markers that used changeset X as
24 replacements are called "successors". Markers that used changeset X as
25 a precursor are called "successor markers of X" because they hold
25 a precursor are called "successor markers of X" because they hold
26 information about the successors of X. Markers that use changeset Y as
26 information about the successors of X. Markers that use changeset Y as
27 a successor are called "precursor markers of Y" because they hold
27 a successor are called "precursor markers of Y" because they hold
28 information about the precursors of Y.
28 information about the precursors of Y.
29
29
30 Examples:
30 Examples:
31
31
32 - When changeset A is replaced by changeset A', one marker is stored:
32 - When changeset A is replaced by changeset A', one marker is stored:
33
33
34 (A, (A',))
34 (A, (A',))
35
35
36 - When changesets A and B are folded into a new changeset C, two markers are
36 - When changesets A and B are folded into a new changeset C, two markers are
37 stored:
37 stored:
38
38
39 (A, (C,)) and (B, (C,))
39 (A, (C,)) and (B, (C,))
40
40
41 - When changeset A is simply "pruned" from the graph, a marker is created:
41 - When changeset A is simply "pruned" from the graph, a marker is created:
42
42
43 (A, ())
43 (A, ())
44
44
45 - When changeset A is split into B and C, a single marker is used:
45 - When changeset A is split into B and C, a single marker is used:
46
46
47 (A, (B, C))
47 (A, (B, C))
48
48
49 We use a single marker to distinguish the "split" case from the "divergence"
49 We use a single marker to distinguish the "split" case from the "divergence"
50 case. If two independent operations rewrite the same changeset A in to A' and
50 case. If two independent operations rewrite the same changeset A in to A' and
51 A'', we have an error case: divergent rewriting. We can detect it because
51 A'', we have an error case: divergent rewriting. We can detect it because
52 two markers will be created independently:
52 two markers will be created independently:
53
53
54 (A, (B,)) and (A, (C,))
54 (A, (B,)) and (A, (C,))
55
55
56 Format
56 Format
57 ------
57 ------
58
58
59 Markers are stored in an append-only file stored in
59 Markers are stored in an append-only file stored in
60 '.hg/store/obsstore'.
60 '.hg/store/obsstore'.
61
61
62 The file starts with a version header:
62 The file starts with a version header:
63
63
64 - 1 unsigned byte: version number, starting at zero.
64 - 1 unsigned byte: version number, starting at zero.
65
65
66 The header is followed by the markers. Marker formats depend on the version. See
66 The header is followed by the markers. Marker formats depend on the version. See
67 comment associated with each format for details.
67 comment associated with each format for details.
68
68
69 """
69 """
70 from __future__ import absolute_import
70 from __future__ import absolute_import
71
71
72 import errno
72 import errno
73 import struct
73 import struct
74
74
75 from .i18n import _
75 from .i18n import _
76 from . import (
76 from . import (
77 error,
77 error,
78 node,
78 node,
79 parsers,
79 parsers,
80 phases,
80 phases,
81 util,
81 util,
82 )
82 )
83
83
# Shorthand aliases for the struct functions used throughout this module.
_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# Option names for obsolescence-related configuration.
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'
97
97
### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it is a successor of a public changeset.
#
# o A' (bumped)
# |`:
# | o A
# |/
# o Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'.
#
# o Ad
# |`:
# | x A'
# |'|
# o | A
# |/
# o Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker <A', (Ad,)>.
# This flag means that the successors express the changes between the public
# and bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
# flag telling that node identifiers in the marker are 32-byte SHA-256
# digests instead of 20-byte SHA-1; only representable in format version 1.
usingsha256 = 2
130
130
## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
# fixed part: numsuc (uint8), mdsize (uint32), flags (uint8), precursor node
_fm0fixed = '>BIB20s'
# a single 20-byte SHA-1 node
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)
156
156
def _fm0readmarkers(data, off):
    """Decode version-0 markers from ``data`` starting at offset ``off``.

    Yields (precursor, successors, flags, metadata, date, parents)
    tuples. The version-0 format has no dedicated fields for date and
    parents, so both are recovered from the encoded metadata dictionary.
    """
    # Loop on markers
    l = len(data)
    while off + _fm0fsize <= l:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            # date is stored in the metadata as "<unixtime> <tz offset>"
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        # parents are stored as hex nodes under keys p1/p2; a 'p0' key
        # records "explicitly no parents", absence means "not recorded"
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)
208
208
def _fm0encodeonemarker(marker):
    """Encode one marker tuple into the version-0 binary format.

    ``marker`` is a (precursor, successors, flags, metadata, date,
    parents) tuple. Date and parents are folded into the metadata
    dictionary since the version-0 format has no dedicated fields for
    them. Raises error.Abort for SHA-256 markers, which version 0
    cannot represent.
    """
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        # parent keys are 1-based: p1, p2, ...
        for i, p in enumerate(parents, 1):
            metadata['p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata
228
228
229 def _fm0encodemeta(meta):
229 def _fm0encodemeta(meta):
230 """Return encoded metadata string to string mapping.
230 """Return encoded metadata string to string mapping.
231
231
232 Assume no ':' in key and no '\0' in both key and value."""
232 Assume no ':' in key and no '\0' in both key and value."""
233 for key, value in meta.iteritems():
233 for key, value in meta.iteritems():
234 if ':' in key or '\0' in key:
234 if ':' in key or '\0' in key:
235 raise ValueError("':' and '\0' are forbidden in metadata key'")
235 raise ValueError("':' and '\0' are forbidden in metadata key'")
236 if '\0' in value:
236 if '\0' in value:
237 raise ValueError("':' is forbidden in metadata value'")
237 raise ValueError("':' is forbidden in metadata value'")
238 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
238 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
239
239
240 def _fm0decodemeta(data):
240 def _fm0decodemeta(data):
241 """Return string to string dictionary from encoded version."""
241 """Return string to string dictionary from encoded version."""
242 d = {}
242 d = {}
243 for l in data.split('\0'):
243 for l in data.split('\0'):
244 if l:
244 if l:
245 key, value = l.split(':')
245 key, value = l.split(':')
246 d[key] = value
246 d[key] = value
247 return d
247 return d
248
248
## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: precursor changeset identifier.
#
# - N*(20 or 32) bytes: successors changesets identifiers.
#
# - P*(20 or 32) bytes: parents of the precursors changesets.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
# fixed part: size, date, tz, flags, numsuc, numpar, nummeta, precursor
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
# sentinel value of the parent-count field meaning "no parent data stored"
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
# per metadata entry: key length (uint8), value length (uint8)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize('BB')
295
295
def _fm1purereadmarkers(data, off):
    """Pure-Python decoder for version-1 markers.

    Yields (precursor, successors, flags, metadata, date, parents)
    tuples decoded from ``data`` starting at offset ``off``. A faster C
    counterpart may exist in ``parsers``; see _fm1readmarkers.
    """
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    stop = len(data) - _fm1fsize
    ufixed = struct.Struct(_fm1fixed).unpack

    while off <= stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                # fast path: avoid an unpack call for the common case
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata: first the (keysize, valuesize) pairs, then the
        # concatenated key/value bytes themselves
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        # tz was stored in minutes; convert back to seconds for callers
        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
370
370
def _fm1encodeonemarker(marker):
    """Encode one marker tuple into the version-1 binary format.

    ``marker`` is a (precursor, successors, flags, metadata, date,
    parents) tuple; SHA-256 node ids are used when the ``usingsha256``
    flag is set.
    """
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        # sentinel: no parent data stored
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    # data[0] is a placeholder for the total size, filled in below once
    # the metadata sizes are known
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    # metadata keys/values are appended verbatim after the packed part
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)
406
406
def _fm1readmarkers(data, off):
    """Decode version-1 markers, preferring the C parser when available."""
    native = getattr(parsers, 'fm1readmarkers', None)
    if native:
        stop = len(data) - _fm1fsize
        return native(data, off, stop)
    # fall back to the pure-Python implementation
    return _fm1purereadmarkers(data, off)
413
413
# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
418
418
@util.nogc
def _readmarkers(data):
    """Read and enumerate markers from raw data

    Returns (version, iterable-of-markers). Raises error.Abort when the
    on-disk version byte is not a known format.
    """
    off = 0
    # the first byte of the obsstore file is the format version
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion not in formats:
        raise error.Abort(_('parsing obsolete marker: unknown version %r')
                          % diskversion)
    return diskversion, formats[diskversion][0](data, off)
429
429
def encodemarkers(markers, addheader=False, version=_fm0version):
    """Yield binary chunks encoding ``markers`` in the given format.

    Kept separate from flushmarkers(), it will be reused for markers
    exchange. When ``addheader`` is true, the one-byte version header is
    emitted first.
    """
    encodeone = formats[version][1]
    if addheader:
        yield _pack('>B', version)
    for m in markers:
        yield encodeone(m)
438
438
439
439
class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def __ne__(self, other):
        # needed explicitly on Python 2 (this file uses iteritems/xrange),
        # where __ne__ is not derived from __eq__: without it, `m1 != m2`
        # falls back to identity comparison and equal markers compare
        # unequal
        return not self.__eq__(other)

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the precursors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]
480
480
@util.nogc
def _addsuccessors(successors, markers):
    # index markers by their precursor node (mark[0]): successors[node]
    # becomes the set of markers rewriting that node
    for mark in markers:
        successors.setdefault(mark[0], set()).add(mark)
485
485
@util.nogc
def _addprecursors(precursors, markers):
    # index markers by each successor node (mark[1]): precursors[node]
    # becomes the set of markers producing that node
    for mark in markers:
        for suc in mark[1]:
            precursors.setdefault(suc, set()).add(mark)
491
491
@util.nogc
def _addchildren(children, markers):
    # index markers by each recorded parent of their precursor (mark[5]);
    # markers whose parents were not recorded (None) are skipped
    for mark in markers:
        parents = mark[5]
        if parents is not None:
            for p in parents:
                children.setdefault(p, set()).add(mark)
499
499
def _checkinvalidmarkers(markers):
    """search for marker with invalid data and raise error if needed

    Exists as a separate function to allow the evolve extension a more
    subtle handling.
    """
    # a marker whose successors include nullid is corrupt: nullid can
    # never be a real changeset
    for mark in markers:
        if node.nullid in mark[1]:
            raise error.Abort(_('bad obsolescence marker detected: '
                                'invalid successors nullid'))
510
510
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with two mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on precursors edges of children(x)
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec: nodeid, precursor changesets
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    # None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related cache
        self.caches = {}
        self.svfs = svfs
        self._version = defaultformat
        self._readonly = readonly

    def __iter__(self):
        # iterate over every marker currently known (lazily loaded)
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        # Cheap emptiness check: as long as the markers have not been read,
        # a stat() of the on-disk file avoids loading and parsing it all.
        if not self._cached('_all'):
            try:
                # a file of size <= 1 presumably holds only the one-byte
                # version header, hence no marker -- TODO confirm header size
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
                pass
        return bool(self._all)

    # Python 3 spelling of the truthiness hook
    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating marker you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker have been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            else:
                date = util.makedate()
        # precursor and successors must be full binary nodeids (20 bytes)
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        # sorted tuple so that equal metadata always compare/hash equal
        metadata = tuple(sorted(metadata.iteritems()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicate.
        Return the number of new marker."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        # drop markers already present, preserving input order for the rest
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            # append the encoded markers to the on-disk file
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in encodemarkers(new, offset == 0, self._version):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._addmarkers(new)
            # new marker *may* have changed several set. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _all(self):
        # lazily read and decode every marker from the 'obsstore' file
        data = self.svfs.tryread('obsstore')
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        # precursor node -> set of markers using it as precursor
        # (see successormarkers() in this module)
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def precursors(self):
        # successor node -> set of markers listing it as a successor
        # (see precursormarkers() in this module)
        precursors = {}
        _addprecursors(precursors, self._all)
        return precursors

    @propertycache
    def children(self):
        # parent node -> set of markers whose precursor is a child of it
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        # True when the propertycache'd attribute was already computed
        return attr in self.__dict__

    def _addmarkers(self, markers):
        # Append markers to the in-memory list and keep each mapping that
        # has already been computed (successors/precursors/children) in sync.
        markers = list(markers) # to allow repeated iteration
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('precursors'):
            _addprecursors(self.precursors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes mean:

        - marker that use this changeset as successor
        - prune marker of direct children on this changeset
        - recursive application of the two rules on precursors of these markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                # prune markers: no successors recorded (empty mark[1])
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            # walk up to the precursors of the newly found markers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers
709
709
def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    # Iterate from newest to oldest. Use sorted() rather than an in-place
    # sort so the caller's list is not mutated as a side effect.
    for v in sorted(versions, reverse=True):
        if v in formats:
            return v
    return None
721
721
# arbitrary picked to fit into 8K limit from HTTP server
# you have to take in account:
# - the version header
# - the base85 encoding
# (used by _pushkeyescape below to split markers into pushkey-sized chunks)
_maxpayload = 5300
727
727
def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    chunks = []
    chunk = None
    used = _maxpayload * 2  # force allocation of the first chunk
    for m in markers:
        encoded = _fm0encodeonemarker(m)
        if used + len(encoded) > _maxpayload:
            # current chunk is full: start a new one
            chunk = []
            chunks.append(chunk)
            used = 0
        chunk.append(encoded)
        used += len(encoded)
    # every chunk becomes one pushkey entry, each prefixed with the format
    # version byte
    for idx, chunk in enumerate(reversed(chunks)):
        payload = ''.join([_pack('>B', _fm0version)] + chunk)
        keys['dump%i' % idx] = util.b85encode(payload)
    return keys
748
748
def listmarkers(repo):
    """List markers over pushkey"""
    store = repo.obsstore
    if not store:
        # no markers at all: nothing to publish
        return {}
    return _pushkeyescape(sorted(store))
754
754
def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    # keys are expected to look like 'dumpN' (see _pushkeyescape)
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    # pushkey semantics: markers are append-only, no old value expected
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    markerdata = util.b85decode(new)
    lck = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, markerdata)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lck.release()
775
775
def getmarkers(repo, nodes=None):
    """Yield the ``marker`` objects known in a repository.

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned (see ``obsstore.relevantmarkers``)."""
    store = repo.obsstore
    if nodes is None:
        rawmarkers = store
    else:
        rawmarkers = store.relevantmarkers(nodes)

    for markerdata in rawmarkers:
        yield marker(repo, markerdata)
788
788
def relevantmarkers(repo, node):
    """all obsolete markers relevant to some revision"""
    raw = repo.obsstore.relevantmarkers(node)
    for markerdata in raw:
        yield marker(repo, markerdata)
793
793
794
794
def precursormarkers(ctx):
    """obsolete marker marking this changeset as a successors"""
    repo = ctx.repo()
    for markerdata in repo.obsstore.precursors.get(ctx.node(), ()):
        yield marker(repo, markerdata)
799
799
def successormarkers(ctx):
    """obsolete marker making this changeset obsolete"""
    repo = ctx.repo()
    for markerdata in repo.obsstore.successors.get(ctx.node(), ()):
        yield marker(repo, markerdata)
804
804
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    pending = set(nodes)
    emitted = set(pending)
    succmarkers = obsstore.successors
    while pending:
        current = pending.pop()
        yield current
        for m in succmarkers.get(current, ()):
            # ignore marker flagged with specified flag
            if m[2] & ignoreflags:
                continue
            fresh = [s for s in m[1] if s not in emitted]
            emitted.update(fresh)
            pending.update(fresh)
825
825
def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursors of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""

    pending = set(nodes)
    emitted = set(pending)
    precmarkers = obsstore.precursors
    while pending:
        current = pending.pop()
        yield current
        for m in precmarkers.get(current, ()):
            # ignore marker flagged with specified flag
            if m[2] & ignoreflags:
                continue
            prec = m[0]
            if prec in emitted:
                continue
            emitted.add(prec)
            pending.add(prec)
847
847
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycle may result if complex situation.
    """
    repo = repo.unfiltered()
    fg = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nodemap = repo.changelog.nodemap
        previoussize = -1
        # alternate "descendants" and "successors" expansion until the set
        # of changesets stops growing (fixed point)
        while len(fg) != previoussize:
            previoussize = len(fg)
            candidates = set(c.node() for c in fg)
            mutable = [c.node() for c in fg if c.mutable()]
            candidates.update(allsuccessors(repo.obsstore, mutable))
            # only keep successors we know about locally
            known = (n for n in candidates if n in nodemap)
            fg = set(repo.set('%ln::', known))
    return set(c.node() for c in fg)
873
873
874
874
875 def successorssets(repo, initialnode, cache=None):
875 def successorssets(repo, initialnode, cache=None):
876 """Return set of all latest successors of initial nodes
876 """Return set of all latest successors of initial nodes
877
877
878 The successors set of a changeset A are the group of revisions that succeed
878 The successors set of a changeset A are the group of revisions that succeed
879 A. It succeeds A as a consistent whole, each revision being only a partial
879 A. It succeeds A as a consistent whole, each revision being only a partial
880 replacement. The successors set contains non-obsolete changesets only.
880 replacement. The successors set contains non-obsolete changesets only.
881
881
882 This function returns the full list of successor sets which is why it
882 This function returns the full list of successor sets which is why it
883 returns a list of tuples and not just a single tuple. Each tuple is a valid
883 returns a list of tuples and not just a single tuple. Each tuple is a valid
884 successors set. Note that (A,) may be a valid successors set for changeset A
884 successors set. Note that (A,) may be a valid successors set for changeset A
885 (see below).
885 (see below).
886
886
887 In most cases, a changeset A will have a single element (e.g. the changeset
887 In most cases, a changeset A will have a single element (e.g. the changeset
888 A is replaced by A') in its successors set. Though, it is also common for a
888 A is replaced by A') in its successors set. Though, it is also common for a
889 changeset A to have no elements in its successor set (e.g. the changeset
889 changeset A to have no elements in its successor set (e.g. the changeset
890 has been pruned). Therefore, the returned list of successors sets will be
890 has been pruned). Therefore, the returned list of successors sets will be
891 [(A',)] or [], respectively.
891 [(A',)] or [], respectively.
892
892
893 When a changeset A is split into A' and B', however, it will result in a
893 When a changeset A is split into A' and B', however, it will result in a
894 successors set containing more than a single element, i.e. [(A',B')].
894 successors set containing more than a single element, i.e. [(A',B')].
895 Divergent changesets will result in multiple successors sets, i.e. [(A',),
895 Divergent changesets will result in multiple successors sets, i.e. [(A',),
896 (A'')].
896 (A'')].
897
897
898 If a changeset A is not obsolete, then it will conceptually have no
898 If a changeset A is not obsolete, then it will conceptually have no
899 successors set. To distinguish this from a pruned changeset, the successor
899 successors set. To distinguish this from a pruned changeset, the successor
900 set will contain itself only, i.e. [(A,)].
900 set will contain itself only, i.e. [(A,)].
901
901
902 Finally, successors unknown locally are considered to be pruned (obsoleted
902 Finally, successors unknown locally are considered to be pruned (obsoleted
903 without any successors).
903 without any successors).
904
904
905 The optional `cache` parameter is a dictionary that may contain precomputed
905 The optional `cache` parameter is a dictionary that may contain precomputed
906 successors sets. It is meant to reuse the computation of a previous call to
906 successors sets. It is meant to reuse the computation of a previous call to
907 `successorssets` when multiple calls are made at the same time. The cache
907 `successorssets` when multiple calls are made at the same time. The cache
908 dictionary is updated in place. The caller is responsible for its life
908 dictionary is updated in place. The caller is responsible for its life
909 span. Code that makes multiple calls to `successorssets` *must* use this
909 span. Code that makes multiple calls to `successorssets` *must* use this
910 cache mechanism or suffer terrible performance.
910 cache mechanism or suffer terrible performance.
911 """
911 """
912
912
913 succmarkers = repo.obsstore.successors
913 succmarkers = repo.obsstore.successors
914
914
915 # Stack of nodes we search successors sets for
915 # Stack of nodes we search successors sets for
916 toproceed = [initialnode]
916 toproceed = [initialnode]
917 # set version of above list for fast loop detection
917 # set version of above list for fast loop detection
918 # element added to "toproceed" must be added here
918 # element added to "toproceed" must be added here
919 stackedset = set(toproceed)
919 stackedset = set(toproceed)
920 if cache is None:
920 if cache is None:
921 cache = {}
921 cache = {}
922
922
923 # This while loop is the flattened version of a recursive search for
923 # This while loop is the flattened version of a recursive search for
924 # successors sets
924 # successors sets
925 #
925 #
926 # def successorssets(x):
926 # def successorssets(x):
927 # successors = directsuccessors(x)
927 # successors = directsuccessors(x)
928 # ss = [[]]
928 # ss = [[]]
929 # for succ in directsuccessors(x):
929 # for succ in directsuccessors(x):
930 # # product as in itertools cartesian product
930 # # product as in itertools cartesian product
931 # ss = product(ss, successorssets(succ))
931 # ss = product(ss, successorssets(succ))
932 # return ss
932 # return ss
933 #
933 #
934 # But we can not use plain recursive calls here:
934 # But we can not use plain recursive calls here:
935 # - that would blow the python call stack
935 # - that would blow the python call stack
936 # - obsolescence markers may have cycles, we need to handle them.
936 # - obsolescence markers may have cycles, we need to handle them.
937 #
937 #
938 # The `toproceed` list act as our call stack. Every node we search
938 # The `toproceed` list act as our call stack. Every node we search
939 # successors set for are stacked there.
939 # successors set for are stacked there.
940 #
940 #
941 # The `stackedset` is set version of this stack used to check if a node is
941 # The `stackedset` is set version of this stack used to check if a node is
942 # already stacked. This check is used to detect cycles and prevent infinite
942 # already stacked. This check is used to detect cycles and prevent infinite
943 # loop.
943 # loop.
944 #
944 #
945 # successors set of all nodes are stored in the `cache` dictionary.
945 # successors set of all nodes are stored in the `cache` dictionary.
946 #
946 #
947 # After this while loop ends we use the cache to return the successors sets
947 # After this while loop ends we use the cache to return the successors sets
948 # for the node requested by the caller.
948 # for the node requested by the caller.
949 while toproceed:
949 while toproceed:
950 # Every iteration tries to compute the successors sets of the topmost
950 # Every iteration tries to compute the successors sets of the topmost
951 # node of the stack: CURRENT.
951 # node of the stack: CURRENT.
952 #
952 #
953 # There are four possible outcomes:
953 # There are four possible outcomes:
954 #
954 #
955 # 1) We already know the successors sets of CURRENT:
955 # 1) We already know the successors sets of CURRENT:
956 # -> mission accomplished, pop it from the stack.
956 # -> mission accomplished, pop it from the stack.
957 # 2) Node is not obsolete:
957 # 2) Node is not obsolete:
958 # -> the node is its own successors sets. Add it to the cache.
958 # -> the node is its own successors sets. Add it to the cache.
959 # 3) We do not know successors set of direct successors of CURRENT:
959 # 3) We do not know successors set of direct successors of CURRENT:
960 # -> We add those successors to the stack.
960 # -> We add those successors to the stack.
961 # 4) We know successors sets of all direct successors of CURRENT:
961 # 4) We know successors sets of all direct successors of CURRENT:
962 # -> We can compute CURRENT successors set and add it to the
962 # -> We can compute CURRENT successors set and add it to the
963 # cache.
963 # cache.
964 #
964 #
965 current = toproceed[-1]
965 current = toproceed[-1]
966 if current in cache:
966 if current in cache:
967 # case (1): We already know the successors sets
967 # case (1): We already know the successors sets
968 stackedset.remove(toproceed.pop())
968 stackedset.remove(toproceed.pop())
969 elif current not in succmarkers:
969 elif current not in succmarkers:
970 # case (2): The node is not obsolete.
970 # case (2): The node is not obsolete.
971 if current in repo:
971 if current in repo:
972 # We have a valid last successors.
972 # We have a valid last successors.
973 cache[current] = [(current,)]
973 cache[current] = [(current,)]
974 else:
974 else:
975 # Final obsolete version is unknown locally.
975 # Final obsolete version is unknown locally.
976 # Do not count that as a valid successors
976 # Do not count that as a valid successors
977 cache[current] = []
977 cache[current] = []
978 else:
978 else:
979 # cases (3) and (4)
979 # cases (3) and (4)
980 #
980 #
981 # We proceed in two phases. Phase 1 aims to distinguish case (3)
981 # We proceed in two phases. Phase 1 aims to distinguish case (3)
982 # from case (4):
982 # from case (4):
983 #
983 #
984 # For each direct successors of CURRENT, we check whether its
984 # For each direct successors of CURRENT, we check whether its
985 # successors sets are known. If they are not, we stack the
985 # successors sets are known. If they are not, we stack the
986 # unknown node and proceed to the next iteration of the while
986 # unknown node and proceed to the next iteration of the while
987 # loop. (case 3)
987 # loop. (case 3)
988 #
988 #
989 # During this step, we may detect obsolescence cycles: a node
989 # During this step, we may detect obsolescence cycles: a node
990 # with unknown successors sets but already in the call stack.
990 # with unknown successors sets but already in the call stack.
991 # In such a situation, we arbitrary set the successors sets of
991 # In such a situation, we arbitrary set the successors sets of
992 # the node to nothing (node pruned) to break the cycle.
992 # the node to nothing (node pruned) to break the cycle.
993 #
993 #
994 # If no break was encountered we proceed to phase 2.
994 # If no break was encountered we proceed to phase 2.
995 #
995 #
996 # Phase 2 computes successors sets of CURRENT (case 4); see details
996 # Phase 2 computes successors sets of CURRENT (case 4); see details
997 # in phase 2 itself.
997 # in phase 2 itself.
998 #
998 #
999 # Note the two levels of iteration in each phase.
999 # Note the two levels of iteration in each phase.
1000 # - The first one handles obsolescence markers using CURRENT as
1000 # - The first one handles obsolescence markers using CURRENT as
1001 # precursor (successors markers of CURRENT).
1001 # precursor (successors markers of CURRENT).
1002 #
1002 #
1003 # Having multiple entry here means divergence.
1003 # Having multiple entry here means divergence.
1004 #
1004 #
1005 # - The second one handles successors defined in each marker.
1005 # - The second one handles successors defined in each marker.
1006 #
1006 #
1007 # Having none means pruned node, multiple successors means split,
1007 # Having none means pruned node, multiple successors means split,
1008 # single successors are standard replacement.
1008 # single successors are standard replacement.
1009 #
1009 #
1010 for mark in sorted(succmarkers[current]):
1010 for mark in sorted(succmarkers[current]):
1011 for suc in mark[1]:
1011 for suc in mark[1]:
1012 if suc not in cache:
1012 if suc not in cache:
1013 if suc in stackedset:
1013 if suc in stackedset:
1014 # cycle breaking
1014 # cycle breaking
1015 cache[suc] = []
1015 cache[suc] = []
1016 else:
1016 else:
1017 # case (3) If we have not computed successors sets
1017 # case (3) If we have not computed successors sets
1018 # of one of those successors we add it to the
1018 # of one of those successors we add it to the
1019 # `toproceed` stack and stop all work for this
1019 # `toproceed` stack and stop all work for this
1020 # iteration.
1020 # iteration.
1021 toproceed.append(suc)
1021 toproceed.append(suc)
1022 stackedset.add(suc)
1022 stackedset.add(suc)
1023 break
1023 break
1024 else:
1024 else:
1025 continue
1025 continue
1026 break
1026 break
1027 else:
1027 else:
1028 # case (4): we know all successors sets of all direct
1028 # case (4): we know all successors sets of all direct
1029 # successors
1029 # successors
1030 #
1030 #
1031 # Successors set contributed by each marker depends on the
1031 # Successors set contributed by each marker depends on the
1032 # successors sets of all its "successors" node.
1032 # successors sets of all its "successors" node.
1033 #
1033 #
1034 # Each different marker is a divergence in the obsolescence
1034 # Each different marker is a divergence in the obsolescence
1035 # history. It contributes successors sets distinct from other
1035 # history. It contributes successors sets distinct from other
1036 # markers.
1036 # markers.
1037 #
1037 #
1038 # Within a marker, a successor may have divergent successors
1038 # Within a marker, a successor may have divergent successors
1039 # sets. In such a case, the marker will contribute multiple
1039 # sets. In such a case, the marker will contribute multiple
1040 # divergent successors sets. If multiple successors have
1040 # divergent successors sets. If multiple successors have
1041 # divergent successors sets, a Cartesian product is used.
1041 # divergent successors sets, a Cartesian product is used.
1042 #
1042 #
1043 # At the end we post-process successors sets to remove
1043 # At the end we post-process successors sets to remove
1044 # duplicated entry and successors set that are strict subset of
1044 # duplicated entry and successors set that are strict subset of
1045 # another one.
1045 # another one.
1046 succssets = []
1046 succssets = []
1047 for mark in sorted(succmarkers[current]):
1047 for mark in sorted(succmarkers[current]):
1048 # successors sets contributed by this marker
1048 # successors sets contributed by this marker
1049 markss = [[]]
1049 markss = [[]]
1050 for suc in mark[1]:
1050 for suc in mark[1]:
1051 # cardinal product with previous successors
1051 # cardinal product with previous successors
1052 productresult = []
1052 productresult = []
1053 for prefix in markss:
1053 for prefix in markss:
1054 for suffix in cache[suc]:
1054 for suffix in cache[suc]:
1055 newss = list(prefix)
1055 newss = list(prefix)
1056 for part in suffix:
1056 for part in suffix:
1057 # do not duplicated entry in successors set
1057 # do not duplicated entry in successors set
1058 # first entry wins.
1058 # first entry wins.
1059 if part not in newss:
1059 if part not in newss:
1060 newss.append(part)
1060 newss.append(part)
1061 productresult.append(newss)
1061 productresult.append(newss)
1062 markss = productresult
1062 markss = productresult
1063 succssets.extend(markss)
1063 succssets.extend(markss)
1064 # remove duplicated and subset
1064 # remove duplicated and subset
1065 seen = []
1065 seen = []
1066 final = []
1066 final = []
1067 candidate = sorted(((set(s), s) for s in succssets if s),
1067 candidate = sorted(((set(s), s) for s in succssets if s),
1068 key=lambda x: len(x[1]), reverse=True)
1068 key=lambda x: len(x[1]), reverse=True)
1069 for setversion, listversion in candidate:
1069 for setversion, listversion in candidate:
1070 for seenset in seen:
1070 for seenset in seen:
1071 if setversion.issubset(seenset):
1071 if setversion.issubset(seenset):
1072 break
1072 break
1073 else:
1073 else:
1074 final.append(listversion)
1074 final.append(listversion)
1075 seen.append(setversion)
1075 seen.append(setversion)
1076 final.reverse() # put small successors set first
1076 final.reverse() # put small successors set first
1077 cache[current] = final
1077 cache[current] = final
1078 return cache[initialnode]
1078 return cache[initialnode]
1079
1079
# registry mapping a set name to the function that computes that set
cachefuncs = {}
def cachefor(name):
    """Decorator registering a function as the computer of the <name> set"""
    def register(func):
        # each named set may have exactly one computing function
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return register
1089
1089
def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Accessing the set may trigger its computation; the result is then
    cached on the obsstore for future calls."""
    unfi = repo.unfiltered()
    store = unfi.obsstore
    if not store:
        # no obsolescence data at all: every set is trivially empty
        return frozenset()
    if name not in store.caches:
        store.caches[name] = cachefuncs[name](unfi)
    return store.caches[name]
1100
1100
# To keep things simple, the obsolescence caches are invalidated when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence-related caches from a repo

    This clears every cache on the obsstore, provided the obsstore already
    exists on the repo.

    (We could be smarter here given the exact event that triggered the
    cache clearing)"""
    # only clear caches if there is obsstore data on this repo; checking
    # _filecache avoids instantiating the obsstore just to clear it
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()
1118
1118
@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    # only non-public changesets can be obsolete; a revision is obsolete
    # when its node is recorded as the precursor of at least one marker
    tonode = repo.changelog.node
    successors = repo.obsstore.successors
    mutable = repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
    return set(r for r in mutable if tonode(r) in successors)
1129
1129
@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    # process candidates in increasing revision order so that a parent is
    # always classified before any of its children
    contexts = sorted(repo.set('(not public()) and (not obsolete())'),
                      key=lambda c: c.rev())
    unstable = set()
    for ctx in contexts:
        # a revision is unstable when at least one of its parents is
        # obsolete or unstable itself
        for parent in ctx.parents():
            if parent.obsolete() or parent.rev() in unstable:
                unstable.add(ctx.rev())
                break
    return unstable
1144
1144
@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    # an obsolete revision is "suspended" while it still has a non-obsolete
    # (unstable) descendant, so it cannot be hidden yet
    ancestors = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in ancestors)
1150
1150
@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    # extinct = obsolete but not suspended: safe to hide entirely
    obsolete = getrevs(repo, 'obsolete')
    suspended = getrevs(repo, 'suspended')
    return obsolete - suspended
1155
1155
1156
1156
@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # hoist attribute lookups out of the loop
    getphase = repo._phasecache.phase  # would be faster to grab the full list
    tolocalrev = repo.changelog.nodemap.get
    # we only evaluate mutable, non-obsolete revisions
    for ctx in repo.set('(not public()) and (not obsolete())'):
        # (future) a cache of precursors may be worth it if split is common
        for pnode in allprecursors(repo.obsstore, [ctx.node()],
                                   ignoreflags=bumpedfix):
            prev = tolocalrev(pnode)  # unfiltered! but so is phasecache
            if prev is None:
                # precursor unknown locally
                continue
            if getphase(repo, prev) <= phases.public:
                # this changeset rewrites a public precursor: it is bumped
                bumped.add(ctx.rev())
                break  # move on to the next draft
    return bumped
1179
1179
@cachefor('divergent')
def _computedivergentset(repo):
    """the set of rev that compete to be the final successors of some revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        # walk the precursor markers transitively; if any precursor has
        # more than one non-empty successors set, <ctx> is divergent
        pending = set(obsstore.precursors.get(ctx.node(), ()))
        visited = set()
        while pending:
            prec = pending.pop()[0]
            if prec in visited:
                continue  # emergency cycle hanging prevention
            visited.add(prec)
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            if len([ss for ss in newermap[prec] if ss]) > 1:
                # several competing sets of successors: divergence
                divergent.add(ctx.rev())
                break
            pending.update(obsstore.precursors.get(prec, ()))
    return divergent
1204
1204
1205
1205
def createmarkers(repo, relations, flag=0, date=None, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuple. `old` and `news` are changectx. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare the global metadata shared by every marker
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    tr = repo.transaction('add-obsolescence-marker')
    try:
        # Creating a marker invalidates the hidden cache, which would make
        # prec.parents() below recompute it on every iteration, resulting
        # in n^2 behavior. So gather all marker arguments first, then
        # create the markers in a second pass.
        markerargs = []
        for rel in relations:
            prec, sucs = rel[0], rel[1]
            localmetadata = metadata.copy()
            if len(rel) > 2:
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise error.Abort(_("cannot obsolete public changeset: %s")
                                  % prec,
                                  hint="see 'hg help phases' for details")
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                # pruned changeset: record its parents in the marker
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise error.Abort(_("changeset %s cannot obsolete itself")
                                  % prec)

            markerargs.append((nprec, nsucs, npare, localmetadata))

        for nprec, nsucs, npare, localmetadata in markerargs:
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata)
        repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
1264
1264
def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.

    The option names come from the 'experimental.evolution' config list;
    the special value 'all' enables every option at once.

    Raises error.Abort when 'createmarkers' is disabled while other
    obsolescence options are enabled, since they depend on it.
    """
    result = set(repo.ui.configlist('experimental', 'evolution'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't
    # been set but _enabled is true.
    if not result and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    # (PEP 8: use "not in" rather than "not x in y")
    if ((allowunstableopt in result or exchangeopt in result) and
        createmarkersopt not in result):
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return option in result
General Comments 0
You need to be logged in to leave comments. Login now