##// END OF EJS Templates
obsstore: fast path to check if obsstore is empty
Yuya Nishihara -
r26310:61efe9ef default
parent child Browse files
Show More
@@ -1,1247 +1,1256 b''
1 # obsolete.py - obsolete markers handling
1 # obsolete.py - obsolete markers handling
2 #
2 #
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 # Logilab SA <contact@logilab.fr>
4 # Logilab SA <contact@logilab.fr>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 """Obsolete marker handling
9 """Obsolete marker handling
10
10
11 An obsolete marker maps an old changeset to a list of new
11 An obsolete marker maps an old changeset to a list of new
12 changesets. If the list of new changesets is empty, the old changeset
12 changesets. If the list of new changesets is empty, the old changeset
13 is said to be "killed". Otherwise, the old changeset is being
13 is said to be "killed". Otherwise, the old changeset is being
14 "replaced" by the new changesets.
14 "replaced" by the new changesets.
15
15
16 Obsolete markers can be used to record and distribute changeset graph
16 Obsolete markers can be used to record and distribute changeset graph
17 transformations performed by history rewrite operations, and help
17 transformations performed by history rewrite operations, and help
18 building new tools to reconcile conflicting rewrite actions. To
18 building new tools to reconcile conflicting rewrite actions. To
19 facilitate conflict resolution, markers include various annotations
19 facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
21 author name.
21 author name.
22
22
23 The old obsoleted changeset is called a "precursor" and possible
23 The old obsoleted changeset is called a "precursor" and possible
24 replacements are called "successors". Markers that used changeset X as
24 replacements are called "successors". Markers that used changeset X as
25 a precursor are called "successor markers of X" because they hold
25 a precursor are called "successor markers of X" because they hold
26 information about the successors of X. Markers that use changeset Y as
26 information about the successors of X. Markers that use changeset Y as
a successor are called "precursor markers of Y" because they hold
28 information about the precursors of Y.
28 information about the precursors of Y.
29
29
30 Examples:
30 Examples:
31
31
32 - When changeset A is replaced by changeset A', one marker is stored:
32 - When changeset A is replaced by changeset A', one marker is stored:
33
33
34 (A, (A',))
34 (A, (A',))
35
35
36 - When changesets A and B are folded into a new changeset C, two markers are
36 - When changesets A and B are folded into a new changeset C, two markers are
37 stored:
37 stored:
38
38
39 (A, (C,)) and (B, (C,))
39 (A, (C,)) and (B, (C,))
40
40
41 - When changeset A is simply "pruned" from the graph, a marker is created:
41 - When changeset A is simply "pruned" from the graph, a marker is created:
42
42
43 (A, ())
43 (A, ())
44
44
45 - When changeset A is split into B and C, a single marker are used:
45 - When changeset A is split into B and C, a single marker are used:
46
46
    (A, (B, C))
48
48
49 We use a single marker to distinguish the "split" case from the "divergence"
49 We use a single marker to distinguish the "split" case from the "divergence"
50 case. If two independent operations rewrite the same changeset A in to A' and
50 case. If two independent operations rewrite the same changeset A in to A' and
51 A'', we have an error case: divergent rewriting. We can detect it because
51 A'', we have an error case: divergent rewriting. We can detect it because
52 two markers will be created independently:
52 two markers will be created independently:
53
53
54 (A, (B,)) and (A, (C,))
54 (A, (B,)) and (A, (C,))
55
55
56 Format
56 Format
57 ------
57 ------
58
58
59 Markers are stored in an append-only file stored in
59 Markers are stored in an append-only file stored in
60 '.hg/store/obsstore'.
60 '.hg/store/obsstore'.
61
61
62 The file starts with a version header:
62 The file starts with a version header:
63
63
64 - 1 unsigned byte: version number, starting at zero.
64 - 1 unsigned byte: version number, starting at zero.
65
65
66 The header is followed by the markers. Marker format depend of the version. See
66 The header is followed by the markers. Marker format depend of the version. See
67 comment associated with each format for details.
67 comment associated with each format for details.
68
68
69 """
69 """
70 import struct
70 import errno, struct
71 import util, base85, node, parsers
71 import util, base85, node, parsers
72 import phases
72 import phases
73 from i18n import _
73 from i18n import _
74
74
75 _pack = struct.pack
75 _pack = struct.pack
76 _unpack = struct.unpack
76 _unpack = struct.unpack
77 _calcsize = struct.calcsize
77 _calcsize = struct.calcsize
78 propertycache = util.propertycache
78 propertycache = util.propertycache
79
79
80 # the obsolete feature is not mature enough to be enabled by default.
80 # the obsolete feature is not mature enough to be enabled by default.
81 # you have to rely on third party extension extension to enable this.
81 # you have to rely on third party extension extension to enable this.
82 _enabled = False
82 _enabled = False
83
83
84 # Options for obsolescence
84 # Options for obsolescence
85 createmarkersopt = 'createmarkers'
85 createmarkersopt = 'createmarkers'
86 allowunstableopt = 'allowunstable'
86 allowunstableopt = 'allowunstable'
87 exchangeopt = 'exchange'
87 exchangeopt = 'exchange'
88
88
89 ### obsolescence marker flag
89 ### obsolescence marker flag
90
90
91 ## bumpedfix flag
91 ## bumpedfix flag
92 #
92 #
93 # When a changeset A' succeed to a changeset A which became public, we call A'
93 # When a changeset A' succeed to a changeset A which became public, we call A'
94 # "bumped" because it's a successors of a public changesets
94 # "bumped" because it's a successors of a public changesets
95 #
95 #
96 # o A' (bumped)
96 # o A' (bumped)
97 # |`:
97 # |`:
98 # | o A
98 # | o A
99 # |/
99 # |/
100 # o Z
100 # o Z
101 #
101 #
102 # The way to solve this situation is to create a new changeset Ad as children
102 # The way to solve this situation is to create a new changeset Ad as children
103 # of A. This changeset have the same content than A'. So the diff from A to A'
103 # of A. This changeset have the same content than A'. So the diff from A to A'
104 # is the same than the diff from A to Ad. Ad is marked as a successors of A'
104 # is the same than the diff from A to Ad. Ad is marked as a successors of A'
105 #
105 #
106 # o Ad
106 # o Ad
107 # |`:
107 # |`:
108 # | x A'
108 # | x A'
109 # |'|
109 # |'|
110 # o | A
110 # o | A
111 # |/
111 # |/
112 # o Z
112 # o Z
113 #
113 #
114 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
114 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
115 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
115 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
116 # This flag mean that the successors express the changes between the public and
116 # This flag mean that the successors express the changes between the public and
117 # bumped version and fix the situation, breaking the transitivity of
117 # bumped version and fix the situation, breaking the transitivity of
118 # "bumped" here.
118 # "bumped" here.
119 bumpedfix = 1
119 bumpedfix = 1
120 usingsha256 = 2
120 usingsha256 = 2
121
121
122 ## Parsing and writing of version "0"
122 ## Parsing and writing of version "0"
123 #
123 #
124 # The header is followed by the markers. Each marker is made of:
124 # The header is followed by the markers. Each marker is made of:
125 #
125 #
126 # - 1 uint8 : number of new changesets "N", can be zero.
126 # - 1 uint8 : number of new changesets "N", can be zero.
127 #
127 #
128 # - 1 uint32: metadata size "M" in bytes.
128 # - 1 uint32: metadata size "M" in bytes.
129 #
129 #
130 # - 1 byte: a bit field. It is reserved for flags used in common
130 # - 1 byte: a bit field. It is reserved for flags used in common
131 # obsolete marker operations, to avoid repeated decoding of metadata
131 # obsolete marker operations, to avoid repeated decoding of metadata
132 # entries.
132 # entries.
133 #
133 #
134 # - 20 bytes: obsoleted changeset identifier.
134 # - 20 bytes: obsoleted changeset identifier.
135 #
135 #
136 # - N*20 bytes: new changesets identifiers.
136 # - N*20 bytes: new changesets identifiers.
137 #
137 #
138 # - M bytes: metadata as a sequence of nul-terminated strings. Each
138 # - M bytes: metadata as a sequence of nul-terminated strings. Each
139 # string contains a key and a value, separated by a colon ':', without
139 # string contains a key and a value, separated by a colon ':', without
140 # additional encoding. Keys cannot contain '\0' or ':' and values
140 # additional encoding. Keys cannot contain '\0' or ':' and values
141 # cannot contain '\0'.
141 # cannot contain '\0'.
142 _fm0version = 0
142 _fm0version = 0
143 _fm0fixed = '>BIB20s'
143 _fm0fixed = '>BIB20s'
144 _fm0node = '20s'
144 _fm0node = '20s'
145 _fm0fsize = _calcsize(_fm0fixed)
145 _fm0fsize = _calcsize(_fm0fixed)
146 _fm0fnodesize = _calcsize(_fm0node)
146 _fm0fnodesize = _calcsize(_fm0node)
147
147
def _fm0readmarkers(data, off):
    """Decode version "0" markers from `data`, starting at offset `off`.

    Yields (precursor, successors, flags, metadata, date, parents) tuples,
    one per marker. Format "0" has no dedicated fields for date or parents,
    so both are recovered from the encoded metadata dictionary ('date',
    'p0'/'p1'/'p2' keys) and removed from it before yielding.
    """
    # Loop on markers
    l = len(data)
    while off + _fm0fsize <= l:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement (successor nodes follow the fixed part)
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            # a short read here means the on-disk data is truncated/corrupt
            raise util.Abort(_('parsing obsolete marker: metadata is too '
                               'short, %d bytes expected, got %d')
                             % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            # 'date' is stored as "<unixtime> <tzoffset>"; fall back to
            # epoch/UTC when missing or malformed
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        # parents of the precursor, when they were recorded:
        #   p1+p2 -> two parents, p1 alone -> one, p0 -> explicitly none,
        #   no key -> not recorded (None)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)
199
199
def _fm0encodeonemarker(marker):
    """Serialize a single marker tuple using the version "0" binary layout.

    Raises util.Abort for sha256-based markers, which format "0" cannot
    represent.
    """
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise util.Abort(_('cannot handle sha256 with old obsstore format'))
    # format "0" has no dedicated fields for date or parents: fold both
    # into the metadata dictionary instead
    meta = dict(metadata)
    when, offset = date
    meta['date'] = '%r %i' % (when, offset)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            meta['p0'] = ''
        for idx, parent in enumerate(parents):
            meta['p%i' % (idx + 1)] = node.hex(parent)
    encodedmeta = _fm0encodemeta(meta)
    nbsuc = len(sucs)
    fmt = _fm0fixed + _fm0node * nbsuc
    fields = [nbsuc, len(encodedmeta), flags, pre] + list(sucs)
    return _pack(fmt, *fields) + encodedmeta
219
219
220 def _fm0encodemeta(meta):
220 def _fm0encodemeta(meta):
221 """Return encoded metadata string to string mapping.
221 """Return encoded metadata string to string mapping.
222
222
223 Assume no ':' in key and no '\0' in both key and value."""
223 Assume no ':' in key and no '\0' in both key and value."""
224 for key, value in meta.iteritems():
224 for key, value in meta.iteritems():
225 if ':' in key or '\0' in key:
225 if ':' in key or '\0' in key:
226 raise ValueError("':' and '\0' are forbidden in metadata key'")
226 raise ValueError("':' and '\0' are forbidden in metadata key'")
227 if '\0' in value:
227 if '\0' in value:
228 raise ValueError("':' is forbidden in metadata value'")
228 raise ValueError("':' is forbidden in metadata value'")
229 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
229 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
230
230
231 def _fm0decodemeta(data):
231 def _fm0decodemeta(data):
232 """Return string to string dictionary from encoded version."""
232 """Return string to string dictionary from encoded version."""
233 d = {}
233 d = {}
234 for l in data.split('\0'):
234 for l in data.split('\0'):
235 if l:
235 if l:
236 key, value = l.split(':')
236 key, value = l.split(':')
237 d[key] = value
237 d[key] = value
238 return d
238 return d
239
239
240 ## Parsing and writing of version "1"
240 ## Parsing and writing of version "1"
241 #
241 #
242 # The header is followed by the markers. Each marker is made of:
242 # The header is followed by the markers. Each marker is made of:
243 #
243 #
244 # - uint32: total size of the marker (including this field)
244 # - uint32: total size of the marker (including this field)
245 #
245 #
246 # - float64: date in seconds since epoch
246 # - float64: date in seconds since epoch
247 #
247 #
248 # - int16: timezone offset in minutes
248 # - int16: timezone offset in minutes
249 #
249 #
250 # - uint16: a bit field. It is reserved for flags used in common
250 # - uint16: a bit field. It is reserved for flags used in common
251 # obsolete marker operations, to avoid repeated decoding of metadata
251 # obsolete marker operations, to avoid repeated decoding of metadata
252 # entries.
252 # entries.
253 #
253 #
254 # - uint8: number of successors "N", can be zero.
254 # - uint8: number of successors "N", can be zero.
255 #
255 #
256 # - uint8: number of parents "P", can be zero.
256 # - uint8: number of parents "P", can be zero.
257 #
257 #
258 # 0: parents data stored but no parent,
258 # 0: parents data stored but no parent,
259 # 1: one parent stored,
259 # 1: one parent stored,
260 # 2: two parents stored,
260 # 2: two parents stored,
261 # 3: no parent data stored
261 # 3: no parent data stored
262 #
262 #
263 # - uint8: number of metadata entries M
263 # - uint8: number of metadata entries M
264 #
264 #
265 # - 20 or 32 bytes: precursor changeset identifier.
265 # - 20 or 32 bytes: precursor changeset identifier.
266 #
266 #
267 # - N*(20 or 32) bytes: successors changesets identifiers.
267 # - N*(20 or 32) bytes: successors changesets identifiers.
268 #
268 #
269 # - P*(20 or 32) bytes: parents of the precursors changesets.
269 # - P*(20 or 32) bytes: parents of the precursors changesets.
270 #
270 #
271 # - M*(uint8, uint8): size of all metadata entries (key and value)
271 # - M*(uint8, uint8): size of all metadata entries (key and value)
272 #
272 #
273 # - remaining bytes: the metadata, each (key, value) pair after the other.
273 # - remaining bytes: the metadata, each (key, value) pair after the other.
274 _fm1version = 1
274 _fm1version = 1
275 _fm1fixed = '>IdhHBBB20s'
275 _fm1fixed = '>IdhHBBB20s'
276 _fm1nodesha1 = '20s'
276 _fm1nodesha1 = '20s'
277 _fm1nodesha256 = '32s'
277 _fm1nodesha256 = '32s'
278 _fm1nodesha1size = _calcsize(_fm1nodesha1)
278 _fm1nodesha1size = _calcsize(_fm1nodesha1)
279 _fm1nodesha256size = _calcsize(_fm1nodesha256)
279 _fm1nodesha256size = _calcsize(_fm1nodesha256)
280 _fm1fsize = _calcsize(_fm1fixed)
280 _fm1fsize = _calcsize(_fm1fixed)
281 _fm1parentnone = 3
281 _fm1parentnone = 3
282 _fm1parentshift = 14
282 _fm1parentshift = 14
283 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
283 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
284 _fm1metapair = 'BB'
284 _fm1metapair = 'BB'
285 _fm1metapairsize = _calcsize('BB')
285 _fm1metapairsize = _calcsize('BB')
286
286
def _fm1purereadmarkers(data, off):
    """Pure-python decoder for version "1" markers.

    Yields (precursor, successors, flags, metadata, date, parents) tuples
    starting at offset `off` in `data`. This is the fallback used when the
    C implementation (parsers.fm1readmarkers) is not available, hence the
    heavy hand-tuning below.
    """
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    stop = len(data) - _fm1fsize
    ufixed = struct.Struct(_fm1fixed).unpack

    while off <= stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        # the two branches below are identical except for the node width
        # (20-byte sha1 vs 32-byte sha256); duplicated on purpose to keep
        # the hot path free of per-marker width lookups
        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                # single-successor fast path: slice instead of unpack
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents (noneflag means "no parent data recorded")
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata: first a table of (keysize, valuesize) byte pairs,
        # then the raw key/value bytes back to back
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        # tz is stored in minutes on disk, exposed in seconds
        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
361
361
def _fm1encodeonemarker(marker):
    """Serialize a single marker tuple using the version "1" binary layout.

    The first uint32 field is the total marker size (fixed part, nodes,
    metadata size table, and metadata payload), filled in last.
    """
    pre, sucs, flags, metadata, date, parents = marker
    # node width depends on the hash flavor recorded in the flags
    if flags & usingsha256:
        nodefmt = _fm1nodesha256
    else:
        nodefmt = _fm1nodesha1
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    # fixed header, then successor/parent nodes, then one (uint8, uint8)
    # size pair per metadata entry
    fmt = (_fm1fixed
           + nodefmt * numextranodes
           + _fm1metapair * len(metadata))
    # tz is stored in minutes so we divide by 60
    fields = [None, date[0], date[1] // 60, flags, numsuc, numpar,
              len(metadata), pre]
    fields.extend(sucs)
    if parents is not None:
        fields.extend(parents)
    totalsize = _calcsize(fmt)
    for key, value in metadata:
        fields.append(len(key))
        fields.append(len(value))
        totalsize += len(key) + len(value)
    # now that the metadata payload is accounted for, fill in the size field
    fields[0] = totalsize
    chunks = [_pack(fmt, *fields)]
    for key, value in metadata:
        chunks.append(key)
        chunks.append(value)
    return ''.join(chunks)
397
397
def _fm1readmarkers(data, off):
    """Decode version "1" markers, preferring the C implementation.

    Falls back to the pure-python decoder when the parsers module does not
    provide fm1readmarkers.
    """
    native = getattr(parsers, 'fm1readmarkers', None)
    if native:
        # last offset at which a fixed-size marker header may still start
        return native(data, off, len(data) - _fm1fsize)
    return _fm1purereadmarkers(data, off)
404
404
405 # mapping to read/write various marker formats
405 # mapping to read/write various marker formats
406 # <version> -> (decoder, encoder)
406 # <version> -> (decoder, encoder)
407 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
407 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
408 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
408 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
409
409
@util.nogc
def _readmarkers(data):
    """Read and enumerate markers from raw data

    Returns (version, iterable-of-marker-tuples). Aborts on an unknown
    on-disk format version.
    """
    # the first byte of the file is the on-disk format version
    diskversion = _unpack('>B', data[0:1])[0]
    if diskversion not in formats:
        raise util.Abort(_('parsing obsolete marker: unknown version %r')
                         % diskversion)
    # hand everything after the version byte to the matching decoder
    return diskversion, formats[diskversion][0](data, 1)
420
420
def encodemarkers(markers, addheader=False, version=_fm0version):
    """Yield binary chunks encoding `markers` in the requested format.

    Kept separate from flushmarkers(), it will be reused for markers
    exchange. When `addheader` is true, the one-byte version header is
    yielded first.
    """
    if addheader:
        yield _pack('>B', version)
    encode = formats[version][1]
    for m in markers:
        yield encode(m)
429
429
430
430
class marker(object):
    """Wrap obsolete marker raw data

    The wrapped tuple is laid out as
    (prec, succs, flags, meta, date, parents); see the obsstore class
    documentation for the meaning of each field.
    """

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        # only markers of the exact same class can compare equal
        return type(other) == type(self) and self._data == other._data

    def precnode(self):
        """Node identifier of the obsoleted (precursor) changeset"""
        return self._data[0]

    def succnodes(self):
        """Tuple of node identifiers of the successor changesets"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the precursor, or None when they were not recorded"""
        return self._data[5]

    def metadata(self):
        """Metadata as a fresh key->value dictionary"""
        return dict(self._data[3])

    def date(self):
        """Marker creation date as an (unixtime, offset) pair"""
        return self._data[4]

    def flags(self):
        """Raw integer flag field of the marker"""
        return self._data[2]
471
471
@util.nogc
def _addsuccessors(successors, markers):
    """Index `markers` into the successors mapping, keyed by precursor node."""
    for m in markers:
        prec = m[0]
        if prec not in successors:
            successors[prec] = set()
        successors[prec].add(m)
476
476
@util.nogc
def _addprecursors(precursors, markers):
    """Index `markers` into the precursors mapping, keyed by successor node."""
    for m in markers:
        for succ in m[1]:
            if succ not in precursors:
                precursors[succ] = set()
            precursors[succ].add(m)
482
482
@util.nogc
def _addchildren(children, markers):
    """Index `markers` by each recorded parent of their precursor."""
    for m in markers:
        recordedparents = m[5]
        if recordedparents is None:
            # parent information was not recorded for this marker
            continue
        for p in recordedparents:
            children.setdefault(p, set()).add(m)
490
490
def _checkinvalidmarkers(markers):
    """search for marker with invalid data and raise error if needed

    Exist as a separated function to allow the evolve extension for a more
    subtle handling.
    """
    for m in markers:
        # the null revision can never be a valid successor
        if node.nullid in m[1]:
            raise util.Abort(_('bad obsolescence marker detected: '
                               'invalid successors nullid'))
501
501
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with two mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on precursors edges of children(x)
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec: nodeid, precursor changesets
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    #          None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related cache
        self.caches = {}
        self.svfs = svfs
        self._version = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        # Fast path: when markers have not been loaded yet, answer the
        # emptiness question from the on-disk file size instead of parsing
        # the whole obsstore. A size > 1 means there is data beyond the
        # one-byte version header.
        if not self._cached('_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
                pass
        return bool(self._all)

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating marker you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker have been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            else:
                date = util.makedate()
        # nodeids are raw 20-byte binary hashes
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicate.
        Return the number of new marker."""
        if self._readonly:
            raise util.Abort('creating obsolete markers is not enabled on this '
                             'repo')
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in encodemarkers(new, offset == 0, self._version):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()'  here
                f.close()
            self._addmarkers(new)
            # new marker *may* have changed several set. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _all(self):
        # lazily load and validate every marker from the obsstore file
        data = self.svfs.tryread('obsstore')
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def precursors(self):
        precursors = {}
        _addprecursors(precursors, self._all)
        return precursors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        # True when the named propertycache has already been computed
        return attr in self.__dict__

    def _addmarkers(self, markers):
        markers = list(markers) # to allow repeated iteration
        self._all.extend(markers)
        # only update the mapping caches that have already been computed
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('precursors'):
            _addprecursors(self.precursors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes mean:

        - marker that use this changeset as successor
        - prune marker of direct children on this changeset
        - recursive application of the two rules on precursors of these markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                # prune markers are those with an empty successors tuple
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers
682
691
def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.

    Note: sorts the `versions` list in place.
    """
    versions.sort(reverse=True)
    # search for highest version known on both side
    for v in versions:
        if v in formats:
            return v
    return None
694
703
695 # arbitrary picked to fit into 8K limit from HTTP server
704 # arbitrary picked to fit into 8K limit from HTTP server
696 # you have to take in account:
705 # you have to take in account:
697 # - the version header
706 # - the version header
698 # - the base85 encoding
707 # - the base85 encoding
699 _maxpayload = 5300
708 _maxpayload = 5300
700
709
701 def _pushkeyescape(markers):
710 def _pushkeyescape(markers):
702 """encode markers into a dict suitable for pushkey exchange
711 """encode markers into a dict suitable for pushkey exchange
703
712
704 - binary data is base85 encoded
713 - binary data is base85 encoded
705 - split in chunks smaller than 5300 bytes"""
714 - split in chunks smaller than 5300 bytes"""
706 keys = {}
715 keys = {}
707 parts = []
716 parts = []
708 currentlen = _maxpayload * 2 # ensure we create a new part
717 currentlen = _maxpayload * 2 # ensure we create a new part
709 for marker in markers:
718 for marker in markers:
710 nextdata = _fm0encodeonemarker(marker)
719 nextdata = _fm0encodeonemarker(marker)
711 if (len(nextdata) + currentlen > _maxpayload):
720 if (len(nextdata) + currentlen > _maxpayload):
712 currentpart = []
721 currentpart = []
713 currentlen = 0
722 currentlen = 0
714 parts.append(currentpart)
723 parts.append(currentpart)
715 currentpart.append(nextdata)
724 currentpart.append(nextdata)
716 currentlen += len(nextdata)
725 currentlen += len(nextdata)
717 for idx, part in enumerate(reversed(parts)):
726 for idx, part in enumerate(reversed(parts)):
718 data = ''.join([_pack('>B', _fm0version)] + part)
727 data = ''.join([_pack('>B', _fm0version)] + part)
719 keys['dump%i' % idx] = base85.b85encode(data)
728 keys['dump%i' % idx] = base85.b85encode(data)
720 return keys
729 return keys
721
730
def listmarkers(repo):
    """List markers over pushkey"""
    # empty obsstore: nothing to expose
    if not repo.obsstore:
        return {}
    return _pushkeyescape(sorted(repo.obsstore))
727
736
def pushmarker(repo, key, old, new):
    """Push markers over pushkey

    Returns 1 on success, 0 when the key is unknown or an old value was
    unexpectedly supplied (obsstore keys are append-only)."""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()
748
757
def getmarkers(repo, nodes=None):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are are
    returned"""
    if nodes is None:
        rawmarkers = repo.obsstore
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for markerdata in rawmarkers:
        yield marker(repo, markerdata)
761
770
def relevantmarkers(repo, node):
    """all obsolete markers relevant to some revision"""
    for markerdata in repo.obsstore.relevantmarkers(node):
        yield marker(repo, markerdata)
766
775
767
776
def precursormarkers(ctx):
    """obsolete marker marking this changeset as a successors"""
    for data in ctx.repo().obsstore.precursors.get(ctx.node(), ()):
        yield marker(ctx.repo(), data)
772
781
def successormarkers(ctx):
    """obsolete marker making this changeset obsolete"""
    for data in ctx.repo().obsstore.successors.get(ctx.node(), ()):
        yield marker(ctx.repo(), data)
777
786
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            # mark[1] is the tuple of successor nodes of this marker
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)
798
807
def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursors of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""

    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            # mark[0] is the (single) precursor node of this marker
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)
820
829
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycle may result if complex situation.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        # (fixed point: loop until the foreground stops growing)
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            # successors unknown locally cannot be walked further
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)
846
855
847
856
848 def successorssets(repo, initialnode, cache=None):
857 def successorssets(repo, initialnode, cache=None):
849 """Return set of all latest successors of initial nodes
858 """Return set of all latest successors of initial nodes
850
859
851 The successors set of a changeset A are the group of revisions that succeed
860 The successors set of a changeset A are the group of revisions that succeed
852 A. It succeeds A as a consistent whole, each revision being only a partial
861 A. It succeeds A as a consistent whole, each revision being only a partial
853 replacement. The successors set contains non-obsolete changesets only.
862 replacement. The successors set contains non-obsolete changesets only.
854
863
855 This function returns the full list of successor sets which is why it
864 This function returns the full list of successor sets which is why it
856 returns a list of tuples and not just a single tuple. Each tuple is a valid
865 returns a list of tuples and not just a single tuple. Each tuple is a valid
857 successors set. Note that (A,) may be a valid successors set for changeset A
866 successors set. Note that (A,) may be a valid successors set for changeset A
858 (see below).
867 (see below).
859
868
860 In most cases, a changeset A will have a single element (e.g. the changeset
869 In most cases, a changeset A will have a single element (e.g. the changeset
861 A is replaced by A') in its successors set. Though, it is also common for a
870 A is replaced by A') in its successors set. Though, it is also common for a
862 changeset A to have no elements in its successor set (e.g. the changeset
871 changeset A to have no elements in its successor set (e.g. the changeset
863 has been pruned). Therefore, the returned list of successors sets will be
872 has been pruned). Therefore, the returned list of successors sets will be
864 [(A',)] or [], respectively.
873 [(A',)] or [], respectively.
865
874
866 When a changeset A is split into A' and B', however, it will result in a
875 When a changeset A is split into A' and B', however, it will result in a
867 successors set containing more than a single element, i.e. [(A',B')].
876 successors set containing more than a single element, i.e. [(A',B')].
868 Divergent changesets will result in multiple successors sets, i.e. [(A',),
877 Divergent changesets will result in multiple successors sets, i.e. [(A',),
869 (A'')].
878 (A'')].
870
879
871 If a changeset A is not obsolete, then it will conceptually have no
880 If a changeset A is not obsolete, then it will conceptually have no
872 successors set. To distinguish this from a pruned changeset, the successor
881 successors set. To distinguish this from a pruned changeset, the successor
873 set will contain itself only, i.e. [(A,)].
882 set will contain itself only, i.e. [(A,)].
874
883
875 Finally, successors unknown locally are considered to be pruned (obsoleted
884 Finally, successors unknown locally are considered to be pruned (obsoleted
876 without any successors).
885 without any successors).
877
886
878 The optional `cache` parameter is a dictionary that may contain precomputed
887 The optional `cache` parameter is a dictionary that may contain precomputed
879 successors sets. It is meant to reuse the computation of a previous call to
888 successors sets. It is meant to reuse the computation of a previous call to
880 `successorssets` when multiple calls are made at the same time. The cache
889 `successorssets` when multiple calls are made at the same time. The cache
881 dictionary is updated in place. The caller is responsible for its life
890 dictionary is updated in place. The caller is responsible for its life
882 span. Code that makes multiple calls to `successorssets` *must* use this
891 span. Code that makes multiple calls to `successorssets` *must* use this
883 cache mechanism or suffer terrible performance.
892 cache mechanism or suffer terrible performance.
884 """
893 """
885
894
886 succmarkers = repo.obsstore.successors
895 succmarkers = repo.obsstore.successors
887
896
888 # Stack of nodes we search successors sets for
897 # Stack of nodes we search successors sets for
889 toproceed = [initialnode]
898 toproceed = [initialnode]
890 # set version of above list for fast loop detection
899 # set version of above list for fast loop detection
891 # element added to "toproceed" must be added here
900 # element added to "toproceed" must be added here
892 stackedset = set(toproceed)
901 stackedset = set(toproceed)
893 if cache is None:
902 if cache is None:
894 cache = {}
903 cache = {}
895
904
896 # This while loop is the flattened version of a recursive search for
905 # This while loop is the flattened version of a recursive search for
897 # successors sets
906 # successors sets
898 #
907 #
899 # def successorssets(x):
908 # def successorssets(x):
900 # successors = directsuccessors(x)
909 # successors = directsuccessors(x)
901 # ss = [[]]
910 # ss = [[]]
902 # for succ in directsuccessors(x):
911 # for succ in directsuccessors(x):
903 # # product as in itertools cartesian product
912 # # product as in itertools cartesian product
904 # ss = product(ss, successorssets(succ))
913 # ss = product(ss, successorssets(succ))
905 # return ss
914 # return ss
906 #
915 #
907 # But we can not use plain recursive calls here:
916 # But we can not use plain recursive calls here:
908 # - that would blow the python call stack
917 # - that would blow the python call stack
909 # - obsolescence markers may have cycles, we need to handle them.
918 # - obsolescence markers may have cycles, we need to handle them.
910 #
919 #
911 # The `toproceed` list act as our call stack. Every node we search
920 # The `toproceed` list act as our call stack. Every node we search
912 # successors set for are stacked there.
921 # successors set for are stacked there.
913 #
922 #
914 # The `stackedset` is set version of this stack used to check if a node is
923 # The `stackedset` is set version of this stack used to check if a node is
915 # already stacked. This check is used to detect cycles and prevent infinite
924 # already stacked. This check is used to detect cycles and prevent infinite
916 # loop.
925 # loop.
917 #
926 #
918 # successors set of all nodes are stored in the `cache` dictionary.
927 # successors set of all nodes are stored in the `cache` dictionary.
919 #
928 #
920 # After this while loop ends we use the cache to return the successors sets
929 # After this while loop ends we use the cache to return the successors sets
921 # for the node requested by the caller.
930 # for the node requested by the caller.
922 while toproceed:
931 while toproceed:
923 # Every iteration tries to compute the successors sets of the topmost
932 # Every iteration tries to compute the successors sets of the topmost
924 # node of the stack: CURRENT.
933 # node of the stack: CURRENT.
925 #
934 #
926 # There are four possible outcomes:
935 # There are four possible outcomes:
927 #
936 #
928 # 1) We already know the successors sets of CURRENT:
937 # 1) We already know the successors sets of CURRENT:
929 # -> mission accomplished, pop it from the stack.
938 # -> mission accomplished, pop it from the stack.
930 # 2) Node is not obsolete:
939 # 2) Node is not obsolete:
931 # -> the node is its own successors sets. Add it to the cache.
940 # -> the node is its own successors sets. Add it to the cache.
932 # 3) We do not know successors set of direct successors of CURRENT:
941 # 3) We do not know successors set of direct successors of CURRENT:
933 # -> We add those successors to the stack.
942 # -> We add those successors to the stack.
934 # 4) We know successors sets of all direct successors of CURRENT:
943 # 4) We know successors sets of all direct successors of CURRENT:
935 # -> We can compute CURRENT successors set and add it to the
944 # -> We can compute CURRENT successors set and add it to the
936 # cache.
945 # cache.
937 #
946 #
938 current = toproceed[-1]
947 current = toproceed[-1]
939 if current in cache:
948 if current in cache:
940 # case (1): We already know the successors sets
949 # case (1): We already know the successors sets
941 stackedset.remove(toproceed.pop())
950 stackedset.remove(toproceed.pop())
942 elif current not in succmarkers:
951 elif current not in succmarkers:
943 # case (2): The node is not obsolete.
952 # case (2): The node is not obsolete.
944 if current in repo:
953 if current in repo:
945 # We have a valid last successors.
954 # We have a valid last successors.
946 cache[current] = [(current,)]
955 cache[current] = [(current,)]
947 else:
956 else:
948 # Final obsolete version is unknown locally.
957 # Final obsolete version is unknown locally.
949 # Do not count that as a valid successors
958 # Do not count that as a valid successors
950 cache[current] = []
959 cache[current] = []
951 else:
960 else:
952 # cases (3) and (4)
961 # cases (3) and (4)
953 #
962 #
954 # We proceed in two phases. Phase 1 aims to distinguish case (3)
963 # We proceed in two phases. Phase 1 aims to distinguish case (3)
955 # from case (4):
964 # from case (4):
956 #
965 #
957 # For each direct successors of CURRENT, we check whether its
966 # For each direct successors of CURRENT, we check whether its
958 # successors sets are known. If they are not, we stack the
967 # successors sets are known. If they are not, we stack the
959 # unknown node and proceed to the next iteration of the while
968 # unknown node and proceed to the next iteration of the while
960 # loop. (case 3)
969 # loop. (case 3)
961 #
970 #
962 # During this step, we may detect obsolescence cycles: a node
971 # During this step, we may detect obsolescence cycles: a node
963 # with unknown successors sets but already in the call stack.
972 # with unknown successors sets but already in the call stack.
964 # In such a situation, we arbitrary set the successors sets of
973 # In such a situation, we arbitrary set the successors sets of
965 # the node to nothing (node pruned) to break the cycle.
974 # the node to nothing (node pruned) to break the cycle.
966 #
975 #
967 # If no break was encountered we proceed to phase 2.
976 # If no break was encountered we proceed to phase 2.
968 #
977 #
969 # Phase 2 computes successors sets of CURRENT (case 4); see details
978 # Phase 2 computes successors sets of CURRENT (case 4); see details
970 # in phase 2 itself.
979 # in phase 2 itself.
971 #
980 #
972 # Note the two levels of iteration in each phase.
981 # Note the two levels of iteration in each phase.
973 # - The first one handles obsolescence markers using CURRENT as
982 # - The first one handles obsolescence markers using CURRENT as
974 # precursor (successors markers of CURRENT).
983 # precursor (successors markers of CURRENT).
975 #
984 #
976 # Having multiple entry here means divergence.
985 # Having multiple entry here means divergence.
977 #
986 #
978 # - The second one handles successors defined in each marker.
987 # - The second one handles successors defined in each marker.
979 #
988 #
980 # Having none means pruned node, multiple successors means split,
989 # Having none means pruned node, multiple successors means split,
981 # single successors are standard replacement.
990 # single successors are standard replacement.
982 #
991 #
983 for mark in sorted(succmarkers[current]):
992 for mark in sorted(succmarkers[current]):
984 for suc in mark[1]:
993 for suc in mark[1]:
985 if suc not in cache:
994 if suc not in cache:
986 if suc in stackedset:
995 if suc in stackedset:
987 # cycle breaking
996 # cycle breaking
988 cache[suc] = []
997 cache[suc] = []
989 else:
998 else:
990 # case (3) If we have not computed successors sets
999 # case (3) If we have not computed successors sets
991 # of one of those successors we add it to the
1000 # of one of those successors we add it to the
992 # `toproceed` stack and stop all work for this
1001 # `toproceed` stack and stop all work for this
993 # iteration.
1002 # iteration.
994 toproceed.append(suc)
1003 toproceed.append(suc)
995 stackedset.add(suc)
1004 stackedset.add(suc)
996 break
1005 break
997 else:
1006 else:
998 continue
1007 continue
999 break
1008 break
1000 else:
1009 else:
1001 # case (4): we know all successors sets of all direct
1010 # case (4): we know all successors sets of all direct
1002 # successors
1011 # successors
1003 #
1012 #
1004 # Successors set contributed by each marker depends on the
1013 # Successors set contributed by each marker depends on the
1005 # successors sets of all its "successors" node.
1014 # successors sets of all its "successors" node.
1006 #
1015 #
1007 # Each different marker is a divergence in the obsolescence
1016 # Each different marker is a divergence in the obsolescence
1008 # history. It contributes successors sets distinct from other
1017 # history. It contributes successors sets distinct from other
1009 # markers.
1018 # markers.
1010 #
1019 #
1011 # Within a marker, a successor may have divergent successors
1020 # Within a marker, a successor may have divergent successors
1012 # sets. In such a case, the marker will contribute multiple
1021 # sets. In such a case, the marker will contribute multiple
1013 # divergent successors sets. If multiple successors have
1022 # divergent successors sets. If multiple successors have
1014 # divergent successors sets, a Cartesian product is used.
1023 # divergent successors sets, a Cartesian product is used.
1015 #
1024 #
1016 # At the end we post-process successors sets to remove
1025 # At the end we post-process successors sets to remove
1017 # duplicated entry and successors set that are strict subset of
1026 # duplicated entry and successors set that are strict subset of
1018 # another one.
1027 # another one.
1019 succssets = []
1028 succssets = []
1020 for mark in sorted(succmarkers[current]):
1029 for mark in sorted(succmarkers[current]):
1021 # successors sets contributed by this marker
1030 # successors sets contributed by this marker
1022 markss = [[]]
1031 markss = [[]]
1023 for suc in mark[1]:
1032 for suc in mark[1]:
1024 # cardinal product with previous successors
1033 # cardinal product with previous successors
1025 productresult = []
1034 productresult = []
1026 for prefix in markss:
1035 for prefix in markss:
1027 for suffix in cache[suc]:
1036 for suffix in cache[suc]:
1028 newss = list(prefix)
1037 newss = list(prefix)
1029 for part in suffix:
1038 for part in suffix:
1030 # do not duplicated entry in successors set
1039 # do not duplicated entry in successors set
1031 # first entry wins.
1040 # first entry wins.
1032 if part not in newss:
1041 if part not in newss:
1033 newss.append(part)
1042 newss.append(part)
1034 productresult.append(newss)
1043 productresult.append(newss)
1035 markss = productresult
1044 markss = productresult
1036 succssets.extend(markss)
1045 succssets.extend(markss)
1037 # remove duplicated and subset
1046 # remove duplicated and subset
1038 seen = []
1047 seen = []
1039 final = []
1048 final = []
1040 candidate = sorted(((set(s), s) for s in succssets if s),
1049 candidate = sorted(((set(s), s) for s in succssets if s),
1041 key=lambda x: len(x[1]), reverse=True)
1050 key=lambda x: len(x[1]), reverse=True)
1042 for setversion, listversion in candidate:
1051 for setversion, listversion in candidate:
1043 for seenset in seen:
1052 for seenset in seen:
1044 if setversion.issubset(seenset):
1053 if setversion.issubset(seenset):
1045 break
1054 break
1046 else:
1055 else:
1047 final.append(listversion)
1056 final.append(listversion)
1048 seen.append(setversion)
1057 seen.append(setversion)
1049 final.reverse() # put small successors set first
1058 final.reverse() # put small successors set first
1050 cache[current] = final
1059 cache[current] = final
1051 return cache[initialnode]
1060 return cache[initialnode]
1052
1061
# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def register(fn):
        # each named set may only have one registered compute function
        assert name not in cachefuncs
        cachefuncs[name] = fn
        return fn
    return register
1062
1071
def getrevs(repo, name):
    """Return the set of revision that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        # fast path: an empty obsstore means every obsolescence set is empty
        return frozenset()
    caches = repo.obsstore.caches
    if name not in caches:
        caches[name] = cachefuncs[name](repo)
    return caches[name]
1073
1082
# To be simple we need to invalidate obsolescence cache when:
#
# - a new changeset is added
# - public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence-related caches from a repo

    This removes all caches in obsstore if the obsstore already exists on
    the repo.

    (We could be smarter here given the exact event that triggered the
    cache clearing)"""
    # only clear caches if there is obsstore data in this repo; checking
    # _filecache avoids creating the obsstore as a side effect
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()
1091
1100
@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    torev = repo.changelog.nodemap.get
    phaseof = repo._phasecache.phase
    obs = set()
    for node in repo.obsstore.successors:
        r = torev(node)
        # only count nodes known locally and not in the public phase
        if r is not None and phaseof(repo, r):
            obs.add(r)
    return obs
1103
1112
@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    # walk candidates in growing revision order so that a parent's
    # unstable status is always decided before its children are visited
    candidates = sorted(repo.set('(not public()) and (not obsolete())'),
                        key=lambda c: c.rev())
    unstable = set()
    for ctx in candidates:
        # a rev is unstable if one of its parents is obsolete or unstable
        if any(p.obsolete() or (p.rev() in unstable)
               for p in ctx.parents()):
            unstable.add(ctx.rev())
    return unstable
1118
1127
@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    # suspended = obsolete revisions that are ancestors of unstable ones
    ancs = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in ancs)
1124
1133
@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    obsolete = getrevs(repo, 'obsolete')
    suspended = getrevs(repo, 'suspended')
    # extinct revisions are the obsolete ones that are not suspended
    return obsolete - suspended
1129
1138
1130
1139
@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # hoist attribute lookups out of the loop
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    torev = repo.changelog.nodemap.get
    # only mutable, non-obsolete revisions can be bumped
    for ctx in repo.set('(not public()) and (not obsolete())'):
        node = ctx.node()
        # (future) A cache of precursors may worth if split is very common
        for pnode in allprecursors(repo.obsstore, [node],
                                   ignoreflags=bumpedfix):
            prev = torev(pnode) # unfiltered! but so is phasecache
            if prev is not None and phase(repo, prev) <= public:
                # this rev tries to obsolete a public precursor
                bumped.add(ctx.rev())
                break # Next draft!
    return bumped
1153
1162
@cachefor('divergent')
def _computedivergentset(repo):
    """the set of rev that compete to be the final successors of some revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        # walk the precursor markers transitively, looking for any
        # precursor that has more than one non-empty successors set
        pending = set(obsstore.precursors.get(ctx.node(), ()))
        seen = set()
        while pending:
            prec = pending.pop()[0]
            if prec in seen:
                continue # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                # several live successors sets compete: divergence
                divergent.add(ctx.rev())
                break
            pending.update(obsstore.precursors.get(prec, ()))
    return divergent
1178
1187
1179
1188
def createmarkers(repo, relations, flag=0, date=None, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuple. `old` and `news` are changectx. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function,

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare the shared metadata for every marker
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    tr = repo.transaction('add-obsolescence-marker')
    try:
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if len(rel) > 2:
                # per-relation metadata overrides the global one
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise util.Abort("cannot obsolete public changeset: %s"
                                 % prec,
                                 hint='see "hg help phases" for details')
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                # pruned node: record parents so the prune survives exchange
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise util.Abort("changeset %s cannot obsolete itself" % prec)
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata)
            repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
1227
1236
def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    result = set(repo.ui.configlist('experimental', 'evolution'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't been
    # set but _enabled is true.
    if len(result) == 0 and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        createmarkersopt not in result):
        raise util.Abort(_("'createmarkers' obsolete option must be enabled "
                           "if other obsolete options are enabled"))

    return option in result
General Comments 0
You need to be logged in to leave comments. Login now