##// END OF EJS Templates
obsstore: make the invalid markers check wrap-able...
Pierre-Yves David -
r23973:18d43114 stable
parent child Browse files
Show More
@@ -1,1200 +1,1209 b''
1 # obsolete.py - obsolete markers handling
1 # obsolete.py - obsolete markers handling
2 #
2 #
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 # Logilab SA <contact@logilab.fr>
4 # Logilab SA <contact@logilab.fr>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 """Obsolete marker handling
9 """Obsolete marker handling
10
10
11 An obsolete marker maps an old changeset to a list of new
11 An obsolete marker maps an old changeset to a list of new
12 changesets. If the list of new changesets is empty, the old changeset
12 changesets. If the list of new changesets is empty, the old changeset
13 is said to be "killed". Otherwise, the old changeset is being
13 is said to be "killed". Otherwise, the old changeset is being
14 "replaced" by the new changesets.
14 "replaced" by the new changesets.
15
15
16 Obsolete markers can be used to record and distribute changeset graph
16 Obsolete markers can be used to record and distribute changeset graph
17 transformations performed by history rewrite operations, and help
17 transformations performed by history rewrite operations, and help
18 building new tools to reconcile conflicting rewrite actions. To
18 building new tools to reconcile conflicting rewrite actions. To
19 facilitate conflict resolution, markers include various annotations
19 facilitate conflict resolution, markers include various annotations
20 besides old and new changeset identifiers, such as creation date or
20 besides old and new changeset identifiers, such as creation date or
21 author name.
21 author name.
22
22
23 The old obsoleted changeset is called a "precursor" and possible
23 The old obsoleted changeset is called a "precursor" and possible
24 replacements are called "successors". Markers that used changeset X as
24 replacements are called "successors". Markers that used changeset X as
25 a precursor are called "successor markers of X" because they hold
25 a precursor are called "successor markers of X" because they hold
26 information about the successors of X. Markers that use changeset Y as
26 information about the successors of X. Markers that use changeset Y as
27 a successor are called "precursor markers of Y" because they hold
27 a successor are called "precursor markers of Y" because they hold
28 information about the precursors of Y.
28 information about the precursors of Y.
29
29
30 Examples:
30 Examples:
31
31
32 - When changeset A is replaced by changeset A', one marker is stored:
32 - When changeset A is replaced by changeset A', one marker is stored:
33
33
34 (A, (A',))
34 (A, (A',))
35
35
36 - When changesets A and B are folded into a new changeset C, two markers are
36 - When changesets A and B are folded into a new changeset C, two markers are
37 stored:
37 stored:
38
38
39 (A, (C,)) and (B, (C,))
39 (A, (C,)) and (B, (C,))
40
40
41 - When changeset A is simply "pruned" from the graph, a marker is created:
41 - When changeset A is simply "pruned" from the graph, a marker is created:
42
42
43 (A, ())
43 (A, ())
44
44
45 - When changeset A is split into B and C, a single marker is used:
45 - When changeset A is split into B and C, a single marker is used:
46
46
47 (A, (B, C))
47 (A, (B, C))
48
48
49 We use a single marker to distinguish the "split" case from the "divergence"
49 We use a single marker to distinguish the "split" case from the "divergence"
50 case. If two independent operations rewrite the same changeset A in to A' and
50 case. If two independent operations rewrite the same changeset A in to A' and
51 A'', we have an error case: divergent rewriting. We can detect it because
51 A'', we have an error case: divergent rewriting. We can detect it because
52 two markers will be created independently:
52 two markers will be created independently:
53
53
54 (A, (B,)) and (A, (C,))
54 (A, (B,)) and (A, (C,))
55
55
56 Format
56 Format
57 ------
57 ------
58
58
59 Markers are stored in an append-only file stored in
59 Markers are stored in an append-only file stored in
60 '.hg/store/obsstore'.
60 '.hg/store/obsstore'.
61
61
62 The file starts with a version header:
62 The file starts with a version header:
63
63
64 - 1 unsigned byte: version number, starting at zero.
64 - 1 unsigned byte: version number, starting at zero.
65
65
66 The header is followed by the markers. Marker format depend of the version. See
66 The header is followed by the markers. Marker format depend of the version. See
67 comment associated with each format for details.
67 comment associated with each format for details.
68
68
69 """
69 """
70 import struct
70 import struct
71 import util, base85, node
71 import util, base85, node
72 import phases
72 import phases
73 from i18n import _
73 from i18n import _
74
74
75 _pack = struct.pack
75 _pack = struct.pack
76 _unpack = struct.unpack
76 _unpack = struct.unpack
77 _calcsize = struct.calcsize
77 _calcsize = struct.calcsize
78
78
79 _SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5
79 _SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5
80
80
81 # the obsolete feature is not mature enough to be enabled by default.
81 # the obsolete feature is not mature enough to be enabled by default.
82 # you have to rely on third party extensions to enable this.
82 # you have to rely on third party extensions to enable this.
83 _enabled = False
83 _enabled = False
84
84
85 # Options for obsolescence
85 # Options for obsolescence
86 createmarkersopt = 'createmarkers'
86 createmarkersopt = 'createmarkers'
87 allowunstableopt = 'allowunstable'
87 allowunstableopt = 'allowunstable'
88 exchangeopt = 'exchange'
88 exchangeopt = 'exchange'
89
89
90 ### obsolescence marker flag
90 ### obsolescence marker flag
91
91
92 ## bumpedfix flag
92 ## bumpedfix flag
93 #
93 #
94 # When a changeset A' succeed to a changeset A which became public, we call A'
94 # When a changeset A' succeed to a changeset A which became public, we call A'
95 # "bumped" because it's a successors of a public changesets
95 # "bumped" because it's a successors of a public changesets
96 #
96 #
97 # o A' (bumped)
97 # o A' (bumped)
98 # |`:
98 # |`:
99 # | o A
99 # | o A
100 # |/
100 # |/
101 # o Z
101 # o Z
102 #
102 #
103 # The way to solve this situation is to create a new changeset Ad as children
103 # The way to solve this situation is to create a new changeset Ad as children
104 # of A. This changeset have the same content than A'. So the diff from A to A'
104 # of A. This changeset have the same content than A'. So the diff from A to A'
105 # is the same than the diff from A to Ad. Ad is marked as a successors of A'
105 # is the same than the diff from A to Ad. Ad is marked as a successors of A'
106 #
106 #
107 # o Ad
107 # o Ad
108 # |`:
108 # |`:
109 # | x A'
109 # | x A'
110 # |'|
110 # |'|
111 # o | A
111 # o | A
112 # |/
112 # |/
113 # o Z
113 # o Z
114 #
114 #
115 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
115 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
116 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
116 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
117 # This flag mean that the successors express the changes between the public and
117 # This flag mean that the successors express the changes between the public and
118 # bumped version and fix the situation, breaking the transitivity of
118 # bumped version and fix the situation, breaking the transitivity of
119 # "bumped" here.
119 # "bumped" here.
120 bumpedfix = 1
120 bumpedfix = 1
121 usingsha256 = 2
121 usingsha256 = 2
122
122
123 ## Parsing and writing of version "0"
123 ## Parsing and writing of version "0"
124 #
124 #
125 # The header is followed by the markers. Each marker is made of:
125 # The header is followed by the markers. Each marker is made of:
126 #
126 #
127 # - 1 uint8 : number of new changesets "N", can be zero.
127 # - 1 uint8 : number of new changesets "N", can be zero.
128 #
128 #
129 # - 1 uint32: metadata size "M" in bytes.
129 # - 1 uint32: metadata size "M" in bytes.
130 #
130 #
131 # - 1 byte: a bit field. It is reserved for flags used in common
131 # - 1 byte: a bit field. It is reserved for flags used in common
132 # obsolete marker operations, to avoid repeated decoding of metadata
132 # obsolete marker operations, to avoid repeated decoding of metadata
133 # entries.
133 # entries.
134 #
134 #
135 # - 20 bytes: obsoleted changeset identifier.
135 # - 20 bytes: obsoleted changeset identifier.
136 #
136 #
137 # - N*20 bytes: new changesets identifiers.
137 # - N*20 bytes: new changesets identifiers.
138 #
138 #
139 # - M bytes: metadata as a sequence of nul-terminated strings. Each
139 # - M bytes: metadata as a sequence of nul-terminated strings. Each
140 # string contains a key and a value, separated by a colon ':', without
140 # string contains a key and a value, separated by a colon ':', without
141 # additional encoding. Keys cannot contain '\0' or ':' and values
141 # additional encoding. Keys cannot contain '\0' or ':' and values
142 # cannot contain '\0'.
142 # cannot contain '\0'.
143 _fm0version = 0
143 _fm0version = 0
144 _fm0fixed = '>BIB20s'
144 _fm0fixed = '>BIB20s'
145 _fm0node = '20s'
145 _fm0node = '20s'
146 _fm0fsize = _calcsize(_fm0fixed)
146 _fm0fsize = _calcsize(_fm0fixed)
147 _fm0fnodesize = _calcsize(_fm0node)
147 _fm0fnodesize = _calcsize(_fm0node)
148
148
def _fm0readmarkers(data, off=0):
    """Decode version 0 markers from ``data``, starting at offset ``off``.

    Generator yielding one (prec, sucs, flags, metadata, date, parents)
    tuple per marker found.  Raises util.Abort on a truncated marker.
    """
    # Loop on markers
    l = len(data)
    while off + _fm0fsize <= l:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise util.Abort(_('parsing obsolete marker: metadata is too '
                               'short, %d bytes expected, got %d')
                             % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        # format 0 smuggles the creation date into the metadata mapping as
        # 'date': '<unixtime> <offset>'; a missing or malformed entry falls
        # back to the epoch
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        # parents are likewise carried as optional 'p0'/'p1'/'p2' metadata
        # entries; a lone 'p0' entry marks an explicitly recorded empty
        # parent set (see _fm0encodeonemarker)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)
200
200
def _fm0encodeonemarker(marker):
    """Serialize one marker tuple into the version 0 wire format.

    Date and parents have no dedicated fields in this format; they are
    folded into the metadata dictionary before encoding.
    """
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise util.Abort(_('cannot handle sha256 with old obsstore format'))
    meta = dict(metadata)
    when, tz = date
    meta['date'] = '%r %i' % (when, tz)
    if parents is not None:
        if not parents:
            # an empty 'p0' entry records "no parents" explicitly
            meta['p0'] = ''
        for idx, p in enumerate(parents):
            meta['p%i' % (idx + 1)] = node.hex(p)
    encodedmeta = _fm0encodemeta(meta)
    fmt = _fm0fixed + _fm0node * len(sucs)
    fields = [len(sucs), len(encodedmeta), flags, pre]
    fields.extend(sucs)
    return _pack(fmt, *fields) + encodedmeta
220
220
221 def _fm0encodemeta(meta):
221 def _fm0encodemeta(meta):
222 """Return encoded metadata string to string mapping.
222 """Return encoded metadata string to string mapping.
223
223
224 Assume no ':' in key and no '\0' in both key and value."""
224 Assume no ':' in key and no '\0' in both key and value."""
225 for key, value in meta.iteritems():
225 for key, value in meta.iteritems():
226 if ':' in key or '\0' in key:
226 if ':' in key or '\0' in key:
227 raise ValueError("':' and '\0' are forbidden in metadata key'")
227 raise ValueError("':' and '\0' are forbidden in metadata key'")
228 if '\0' in value:
228 if '\0' in value:
229 raise ValueError("':' is forbidden in metadata value'")
229 raise ValueError("':' is forbidden in metadata value'")
230 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
230 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
231
231
232 def _fm0decodemeta(data):
232 def _fm0decodemeta(data):
233 """Return string to string dictionary from encoded version."""
233 """Return string to string dictionary from encoded version."""
234 d = {}
234 d = {}
235 for l in data.split('\0'):
235 for l in data.split('\0'):
236 if l:
236 if l:
237 key, value = l.split(':')
237 key, value = l.split(':')
238 d[key] = value
238 d[key] = value
239 return d
239 return d
240
240
241 ## Parsing and writing of version "1"
241 ## Parsing and writing of version "1"
242 #
242 #
243 # The header is followed by the markers. Each marker is made of:
243 # The header is followed by the markers. Each marker is made of:
244 #
244 #
245 # - uint32: total size of the marker (including this field)
245 # - uint32: total size of the marker (including this field)
246 #
246 #
247 # - float64: date in seconds since epoch
247 # - float64: date in seconds since epoch
248 #
248 #
249 # - int16: timezone offset in minutes
249 # - int16: timezone offset in minutes
250 #
250 #
251 # - uint16: a bit field. It is reserved for flags used in common
251 # - uint16: a bit field. It is reserved for flags used in common
252 # obsolete marker operations, to avoid repeated decoding of metadata
252 # obsolete marker operations, to avoid repeated decoding of metadata
253 # entries.
253 # entries.
254 #
254 #
255 # - uint8: number of successors "N", can be zero.
255 # - uint8: number of successors "N", can be zero.
256 #
256 #
257 # - uint8: number of parents "P", can be zero.
257 # - uint8: number of parents "P", can be zero.
258 #
258 #
259 # 0: parents data stored but no parent,
259 # 0: parents data stored but no parent,
260 # 1: one parent stored,
260 # 1: one parent stored,
261 # 2: two parents stored,
261 # 2: two parents stored,
262 # 3: no parent data stored
262 # 3: no parent data stored
263 #
263 #
264 # - uint8: number of metadata entries M
264 # - uint8: number of metadata entries M
265 #
265 #
266 # - 20 or 32 bytes: precursor changeset identifier.
266 # - 20 or 32 bytes: precursor changeset identifier.
267 #
267 #
268 # - N*(20 or 32) bytes: successors changesets identifiers.
268 # - N*(20 or 32) bytes: successors changesets identifiers.
269 #
269 #
270 # - P*(20 or 32) bytes: parents of the precursors changesets.
270 # - P*(20 or 32) bytes: parents of the precursors changesets.
271 #
271 #
272 # - M*(uint8, uint8): size of all metadata entries (key and value)
272 # - M*(uint8, uint8): size of all metadata entries (key and value)
273 #
273 #
274 # - remaining bytes: the metadata, each (key, value) pair after the other.
274 # - remaining bytes: the metadata, each (key, value) pair after the other.
275 _fm1version = 1
275 _fm1version = 1
276 _fm1fixed = '>IdhHBBB20s'
276 _fm1fixed = '>IdhHBBB20s'
277 _fm1nodesha1 = '20s'
277 _fm1nodesha1 = '20s'
278 _fm1nodesha256 = '32s'
278 _fm1nodesha256 = '32s'
279 _fm1nodesha1size = _calcsize(_fm1nodesha1)
279 _fm1nodesha1size = _calcsize(_fm1nodesha1)
280 _fm1nodesha256size = _calcsize(_fm1nodesha256)
280 _fm1nodesha256size = _calcsize(_fm1nodesha256)
281 _fm1fsize = _calcsize(_fm1fixed)
281 _fm1fsize = _calcsize(_fm1fixed)
282 _fm1parentnone = 3
282 _fm1parentnone = 3
283 _fm1parentshift = 14
283 _fm1parentshift = 14
284 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
284 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
285 _fm1metapair = 'BB'
285 _fm1metapair = 'BB'
286 _fm1metapairsize = _calcsize('BB')
286 _fm1metapairsize = _calcsize('BB')
287
287
def _fm1readmarkers(data, off=0):
    """Decode version 1 markers from ``data``, starting at offset ``off``.

    Generator yielding one (prec, sucs, flags, metadata, date, parents)
    tuple per marker found.
    """
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    stop = len(data) - _fm1fsize
    ufixed = util.unpacker(_fm1fixed)
    while off <= stop:
        # read fixed part
        # 't' is the marker's total-size field (see format comment above);
        # it is not used here because each field is walked explicitly
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata
        # first the table of (key size, value size) pairs...
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        # ...then the concatenated key/value byte strings themselves
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        # the timezone offset is stored in minutes on disk; convert it
        # back to seconds (_fm1encodeonemarker does the inverse division)
        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
361
361
def _fm1encodeonemarker(marker):
    """Serialize one marker tuple into the version 1 wire format.

    The first field of the fixed part is the total marker size; it is
    only known once the metadata lengths are summed, so it is patched in
    just before packing.
    """
    pre, sucs, flags, metadata, date, parents = marker
    # pick the node width: sha256 markers carry 32-byte identifiers
    nodefmt = _fm1nodesha256 if flags & usingsha256 else _fm1nodesha1
    numsuc = len(sucs)
    if parents is None:
        numpar = _fm1parentnone
        extranodes = numsuc
    else:
        numpar = len(parents)
        extranodes = numsuc + numpar
    fmt = _fm1fixed + nodefmt * extranodes + _fm1metapair * len(metadata)
    # tz is stored in minutes so we divide by 60
    tz = date[1] // 60
    fields = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    fields.extend(sucs)
    if parents is not None:
        fields.extend(parents)
    totalsize = _calcsize(fmt)
    for key, value in metadata:
        fields.append(len(key))
        fields.append(len(value))
        totalsize += len(key) + len(value)
    # patch the leading total-size field now that it is known
    fields[0] = totalsize
    chunks = [_pack(fmt, *fields)]
    for key, value in metadata:
        chunks.append(key)
        chunks.append(value)
    return ''.join(chunks)
397
397
398 # mapping to read/write various marker formats
398 # mapping to read/write various marker formats
399 # <version> -> (decoder, encoder)
399 # <version> -> (decoder, encoder)
400 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
400 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
401 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
401 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
402
402
@util.nogc
def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    # the very first byte of the store is the on-disk format version
    diskversion = _unpack('>B', data[0:1])[0]
    if diskversion not in formats:
        raise util.Abort(_('parsing obsolete marker: unknown version %r')
                         % diskversion)
    # hand the remainder of the buffer to the matching decoder
    return diskversion, formats[diskversion][0](data, 1)
413
413
def encodemarkers(markers, addheader=False, version=_fm0version):
    """Yield the binary encoding of an iterable of markers.

    Kept separate from flushmarkers(), it will be reused for
    markers exchange.
    """
    # look up the encoder first so an unknown version fails immediately
    encodeone = formats[version][1]
    if addheader:
        # a one-byte version number prefixes the marker stream
        yield _pack('>B', version)
    for m in markers:
        yield encodeone(m)
422
422
423
423
class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        # only markers of the exact same class compare equal
        return type(self) == type(other) and self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def parentnodes(self):
        """Parents of the precursors (None if not recorded)"""
        return self._data[5]
464
464
def _checkinvalidmarkers(obsstore):
    """search for marker with invalid data and raise error if needed

    Exist as a separated function to allow the evolve extension for a more
    subtle handling.
    """
    # 'precursors' is keyed by successor node (per the obsstore docstring,
    # precursors[x] holds the markers on precursor edges of x), so nullid
    # appearing as a key means some marker lists the null revision among
    # its successors — hence the "invalid successors" wording below
    if node.nullid in obsstore.precursors:
        raise util.Abort(_('bad obsolescence marker detected: '
                           'invalid successors nullid'))
474
465 class obsstore(object):
475 class obsstore(object):
466 """Store obsolete markers
476 """Store obsolete markers
467
477
468 Markers can be accessed with two mappings:
478 Markers can be accessed with two mappings:
469 - precursors[x] -> set(markers on precursors edges of x)
479 - precursors[x] -> set(markers on precursors edges of x)
470 - successors[x] -> set(markers on successors edges of x)
480 - successors[x] -> set(markers on successors edges of x)
471 - children[x] -> set(markers on precursors edges of children(x)
481 - children[x] -> set(markers on precursors edges of children(x)
472 """
482 """
473
483
474 fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
484 fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
475 # prec: nodeid, precursor changesets
485 # prec: nodeid, precursor changesets
476 # succs: tuple of nodeid, successor changesets (0-N length)
486 # succs: tuple of nodeid, successor changesets (0-N length)
477 # flag: integer, flag field carrying modifier for the markers (see doc)
487 # flag: integer, flag field carrying modifier for the markers (see doc)
478 # meta: binary blob, encoded metadata dictionary
488 # meta: binary blob, encoded metadata dictionary
479 # date: (float, int) tuple, date of marker creation
489 # date: (float, int) tuple, date of marker creation
480 # parents: (tuple of nodeid) or None, parents of precursors
490 # parents: (tuple of nodeid) or None, parents of precursors
481 # None is used when no data has been recorded
491 # None is used when no data has been recorded
482
492
483 def __init__(self, sopener, defaultformat=_fm1version, readonly=False):
493 def __init__(self, sopener, defaultformat=_fm1version, readonly=False):
484 # caches for various obsolescence related cache
494 # caches for various obsolescence related cache
485 self.caches = {}
495 self.caches = {}
486 self._all = []
496 self._all = []
487 self.precursors = {}
497 self.precursors = {}
488 self.successors = {}
498 self.successors = {}
489 self.children = {}
499 self.children = {}
490 self.sopener = sopener
500 self.sopener = sopener
491 data = sopener.tryread('obsstore')
501 data = sopener.tryread('obsstore')
492 self._version = defaultformat
502 self._version = defaultformat
493 self._readonly = readonly
503 self._readonly = readonly
494 if data:
504 if data:
495 self._version, markers = _readmarkers(data)
505 self._version, markers = _readmarkers(data)
496 self._load(markers)
506 self._load(markers)
497
507
498 def __iter__(self):
508 def __iter__(self):
499 return iter(self._all)
509 return iter(self._all)
500
510
501 def __len__(self):
511 def __len__(self):
502 return len(self._all)
512 return len(self._all)
503
513
504 def __nonzero__(self):
514 def __nonzero__(self):
505 return bool(self._all)
515 return bool(self._all)
506
516
    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating marker you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker have been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            else:
                date = util.makedate()
        # precursor and successors must be full binary nodeids (20 bytes)
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        # a node obsoleting itself would create a trivial cycle
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        # store metadata sorted so identical markers compare (and hash) equal
        metadata = tuple(sorted(metadata.iteritems()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))
    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicate.
        Return the number of new marker."""
        if self._readonly:
            raise util.Abort('creating obsolete markers is not enabled on this '
                             'repo')
        # filter out markers already present in the store
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.sopener('obsstore', 'ab')
            try:
                # Whether the file's current position is at the begin or at
                # the end after opening a file for appending is implementation
                # defined. So we must seek to the end before calling tell(),
                # or we may get a zero offset for non-zero sized files on
                # some platforms (issue3543).
                f.seek(0, _SEEK_END)
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in encodemarkers(new, offset == 0, self._version):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            # update the in-memory indexes with the markers just written
            self._load(new)
            # new marker *may* have changed several set. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)
    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        # the decoded stream version is not kept: add() re-encodes new
        # markers using self._version
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)
    @util.nogc
    def _load(self, markers):
        # Index `markers` into the in-memory maps (successors, precursors,
        # children) without touching the on-disk file.
        for mark in markers:
            self._all.append(mark)
            pre, sucs = mark[:2]
            self.successors.setdefault(pre, set()).add(mark)
            for suc in sucs:
                self.precursors.setdefault(suc, set()).add(mark)
            parents = mark[5]
            if parents is not None:
                for p in parents:
                    self.children.setdefault(p, set()).add(mark)
        # standalone function so extensions can wrap the validity check
        _checkinvalidmarkers(self)
    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes mean:

        - marker that use this changeset as successor
        - prune marker of direct children on this changeset
        - recursive application of the two rules on precursors of these markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                # markers that produced `current`
                direct.update(precursorsmarkers.get(current, ()))
                # prune markers have an empty successors tuple (mark[1])
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            # continue the walk from the precursors of the new markers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers
def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    # iterate from newest to oldest; use a sorted copy instead of sorting
    # in place so the caller's list is not mutated as a side effect
    for v in sorted(versions, reverse=True):
        # search for highest version known on both side
        if v in formats:
            return v
    return None
# arbitrary picked to fit into 8K limit from HTTP server
# you have to take in account:
# - the version header
# - the base85 encoding
_maxpayload = 5300
def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2 # ensure we create a new part
    for marker in markers:
        # pushkey exchange uses the format 0 encoding
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            # start a new chunk; the marker list is appended in place below
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    # NOTE(review): parts are numbered in reverse order - presumably
    # intentional for consumer ordering; confirm before changing
    for idx, part in enumerate(reversed(parts)):
        # each chunk carries its own version header
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = base85.b85encode(data)
    return keys
def listmarkers(repo):
    """List markers over pushkey"""
    store = repo.obsstore
    if store:
        return _pushkeyescape(store)
    return {}
def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    # best effort transport: warn and return failure rather than aborting
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            # pushkey protocol: 1 means success
            return 1
        finally:
            tr.release()
    finally:
        lock.release()
def getmarkers(repo, nodes=None):
    """Yield `marker` objects known in a repository.

    When <nodes> is specified, only markers "relevant" to those nodes
    are yielded; otherwise every marker in the store is."""
    store = repo.obsstore
    if nodes is None:
        rawmarkers = store
    else:
        rawmarkers = store.relevantmarkers(nodes)
    for data in rawmarkers:
        yield marker(repo, data)
def relevantmarkers(repo, node):
    """Yield the obsolete markers relevant to some revision."""
    for data in repo.obsstore.relevantmarkers(node):
        yield marker(repo, data)
def precursormarkers(ctx):
    """Yield the obsolete markers using this changeset as a successor."""
    store = ctx._repo.obsstore
    for data in store.precursors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)
def successormarkers(ctx):
    """Yield the obsolete markers making this changeset obsolete."""
    store = ctx._repo.obsstore
    for data in store.successors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield every successor node of <nodes>, transitively.

    Some successors may be unknown locally. Markers whose flag field
    intersects `ignoreflags` are skipped.

    This is a linear yield unsuited to detecting split changesets. It
    includes the initial nodes too."""
    pending = set(nodes)
    emitted = set(pending)
    while pending:
        current = pending.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # skip markers flagged with any of the ignored flags
            if not (mark[2] & ignoreflags):
                fresh = [s for s in mark[1] if s not in emitted]
                emitted.update(fresh)
                pending.update(fresh)
def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield every precursor node of <nodes>, transitively.

    Some precursors may be unknown locally. Markers whose flag field
    intersects `ignoreflags` are skipped.

    This is a linear yield unsuited to detecting folded changesets. It
    includes the initial nodes too."""
    pending = set(nodes)
    emitted = set(pending)
    while pending:
        current = pending.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            # skip markers flagged with any of the ignored flags
            if mark[2] & ignoreflags:
                continue
            prec = mark[0]
            if prec not in emitted:
                emitted.add(prec)
                pending.add(prec)
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycle may result if complex situation.
    """
    # obsolescence data lives on the unfiltered repository
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        # (fixpoint: loop until the foreground stops growing)
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            # successors unknown locally (not in the nodemap) are dropped
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)
798 def successorssets(repo, initialnode, cache=None):
807 def successorssets(repo, initialnode, cache=None):
799 """Return all set of successors of initial nodes
808 """Return all set of successors of initial nodes
800
809
801 The successors set of a changeset A are a group of revisions that succeed
810 The successors set of a changeset A are a group of revisions that succeed
802 A. It succeeds A as a consistent whole, each revision being only a partial
811 A. It succeeds A as a consistent whole, each revision being only a partial
803 replacement. The successors set contains non-obsolete changesets only.
812 replacement. The successors set contains non-obsolete changesets only.
804
813
805 This function returns the full list of successor sets which is why it
814 This function returns the full list of successor sets which is why it
806 returns a list of tuples and not just a single tuple. Each tuple is a valid
815 returns a list of tuples and not just a single tuple. Each tuple is a valid
807 successors set. Not that (A,) may be a valid successors set for changeset A
816 successors set. Not that (A,) may be a valid successors set for changeset A
808 (see below).
817 (see below).
809
818
810 In most cases, a changeset A will have a single element (e.g. the changeset
819 In most cases, a changeset A will have a single element (e.g. the changeset
811 A is replaced by A') in its successors set. Though, it is also common for a
820 A is replaced by A') in its successors set. Though, it is also common for a
812 changeset A to have no elements in its successor set (e.g. the changeset
821 changeset A to have no elements in its successor set (e.g. the changeset
813 has been pruned). Therefore, the returned list of successors sets will be
822 has been pruned). Therefore, the returned list of successors sets will be
814 [(A',)] or [], respectively.
823 [(A',)] or [], respectively.
815
824
816 When a changeset A is split into A' and B', however, it will result in a
825 When a changeset A is split into A' and B', however, it will result in a
817 successors set containing more than a single element, i.e. [(A',B')].
826 successors set containing more than a single element, i.e. [(A',B')].
818 Divergent changesets will result in multiple successors sets, i.e. [(A',),
827 Divergent changesets will result in multiple successors sets, i.e. [(A',),
819 (A'')].
828 (A'')].
820
829
821 If a changeset A is not obsolete, then it will conceptually have no
830 If a changeset A is not obsolete, then it will conceptually have no
822 successors set. To distinguish this from a pruned changeset, the successor
831 successors set. To distinguish this from a pruned changeset, the successor
823 set will only contain itself, i.e. [(A,)].
832 set will only contain itself, i.e. [(A,)].
824
833
825 Finally, successors unknown locally are considered to be pruned (obsoleted
834 Finally, successors unknown locally are considered to be pruned (obsoleted
826 without any successors).
835 without any successors).
827
836
828 The optional `cache` parameter is a dictionary that may contain precomputed
837 The optional `cache` parameter is a dictionary that may contain precomputed
829 successors sets. It is meant to reuse the computation of a previous call to
838 successors sets. It is meant to reuse the computation of a previous call to
830 `successorssets` when multiple calls are made at the same time. The cache
839 `successorssets` when multiple calls are made at the same time. The cache
831 dictionary is updated in place. The caller is responsible for its live
840 dictionary is updated in place. The caller is responsible for its live
832 spawn. Code that makes multiple calls to `successorssets` *must* use this
841 spawn. Code that makes multiple calls to `successorssets` *must* use this
833 cache mechanism or suffer terrible performances.
842 cache mechanism or suffer terrible performances.
834
843
835 """
844 """
836
845
837 succmarkers = repo.obsstore.successors
846 succmarkers = repo.obsstore.successors
838
847
839 # Stack of nodes we search successors sets for
848 # Stack of nodes we search successors sets for
840 toproceed = [initialnode]
849 toproceed = [initialnode]
841 # set version of above list for fast loop detection
850 # set version of above list for fast loop detection
842 # element added to "toproceed" must be added here
851 # element added to "toproceed" must be added here
843 stackedset = set(toproceed)
852 stackedset = set(toproceed)
844 if cache is None:
853 if cache is None:
845 cache = {}
854 cache = {}
846
855
847 # This while loop is the flattened version of a recursive search for
856 # This while loop is the flattened version of a recursive search for
848 # successors sets
857 # successors sets
849 #
858 #
850 # def successorssets(x):
859 # def successorssets(x):
851 # successors = directsuccessors(x)
860 # successors = directsuccessors(x)
852 # ss = [[]]
861 # ss = [[]]
853 # for succ in directsuccessors(x):
862 # for succ in directsuccessors(x):
854 # # product as in itertools cartesian product
863 # # product as in itertools cartesian product
855 # ss = product(ss, successorssets(succ))
864 # ss = product(ss, successorssets(succ))
856 # return ss
865 # return ss
857 #
866 #
858 # But we can not use plain recursive calls here:
867 # But we can not use plain recursive calls here:
859 # - that would blow the python call stack
868 # - that would blow the python call stack
860 # - obsolescence markers may have cycles, we need to handle them.
869 # - obsolescence markers may have cycles, we need to handle them.
861 #
870 #
862 # The `toproceed` list act as our call stack. Every node we search
871 # The `toproceed` list act as our call stack. Every node we search
863 # successors set for are stacked there.
872 # successors set for are stacked there.
864 #
873 #
865 # The `stackedset` is set version of this stack used to check if a node is
874 # The `stackedset` is set version of this stack used to check if a node is
866 # already stacked. This check is used to detect cycles and prevent infinite
875 # already stacked. This check is used to detect cycles and prevent infinite
867 # loop.
876 # loop.
868 #
877 #
869 # successors set of all nodes are stored in the `cache` dictionary.
878 # successors set of all nodes are stored in the `cache` dictionary.
870 #
879 #
871 # After this while loop ends we use the cache to return the successors sets
880 # After this while loop ends we use the cache to return the successors sets
872 # for the node requested by the caller.
881 # for the node requested by the caller.
873 while toproceed:
882 while toproceed:
874 # Every iteration tries to compute the successors sets of the topmost
883 # Every iteration tries to compute the successors sets of the topmost
875 # node of the stack: CURRENT.
884 # node of the stack: CURRENT.
876 #
885 #
877 # There are four possible outcomes:
886 # There are four possible outcomes:
878 #
887 #
879 # 1) We already know the successors sets of CURRENT:
888 # 1) We already know the successors sets of CURRENT:
880 # -> mission accomplished, pop it from the stack.
889 # -> mission accomplished, pop it from the stack.
881 # 2) Node is not obsolete:
890 # 2) Node is not obsolete:
882 # -> the node is its own successors sets. Add it to the cache.
891 # -> the node is its own successors sets. Add it to the cache.
883 # 3) We do not know successors set of direct successors of CURRENT:
892 # 3) We do not know successors set of direct successors of CURRENT:
884 # -> We add those successors to the stack.
893 # -> We add those successors to the stack.
885 # 4) We know successors sets of all direct successors of CURRENT:
894 # 4) We know successors sets of all direct successors of CURRENT:
886 # -> We can compute CURRENT successors set and add it to the
895 # -> We can compute CURRENT successors set and add it to the
887 # cache.
896 # cache.
888 #
897 #
889 current = toproceed[-1]
898 current = toproceed[-1]
890 if current in cache:
899 if current in cache:
891 # case (1): We already know the successors sets
900 # case (1): We already know the successors sets
892 stackedset.remove(toproceed.pop())
901 stackedset.remove(toproceed.pop())
893 elif current not in succmarkers:
902 elif current not in succmarkers:
894 # case (2): The node is not obsolete.
903 # case (2): The node is not obsolete.
895 if current in repo:
904 if current in repo:
896 # We have a valid last successors.
905 # We have a valid last successors.
897 cache[current] = [(current,)]
906 cache[current] = [(current,)]
898 else:
907 else:
899 # Final obsolete version is unknown locally.
908 # Final obsolete version is unknown locally.
900 # Do not count that as a valid successors
909 # Do not count that as a valid successors
901 cache[current] = []
910 cache[current] = []
902 else:
911 else:
903 # cases (3) and (4)
912 # cases (3) and (4)
904 #
913 #
905 # We proceed in two phases. Phase 1 aims to distinguish case (3)
914 # We proceed in two phases. Phase 1 aims to distinguish case (3)
906 # from case (4):
915 # from case (4):
907 #
916 #
908 # For each direct successors of CURRENT, we check whether its
917 # For each direct successors of CURRENT, we check whether its
909 # successors sets are known. If they are not, we stack the
918 # successors sets are known. If they are not, we stack the
910 # unknown node and proceed to the next iteration of the while
919 # unknown node and proceed to the next iteration of the while
911 # loop. (case 3)
920 # loop. (case 3)
912 #
921 #
913 # During this step, we may detect obsolescence cycles: a node
922 # During this step, we may detect obsolescence cycles: a node
914 # with unknown successors sets but already in the call stack.
923 # with unknown successors sets but already in the call stack.
915 # In such a situation, we arbitrary set the successors sets of
924 # In such a situation, we arbitrary set the successors sets of
916 # the node to nothing (node pruned) to break the cycle.
925 # the node to nothing (node pruned) to break the cycle.
917 #
926 #
918 # If no break was encountered we proceed to phase 2.
927 # If no break was encountered we proceed to phase 2.
919 #
928 #
920 # Phase 2 computes successors sets of CURRENT (case 4); see details
929 # Phase 2 computes successors sets of CURRENT (case 4); see details
921 # in phase 2 itself.
930 # in phase 2 itself.
922 #
931 #
923 # Note the two levels of iteration in each phase.
932 # Note the two levels of iteration in each phase.
924 # - The first one handles obsolescence markers using CURRENT as
933 # - The first one handles obsolescence markers using CURRENT as
925 # precursor (successors markers of CURRENT).
934 # precursor (successors markers of CURRENT).
926 #
935 #
927 # Having multiple entry here means divergence.
936 # Having multiple entry here means divergence.
928 #
937 #
929 # - The second one handles successors defined in each marker.
938 # - The second one handles successors defined in each marker.
930 #
939 #
931 # Having none means pruned node, multiple successors means split,
940 # Having none means pruned node, multiple successors means split,
932 # single successors are standard replacement.
941 # single successors are standard replacement.
933 #
942 #
934 for mark in sorted(succmarkers[current]):
943 for mark in sorted(succmarkers[current]):
935 for suc in mark[1]:
944 for suc in mark[1]:
936 if suc not in cache:
945 if suc not in cache:
937 if suc in stackedset:
946 if suc in stackedset:
938 # cycle breaking
947 # cycle breaking
939 cache[suc] = []
948 cache[suc] = []
940 else:
949 else:
941 # case (3) If we have not computed successors sets
950 # case (3) If we have not computed successors sets
942 # of one of those successors we add it to the
951 # of one of those successors we add it to the
943 # `toproceed` stack and stop all work for this
952 # `toproceed` stack and stop all work for this
944 # iteration.
953 # iteration.
945 toproceed.append(suc)
954 toproceed.append(suc)
946 stackedset.add(suc)
955 stackedset.add(suc)
947 break
956 break
948 else:
957 else:
949 continue
958 continue
950 break
959 break
951 else:
960 else:
952 # case (4): we know all successors sets of all direct
961 # case (4): we know all successors sets of all direct
953 # successors
962 # successors
954 #
963 #
955 # Successors set contributed by each marker depends on the
964 # Successors set contributed by each marker depends on the
956 # successors sets of all its "successors" node.
965 # successors sets of all its "successors" node.
957 #
966 #
958 # Each different marker is a divergence in the obsolescence
967 # Each different marker is a divergence in the obsolescence
959 # history. It contributes successors sets distinct from other
968 # history. It contributes successors sets distinct from other
960 # markers.
969 # markers.
961 #
970 #
962 # Within a marker, a successor may have divergent successors
971 # Within a marker, a successor may have divergent successors
963 # sets. In such a case, the marker will contribute multiple
972 # sets. In such a case, the marker will contribute multiple
964 # divergent successors sets. If multiple successors have
973 # divergent successors sets. If multiple successors have
965 # divergent successors sets, a Cartesian product is used.
974 # divergent successors sets, a Cartesian product is used.
966 #
975 #
967 # At the end we post-process successors sets to remove
976 # At the end we post-process successors sets to remove
968 # duplicated entry and successors set that are strict subset of
977 # duplicated entry and successors set that are strict subset of
969 # another one.
978 # another one.
970 succssets = []
979 succssets = []
971 for mark in sorted(succmarkers[current]):
980 for mark in sorted(succmarkers[current]):
972 # successors sets contributed by this marker
981 # successors sets contributed by this marker
973 markss = [[]]
982 markss = [[]]
974 for suc in mark[1]:
983 for suc in mark[1]:
975 # cardinal product with previous successors
984 # cardinal product with previous successors
976 productresult = []
985 productresult = []
977 for prefix in markss:
986 for prefix in markss:
978 for suffix in cache[suc]:
987 for suffix in cache[suc]:
979 newss = list(prefix)
988 newss = list(prefix)
980 for part in suffix:
989 for part in suffix:
981 # do not duplicated entry in successors set
990 # do not duplicated entry in successors set
982 # first entry wins.
991 # first entry wins.
983 if part not in newss:
992 if part not in newss:
984 newss.append(part)
993 newss.append(part)
985 productresult.append(newss)
994 productresult.append(newss)
986 markss = productresult
995 markss = productresult
987 succssets.extend(markss)
996 succssets.extend(markss)
988 # remove duplicated and subset
997 # remove duplicated and subset
989 seen = []
998 seen = []
990 final = []
999 final = []
991 candidate = sorted(((set(s), s) for s in succssets if s),
1000 candidate = sorted(((set(s), s) for s in succssets if s),
992 key=lambda x: len(x[1]), reverse=True)
1001 key=lambda x: len(x[1]), reverse=True)
993 for setversion, listversion in candidate:
1002 for setversion, listversion in candidate:
994 for seenset in seen:
1003 for seenset in seen:
995 if setversion.issubset(seenset):
1004 if setversion.issubset(seenset):
996 break
1005 break
997 else:
1006 else:
998 final.append(listversion)
1007 final.append(listversion)
999 seen.append(setversion)
1008 seen.append(setversion)
1000 final.reverse() # put small successors set first
1009 final.reverse() # put small successors set first
1001 cache[current] = final
1010 cache[current] = final
1002 return cache[initialnode]
1011 return cache[initialnode]
1003
1012
1004 def _knownrevs(repo, nodes):
1013 def _knownrevs(repo, nodes):
1005 """yield revision numbers of known nodes passed in parameters
1014 """yield revision numbers of known nodes passed in parameters
1006
1015
1007 Unknown revisions are silently ignored."""
1016 Unknown revisions are silently ignored."""
1008 torev = repo.changelog.nodemap.get
1017 torev = repo.changelog.nodemap.get
1009 for n in nodes:
1018 for n in nodes:
1010 rev = torev(n)
1019 rev = torev(n)
1011 if rev is not None:
1020 if rev is not None:
1012 yield rev
1021 yield rev
1013
1022
1014 # mapping of 'set-name' -> <function to compute this set>
1023 # mapping of 'set-name' -> <function to compute this set>
1015 cachefuncs = {}
1024 cachefuncs = {}
1016 def cachefor(name):
1025 def cachefor(name):
1017 """Decorator to register a function as computing the cache for a set"""
1026 """Decorator to register a function as computing the cache for a set"""
1018 def decorator(func):
1027 def decorator(func):
1019 assert name not in cachefuncs
1028 assert name not in cachefuncs
1020 cachefuncs[name] = func
1029 cachefuncs[name] = func
1021 return func
1030 return func
1022 return decorator
1031 return decorator
1023
1032
1024 def getrevs(repo, name):
1033 def getrevs(repo, name):
1025 """Return the set of revision that belong to the <name> set
1034 """Return the set of revision that belong to the <name> set
1026
1035
1027 Such access may compute the set and cache it for future use"""
1036 Such access may compute the set and cache it for future use"""
1028 repo = repo.unfiltered()
1037 repo = repo.unfiltered()
1029 if not repo.obsstore:
1038 if not repo.obsstore:
1030 return frozenset()
1039 return frozenset()
1031 if name not in repo.obsstore.caches:
1040 if name not in repo.obsstore.caches:
1032 repo.obsstore.caches[name] = cachefuncs[name](repo)
1041 repo.obsstore.caches[name] = cachefuncs[name](repo)
1033 return repo.obsstore.caches[name]
1042 return repo.obsstore.caches[name]
1034
1043
1035 # To be simple we need to invalidate obsolescence cache when:
1044 # To be simple we need to invalidate obsolescence cache when:
1036 #
1045 #
1037 # - new changeset is added:
1046 # - new changeset is added:
1038 # - public phase is changed
1047 # - public phase is changed
1039 # - obsolescence marker are added
1048 # - obsolescence marker are added
1040 # - strip is used a repo
1049 # - strip is used a repo
1041 def clearobscaches(repo):
1050 def clearobscaches(repo):
1042 """Remove all obsolescence related cache from a repo
1051 """Remove all obsolescence related cache from a repo
1043
1052
1044 This remove all cache in obsstore is the obsstore already exist on the
1053 This remove all cache in obsstore is the obsstore already exist on the
1045 repo.
1054 repo.
1046
1055
1047 (We could be smarter here given the exact event that trigger the cache
1056 (We could be smarter here given the exact event that trigger the cache
1048 clearing)"""
1057 clearing)"""
1049 # only clear cache is there is obsstore data in this repo
1058 # only clear cache is there is obsstore data in this repo
1050 if 'obsstore' in repo._filecache:
1059 if 'obsstore' in repo._filecache:
1051 repo.obsstore.caches.clear()
1060 repo.obsstore.caches.clear()
1052
1061
1053 @cachefor('obsolete')
1062 @cachefor('obsolete')
1054 def _computeobsoleteset(repo):
1063 def _computeobsoleteset(repo):
1055 """the set of obsolete revisions"""
1064 """the set of obsolete revisions"""
1056 obs = set()
1065 obs = set()
1057 getrev = repo.changelog.nodemap.get
1066 getrev = repo.changelog.nodemap.get
1058 getphase = repo._phasecache.phase
1067 getphase = repo._phasecache.phase
1059 for n in repo.obsstore.successors:
1068 for n in repo.obsstore.successors:
1060 rev = getrev(n)
1069 rev = getrev(n)
1061 if rev is not None and getphase(repo, rev):
1070 if rev is not None and getphase(repo, rev):
1062 obs.add(rev)
1071 obs.add(rev)
1063 return obs
1072 return obs
1064
1073
1065 @cachefor('unstable')
1074 @cachefor('unstable')
1066 def _computeunstableset(repo):
1075 def _computeunstableset(repo):
1067 """the set of non obsolete revisions with obsolete parents"""
1076 """the set of non obsolete revisions with obsolete parents"""
1068 # revset is not efficient enough here
1077 # revset is not efficient enough here
1069 # we do (obsolete()::) - obsolete() by hand
1078 # we do (obsolete()::) - obsolete() by hand
1070 obs = getrevs(repo, 'obsolete')
1079 obs = getrevs(repo, 'obsolete')
1071 if not obs:
1080 if not obs:
1072 return set()
1081 return set()
1073 cl = repo.changelog
1082 cl = repo.changelog
1074 return set(r for r in cl.descendants(obs) if r not in obs)
1083 return set(r for r in cl.descendants(obs) if r not in obs)
1075
1084
1076 @cachefor('suspended')
1085 @cachefor('suspended')
1077 def _computesuspendedset(repo):
1086 def _computesuspendedset(repo):
1078 """the set of obsolete parents with non obsolete descendants"""
1087 """the set of obsolete parents with non obsolete descendants"""
1079 suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
1088 suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
1080 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
1089 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
1081
1090
1082 @cachefor('extinct')
1091 @cachefor('extinct')
1083 def _computeextinctset(repo):
1092 def _computeextinctset(repo):
1084 """the set of obsolete parents without non obsolete descendants"""
1093 """the set of obsolete parents without non obsolete descendants"""
1085 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
1094 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
1086
1095
1087
1096
1088 @cachefor('bumped')
1097 @cachefor('bumped')
1089 def _computebumpedset(repo):
1098 def _computebumpedset(repo):
1090 """the set of revs trying to obsolete public revisions"""
1099 """the set of revs trying to obsolete public revisions"""
1091 bumped = set()
1100 bumped = set()
1092 # util function (avoid attribute lookup in the loop)
1101 # util function (avoid attribute lookup in the loop)
1093 phase = repo._phasecache.phase # would be faster to grab the full list
1102 phase = repo._phasecache.phase # would be faster to grab the full list
1094 public = phases.public
1103 public = phases.public
1095 cl = repo.changelog
1104 cl = repo.changelog
1096 torev = cl.nodemap.get
1105 torev = cl.nodemap.get
1097 obs = getrevs(repo, 'obsolete')
1106 obs = getrevs(repo, 'obsolete')
1098 for rev in repo:
1107 for rev in repo:
1099 # We only evaluate mutable, non-obsolete revision
1108 # We only evaluate mutable, non-obsolete revision
1100 if (public < phase(repo, rev)) and (rev not in obs):
1109 if (public < phase(repo, rev)) and (rev not in obs):
1101 node = cl.node(rev)
1110 node = cl.node(rev)
1102 # (future) A cache of precursors may worth if split is very common
1111 # (future) A cache of precursors may worth if split is very common
1103 for pnode in allprecursors(repo.obsstore, [node],
1112 for pnode in allprecursors(repo.obsstore, [node],
1104 ignoreflags=bumpedfix):
1113 ignoreflags=bumpedfix):
1105 prev = torev(pnode) # unfiltered! but so is phasecache
1114 prev = torev(pnode) # unfiltered! but so is phasecache
1106 if (prev is not None) and (phase(repo, prev) <= public):
1115 if (prev is not None) and (phase(repo, prev) <= public):
1107 # we have a public precursors
1116 # we have a public precursors
1108 bumped.add(rev)
1117 bumped.add(rev)
1109 break # Next draft!
1118 break # Next draft!
1110 return bumped
1119 return bumped
1111
1120
1112 @cachefor('divergent')
1121 @cachefor('divergent')
1113 def _computedivergentset(repo):
1122 def _computedivergentset(repo):
1114 """the set of rev that compete to be the final successors of some revision.
1123 """the set of rev that compete to be the final successors of some revision.
1115 """
1124 """
1116 divergent = set()
1125 divergent = set()
1117 obsstore = repo.obsstore
1126 obsstore = repo.obsstore
1118 newermap = {}
1127 newermap = {}
1119 for ctx in repo.set('(not public()) - obsolete()'):
1128 for ctx in repo.set('(not public()) - obsolete()'):
1120 mark = obsstore.precursors.get(ctx.node(), ())
1129 mark = obsstore.precursors.get(ctx.node(), ())
1121 toprocess = set(mark)
1130 toprocess = set(mark)
1122 while toprocess:
1131 while toprocess:
1123 prec = toprocess.pop()[0]
1132 prec = toprocess.pop()[0]
1124 if prec not in newermap:
1133 if prec not in newermap:
1125 successorssets(repo, prec, newermap)
1134 successorssets(repo, prec, newermap)
1126 newer = [n for n in newermap[prec] if n]
1135 newer = [n for n in newermap[prec] if n]
1127 if len(newer) > 1:
1136 if len(newer) > 1:
1128 divergent.add(ctx.rev())
1137 divergent.add(ctx.rev())
1129 break
1138 break
1130 toprocess.update(obsstore.precursors.get(prec, ()))
1139 toprocess.update(obsstore.precursors.get(prec, ()))
1131 return divergent
1140 return divergent
1132
1141
1133
1142
1134 def createmarkers(repo, relations, flag=0, date=None, metadata=None):
1143 def createmarkers(repo, relations, flag=0, date=None, metadata=None):
1135 """Add obsolete markers between changesets in a repo
1144 """Add obsolete markers between changesets in a repo
1136
1145
1137 <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
1146 <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
1138 tuple. `old` and `news` are changectx. metadata is an optional dictionary
1147 tuple. `old` and `news` are changectx. metadata is an optional dictionary
1139 containing metadata for this marker only. It is merged with the global
1148 containing metadata for this marker only. It is merged with the global
1140 metadata specified through the `metadata` argument of this function,
1149 metadata specified through the `metadata` argument of this function,
1141
1150
1142 Trying to obsolete a public changeset will raise an exception.
1151 Trying to obsolete a public changeset will raise an exception.
1143
1152
1144 Current user and date are used except if specified otherwise in the
1153 Current user and date are used except if specified otherwise in the
1145 metadata attribute.
1154 metadata attribute.
1146
1155
1147 This function operates within a transaction of its own, but does
1156 This function operates within a transaction of its own, but does
1148 not take any lock on the repo.
1157 not take any lock on the repo.
1149 """
1158 """
1150 # prepare metadata
1159 # prepare metadata
1151 if metadata is None:
1160 if metadata is None:
1152 metadata = {}
1161 metadata = {}
1153 if 'user' not in metadata:
1162 if 'user' not in metadata:
1154 metadata['user'] = repo.ui.username()
1163 metadata['user'] = repo.ui.username()
1155 tr = repo.transaction('add-obsolescence-marker')
1164 tr = repo.transaction('add-obsolescence-marker')
1156 try:
1165 try:
1157 for rel in relations:
1166 for rel in relations:
1158 prec = rel[0]
1167 prec = rel[0]
1159 sucs = rel[1]
1168 sucs = rel[1]
1160 localmetadata = metadata.copy()
1169 localmetadata = metadata.copy()
1161 if 2 < len(rel):
1170 if 2 < len(rel):
1162 localmetadata.update(rel[2])
1171 localmetadata.update(rel[2])
1163
1172
1164 if not prec.mutable():
1173 if not prec.mutable():
1165 raise util.Abort("cannot obsolete immutable changeset: %s"
1174 raise util.Abort("cannot obsolete immutable changeset: %s"
1166 % prec)
1175 % prec)
1167 nprec = prec.node()
1176 nprec = prec.node()
1168 nsucs = tuple(s.node() for s in sucs)
1177 nsucs = tuple(s.node() for s in sucs)
1169 npare = None
1178 npare = None
1170 if not nsucs:
1179 if not nsucs:
1171 npare = tuple(p.node() for p in prec.parents())
1180 npare = tuple(p.node() for p in prec.parents())
1172 if nprec in nsucs:
1181 if nprec in nsucs:
1173 raise util.Abort("changeset %s cannot obsolete itself" % prec)
1182 raise util.Abort("changeset %s cannot obsolete itself" % prec)
1174 repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
1183 repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
1175 date=date, metadata=localmetadata)
1184 date=date, metadata=localmetadata)
1176 repo.filteredrevcache.clear()
1185 repo.filteredrevcache.clear()
1177 tr.close()
1186 tr.close()
1178 finally:
1187 finally:
1179 tr.release()
1188 tr.release()
1180
1189
1181 def isenabled(repo, option):
1190 def isenabled(repo, option):
1182 """Returns True if the given repository has the given obsolete option
1191 """Returns True if the given repository has the given obsolete option
1183 enabled.
1192 enabled.
1184 """
1193 """
1185 result = set(repo.ui.configlist('experimental', 'evolution'))
1194 result = set(repo.ui.configlist('experimental', 'evolution'))
1186 if 'all' in result:
1195 if 'all' in result:
1187 return True
1196 return True
1188
1197
1189 # For migration purposes, temporarily return true if the config hasn't been
1198 # For migration purposes, temporarily return true if the config hasn't been
1190 # set but _enabled is true.
1199 # set but _enabled is true.
1191 if len(result) == 0 and _enabled:
1200 if len(result) == 0 and _enabled:
1192 return True
1201 return True
1193
1202
1194 # createmarkers must be enabled if other options are enabled
1203 # createmarkers must be enabled if other options are enabled
1195 if ((allowunstableopt in result or exchangeopt in result) and
1204 if ((allowunstableopt in result or exchangeopt in result) and
1196 not createmarkersopt in result):
1205 not createmarkersopt in result):
1197 raise util.Abort(_("'createmarkers' obsolete option must be enabled "
1206 raise util.Abort(_("'createmarkers' obsolete option must be enabled "
1198 "if other obsolete options are enabled"))
1207 "if other obsolete options are enabled"))
1199
1208
1200 return option in result
1209 return option in result
General Comments 0
You need to be logged in to leave comments. Login now