obsolete: make optional offset parameter to fm*readmarkers required...
Augie Fackler
r24014:7d9367de default
@@ -1,1209 +1,1209 @@
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "precursor" and possible
replacements are called "successors". Markers that used changeset X as
a precursor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "precursor markers of Y" because they hold
information about the precursors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

  (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. The marker format depends on the
version. See the comment associated with each format for details.

"""
import struct
import util, base85, node
import phases
from i18n import _

_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize

_SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# Options for obsolescence
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'

### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it is a successor of a public changeset.
#
#     o  A' (bumped)
#     |`:
#     | o  A
#     |/
#     o  Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'.
#
#     o  Ad
#     |`:
#     | x  A'
#     |'|
#     o |  A
#     |/
#     o  Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker, <A', (Ad,)>.
# This flag means that the successor expresses the changes between the public
# and bumped versions and fixes the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
usingsha256 = 2

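# Illustrative sketch (not part of the original file): the bumped-fix marker
# from the diagram above is <A', (Ad,)> carrying the `bumpedfix` flag, so
# that Ad is not itself considered bumped by transitivity. The node values
# below are hypothetical 20-byte identifiers.
#
#   >>> Aprime, Ad = 'a' * 20, 'd' * 20
#   >>> fixmarker = (Aprime, (Ad,), bumpedfix, (), (0.0, 0), None)
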
## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)

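# Illustrative sketch (not part of the original file): packing and unpacking
# the fixed part of a version-0 marker with the struct format above.
# '>BIB20s' is big-endian: uint8 successor count, uint32 metadata size,
# uint8 flags, and a 20-byte precursor node.
#
#   >>> fixed = _pack(_fm0fixed, 2, 0, 0, '\x11' * 20)
#   >>> len(fixed) == _fm0fsize
#   True
#   >>> _unpack(_fm0fixed, fixed)[:3]
#   (2, 0, 0)
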
-def _fm0readmarkers(data, off=0):
+def _fm0readmarkers(data, off):
    # Loop on markers
    l = len(data)
    while off + _fm0fsize <= l:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise util.Abort(_('parsing obsolete marker: metadata is too '
                               'short, %d bytes expected, got %d')
                             % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)

def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise util.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents):
            metadata['p%i' % (i + 1)] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

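# Illustrative sketch (not part of the original file): round-tripping one
# version-0 marker through the encoder above and the parser _fm0readmarkers.
# The nodes are hypothetical 20-byte ids; the offset is 0 because no version
# header is included in the raw data.
#
#   >>> m = ('\x11' * 20, ('\x22' * 20,), 0, (), (0.0, 0), None)
#   >>> raw = _fm0encodeonemarker(m)
#   >>> list(_fm0readmarkers(raw, 0)) == [m]
#   True
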
def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata keys")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata values")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            key, value = l.split(':')
            d[key] = value
    return d

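# Illustrative sketch (not part of the original file): the metadata encoding
# round-trips, and keys are sorted so the encoded form is deterministic.
#
#   >>> blob = _fm0encodemeta({'user': 'alice', 'note': 'amended'})
#   >>> blob
#   'note:amended\x00user:alice'
#   >>> _fm0decodemeta(blob) == {'user': 'alice', 'note': 'amended'}
#   True
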
## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: precursor changeset identifier.
#
# - N*(20 or 32) bytes: successor changesets identifiers.
#
# - P*(20 or 32) bytes: parents of the precursor changeset.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize('BB')

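# Illustrative sketch (not part of the original file): the version-1 fixed
# part round-trips through struct. '>IdhHBBB20s' is big-endian: uint32 total
# size, float64 date, int16 timezone, uint16 flags, uint8 successor count,
# uint8 parent count (where 3 means "no parent data stored"), uint8 metadata
# count, and a 20-byte precursor node.
#
#   >>> fields = (0, 0.0, 0, 0, 1, _fm1parentnone, 0, '\x11' * 20)
#   >>> _unpack(_fm1fixed, _pack(_fm1fixed, *fields)) == fields
#   True
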
-def _fm1readmarkers(data, off=0):
+def _fm1readmarkers(data, off):
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    stop = len(data) - _fm1fsize
    ufixed = util.unpacker(_fm1fixed)
    while off <= stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)

def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)

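# Illustrative sketch (not part of the original file): round-tripping one
# version-1 marker, metadata and parent included. The timezone is stored in
# minutes, so only offsets that are multiples of 60 seconds survive intact.
#
#   >>> m = ('\x11' * 20, ('\x22' * 20,), 0, (('user', 'alice'),),
#   ...      (0.0, 120), ('\x33' * 20,))
#   >>> raw = _fm1encodeonemarker(m)
#   >>> list(_fm1readmarkers(raw, 0)) == [m]
#   True
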
# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}

@util.nogc
def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    off = 0
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion not in formats:
        raise util.Abort(_('parsing obsolete marker: unknown version %r')
                         % diskversion)
    return diskversion, formats[diskversion][0](data, off)

def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield _pack('>B', version)
    for marker in markers:
        yield encodeone(marker)


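# Illustrative sketch (not part of the original file): a complete obsstore
# blob is the version byte followed by encoded markers; _readmarkers peels
# the version byte off and dispatches to the matching parser, which is why
# the per-format readers now take an explicit offset.
#
#   >>> m = ('\x11' * 20, (), 0, (), (0.0, 0), None)
#   >>> blob = ''.join(encodemarkers([m], addheader=True,
#   ...                              version=_fm0version))
#   >>> version, markers = _readmarkers(blob)
#   >>> version == _fm0version and list(markers) == [m]
#   True
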
class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the precursors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]

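# Illustrative sketch (not part of the original file): the wrapper gives
# named accessors over the raw 6-tuple. Passing None for repo works here
# only because the accessors shown above never touch it.
#
#   >>> raw = ('\x11' * 20, ('\x22' * 20,), 0, (('user', 'alice'),),
#   ...        (0.0, 0), None)
#   >>> m = marker(None, raw)
#   >>> m.precnode() == raw[0] and m.metadata() == {'user': 'alice'}
#   True
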
def _checkinvalidmarkers(obsstore):
    """search for markers with invalid data and raise an error if needed

    Exists as a separate function to allow the evolve extension a more
    subtle handling.
    """
    if node.nullid in obsstore.precursors:
        raise util.Abort(_('bad obsolescence marker detected: '
                           'invalid successors nullid'))

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x]   -> set(markers on precursors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec:    nodeid, precursor changesets
    # succs:   tuple of nodeid, successor changesets (0-N length)
    # flag:    integer, flag field carrying modifier for the markers (see doc)
    # meta:    binary blob, encoded metadata dictionary
    # date:    (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    #          None is used when no data has been recorded

    def __init__(self, sopener, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related caches
        self.caches = {}
        self._all = []
        self.precursors = {}
        self.successors = {}
        self.children = {}
        self.sopener = sopener
        data = sopener.tryread('obsstore')
        self._version = defaultformat
        self._readonly = readonly
        if data:
            self._version, markers = _readmarkers(data)
            self._load(markers)

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        return bool(self._all)

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating markers, you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker has been added, False if the marker
        already existed (no-op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            else:
                date = util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering out duplicates.
        Return the number of new markers."""
        if self._readonly:
            raise util.Abort('creating obsolete markers is not enabled on this '
                             'repo')
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.sopener('obsstore', 'ab')
            try:
                # Whether the file's current position is at the beginning or
                # at the end after opening a file for appending is
                # implementation defined. So we must seek to the end before
                # calling tell(), or we may get a zero offset for non-zero
                # sized files on some platforms (issue3543).
                f.seek(0, _SEEK_END)
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in encodemarkers(new, offset == 0, self._version):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._load(new)
            # new markers *may* have changed several sets. invalidate the
            # cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @util.nogc
    def _load(self, markers):
        for mark in markers:
            self._all.append(mark)
            pre, sucs = mark[:2]
            self.successors.setdefault(pre, set()).add(mark)
            for suc in sucs:
                self.precursors.setdefault(suc, set()).add(mark)
            parents = mark[5]
            if parents is not None:
                for p in parents:
                    self.children.setdefault(p, set()).add(mark)
        _checkinvalidmarkers(self)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use one of these changesets as a successor
        - prune markers of direct children of these changesets
        - recursive application of the two rules on precursors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers

def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None

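# Illustrative sketch (not part of the original file): picking the newest
# format known on both sides. With local formats {0, 1}, a peer advertising
# [2, 1, 0] settles on 1, and a peer that only knows [5] yields None.
#
#   >>> commonversion([2, 1, 0])
#   1
#   >>> commonversion([5]) is None
#   True
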
# arbitrarily picked to fit into 8K limit from HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = base85.b85encode(data)
    return keys

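# Illustrative sketch (not part of the original file): a single marker fits
# in one chunk, so one pushkey entry comes back; each value is a base85
# encoded version-0 blob kept under roughly _maxpayload bytes before
# encoding.
#
#   >>> m = ('\x11' * 20, (), 0, (), (0.0, 0), None)
#   >>> keys = _pushkeyescape([m])
#   >>> sorted(keys.keys())
#   ['dump0']
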
def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(repo.obsstore)

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()

def getmarkers(repo, nodes=None):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    if nodes is None:
        rawmarkers = repo.obsstore
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for markerdata in rawmarkers:
        yield marker(repo, markerdata)

def relevantmarkers(repo, node):
    """all obsolete markers relevant to some revision"""
    for markerdata in repo.obsstore.relevantmarkers(node):
        yield marker(repo, markerdata)


def precursormarkers(ctx):
    """obsolete markers marking this changeset as a successor"""
    for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def successormarkers(ctx):
    """obsolete markers making this changeset obsolete"""
    for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)

def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursor of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""

    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)

def foreground(repo, nodes):
    """return all nodes in the "foreground" of other nodes

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycles may result in complex situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)


807 def successorssets(repo, initialnode, cache=None):
807 def successorssets(repo, initialnode, cache=None):
808 """Return all set of successors of initial nodes
808 """Return all set of successors of initial nodes
809
809
810 The successors set of a changeset A are a group of revisions that succeed
810 The successors set of a changeset A are a group of revisions that succeed
811 A. It succeeds A as a consistent whole, each revision being only a partial
811 A. It succeeds A as a consistent whole, each revision being only a partial
812 replacement. The successors set contains non-obsolete changesets only.
812 replacement. The successors set contains non-obsolete changesets only.
813
813
814 This function returns the full list of successor sets which is why it
814 This function returns the full list of successor sets which is why it
815 returns a list of tuples and not just a single tuple. Each tuple is a valid
815 returns a list of tuples and not just a single tuple. Each tuple is a valid
816 successors set. Not that (A,) may be a valid successors set for changeset A
816 successors set. Not that (A,) may be a valid successors set for changeset A
817 (see below).
817 (see below).
818
818
819 In most cases, a changeset A will have a single element (e.g. the changeset
819 In most cases, a changeset A will have a single element (e.g. the changeset
820 A is replaced by A') in its successors set. Though, it is also common for a
820 A is replaced by A') in its successors set. Though, it is also common for a
821 changeset A to have no elements in its successor set (e.g. the changeset
821 changeset A to have no elements in its successor set (e.g. the changeset
822 has been pruned). Therefore, the returned list of successors sets will be
822 has been pruned). Therefore, the returned list of successors sets will be
823 [(A',)] or [], respectively.
823 [(A',)] or [], respectively.
824
824
825 When a changeset A is split into A' and B', however, it will result in a
825 When a changeset A is split into A' and B', however, it will result in a
826 successors set containing more than a single element, i.e. [(A',B')].
826 successors set containing more than a single element, i.e. [(A',B')].
827 Divergent changesets will result in multiple successors sets, i.e. [(A',),
827 Divergent changesets will result in multiple successors sets, i.e. [(A',),
828 (A'')].
828 (A'')].
829
829
830 If a changeset A is not obsolete, then it will conceptually have no
830 If a changeset A is not obsolete, then it will conceptually have no
831 successors set. To distinguish this from a pruned changeset, the successor
831 successors set. To distinguish this from a pruned changeset, the successor
832 set will only contain itself, i.e. [(A,)].
832 set will only contain itself, i.e. [(A,)].
833
833
834 Finally, successors unknown locally are considered to be pruned (obsoleted
834 Finally, successors unknown locally are considered to be pruned (obsoleted
835 without any successors).
835 without any successors).
836
836
837 The optional `cache` parameter is a dictionary that may contain precomputed
837 The optional `cache` parameter is a dictionary that may contain precomputed
838 successors sets. It is meant to reuse the computation of a previous call to
838 successors sets. It is meant to reuse the computation of a previous call to
839 `successorssets` when multiple calls are made at the same time. The cache
839 `successorssets` when multiple calls are made at the same time. The cache
840 dictionary is updated in place. The caller is responsible for its live
840 dictionary is updated in place. The caller is responsible for its live
841 spawn. Code that makes multiple calls to `successorssets` *must* use this
841 spawn. Code that makes multiple calls to `successorssets` *must* use this
842 cache mechanism or suffer terrible performances.
842 cache mechanism or suffer terrible performances.
843
843
844 """
844 """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # elements added to "toproceed" must be added here as well
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in successors:
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we cannot use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list acts as our call stack. Every node we search
    # successors sets for is stacked there.
    #
    # The `stackedset` is the set version of this stack, used to check if a
    # node is already stacked. This check is used to detect cycles and
    # prevent infinite loops.
    #
    # The successors sets of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors
    # sets for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know the successors sets of direct successors of
        #    CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know the successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT's successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid last successor.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successor.
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            # For each direct successor of CURRENT, we check whether its
            # successors sets are known. If they are not, we stack the
            # unknown node and proceed to the next iteration of the while
            # loop. (case 3)
            #
            # During this step, we may detect obsolescence cycles: a node
            # with unknown successors sets but already in the call stack.
            # In such a situation, we arbitrarily set the successors sets of
            # the node to nothing (node pruned) to break the cycle.
            #
            # If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes the successors sets of CURRENT (case 4); see
            # details in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   a precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means a pruned node; multiple successors mean a
            #   split; a single successor is a standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed the successors
                            # sets of one of those successors, we add it to
                            # the `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know the successors sets of all direct
                # successors
                #
                # The successors set contributed by each marker depends on
                # the successors sets of all its "successors" nodes.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # cartesian product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicate entries in a successors
                                    # set; the first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicates and subsets
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse() # put small successors sets first
                cache[current] = final
    return cache[initialnode]

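# Illustrative usage sketch (not part of the original module): the docstring
# above requires callers making several queries to share one cache dict. The
# `repo` and `nodes` arguments are hypothetical stand-ins for a repository
# object and an iterable of node ids.
def _example_batchsuccessorssets(repo, nodes):
    cache = {}  # shared so each call reuses the previous computations
    return dict((n, successorssets(repo, n, cache)) for n in nodes)
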
def _knownrevs(repo, nodes):
    """yield revision numbers of the known nodes passed as parameters

    Unknown revisions are silently ignored."""
    torev = repo.changelog.nodemap.get
    for n in nodes:
        rev = torev(n)
        if rev is not None:
            yield rev

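# Illustrative usage sketch (not part of the original module): unknown nodes
# are silently dropped, so the output may be shorter than the input. `repo`
# is a hypothetical non-empty repository; the fake node is assumed absent.
def _example_knownrevs(repo):
    fakenode = '\x01' * 20  # assumed not to name any real changeset
    nodes = [repo.changelog.node(0), fakenode]
    return list(_knownrevs(repo, nodes))  # expected: [0]
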
# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return decorator

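# Illustrative sketch (not part of the original module): how a new named set
# would be registered. The 'example' name is hypothetical; the real sets
# registered below use exactly this pattern.
@cachefor('example')
def _computeexampleset(repo):
    """hypothetical empty set, demonstrating registration only"""
    return frozenset()
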
def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]

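# Illustrative usage sketch (not part of the original module): a membership
# test against a named set. The first call computes and caches the set; later
# calls hit the cache until it is cleared.
def _example_isobsolete(repo, rev):
    return rev in getrevs(repo, 'obsolete')
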
# To be simple we need to invalidate obsolescence caches when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence-related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists
    on the repo.

    (We could be smarter here given the exact event that triggered the
    cache clearing)"""
    # only clear caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

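# Illustrative usage sketch (not part of the original module): per the
# invalidation rules above, a caller that just added markers or stripped
# revisions would drop the cached sets before querying them again.
def _example_refreshobsolete(repo):
    clearobscaches(repo)
    return getrevs(repo, 'obsolete')  # recomputed from scratch
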
@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    obs = set()
    getrev = repo.changelog.nodemap.get
    getphase = repo._phasecache.phase
    for n in repo.obsstore.successors:
        rev = getrev(n)
        # a non-zero phase means the revision is mutable (not public)
        if rev is not None and getphase(repo, rev):
            obs.add(rev)
    return obs

@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non-obsolete revisions with obsolete parents"""
    # revset is not efficient enough here
    # we do (obsolete()::) - obsolete() by hand
    obs = getrevs(repo, 'obsolete')
    if not obs:
        return set()
    cl = repo.changelog
    return set(r for r in cl.descendants(obs) if r not in obs)

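# Illustrative sketch (not part of the original module): the revset spelling
# of the hand-rolled computation above. It should select the same revisions,
# only more slowly, which is why the code avoids it.
def _example_unstablerevset(repo):
    return set(repo.revs('(obsolete()::) - obsolete()'))
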
@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non-obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non-obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')


@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    obs = getrevs(repo, 'obsolete')
    for rev in repo:
        # We only evaluate mutable, non-obsolete revisions
        if (public < phase(repo, rev)) and (rev not in obs):
            node = cl.node(rev)
            # (future) A cache of precursors may be worth it if splits are
            # very common
            for pnode in allprecursors(repo.obsstore, [node],
                                       ignoreflags=bumpedfix):
                prev = torev(pnode) # unfiltered! but so is phasecache
                if (prev is not None) and (phase(repo, prev) <= public):
                    # we have a public precursor
                    bumped.add(rev)
                    break # Next draft!
    return bumped

@cachefor('divergent')
def _computedivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision"""
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        while toprocess:
            prec = toprocess.pop()[0]
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent

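# Illustrative sketch (not part of the original module): the core divergence
# test for a single changectx. Unlike the full computation above, this only
# inspects direct precursors rather than walking them transitively.
def _example_isdivergent(repo, ctx):
    newermap = {}
    for mark in repo.obsstore.precursors.get(ctx.node(), ()):
        prec = mark[0]  # the precursor node recorded in the marker
        if prec not in newermap:
            successorssets(repo, prec, newermap)
        newer = [n for n in newermap[prec] if n]
        if len(newer) > 1:  # several competing non-empty successors sets
            return True
    return False
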
def createmarkers(repo, relations, flag=0, date=None, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuples. `old` and `news` are changectx objects. metadata is an optional
    dictionary containing metadata for this marker only. It is merged with
    the global metadata specified through the `metadata` argument of this
    function.

    Trying to obsolete a public changeset will raise an exception.

    The current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    tr = repo.transaction('add-obsolescence-marker')
    try:
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise util.Abort("cannot obsolete immutable changeset: %s"
                                 % prec)
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise util.Abort("changeset %s cannot obsolete itself" % prec)
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata)
            repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()

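# Illustrative usage sketch (not part of the original module): record that
# `old` was rewritten into `new` (both hypothetical changectx objects),
# attaching a per-relation metadata dict as described in the docstring above.
def _example_recordamend(repo, old, new):
    createmarkers(repo, [(old, (new,), {'note': 'amended'})])
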
def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    result = set(repo.ui.configlist('experimental', 'evolution'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't
    # been set but _enabled is true.
    if len(result) == 0 and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        createmarkersopt not in result):
        raise util.Abort(_("'createmarkers' obsolete option must be enabled "
                           "if other obsolete options are enabled"))

    return option in result
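
# Illustrative usage sketch (not part of the original module): feature-gating
# a caller on one of the obsolescence options checked above.
def _example_requirecreatemarkers(repo):
    if not isenabled(repo, createmarkersopt):
        raise util.Abort(_("obsolescence marker creation is not enabled"))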