readmarkers: use unpacker for fixed header
Matt Mackall - r23797:eb12c3cf default
@@ -1,1168 +1,1169 @@
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "precursor" and possible
replacements are called "successors". Markers that used changeset X as
a precursor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "precursor markers of Y" because they hold
information about the precursors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

  (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. Marker format depends on the version.
See the comment associated with each format for details.

"""
import struct
import util, base85, node
import phases
from i18n import _

_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize

_SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# Options for obsolescence
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'

### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it's a successor of a public changeset
#
#     o    A' (bumped)
#     |`:
#     | o  A
#     |/
#     o    Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'.
#
#     o   Ad
#     |`:
#     | x A'
#     |'|
#     o | A
#     |/
#     o   Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker, <A', (Ad,)>.
# This flag means that the successor expresses the changes between the public
# and bumped version and fixes the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
usingsha256 = 2

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)

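# A worked example of the version-0 fixed part above, as a sketch with
# made-up values (2 successors, 11 bytes of metadata, no flags, a dummy
# 20-byte node):
#
#   >>> _fm0fsize                       # 1 + 4 + 1 + 20 bytes
#   26
#   >>> raw = _pack(_fm0fixed, 2, 11, 0, 'x' * 20)
#   >>> _unpack(_fm0fixed, raw)
#   (2, 11, 0, 'xxxxxxxxxxxxxxxxxxxx')
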
def _fm0readmarkers(data, off=0):
    # Loop on markers
    l = len(data)
    while off + _fm0fsize <= l:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise util.Abort(_('parsing obsolete marker: metadata is too '
                               'short, %d bytes expected, got %d')
                             % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)

def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise util.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents):
            metadata['p%i' % (i + 1)] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

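# Worked example: a "prune" marker (no successors, no parent data)
# round-trips through the version-0 codec above (illustrative values,
# with a dummy 20-byte node):
#
#   >>> m = ('x' * 20, (), 0, (), (0.0, 0), None)
#   >>> list(_fm0readmarkers(_fm0encodeonemarker(m))) == [m]
#   True
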
def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata key")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata value")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            key, value = l.split(':')
            d[key] = value
    return d

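# Worked example for the metadata codec above (keys are sorted on encode):
#
#   >>> _fm0encodemeta({'user': 'alice', 'note': 'fold'})
#   'note:fold\x00user:alice'
#   >>> _fm0decodemeta('note:fold\x00user:alice') == {'note': 'fold',
#   ...                                               'user': 'alice'}
#   True
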
## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: precursor changeset identifier.
#
# - N*(20 or 32) bytes: successors changesets identifiers.
#
# - P*(20 or 32) bytes: parents of the precursors changesets.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize('BB')

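# A worked example of the version-1 fixed part (made-up values; 39 bytes:
# 4 + 8 + 2 + 2 + 1 + 1 + 1 + 20):
#
#   >>> _fm1fsize
#   39
#   >>> hdr = _pack(_fm1fixed, 39, 0.0, 0, 0, 0, _fm1parentnone, 0, 'x' * 20)
#   >>> _unpack(_fm1fixed, hdr)[4:7]    # numsuc, numpar, nummeta
#   (0, 3, 0)
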
def _fm1readmarkers(data, off=0):
    # Loop on markers
    l = len(data)
    ufixed = util.unpacker(_fm1fixed)
    while off + _fm1fsize <= l:
        # read fixed part
        fixeddata = ufixed(data[off:off + _fm1fsize])
        off += _fm1fsize
        ttsize, seconds, tz, flags, numsuc, numpar, nummeta, prec = fixeddata

        _fm1node = _fm1nodesha1
        fnodesize = _fm1nodesha1size
        if flags & usingsha256:
            _fm1node = _fm1nodesha256
            fnodesize = _fm1nodesha256size

        # read 0 or more successors
        s = (fnodesize * numsuc)
        sucs = _unpack(_fm1node * numsuc, data[off:off + s])
        off += s

        # read parents
        if numpar == _fm1parentnone:
            parents = None
        else:
            s = (fnodesize * numpar)
            parents = _unpack(_fm1node * numpar, data[off:off + s])
            off += s

        # read metadata
        metaformat = '>' + (_fm1metapair * nummeta)
        s = _fm1metapairsize * nummeta
        metapairsize = _unpack(metaformat, data[off:off + s])
        off += s
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            sk = metapairsize[idx]
            sv = metapairsize[idx + 1]
            metadata.append((data[off:off + sk], data[off + sk:off + sk + sv]))
            off += sk + sv

        yield (prec, sucs, flags, tuple(metadata), (seconds, tz * 60), parents)

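# `util.unpacker`, introduced by this commit for the fixed header, is
# assumed to return a precompiled unpack callable, roughly equivalent to
# the sketch below; compiling the format once, outside the loop, avoids
# re-parsing the format string for every marker:
#
#   def unpacker_sketch(fmt):    # hypothetical stand-in for util.unpacker
#       return struct.Struct(fmt).unpack
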
def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)

# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}

@util.nogc
def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    off = 0
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion not in formats:
        raise util.Abort(_('parsing obsolete marker: unknown version %r')
                         % diskversion)
    return diskversion, formats[diskversion][0](data, off)

def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield _pack('>B', version)
    for marker in markers:
        yield encodeone(marker)


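# Sketch of a full stream round-trip with the helpers above (illustrative
# marker value, default version-0 format, version byte prepended):
#
#   >>> m = ('x' * 20, (), 0, (), (0.0, 0), None)
#   >>> stream = ''.join(encodemarkers([m], addheader=True))
#   >>> version, markers = _readmarkers(stream)
#   >>> version, list(markers)
#   (0, [('xxxxxxxxxxxxxxxxxxxx', (), 0, (), (0.0, 0), None)])

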
class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the precursors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on precursors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec:    nodeid, precursor changesets
    # succs:   tuple of nodeid, successor changesets (0-N length)
    # flag:    integer, flag field carrying modifier for the markers (see doc)
    # meta:    binary blob, encoded metadata dictionary
    # date:    (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    #          None is used when no data has been recorded

    def __init__(self, sopener, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related caches
        self.caches = {}
        self._all = []
        self.precursors = {}
        self.successors = {}
        self.children = {}
        self.sopener = sopener
        data = sopener.tryread('obsstore')
        self._version = defaultformat
        self._readonly = readonly
        if data:
            self._version, markers = _readmarkers(data)
            self._load(markers)

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        return bool(self._all)

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating markers, you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker has been added, False if the marker
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            else:
                date = util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicates.
        Return the number of new markers."""
        if self._readonly:
            raise util.Abort('creating obsolete markers is not enabled on '
                             'this repo')
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.sopener('obsstore', 'ab')
            try:
                # Whether the file's current position is at the beginning or
                # at the end after opening a file for appending is
                # implementation defined. So we must seek to the end before
                # calling tell(), or we may get a zero offset for non-zero
                # sized files on some platforms (issue3543).
                f.seek(0, _SEEK_END)
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in encodemarkers(new, offset == 0, self._version):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._load(new)
            # new markers *may* have changed several sets. invalidate the
            # cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @util.nogc
    def _load(self, markers):
        for mark in markers:
            self._all.append(mark)
            pre, sucs = mark[:2]
            self.successors.setdefault(pre, set()).add(mark)
            for suc in sucs:
                self.precursors.setdefault(suc, set()).add(mark)
            parents = mark[5]
            if parents is not None:
                for p in parents:
                    self.children.setdefault(p, set()).add(mark)
        if node.nullid in self.precursors:
            raise util.Abort(_('bad obsolescence marker detected: '
                               'invalid successors nullid'))

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - marker that use this changeset as successor
        - prune marker of direct children on this changeset
        - recursive application of the two rules on precursors of these markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers

def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for highest version known on both side
    for v in versions:
        if v in formats:
            return v
    return None

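# For example, with version 0 and version 1 registered in `formats` above:
#
#   >>> commonversion([2, 1])
#   1
#   >>> commonversion([5, 4]) is None
#   True
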
# arbitrarily picked to fit into 8K limit from HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = base85.b85encode(data)
    return keys

def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(repo.obsstore)

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()

def getmarkers(repo, nodes=None):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    if nodes is None:
        rawmarkers = repo.obsstore
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for markerdata in rawmarkers:
        yield marker(repo, markerdata)

def relevantmarkers(repo, node):
    """all obsolete markers relevant to some revision"""
    for markerdata in repo.obsstore.relevantmarkers(node):
        yield marker(repo, markerdata)


def precursormarkers(ctx):
    """obsolete marker marking this changeset as a successor"""
    for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def successormarkers(ctx):
    """obsolete marker making this changeset obsolete"""
    for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)

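# A self-contained sketch of this traversal, using a hypothetical stand-in
# for the obsstore (markers trimmed to the tuple layout read here, with
# short strings instead of real 20-byte nodes):
#
#   >>> class fakestore(object):
#   ...     successors = {'A': set([('A', ('B', 'C'), 0, (), (0.0, 0), None)])}
#   >>> sorted(allsuccessors(fakestore(), ['A']))
#   ['A', 'B', 'C']
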
717 def allprecursors(obsstore, nodes, ignoreflags=0):
718 def allprecursors(obsstore, nodes, ignoreflags=0):
718 """Yield node for every precursors of <nodes>.
719 """Yield node for every precursors of <nodes>.
719
720
720 Some precursors may be unknown locally.
721 Some precursors may be unknown locally.
721
722
722 This is a linear yield unsuited to detecting folded changesets. It includes
723 This is a linear yield unsuited to detecting folded changesets. It includes
723 initial nodes too."""
724 initial nodes too."""
724
725
725 remaining = set(nodes)
726 remaining = set(nodes)
726 seen = set(remaining)
727 seen = set(remaining)
727 while remaining:
728 while remaining:
728 current = remaining.pop()
729 current = remaining.pop()
729 yield current
730 yield current
730 for mark in obsstore.precursors.get(current, ()):
731 for mark in obsstore.precursors.get(current, ()):
731 # ignore marker flagged with specified flag
732 # ignore marker flagged with specified flag
732 if mark[2] & ignoreflags:
733 if mark[2] & ignoreflags:
733 continue
734 continue
734 suc = mark[0]
735 suc = mark[0]
735 if suc not in seen:
736 if suc not in seen:
736 seen.add(suc)
737 seen.add(suc)
737 remaining.add(suc)
738 remaining.add(suc)
738
739
739 def foreground(repo, nodes):
740 def foreground(repo, nodes):
740 """return all nodes in the "foreground" of other node
741 """return all nodes in the "foreground" of other node
741
742
742 The foreground of a revision is anything reachable using parent -> children
743 The foreground of a revision is anything reachable using parent -> children
743 or precursor -> successor relation. It is very similar to "descendant" but
744 or precursor -> successor relation. It is very similar to "descendant" but
744 augmented with obsolescence information.
745 augmented with obsolescence information.
745
746
746 Beware that possible obsolescence cycle may result if complex situation.
747 Beware that possible obsolescence cycle may result if complex situation.
747 """
748 """
748 repo = repo.unfiltered()
749 repo = repo.unfiltered()
749 foreground = set(repo.set('%ln::', nodes))
750 foreground = set(repo.set('%ln::', nodes))
750 if repo.obsstore:
751 if repo.obsstore:
751 # We only need this complicated logic if there is obsolescence
752 # We only need this complicated logic if there is obsolescence
752 # XXX will probably deserve an optimised revset.
753 # XXX will probably deserve an optimised revset.
753 nm = repo.changelog.nodemap
754 nm = repo.changelog.nodemap
754 plen = -1
755 plen = -1
755 # compute the whole set of successors or descendants
756 # compute the whole set of successors or descendants
756 while len(foreground) != plen:
757 while len(foreground) != plen:
757 plen = len(foreground)
758 plen = len(foreground)
758 succs = set(c.node() for c in foreground)
759 succs = set(c.node() for c in foreground)
759 mutable = [c.node() for c in foreground if c.mutable()]
760 mutable = [c.node() for c in foreground if c.mutable()]
760 succs.update(allsuccessors(repo.obsstore, mutable))
761 succs.update(allsuccessors(repo.obsstore, mutable))
761 known = (n for n in succs if n in nm)
762 known = (n for n in succs if n in nm)
762 foreground = set(repo.set('%ln::', known))
763 foreground = set(repo.set('%ln::', known))
763 return set(c.node() for c in foreground)
764 return set(c.node() for c in foreground)
764
765
765
766
766 def successorssets(repo, initialnode, cache=None):
767 def successorssets(repo, initialnode, cache=None):
767 """Return all set of successors of initial nodes
768 """Return all set of successors of initial nodes
768
769
769 The successors set of a changeset A are a group of revisions that succeed
770 The successors set of a changeset A are a group of revisions that succeed
770 A. It succeeds A as a consistent whole, each revision being only a partial
771 A. It succeeds A as a consistent whole, each revision being only a partial
771 replacement. The successors set contains non-obsolete changesets only.
772 replacement. The successors set contains non-obsolete changesets only.
772
773
773 This function returns the full list of successor sets which is why it
774 This function returns the full list of successor sets which is why it
774 returns a list of tuples and not just a single tuple. Each tuple is a valid
775 returns a list of tuples and not just a single tuple. Each tuple is a valid
775 successors set. Not that (A,) may be a valid successors set for changeset A
776 successors set. Not that (A,) may be a valid successors set for changeset A
776 (see below).
777 (see below).
777
778
778 In most cases, a changeset A will have a single element (e.g. the changeset
779 In most cases, a changeset A will have a single element (e.g. the changeset
779 A is replaced by A') in its successors set. Though, it is also common for a
780 A is replaced by A') in its successors set. Though, it is also common for a
780 changeset A to have no elements in its successor set (e.g. the changeset
781 changeset A to have no elements in its successor set (e.g. the changeset
781 has been pruned). Therefore, the returned list of successors sets will be
782 has been pruned). Therefore, the returned list of successors sets will be
782 [(A',)] or [], respectively.
783 [(A',)] or [], respectively.
783
784
784 When a changeset A is split into A' and B', however, it will result in a
785 When a changeset A is split into A' and B', however, it will result in a
785 successors set containing more than a single element, i.e. [(A',B')].
786 successors set containing more than a single element, i.e. [(A',B')].
786 Divergent changesets will result in multiple successors sets, i.e. [(A',),
787 Divergent changesets will result in multiple successors sets, i.e. [(A',),
787 (A'')].
788 (A'')].
788
789
789 If a changeset A is not obsolete, then it will conceptually have no
790 If a changeset A is not obsolete, then it will conceptually have no
790 successors set. To distinguish this from a pruned changeset, the successor
791 successors set. To distinguish this from a pruned changeset, the successor
791 set will only contain itself, i.e. [(A,)].
792 set will only contain itself, i.e. [(A,)].
792
793
793 Finally, successors unknown locally are considered to be pruned (obsoleted
794 Finally, successors unknown locally are considered to be pruned (obsoleted
794 without any successors).
795 without any successors).
795
796
796 The optional `cache` parameter is a dictionary that may contain precomputed
797 The optional `cache` parameter is a dictionary that may contain precomputed
797 successors sets. It is meant to reuse the computation of a previous call to
798 successors sets. It is meant to reuse the computation of a previous call to
798 `successorssets` when multiple calls are made at the same time. The cache
799 `successorssets` when multiple calls are made at the same time. The cache
799 dictionary is updated in place. The caller is responsible for its live
800 dictionary is updated in place. The caller is responsible for its live
800 spawn. Code that makes multiple calls to `successorssets` *must* use this
801 spawn. Code that makes multiple calls to `successorssets` *must* use this
801 cache mechanism or suffer terrible performances.
802 cache mechanism or suffer terrible performances.
802
803
803 """
804 """
804
805
805 succmarkers = repo.obsstore.successors
806 succmarkers = repo.obsstore.successors
806
807
807 # Stack of nodes we search successors sets for
808 # Stack of nodes we search successors sets for
808 toproceed = [initialnode]
809 toproceed = [initialnode]
809 # set version of above list for fast loop detection
810 # set version of above list for fast loop detection
810 # element added to "toproceed" must be added here
811 # element added to "toproceed" must be added here
811 stackedset = set(toproceed)
812 stackedset = set(toproceed)
812 if cache is None:
813 if cache is None:
813 cache = {}
814 cache = {}
814
815
815 # This while loop is the flattened version of a recursive search for
816 # This while loop is the flattened version of a recursive search for
816 # successors sets
817 # successors sets
817 #
818 #
818 # def successorssets(x):
819 # def successorssets(x):
819 # successors = directsuccessors(x)
820 # successors = directsuccessors(x)
820 # ss = [[]]
821 # ss = [[]]
821 # for succ in directsuccessors(x):
822 # for succ in directsuccessors(x):
822 # # product as in itertools cartesian product
823 # # product as in itertools cartesian product
823 # ss = product(ss, successorssets(succ))
824 # ss = product(ss, successorssets(succ))
824 # return ss
825 # return ss
825 #
826 #
826 # But we can not use plain recursive calls here:
827 # But we can not use plain recursive calls here:
827 # - that would blow the python call stack
828 # - that would blow the python call stack
828 # - obsolescence markers may have cycles, we need to handle them.
829 # - obsolescence markers may have cycles, we need to handle them.
829 #
830 #
830 # The `toproceed` list act as our call stack. Every node we search
831 # The `toproceed` list act as our call stack. Every node we search
831 # successors set for are stacked there.
832 # successors set for are stacked there.
832 #
833 #
833 # The `stackedset` is set version of this stack used to check if a node is
834 # The `stackedset` is set version of this stack used to check if a node is
834 # already stacked. This check is used to detect cycles and prevent infinite
835 # already stacked. This check is used to detect cycles and prevent infinite
835 # loop.
836 # loop.
836 #
837 #
837 # successors set of all nodes are stored in the `cache` dictionary.
838 # successors set of all nodes are stored in the `cache` dictionary.
838 #
839 #
839 # After this while loop ends we use the cache to return the successors sets
840 # After this while loop ends we use the cache to return the successors sets
840 # for the node requested by the caller.
841 # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know the successors sets of some direct successors
        #    of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know the successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT's successors sets and add them to the
        #       cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid last successor.
                cache[current] = [(current,)]
            else:
                # The final obsolete version is unknown locally.
                # Do not count it as a valid successor.
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            # For each direct successor of CURRENT, we check whether its
            # successors sets are known. If they are not, we stack the
            # unknown node and proceed to the next iteration of the while
            # loop. (case 3)
            #
            # During this step, we may detect obsolescence cycles: a node
            # with unknown successors sets but already in the call stack.
            # In such a situation, we arbitrarily set the successors sets of
            # the node to nothing (node pruned) to break the cycle.
            #
            # If no break was encountered, we proceed to phase 2.
            #
            # Phase 2 computes the successors sets of CURRENT (case 4); see
            # details in phase 2 itself.
            #
            # Note the two levels of iteration in each phase:
            #
            # - The first one handles obsolescence markers using CURRENT as
            #   a precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles the successors defined in each marker.
            #
            #   Having none means a pruned node; multiple successors mean a
            #   split; a single successor is a standard replacement.
            #
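            # Illustrative note (hypothetical nodes, not from the original
            # module): markers (A, (B,)) and (B, (A,)) form a cycle. When the
            # walk below reaches A again while A is still in `stackedset`,
            # A is treated as pruned (cache[A] = []) and the loop terminates.
            #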
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3): we have not computed the successors
                            # sets of one of those successors, so we add it
                            # to the `toproceed` stack and stop all work for
                            # this iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know the successors sets of all direct
                # successors
                #
                # The successors set contributed by each marker depends on
                # the successors sets of all its "successors" nodes.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from
                # other markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
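                #
                # Worked example (hypothetical nodes, not from the original
                # module): for a marker (A, (B, C)) with cache[B] = [(B1,)]
                # and cache[C] = [(C1,), (C2,)], the loops below build
                # markss = [[B1, C1], [B1, C2]], one successors set per
                # combination of a successors set of B with one of C.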
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # Cartesian product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicate entries in a
                                    # successors set; the first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicates and subsets
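                # For instance (hypothetical nodes), succssets
                # [[A], [A, B], [A]] reduces to [[A, B]]: candidates are
                # scanned from largest to smallest and any set contained in
                # an already-kept one is dropped.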
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse() # put small successors sets first
                cache[current] = final
    return cache[initialnode]

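# Illustrative results (hypothetical nodes, not from the original module):
#
#   successorssets(repo, x) == [(x,)]        # x is not obsolete
#   successorssets(repo, s) == [[s1, s2]]    # s was split into s1 and s2
#   successorssets(repo, d) == [[d1], [d2]]  # d was rewritten divergently
#   successorssets(repo, p) == []            # p was pruned
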
def _knownrevs(repo, nodes):
    """yield the revision numbers of the known nodes passed as parameters

    Unknown revisions are silently ignored."""
    torev = repo.changelog.nodemap.get
    for n in nodes:
        rev = torev(n)
        if rev is not None:
            yield rev

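# For example (hypothetical nodes), list(_knownrevs(repo, [n1, unknown]))
# yields only the revision number of n1; `unknown` is skipped.
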
# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return decorator

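# A (hypothetical) extension could register an extra set the same way the
# @cachefor declarations below do:
#
#   @cachefor('mynewset')
#   def _computemynewset(repo):
#       return set()  # revisions belonging to 'mynewset'
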
def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]

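# For example, getrevs(repo, 'obsolete') computes the obsolete set on first
# access (via _computeobsoleteset below) and returns the cached value
# afterwards.
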
# To keep things simple we need to invalidate the obsolescence cache when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence-related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists
    on the repo.

    (We could be smarter here given the exact event that triggers the cache
    clearing)"""
    # only clear caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    obs = set()
    getrev = repo.changelog.nodemap.get
    getphase = repo._phasecache.phase
    for n in repo.obsstore.successors:
        # nodes used as a precursor by any marker are obsoleted; skip those
        # unknown locally and those that are public (immutable)
        rev = getrev(n)
        if rev is not None and getphase(repo, rev):
            obs.add(rev)
    return obs

@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete ancestors"""
    # revset is not efficient enough here
    # we do (obsolete()::) - obsolete() by hand
    obs = getrevs(repo, 'obsolete')
    if not obs:
        return set()
    cl = repo.changelog
    return set(r for r in cl.descendants(obs) if r not in obs)

@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete revisions with non obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete revisions without non obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
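
# Relationship between the sets above (restating the definitions): every
# obsolete revision is either 'suspended' (it still has non obsolete
# descendants) or 'extinct' (it does not), so extinct == obsolete - suspended.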


@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util functions (avoid attribute lookups in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    obs = getrevs(repo, 'obsolete')
    for rev in repo:
        # We only evaluate mutable, non-obsolete revisions
        if (public < phase(repo, rev)) and (rev not in obs):
            node = cl.node(rev)
            # (future) A cache of precursors may be worth it if splits are
            # very common
            for pnode in allprecursors(repo.obsstore, [node],
                                       ignoreflags=bumpedfix):
                prev = torev(pnode) # unfiltered! but so is phasecache
                if (prev is not None) and (phase(repo, prev) <= public):
                    # we have a public precursor
                    bumped.add(rev)
                    break # Next draft!
    return bumped
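
# Illustrative note (hypothetical nodes): if a marker (P, (X,)) exists while
# P is public, the mutable changeset X is 'bumped': it tries to rewrite an
# immutable changeset.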

@cachefor('divergent')
def _computedivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        while toprocess:
            prec = toprocess.pop()[0]
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent
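
# Illustrative note (hypothetical nodes): markers (P, (X1,)) and (P, (X2,))
# give P two non-empty successors sets, so both X1 and X2 (while mutable and
# non obsolete) end up in the divergent set.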


def createmarkers(repo, relations, flag=0, date=None, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[, {metadata}])
    tuples. `old` and `new` are changectx objects. metadata is an optional
    dictionary containing metadata for this marker only. It is merged with
    the global metadata specified through the `metadata` argument of this
    function.

    Trying to obsolete a public changeset will raise an exception.

    The current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    tr = repo.transaction('add-obsolescence-marker')
    try:
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise util.Abort("cannot obsolete immutable changeset: %s"
                                 % prec)
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                # a pruned changeset records its parents in the marker
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise util.Abort("changeset %s cannot obsolete itself" % prec)
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata)
            repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
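
# Illustrative sketch (hypothetical changectx objects, not from the original
# module):
#
#   # record that `old` was rewritten into `new`, with marker-local metadata
#   createmarkers(repo, [(old, (new,), {'note': 'amended'})])
#
#   # record that `dead` was pruned (it has no successor)
#   createmarkers(repo, [(dead, ())])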

def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    result = set(repo.ui.configlist('experimental', 'evolution'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't
    # been set but _enabled is true.
    if len(result) == 0 and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        createmarkersopt not in result):
        raise util.Abort(_("'createmarkers' obsolete option must be enabled "
                           "if other obsolete options are enabled"))

    return option in result
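
# For example, with the following configuration only marker creation is
# enabled ('createmarkers' being the value of createmarkersopt):
#
#   [experimental]
#   evolution = createmarkers
#
# so isenabled(repo, createmarkersopt) is True while the other evolution
# options stay disabled.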