##// END OF EJS Templates
readmarkers: hoist subtraction out of loop comparison
Matt Mackall -
r23799:ffca0a14 default
parent child Browse files
Show More
@@ -1,1167 +1,1167 b''
1 # obsolete.py - obsolete markers handling
1 # obsolete.py - obsolete markers handling
2 #
2 #
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 # Logilab SA <contact@logilab.fr>
4 # Logilab SA <contact@logilab.fr>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 """Obsolete marker handling
9 """Obsolete marker handling
10
10
11 An obsolete marker maps an old changeset to a list of new
11 An obsolete marker maps an old changeset to a list of new
12 changesets. If the list of new changesets is empty, the old changeset
12 changesets. If the list of new changesets is empty, the old changeset
13 is said to be "killed". Otherwise, the old changeset is being
13 is said to be "killed". Otherwise, the old changeset is being
14 "replaced" by the new changesets.
14 "replaced" by the new changesets.
15
15
16 Obsolete markers can be used to record and distribute changeset graph
16 Obsolete markers can be used to record and distribute changeset graph
17 transformations performed by history rewrite operations, and help
17 transformations performed by history rewrite operations, and help
18 building new tools to reconcile conflicting rewrite actions. To
18 building new tools to reconcile conflicting rewrite actions. To
19 facilitate conflict resolution, markers include various annotations
19 facilitate conflict resolution, markers include various annotations
20 besides old and new changeset identifiers, such as creation date or
20 besides old and new changeset identifiers, such as creation date or
21 author name.
21 author name.
22
22
23 The old obsoleted changeset is called a "precursor" and possible
23 The old obsoleted changeset is called a "precursor" and possible
24 replacements are called "successors". Markers that used changeset X as
24 replacements are called "successors". Markers that used changeset X as
25 a precursor are called "successor markers of X" because they hold
25 a precursor are called "successor markers of X" because they hold
26 information about the successors of X. Markers that use changeset Y as
26 information about the successors of X. Markers that use changeset Y as
27 a successor are called "precursor markers of Y" because they hold
27 a successor are called "precursor markers of Y" because they hold
28 information about the precursors of Y.
28 information about the precursors of Y.
29
29
30 Examples:
30 Examples:
31
31
32 - When changeset A is replaced by changeset A', one marker is stored:
32 - When changeset A is replaced by changeset A', one marker is stored:
33
33
34 (A, (A',))
34 (A, (A',))
35
35
36 - When changesets A and B are folded into a new changeset C, two markers are
36 - When changesets A and B are folded into a new changeset C, two markers are
37 stored:
37 stored:
38
38
39 (A, (C,)) and (B, (C,))
39 (A, (C,)) and (B, (C,))
40
40
41 - When changeset A is simply "pruned" from the graph, a marker is created:
41 - When changeset A is simply "pruned" from the graph, a marker is created:
42
42
43 (A, ())
43 (A, ())
44
44
45 - When changeset A is split into B and C, a single marker is used:
45 - When changeset A is split into B and C, a single marker is used:
46
46
47     (A, (B, C))
47     (A, (B, C))
48
48
49 We use a single marker to distinguish the "split" case from the "divergence"
49 We use a single marker to distinguish the "split" case from the "divergence"
50 case. If two independent operations rewrite the same changeset A in to A' and
50 case. If two independent operations rewrite the same changeset A in to A' and
51 A'', we have an error case: divergent rewriting. We can detect it because
51 A'', we have an error case: divergent rewriting. We can detect it because
52 two markers will be created independently:
52 two markers will be created independently:
53
53
54 (A, (B,)) and (A, (C,))
54 (A, (B,)) and (A, (C,))
55
55
56 Format
56 Format
57 ------
57 ------
58
58
59 Markers are stored in an append-only file stored in
59 Markers are stored in an append-only file stored in
60 '.hg/store/obsstore'.
60 '.hg/store/obsstore'.
61
61
62 The file starts with a version header:
62 The file starts with a version header:
63
63
64 - 1 unsigned byte: version number, starting at zero.
64 - 1 unsigned byte: version number, starting at zero.
65
65
66 The header is followed by the markers. Marker format depend of the version. See
66 The header is followed by the markers. Marker format depend of the version. See
67 comment associated with each format for details.
67 comment associated with each format for details.
68
68
69 """
69 """
70 import struct
70 import struct
71 import util, base85, node
71 import util, base85, node
72 import phases
72 import phases
73 from i18n import _
73 from i18n import _
74
74
75 _pack = struct.pack
75 _pack = struct.pack
76 _unpack = struct.unpack
76 _unpack = struct.unpack
77 _calcsize = struct.calcsize
77 _calcsize = struct.calcsize
78
78
79 _SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5
79 _SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5
80
80
81 # the obsolete feature is not mature enough to be enabled by default.
81 # the obsolete feature is not mature enough to be enabled by default.
82 # you have to rely on third party extension extension to enable this.
82 # you have to rely on third party extension extension to enable this.
83 _enabled = False
83 _enabled = False
84
84
85 # Options for obsolescence
85 # Options for obsolescence
86 createmarkersopt = 'createmarkers'
86 createmarkersopt = 'createmarkers'
87 allowunstableopt = 'allowunstable'
87 allowunstableopt = 'allowunstable'
88 exchangeopt = 'exchange'
88 exchangeopt = 'exchange'
89
89
90 ### obsolescence marker flag
90 ### obsolescence marker flag
91
91
92 ## bumpedfix flag
92 ## bumpedfix flag
93 #
93 #
94 # When a changeset A' succeed to a changeset A which became public, we call A'
94 # When a changeset A' succeed to a changeset A which became public, we call A'
95 # "bumped" because it's a successors of a public changesets
95 # "bumped" because it's a successors of a public changesets
96 #
96 #
97 # o A' (bumped)
97 # o A' (bumped)
98 # |`:
98 # |`:
99 # | o A
99 # | o A
100 # |/
100 # |/
101 # o Z
101 # o Z
102 #
102 #
103 # The way to solve this situation is to create a new changeset Ad as children
103 # The way to solve this situation is to create a new changeset Ad as children
104 # of A. This changeset have the same content than A'. So the diff from A to A'
104 # of A. This changeset have the same content than A'. So the diff from A to A'
105 # is the same than the diff from A to Ad. Ad is marked as a successors of A'
105 # is the same than the diff from A to Ad. Ad is marked as a successors of A'
106 #
106 #
107 # o Ad
107 # o Ad
108 # |`:
108 # |`:
109 # | x A'
109 # | x A'
110 # |'|
110 # |'|
111 # o | A
111 # o | A
112 # |/
112 # |/
113 # o Z
113 # o Z
114 #
114 #
115 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
115 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
116 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
116 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
117 # This flag mean that the successors express the changes between the public and
117 # This flag mean that the successors express the changes between the public and
118 # bumped version and fix the situation, breaking the transitivity of
118 # bumped version and fix the situation, breaking the transitivity of
119 # "bumped" here.
119 # "bumped" here.
120 bumpedfix = 1
120 bumpedfix = 1
121 usingsha256 = 2
121 usingsha256 = 2
122
122
123 ## Parsing and writing of version "0"
123 ## Parsing and writing of version "0"
124 #
124 #
125 # The header is followed by the markers. Each marker is made of:
125 # The header is followed by the markers. Each marker is made of:
126 #
126 #
127 # - 1 uint8 : number of new changesets "N", can be zero.
127 # - 1 uint8 : number of new changesets "N", can be zero.
128 #
128 #
129 # - 1 uint32: metadata size "M" in bytes.
129 # - 1 uint32: metadata size "M" in bytes.
130 #
130 #
131 # - 1 byte: a bit field. It is reserved for flags used in common
131 # - 1 byte: a bit field. It is reserved for flags used in common
132 # obsolete marker operations, to avoid repeated decoding of metadata
132 # obsolete marker operations, to avoid repeated decoding of metadata
133 # entries.
133 # entries.
134 #
134 #
135 # - 20 bytes: obsoleted changeset identifier.
135 # - 20 bytes: obsoleted changeset identifier.
136 #
136 #
137 # - N*20 bytes: new changesets identifiers.
137 # - N*20 bytes: new changesets identifiers.
138 #
138 #
139 # - M bytes: metadata as a sequence of nul-terminated strings. Each
139 # - M bytes: metadata as a sequence of nul-terminated strings. Each
140 # string contains a key and a value, separated by a colon ':', without
140 # string contains a key and a value, separated by a colon ':', without
141 # additional encoding. Keys cannot contain '\0' or ':' and values
141 # additional encoding. Keys cannot contain '\0' or ':' and values
142 # cannot contain '\0'.
142 # cannot contain '\0'.
143 _fm0version = 0
143 _fm0version = 0
144 _fm0fixed = '>BIB20s'
144 _fm0fixed = '>BIB20s'
145 _fm0node = '20s'
145 _fm0node = '20s'
146 _fm0fsize = _calcsize(_fm0fixed)
146 _fm0fsize = _calcsize(_fm0fixed)
147 _fm0fnodesize = _calcsize(_fm0node)
147 _fm0fnodesize = _calcsize(_fm0node)
148
148
149 def _fm0readmarkers(data, off=0):
149 def _fm0readmarkers(data, off=0):
150 # Loop on markers
150 # Loop on markers
151 l = len(data)
151 l = len(data)
152 while off + _fm0fsize <= l:
152 while off + _fm0fsize <= l:
153 # read fixed part
153 # read fixed part
154 cur = data[off:off + _fm0fsize]
154 cur = data[off:off + _fm0fsize]
155 off += _fm0fsize
155 off += _fm0fsize
156 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
156 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
157 # read replacement
157 # read replacement
158 sucs = ()
158 sucs = ()
159 if numsuc:
159 if numsuc:
160 s = (_fm0fnodesize * numsuc)
160 s = (_fm0fnodesize * numsuc)
161 cur = data[off:off + s]
161 cur = data[off:off + s]
162 sucs = _unpack(_fm0node * numsuc, cur)
162 sucs = _unpack(_fm0node * numsuc, cur)
163 off += s
163 off += s
164 # read metadata
164 # read metadata
165 # (metadata will be decoded on demand)
165 # (metadata will be decoded on demand)
166 metadata = data[off:off + mdsize]
166 metadata = data[off:off + mdsize]
167 if len(metadata) != mdsize:
167 if len(metadata) != mdsize:
168 raise util.Abort(_('parsing obsolete marker: metadata is too '
168 raise util.Abort(_('parsing obsolete marker: metadata is too '
169 'short, %d bytes expected, got %d')
169 'short, %d bytes expected, got %d')
170 % (mdsize, len(metadata)))
170 % (mdsize, len(metadata)))
171 off += mdsize
171 off += mdsize
172 metadata = _fm0decodemeta(metadata)
172 metadata = _fm0decodemeta(metadata)
173 try:
173 try:
174 when, offset = metadata.pop('date', '0 0').split(' ')
174 when, offset = metadata.pop('date', '0 0').split(' ')
175 date = float(when), int(offset)
175 date = float(when), int(offset)
176 except ValueError:
176 except ValueError:
177 date = (0., 0)
177 date = (0., 0)
178 parents = None
178 parents = None
179 if 'p2' in metadata:
179 if 'p2' in metadata:
180 parents = (metadata.pop('p1', None), metadata.pop('p2', None))
180 parents = (metadata.pop('p1', None), metadata.pop('p2', None))
181 elif 'p1' in metadata:
181 elif 'p1' in metadata:
182 parents = (metadata.pop('p1', None),)
182 parents = (metadata.pop('p1', None),)
183 elif 'p0' in metadata:
183 elif 'p0' in metadata:
184 parents = ()
184 parents = ()
185 if parents is not None:
185 if parents is not None:
186 try:
186 try:
187 parents = tuple(node.bin(p) for p in parents)
187 parents = tuple(node.bin(p) for p in parents)
188 # if parent content is not a nodeid, drop the data
188 # if parent content is not a nodeid, drop the data
189 for p in parents:
189 for p in parents:
190 if len(p) != 20:
190 if len(p) != 20:
191 parents = None
191 parents = None
192 break
192 break
193 except TypeError:
193 except TypeError:
194 # if content cannot be translated to nodeid drop the data.
194 # if content cannot be translated to nodeid drop the data.
195 parents = None
195 parents = None
196
196
197 metadata = tuple(sorted(metadata.iteritems()))
197 metadata = tuple(sorted(metadata.iteritems()))
198
198
199 yield (pre, sucs, flags, metadata, date, parents)
199 yield (pre, sucs, flags, metadata, date, parents)
200
200
201 def _fm0encodeonemarker(marker):
201 def _fm0encodeonemarker(marker):
202 pre, sucs, flags, metadata, date, parents = marker
202 pre, sucs, flags, metadata, date, parents = marker
203 if flags & usingsha256:
203 if flags & usingsha256:
204 raise util.Abort(_('cannot handle sha256 with old obsstore format'))
204 raise util.Abort(_('cannot handle sha256 with old obsstore format'))
205 metadata = dict(metadata)
205 metadata = dict(metadata)
206 time, tz = date
206 time, tz = date
207 metadata['date'] = '%r %i' % (time, tz)
207 metadata['date'] = '%r %i' % (time, tz)
208 if parents is not None:
208 if parents is not None:
209 if not parents:
209 if not parents:
210 # mark that we explicitly recorded no parents
210 # mark that we explicitly recorded no parents
211 metadata['p0'] = ''
211 metadata['p0'] = ''
212 for i, p in enumerate(parents):
212 for i, p in enumerate(parents):
213 metadata['p%i' % (i + 1)] = node.hex(p)
213 metadata['p%i' % (i + 1)] = node.hex(p)
214 metadata = _fm0encodemeta(metadata)
214 metadata = _fm0encodemeta(metadata)
215 numsuc = len(sucs)
215 numsuc = len(sucs)
216 format = _fm0fixed + (_fm0node * numsuc)
216 format = _fm0fixed + (_fm0node * numsuc)
217 data = [numsuc, len(metadata), flags, pre]
217 data = [numsuc, len(metadata), flags, pre]
218 data.extend(sucs)
218 data.extend(sucs)
219 return _pack(format, *data) + metadata
219 return _pack(format, *data) + metadata
220
220
221 def _fm0encodemeta(meta):
221 def _fm0encodemeta(meta):
222 """Return encoded metadata string to string mapping.
222 """Return encoded metadata string to string mapping.
223
223
224 Assume no ':' in key and no '\0' in both key and value."""
224 Assume no ':' in key and no '\0' in both key and value."""
225 for key, value in meta.iteritems():
225 for key, value in meta.iteritems():
226 if ':' in key or '\0' in key:
226 if ':' in key or '\0' in key:
227 raise ValueError("':' and '\0' are forbidden in metadata key'")
227 raise ValueError("':' and '\0' are forbidden in metadata key'")
228 if '\0' in value:
228 if '\0' in value:
229 raise ValueError("':' is forbidden in metadata value'")
229 raise ValueError("':' is forbidden in metadata value'")
230 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
230 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
231
231
232 def _fm0decodemeta(data):
232 def _fm0decodemeta(data):
233 """Return string to string dictionary from encoded version."""
233 """Return string to string dictionary from encoded version."""
234 d = {}
234 d = {}
235 for l in data.split('\0'):
235 for l in data.split('\0'):
236 if l:
236 if l:
237 key, value = l.split(':')
237 key, value = l.split(':')
238 d[key] = value
238 d[key] = value
239 return d
239 return d
240
240
241 ## Parsing and writing of version "1"
241 ## Parsing and writing of version "1"
242 #
242 #
243 # The header is followed by the markers. Each marker is made of:
243 # The header is followed by the markers. Each marker is made of:
244 #
244 #
245 # - uint32: total size of the marker (including this field)
245 # - uint32: total size of the marker (including this field)
246 #
246 #
247 # - float64: date in seconds since epoch
247 # - float64: date in seconds since epoch
248 #
248 #
249 # - int16: timezone offset in minutes
249 # - int16: timezone offset in minutes
250 #
250 #
251 # - uint16: a bit field. It is reserved for flags used in common
251 # - uint16: a bit field. It is reserved for flags used in common
252 # obsolete marker operations, to avoid repeated decoding of metadata
252 # obsolete marker operations, to avoid repeated decoding of metadata
253 # entries.
253 # entries.
254 #
254 #
255 # - uint8: number of successors "N", can be zero.
255 # - uint8: number of successors "N", can be zero.
256 #
256 #
257 # - uint8: number of parents "P", can be zero.
257 # - uint8: number of parents "P", can be zero.
258 #
258 #
259 # 0: parents data stored but no parent,
259 # 0: parents data stored but no parent,
260 # 1: one parent stored,
260 # 1: one parent stored,
261 # 2: two parents stored,
261 # 2: two parents stored,
262 # 3: no parent data stored
262 # 3: no parent data stored
263 #
263 #
264 # - uint8: number of metadata entries M
264 # - uint8: number of metadata entries M
265 #
265 #
266 # - 20 or 32 bytes: precursor changeset identifier.
266 # - 20 or 32 bytes: precursor changeset identifier.
267 #
267 #
268 # - N*(20 or 32) bytes: successors changesets identifiers.
268 # - N*(20 or 32) bytes: successors changesets identifiers.
269 #
269 #
270 # - P*(20 or 32) bytes: parents of the precursors changesets.
270 # - P*(20 or 32) bytes: parents of the precursors changesets.
271 #
271 #
272 # - M*(uint8, uint8): size of all metadata entries (key and value)
272 # - M*(uint8, uint8): size of all metadata entries (key and value)
273 #
273 #
274 # - remaining bytes: the metadata, each (key, value) pair after the other.
274 # - remaining bytes: the metadata, each (key, value) pair after the other.
275 _fm1version = 1
275 _fm1version = 1
276 _fm1fixed = '>IdhHBBB20s'
276 _fm1fixed = '>IdhHBBB20s'
277 _fm1nodesha1 = '20s'
277 _fm1nodesha1 = '20s'
278 _fm1nodesha256 = '32s'
278 _fm1nodesha256 = '32s'
279 _fm1nodesha1size = _calcsize(_fm1nodesha1)
279 _fm1nodesha1size = _calcsize(_fm1nodesha1)
280 _fm1nodesha256size = _calcsize(_fm1nodesha256)
280 _fm1nodesha256size = _calcsize(_fm1nodesha256)
281 _fm1fsize = _calcsize(_fm1fixed)
281 _fm1fsize = _calcsize(_fm1fixed)
282 _fm1parentnone = 3
282 _fm1parentnone = 3
283 _fm1parentshift = 14
283 _fm1parentshift = 14
284 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
284 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
285 _fm1metapair = 'BB'
285 _fm1metapair = 'BB'
286 _fm1metapairsize = _calcsize('BB')
286 _fm1metapairsize = _calcsize('BB')
287
287
288 def _fm1readmarkers(data, off=0):
288 def _fm1readmarkers(data, off=0):
289 # Loop on markers
289 # Loop on markers
290 l = len(data)
290 stop = len(data) - _fm1fsize
291 ufixed = util.unpacker(_fm1fixed)
291 ufixed = util.unpacker(_fm1fixed)
292 while off + _fm1fsize <= l:
292 while off <= stop:
293 # read fixed part
293 # read fixed part
294 o1 = off + _fm1fsize
294 o1 = off + _fm1fsize
295 fixeddata = ufixed(data[off:o1])
295 fixeddata = ufixed(data[off:o1])
296 ttsize, seconds, tz, flags, numsuc, numpar, nummeta, prec = fixeddata
296 ttsize, seconds, tz, flags, numsuc, numpar, nummeta, prec = fixeddata
297
297
298 _fm1node = _fm1nodesha1
298 _fm1node = _fm1nodesha1
299 fnodesize = _fm1nodesha1size
299 fnodesize = _fm1nodesha1size
300 if flags & usingsha256:
300 if flags & usingsha256:
301 _fm1node = _fm1nodesha256
301 _fm1node = _fm1nodesha256
302 fnodesize = _fm1nodesha256size
302 fnodesize = _fm1nodesha256size
303
303
304 # read 0 or more successors
304 # read 0 or more successors
305 o2 = o1 + fnodesize * numsuc
305 o2 = o1 + fnodesize * numsuc
306 sucs = _unpack(_fm1node * numsuc, data[o1:o2])
306 sucs = _unpack(_fm1node * numsuc, data[o1:o2])
307
307
308 # read parents
308 # read parents
309 if numpar == _fm1parentnone:
309 if numpar == _fm1parentnone:
310 o3 = o2
310 o3 = o2
311 parents = None
311 parents = None
312 else:
312 else:
313 o3 = o2 + fnodesize * numpar
313 o3 = o2 + fnodesize * numpar
314 parents = _unpack(_fm1node * numpar, data[o2:o3])
314 parents = _unpack(_fm1node * numpar, data[o2:o3])
315
315
316 # read metadata
316 # read metadata
317 metaformat = '>' + (_fm1metapair * nummeta)
317 metaformat = '>' + (_fm1metapair * nummeta)
318 off = o3 + _fm1metapairsize * nummeta
318 off = o3 + _fm1metapairsize * nummeta
319 metapairsize = _unpack(metaformat, data[o3:off])
319 metapairsize = _unpack(metaformat, data[o3:off])
320 metadata = []
320 metadata = []
321 for idx in xrange(0, len(metapairsize), 2):
321 for idx in xrange(0, len(metapairsize), 2):
322 o1 = off + metapairsize[idx]
322 o1 = off + metapairsize[idx]
323 o2 = o1 + metapairsize[idx + 1]
323 o2 = o1 + metapairsize[idx + 1]
324 metadata.append((data[off:o1], data[o1:o2]))
324 metadata.append((data[off:o1], data[o1:o2]))
325 off = o2
325 off = o2
326
326
327 yield (prec, sucs, flags, tuple(metadata), (seconds, tz * 60), parents)
327 yield (prec, sucs, flags, tuple(metadata), (seconds, tz * 60), parents)
328
328
329 def _fm1encodeonemarker(marker):
329 def _fm1encodeonemarker(marker):
330 pre, sucs, flags, metadata, date, parents = marker
330 pre, sucs, flags, metadata, date, parents = marker
331 # determine node size
331 # determine node size
332 _fm1node = _fm1nodesha1
332 _fm1node = _fm1nodesha1
333 if flags & usingsha256:
333 if flags & usingsha256:
334 _fm1node = _fm1nodesha256
334 _fm1node = _fm1nodesha256
335 numsuc = len(sucs)
335 numsuc = len(sucs)
336 numextranodes = numsuc
336 numextranodes = numsuc
337 if parents is None:
337 if parents is None:
338 numpar = _fm1parentnone
338 numpar = _fm1parentnone
339 else:
339 else:
340 numpar = len(parents)
340 numpar = len(parents)
341 numextranodes += numpar
341 numextranodes += numpar
342 formatnodes = _fm1node * numextranodes
342 formatnodes = _fm1node * numextranodes
343 formatmeta = _fm1metapair * len(metadata)
343 formatmeta = _fm1metapair * len(metadata)
344 format = _fm1fixed + formatnodes + formatmeta
344 format = _fm1fixed + formatnodes + formatmeta
345 # tz is stored in minutes so we divide by 60
345 # tz is stored in minutes so we divide by 60
346 tz = date[1]//60
346 tz = date[1]//60
347 data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
347 data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
348 data.extend(sucs)
348 data.extend(sucs)
349 if parents is not None:
349 if parents is not None:
350 data.extend(parents)
350 data.extend(parents)
351 totalsize = _calcsize(format)
351 totalsize = _calcsize(format)
352 for key, value in metadata:
352 for key, value in metadata:
353 lk = len(key)
353 lk = len(key)
354 lv = len(value)
354 lv = len(value)
355 data.append(lk)
355 data.append(lk)
356 data.append(lv)
356 data.append(lv)
357 totalsize += lk + lv
357 totalsize += lk + lv
358 data[0] = totalsize
358 data[0] = totalsize
359 data = [_pack(format, *data)]
359 data = [_pack(format, *data)]
360 for key, value in metadata:
360 for key, value in metadata:
361 data.append(key)
361 data.append(key)
362 data.append(value)
362 data.append(value)
363 return ''.join(data)
363 return ''.join(data)
364
364
365 # mapping to read/write various marker formats
365 # mapping to read/write various marker formats
366 # <version> -> (decoder, encoder)
366 # <version> -> (decoder, encoder)
367 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
367 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
368 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
368 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
369
369
370 @util.nogc
370 @util.nogc
371 def _readmarkers(data):
371 def _readmarkers(data):
372 """Read and enumerate markers from raw data"""
372 """Read and enumerate markers from raw data"""
373 off = 0
373 off = 0
374 diskversion = _unpack('>B', data[off:off + 1])[0]
374 diskversion = _unpack('>B', data[off:off + 1])[0]
375 off += 1
375 off += 1
376 if diskversion not in formats:
376 if diskversion not in formats:
377 raise util.Abort(_('parsing obsolete marker: unknown version %r')
377 raise util.Abort(_('parsing obsolete marker: unknown version %r')
378 % diskversion)
378 % diskversion)
379 return diskversion, formats[diskversion][0](data, off)
379 return diskversion, formats[diskversion][0](data, off)
380
380
381 def encodemarkers(markers, addheader=False, version=_fm0version):
381 def encodemarkers(markers, addheader=False, version=_fm0version):
382 # Kept separate from flushmarkers(), it will be reused for
382 # Kept separate from flushmarkers(), it will be reused for
383 # markers exchange.
383 # markers exchange.
384 encodeone = formats[version][1]
384 encodeone = formats[version][1]
385 if addheader:
385 if addheader:
386 yield _pack('>B', version)
386 yield _pack('>B', version)
387 for marker in markers:
387 for marker in markers:
388 yield encodeone(marker)
388 yield encodeone(marker)
389
389
390
390
391 class marker(object):
391 class marker(object):
392 """Wrap obsolete marker raw data"""
392 """Wrap obsolete marker raw data"""
393
393
394 def __init__(self, repo, data):
394 def __init__(self, repo, data):
395 # the repo argument will be used to create changectx in later version
395 # the repo argument will be used to create changectx in later version
396 self._repo = repo
396 self._repo = repo
397 self._data = data
397 self._data = data
398 self._decodedmeta = None
398 self._decodedmeta = None
399
399
400 def __hash__(self):
400 def __hash__(self):
401 return hash(self._data)
401 return hash(self._data)
402
402
403 def __eq__(self, other):
403 def __eq__(self, other):
404 if type(other) != type(self):
404 if type(other) != type(self):
405 return False
405 return False
406 return self._data == other._data
406 return self._data == other._data
407
407
408 def precnode(self):
408 def precnode(self):
409 """Precursor changeset node identifier"""
409 """Precursor changeset node identifier"""
410 return self._data[0]
410 return self._data[0]
411
411
412 def succnodes(self):
412 def succnodes(self):
413 """List of successor changesets node identifiers"""
413 """List of successor changesets node identifiers"""
414 return self._data[1]
414 return self._data[1]
415
415
416 def parentnodes(self):
416 def parentnodes(self):
417 """Parents of the precursors (None if not recorded)"""
417 """Parents of the precursors (None if not recorded)"""
418 return self._data[5]
418 return self._data[5]
419
419
420 def metadata(self):
420 def metadata(self):
421 """Decoded metadata dictionary"""
421 """Decoded metadata dictionary"""
422 return dict(self._data[3])
422 return dict(self._data[3])
423
423
424 def date(self):
424 def date(self):
425 """Creation date as (unixtime, offset)"""
425 """Creation date as (unixtime, offset)"""
426 return self._data[4]
426 return self._data[4]
427
427
428 def flags(self):
428 def flags(self):
429 """The flags field of the marker"""
429 """The flags field of the marker"""
430 return self._data[2]
430 return self._data[2]
431
431
432 class obsstore(object):
432 class obsstore(object):
433 """Store obsolete markers
433 """Store obsolete markers
434
434
435 Markers can be accessed with two mappings:
435 Markers can be accessed with two mappings:
436 - precursors[x] -> set(markers on precursors edges of x)
436 - precursors[x] -> set(markers on precursors edges of x)
437 - successors[x] -> set(markers on successors edges of x)
437 - successors[x] -> set(markers on successors edges of x)
438 - children[x] -> set(markers on precursors edges of children(x)
438 - children[x] -> set(markers on precursors edges of children(x)
439 """
439 """
440
440
441 fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
441 fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
442 # prec: nodeid, precursor changesets
442 # prec: nodeid, precursor changesets
443 # succs: tuple of nodeid, successor changesets (0-N length)
443 # succs: tuple of nodeid, successor changesets (0-N length)
444 # flag: integer, flag field carrying modifier for the markers (see doc)
444 # flag: integer, flag field carrying modifier for the markers (see doc)
445 # meta: binary blob, encoded metadata dictionary
445 # meta: binary blob, encoded metadata dictionary
446 # date: (float, int) tuple, date of marker creation
446 # date: (float, int) tuple, date of marker creation
447 # parents: (tuple of nodeid) or None, parents of precursors
447 # parents: (tuple of nodeid) or None, parents of precursors
448 # None is used when no data has been recorded
448 # None is used when no data has been recorded
449
449
450 def __init__(self, sopener, defaultformat=_fm1version, readonly=False):
450 def __init__(self, sopener, defaultformat=_fm1version, readonly=False):
451 # caches for various obsolescence related cache
451 # caches for various obsolescence related cache
452 self.caches = {}
452 self.caches = {}
453 self._all = []
453 self._all = []
454 self.precursors = {}
454 self.precursors = {}
455 self.successors = {}
455 self.successors = {}
456 self.children = {}
456 self.children = {}
457 self.sopener = sopener
457 self.sopener = sopener
458 data = sopener.tryread('obsstore')
458 data = sopener.tryread('obsstore')
459 self._version = defaultformat
459 self._version = defaultformat
460 self._readonly = readonly
460 self._readonly = readonly
461 if data:
461 if data:
462 self._version, markers = _readmarkers(data)
462 self._version, markers = _readmarkers(data)
463 self._load(markers)
463 self._load(markers)
464
464
465 def __iter__(self):
465 def __iter__(self):
466 return iter(self._all)
466 return iter(self._all)
467
467
468 def __len__(self):
468 def __len__(self):
469 return len(self._all)
469 return len(self._all)
470
470
471 def __nonzero__(self):
471 def __nonzero__(self):
472 return bool(self._all)
472 return bool(self._all)
473
473
474 def create(self, transaction, prec, succs=(), flag=0, parents=None,
474 def create(self, transaction, prec, succs=(), flag=0, parents=None,
475 date=None, metadata=None):
475 date=None, metadata=None):
476 """obsolete: add a new obsolete marker
476 """obsolete: add a new obsolete marker
477
477
478 * ensuring it is hashable
478 * ensuring it is hashable
479 * check mandatory metadata
479 * check mandatory metadata
480 * encode metadata
480 * encode metadata
481
481
482 If you are a human writing code creating marker you want to use the
482 If you are a human writing code creating marker you want to use the
483 `createmarkers` function in this module instead.
483 `createmarkers` function in this module instead.
484
484
485 return True if a new marker have been added, False if the markers
485 return True if a new marker have been added, False if the markers
486 already existed (no op).
486 already existed (no op).
487 """
487 """
488 if metadata is None:
488 if metadata is None:
489 metadata = {}
489 metadata = {}
490 if date is None:
490 if date is None:
491 if 'date' in metadata:
491 if 'date' in metadata:
492 # as a courtesy for out-of-tree extensions
492 # as a courtesy for out-of-tree extensions
493 date = util.parsedate(metadata.pop('date'))
493 date = util.parsedate(metadata.pop('date'))
494 else:
494 else:
495 date = util.makedate()
495 date = util.makedate()
496 if len(prec) != 20:
496 if len(prec) != 20:
497 raise ValueError(prec)
497 raise ValueError(prec)
498 for succ in succs:
498 for succ in succs:
499 if len(succ) != 20:
499 if len(succ) != 20:
500 raise ValueError(succ)
500 raise ValueError(succ)
501 if prec in succs:
501 if prec in succs:
502 raise ValueError(_('in-marker cycle with %s') % node.hex(prec))
502 raise ValueError(_('in-marker cycle with %s') % node.hex(prec))
503
503
504 metadata = tuple(sorted(metadata.iteritems()))
504 metadata = tuple(sorted(metadata.iteritems()))
505
505
506 marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
506 marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
507 return bool(self.add(transaction, [marker]))
507 return bool(self.add(transaction, [marker]))
508
508
    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicate.
        Return the number of new marker."""
        if self._readonly:
            raise util.Abort('creating obsolete markers is not enabled on this '
                             'repo')
        # filter out markers already present in the store; `known` also
        # dedupes within this batch itself
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.sopener('obsstore', 'ab')
            try:
                # Whether the file's current position is at the begin or at
                # the end after opening a file for appending is implementation
                # defined. So we must seek to the end before calling tell(),
                # or we may get a zero offset for non-zero sized files on
                # some platforms (issue3543).
                f.seek(0, _SEEK_END)
                offset = f.tell()
                # register the pre-write offset so the transaction can
                # truncate back to it on rollback
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in encodemarkers(new, offset == 0, self._version):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            # update the in-memory indexes (successors/precursors/children)
            self._load(new)
            # new marker *may* have changed several set. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)
548
548
549 def mergemarkers(self, transaction, data):
549 def mergemarkers(self, transaction, data):
550 """merge a binary stream of markers inside the obsstore
550 """merge a binary stream of markers inside the obsstore
551
551
552 Returns the number of new markers added."""
552 Returns the number of new markers added."""
553 version, markers = _readmarkers(data)
553 version, markers = _readmarkers(data)
554 return self.add(transaction, markers)
554 return self.add(transaction, markers)
555
555
556 @util.nogc
556 @util.nogc
557 def _load(self, markers):
557 def _load(self, markers):
558 for mark in markers:
558 for mark in markers:
559 self._all.append(mark)
559 self._all.append(mark)
560 pre, sucs = mark[:2]
560 pre, sucs = mark[:2]
561 self.successors.setdefault(pre, set()).add(mark)
561 self.successors.setdefault(pre, set()).add(mark)
562 for suc in sucs:
562 for suc in sucs:
563 self.precursors.setdefault(suc, set()).add(mark)
563 self.precursors.setdefault(suc, set()).add(mark)
564 parents = mark[5]
564 parents = mark[5]
565 if parents is not None:
565 if parents is not None:
566 for p in parents:
566 for p in parents:
567 self.children.setdefault(p, set()).add(mark)
567 self.children.setdefault(p, set()).add(mark)
568 if node.nullid in self.precursors:
568 if node.nullid in self.precursors:
569 raise util.Abort(_('bad obsolescence marker detected: '
569 raise util.Abort(_('bad obsolescence marker detected: '
570 'invalid successors nullid'))
570 'invalid successors nullid'))
571 def relevantmarkers(self, nodes):
571 def relevantmarkers(self, nodes):
572 """return a set of all obsolescence markers relevant to a set of nodes.
572 """return a set of all obsolescence markers relevant to a set of nodes.
573
573
574 "relevant" to a set of nodes mean:
574 "relevant" to a set of nodes mean:
575
575
576 - marker that use this changeset as successor
576 - marker that use this changeset as successor
577 - prune marker of direct children on this changeset
577 - prune marker of direct children on this changeset
578 - recursive application of the two rules on precursors of these markers
578 - recursive application of the two rules on precursors of these markers
579
579
580 It is a set so you cannot rely on order."""
580 It is a set so you cannot rely on order."""
581
581
582 pendingnodes = set(nodes)
582 pendingnodes = set(nodes)
583 seenmarkers = set()
583 seenmarkers = set()
584 seennodes = set(pendingnodes)
584 seennodes = set(pendingnodes)
585 precursorsmarkers = self.precursors
585 precursorsmarkers = self.precursors
586 children = self.children
586 children = self.children
587 while pendingnodes:
587 while pendingnodes:
588 direct = set()
588 direct = set()
589 for current in pendingnodes:
589 for current in pendingnodes:
590 direct.update(precursorsmarkers.get(current, ()))
590 direct.update(precursorsmarkers.get(current, ()))
591 pruned = [m for m in children.get(current, ()) if not m[1]]
591 pruned = [m for m in children.get(current, ()) if not m[1]]
592 direct.update(pruned)
592 direct.update(pruned)
593 direct -= seenmarkers
593 direct -= seenmarkers
594 pendingnodes = set([m[0] for m in direct])
594 pendingnodes = set([m[0] for m in direct])
595 seenmarkers |= direct
595 seenmarkers |= direct
596 pendingnodes -= seennodes
596 pendingnodes -= seennodes
597 seennodes |= pendingnodes
597 seennodes |= pendingnodes
598 return seenmarkers
598 return seenmarkers
599
599
def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    # highest version first; note this sorts the caller's list in place
    versions.sort(reverse=True)
    # pick the first (i.e. highest) version both sides understand
    return next((v for v in versions if v in formats), None)
611
611
# Maximum raw payload per pushkey part, in bytes.
# arbitrary picked to fit into 8K limit from HTTP server
# you have to take in account:
# - the version header
# - the base85 encoding
_maxpayload = 5300
617
617
def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    chunk = None
    chunklen = _maxpayload * 2 # force a fresh chunk on the first marker
    for m in markers:
        encoded = _fm0encodeonemarker(m)
        if chunklen + len(encoded) > _maxpayload:
            chunk = []
            chunklen = 0
            parts.append(chunk)
        chunk.append(encoded)
        chunklen += len(encoded)
    header = _pack('>B', _fm0version)
    # newest chunks get the lowest dump indexes
    for idx, part in enumerate(reversed(parts)):
        keys['dump%i' % idx] = base85.b85encode(''.join([header] + part))
    return keys
638
638
def listmarkers(repo):
    """List markers over pushkey"""
    # an empty obsstore is falsy: nothing to exchange
    if repo.obsstore:
        return _pushkeyescape(repo.obsstore)
    return {}
644
644
def pushmarker(repo, key, old, new):
    """Push markers over pushkey

    Returns 1 on success, 0 on a malformed request (bad key name or an
    unexpected non-empty old value)."""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    # `new` is a base85-encoded binary stream of markers
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            # release after close() commits; aborts the transaction otherwise
            tr.release()
    finally:
        lock.release()
665
665
def getmarkers(repo, nodes=None):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    # with no node filter, iterate the whole store
    if nodes is None:
        rawmarkers = repo.obsstore
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    # wrap raw tuples in the higher-level marker class
    for markerdata in rawmarkers:
        yield marker(repo, markerdata)
678
678
def relevantmarkers(repo, node):
    """all obsolete markers relevant to some revision"""
    for markerdata in repo.obsstore.relevantmarkers(node):
        # wrap each raw tuple in the higher-level marker class
        yield marker(repo, markerdata)
683
683
684
684
def precursormarkers(ctx):
    """obsolete markers marking this changeset as a successor"""
    repo = ctx._repo
    for raw in repo.obsstore.precursors.get(ctx.node(), ()):
        yield marker(repo, raw)
689
689
def successormarkers(ctx):
    """obsolete markers making this changeset obsolete"""
    repo = ctx._repo
    for raw in repo.obsstore.successors.get(ctx.node(), ()):
        yield marker(repo, raw)
694
694
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    seen = set(nodes)
    remaining = set(nodes)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            if mark[2] & ignoreflags:
                # marker carries a flag the caller asked to skip
                continue
            for s in mark[1]:
                if s not in seen:
                    seen.add(s)
                    remaining.add(s)
715
715
716 def allprecursors(obsstore, nodes, ignoreflags=0):
716 def allprecursors(obsstore, nodes, ignoreflags=0):
717 """Yield node for every precursors of <nodes>.
717 """Yield node for every precursors of <nodes>.
718
718
719 Some precursors may be unknown locally.
719 Some precursors may be unknown locally.
720
720
721 This is a linear yield unsuited to detecting folded changesets. It includes
721 This is a linear yield unsuited to detecting folded changesets. It includes
722 initial nodes too."""
722 initial nodes too."""
723
723
724 remaining = set(nodes)
724 remaining = set(nodes)
725 seen = set(remaining)
725 seen = set(remaining)
726 while remaining:
726 while remaining:
727 current = remaining.pop()
727 current = remaining.pop()
728 yield current
728 yield current
729 for mark in obsstore.precursors.get(current, ()):
729 for mark in obsstore.precursors.get(current, ()):
730 # ignore marker flagged with specified flag
730 # ignore marker flagged with specified flag
731 if mark[2] & ignoreflags:
731 if mark[2] & ignoreflags:
732 continue
732 continue
733 suc = mark[0]
733 suc = mark[0]
734 if suc not in seen:
734 if suc not in seen:
735 seen.add(suc)
735 seen.add(suc)
736 remaining.add(suc)
736 remaining.add(suc)
737
737
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycle may result if complex situation.
    """
    repo = repo.unfiltered()
    fg = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        # iterate to a fixed point, alternating descendant expansion with
        # successor expansion
        lastsize = -1
        while len(fg) != lastsize:
            lastsize = len(fg)
            succs = set(c.node() for c in fg)
            mutable = [c.node() for c in fg if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            # successors unknown locally cannot be walked further
            known = (n for n in succs if n in nm)
            fg = set(repo.set('%ln::', known))
    return set(c.node() for c in fg)
763
763
764
764
def successorssets(repo, initialnode, cache=None):
    """Return all set of successors of initial nodes

    The successors set of a changeset A are a group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. The successors set contains non-obsolete changesets only.

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will only contain itself, i.e. [(A,)].

    Finally, successors unknown locally are considered to be pruned (obsoleted
    without any successors).

    The optional `cache` parameter is a dictionary that may contain precomputed
    successors sets. It is meant to reuse the computation of a previous call to
    `successorssets` when multiple calls are made at the same time. The cache
    dictionary is updated in place. The caller is responsible for its life
    span. Code that makes multiple calls to `successorssets` *must* use this
    cache mechanism or suffer terrible performances.

    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #    successors = directsuccessors(x)
    #    ss = [[]]
    #    for succ in directsuccessors(x):
    #        # product as in itertools cartesian product
    #        ss = product(ss, successorssets(succ))
    #    return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list act as our call stack. Every node we search
    # successors set for are stacked there.
    #
    # The `stackedset` is set version of this stack used to check if a node is
    # already stacked. This check is used to detect cycles and prevent infinite
    # loop.
    #
    # successors set of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors set of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid last successors.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successors of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrary set the successors sets of
            #     the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entry here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   single successors are standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" node.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entry and successors set that are strict subset of
                # another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicated entry in successors set
                                    # first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse() # put small successors set first
                cache[current] = final
    return cache[initialnode]
970
970
def _knownrevs(repo, nodes):
    """Yield the revision numbers of the given nodes.

    Nodes unknown to the changelog are silently skipped."""
    nodemap = repo.changelog.nodemap
    for node in nodes:
        r = nodemap.get(node)
        if r is None:
            # unknown node: ignore it
            continue
        yield r
980
980
# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def register(func):
        # a set name may only be bound to a single compute function
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return register
990
990
def getrevs(repo, name):
    """Return the set of revisions belonging to the <name> set

    The set may be computed on first access and cached for later calls."""
    repo = repo.unfiltered()
    store = repo.obsstore
    if not store:
        # no obsolescence data at all: nothing can belong to any set
        return frozenset()
    caches = store.caches
    if name not in caches:
        caches[name] = cachefuncs[name](repo)
    return caches[name]
1001
1001
# The obsolescence caches must be invalidated whenever:
#
# - a new changeset is added
# - a public phase is changed
# - obsolescence markers are added
# - a repo is stripped
def clearobscaches(repo):
    """Remove every obsolescence-related cache from a repo

    All caches are dropped from the obsstore, provided the obsstore
    already exists on this repo.

    (We could be smarter here given the exact event that triggered the
    cache clearing)"""
    # only touch the obsstore if this repo actually has obsstore data;
    # checking _filecache avoids creating the obsstore as a side effect
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()
1019
1019
@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    torev = repo.changelog.nodemap.get
    phase = repo._phasecache.phase
    obsolete = set()
    for node in repo.obsstore.successors:
        r = torev(node)
        if r is None:
            # node unknown to the changelog: skip it
            continue
        # phase 0 (public) changesets can never be obsolete
        if phase(repo, r):
            obsolete.add(r)
    return obsolete
1031
1031
@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    # the revset machinery is not efficient enough here, so compute
    # (obsolete()::) - obsolete() by hand
    obsolete = getrevs(repo, 'obsolete')
    if not obsolete:
        return set()
    descendants = repo.changelog.descendants(obsolete)
    return set(r for r in descendants if r not in obsolete)
1042
1042
@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    unstable = getrevs(repo, 'unstable')
    ancestors = repo.changelog.ancestors(unstable)
    suspended = set()
    for rev in getrevs(repo, 'obsolete'):
        if rev in ancestors:
            suspended.add(rev)
    return suspended
1048
1048
@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    obsolete = getrevs(repo, 'obsolete')
    suspended = getrevs(repo, 'suspended')
    return obsolete.difference(suspended)
1053
1053
1054
1054
@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # bind helpers to locals (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    tonode = cl.node
    torev = cl.nodemap.get
    obsoleted = getrevs(repo, 'obsolete')
    store = repo.obsstore
    for rev in repo:
        # only mutable, non-obsolete revisions are worth evaluating
        if phase(repo, rev) <= public or rev in obsoleted:
            continue
        # (future) a cache of precursors may be worthwhile if split is
        # very common
        for pnode in allprecursors(store, [tonode(rev)],
                                   ignoreflags=bumpedfix):
            prev = torev(pnode) # unfiltered! but so is phasecache
            if prev is None or public < phase(repo, prev):
                continue
            # this revision has a public precursor
            bumped.add(rev)
            break # next draft!
    return bumped
1078
1078
@cachefor('divergent')
def _computedivergentset(repo):
    """the set of rev that compete to be the final successors of some revision.
    """
    divergent = set()
    store = repo.obsstore
    succscache = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        # walk the precursor markers of this changeset, transitively
        pending = set(store.precursors.get(ctx.node(), ()))
        while pending:
            precnode = pending.pop()[0]
            if precnode not in succscache:
                successorssets(repo, precnode, succscache)
            nonempty = [ss for ss in succscache[precnode] if ss]
            if len(nonempty) > 1:
                # more than one live successors set: divergent
                divergent.add(ctx.rev())
                break
            pending.update(store.precursors.get(precnode, ()))
    return divergent
1099
1099
1100
1100
def createmarkers(repo, relations, flag=0, date=None, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuples. `old` and `news` are changectx. The optional per-relation
    dictionary holds metadata for that marker only; it is merged with the
    global metadata passed through this function's `metadata` argument.

    Trying to obsolete a public changeset raises an exception.

    The current user and date are used unless specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare the shared metadata; record the current user unless the
    # caller already supplied one
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    tr = repo.transaction('add-obsolescence-marker')
    try:
        for rel in relations:
            prec, sucs = rel[0], rel[1]
            # per-relation metadata overrides the shared values
            localmetadata = metadata.copy()
            if len(rel) > 2:
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise util.Abort("cannot obsolete immutable changeset: %s"
                                 % prec)
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            # a pruned changeset (no successors) records its parents instead
            npare = None
            if not nsucs:
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise util.Abort("changeset %s cannot obsolete itself" % prec)
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata)
        repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
1147
1147
def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    enabled = set(repo.ui.configlist('experimental', 'evolution'))
    if 'all' in enabled:
        return True

    # For migration purposes, temporarily return true if the config hasn't
    # been set but _enabled is true.
    if not enabled and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    needcreate = allowunstableopt in enabled or exchangeopt in enabled
    if needcreate and createmarkersopt not in enabled:
        raise util.Abort(_("'createmarkers' obsolete option must be enabled "
                           "if other obsolete options are enabled"))

    return option in enabled
General Comments 0
You need to be logged in to leave comments. Login now