obsstore: record data as floating point in fm0 format...
Pierre-Yves David
r23002:2920a96f default
@@ -1,1174 +1,1175 @@
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
build new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "precursor" and possible
replacements are called "successors". Markers that use changeset X as
a precursor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "precursor markers of Y" because they hold
information about the precursors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

    (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. The marker format depends on the
version. See the comment associated with each format for details.

"""
import struct
import util, base85, node
import phases
from i18n import _

_pack = struct.pack
_unpack = struct.unpack

_SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on third party extensions to enable this.
_enabled = False

# Options for obsolescence
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'

### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it is a successor of a public changeset
#
#     o  A' (bumped)
#     |`:
#     | o  A
#     |/
#     o  Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
#     o  Ad
#     |`:
#     | x  A'
#     |'|
#     o |  A
#     |/
#     o  Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker, <A', (Ad,)>.
# This flag means that the successors express the changes between the public
# and bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
usingsha256 = 2

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = struct.calcsize(_fm0fixed)
_fm0fnodesize = struct.calcsize(_fm0node)

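# A minimal sketch of the fm0 fixed part described above; it assumes nothing
# beyond the stdlib struct module and fake 20-byte node ids (one marker,
# one successor, no metadata):
#
#     import struct
#     fixed = struct.pack('>BIB20s', 1, 0, 0, 'a' * 20)  # N=1, M=0, flags=0
#     blob = fixed + 'b' * 20                             # one successor node
#     numsuc, mdsize, flags, pre = struct.unpack('>BIB20s', blob[:26])
#     (suc,) = struct.unpack('20s', blob[26:46])
#     assert (numsuc, mdsize, pre, suc) == (1, 0, 'a' * 20, 'b' * 20)
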
def _fm0readmarkers(data, off=0):
    # Loop on markers
    l = len(data)
    while off + _fm0fsize <= l:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise util.Abort(_('parsing obsolete marker: metadata is too '
                               'short, %d bytes expected, got %d')
                             % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)

def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise util.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
-    metadata['date'] = '%d %i' % date
+    time, tz = date
+    metadata['date'] = '%r %i' % (time, tz)
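    # Note on the change above: '%d' truncated the float timestamp to an
    # integer, silently dropping fractional seconds, while '%r' records the
    # full float repr, which round-trips through the float() call in
    # _fm0readmarkers.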
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents):
            metadata['p%i' % (i + 1)] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

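# A round-trip sketch for the two fm0 helpers above (fake 20-byte node ids;
# illustration only, not part of the marker code):
#
#     m = ('a' * 20, ('b' * 20,), 0, (('user', 'alice'),), (42.0, 0), None)
#     assert list(_fm0readmarkers(_fm0encodeonemarker(m))) == [m]
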
def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in keys and no '\0' in either keys or values."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata keys")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata values")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            key, value = l.split(':')
            d[key] = value
    return d

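# Round-trip sketch for the metadata helpers above (illustration only):
#
#     meta = {'user': 'alice', 'note': 'amended'}
#     assert _fm0decodemeta(_fm0encodemeta(meta)) == meta
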
## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: precursor changeset identifier.
#
# - N*(20 or 32) bytes: successors changesets identifiers.
#
# - P*(20 or 32) bytes: parents of the precursors changesets.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1fsize = struct.calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = struct.calcsize('BB')

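# Size-check sketch for the fm1 fixed part with sha1 nodes, assuming only
# the struct format above: 4 + 8 + 2 + 2 + 1 + 1 + 1 + 20 = 39 bytes.
#
#     import struct
#     assert struct.calcsize('>IdhHBBB20s') == 39
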
def _fm1readmarkers(data, off=0):
    # Loop on markers
    l = len(data)
    while off + _fm1fsize <= l:
        # read fixed part
        cur = data[off:off + _fm1fsize]
        off += _fm1fsize
        fixeddata = _unpack(_fm1fixed, cur)
        ttsize, seconds, tz, flags, numsuc, numpar, nummeta, prec = fixeddata
        # extract the number of parents information
        if numpar == _fm1parentnone:
            numpar = None
        # build the date tuple (upgrade tz minutes to seconds)
        date = (seconds, tz * 60)
        _fm1node = _fm1nodesha1
        if flags & usingsha256:
            _fm1node = _fm1nodesha256
        fnodesize = struct.calcsize(_fm1node)
        # read replacement
        sucs = ()
        if numsuc:
            s = (fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm1node * numsuc, cur)
            off += s
        # read parents
        if numpar is None:
            parents = None
        elif numpar == 0:
            parents = ()
        elif numpar: # neither None nor zero
            s = (fnodesize * numpar)
            cur = data[off:off + s]
            parents = _unpack(_fm1node * numpar, cur)
            off += s
        # read metadata
        metaformat = '>' + (_fm1metapair * nummeta)
        s = _fm1metapairsize * nummeta
        metapairsize = _unpack(metaformat, data[off:off + s])
        off += s
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            sk = metapairsize[idx]
            sv = metapairsize[idx + 1]
            key = data[off:off + sk]
            value = data[off + sk:off + sk + sv]
            assert len(key) == sk
            assert len(value) == sv
            metadata.append((key, value))
            off += sk + sv
        metadata = tuple(metadata)

        yield (prec, sucs, flags, metadata, date, parents)

def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = struct.calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)

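# Round-trip sketch for the fm1 helpers above (fake 20-byte node ids,
# illustration only): one successor, one metadata pair, a fractional
# timestamp and no parent information.
#
#     m = ('a' * 20, ('b' * 20,), 0, (('user', 'alice'),), (42.5, -3600), None)
#     assert list(_fm1readmarkers(_fm1encodeonemarker(m))) == [m]
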
# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}

def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    off = 0
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion not in formats:
        raise util.Abort(_('parsing obsolete marker: unknown version %r')
                         % diskversion)
    return diskversion, formats[diskversion][0](data, off)

def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield _pack('>B', version)
    for marker in markers:
        yield encodeone(marker)


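# Sketch: serializing markers to a complete obsstore blob and reading them
# back through the version dispatch (fake node id, illustration only):
#
#     m = ('a' * 20, (), 0, (), (0.0, 0), None)
#     blob = ''.join(encodemarkers([m], addheader=True, version=_fm1version))
#     version, markers = _readmarkers(blob)
#     assert version == _fm1version and list(markers) == [m]
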
class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in a later
        # version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the precursors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]

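# Sketch: wrapping raw marker data (the repo argument is only stored for
# later use, so None is enough for a stand-alone illustration):
#
#     m = marker(None, ('a' * 20, (), 0, (('user', 'alice'),), (0.0, 0), None))
#     assert m.precnode() == 'a' * 20
#     assert m.metadata() == {'user': 'alice'}
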
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x]   -> set(markers on precursors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec:    nodeid, precursor changesets
    # succs:   tuple of nodeid, successor changesets (0-N length)
    # flag:    integer, flag field carrying modifier for the markers (see doc)
    # meta:    binary blob, encoded metadata dictionary
    # date:    (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    #          None is used when no data has been recorded

    def __init__(self, sopener, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related data
        self.caches = {}
        self._all = []
        self.precursors = {}
        self.successors = {}
        self.children = {}
        self.sopener = sopener
        data = sopener.tryread('obsstore')
        self._version = defaultformat
        self._readonly = readonly
        if data:
            self._version, markers = _readmarkers(data)
            self._load(markers)

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        return bool(self._all)

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None):
        """obsolete: add a new obsolete marker

        * ensure it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating markers you want to use the
        `createmarkers` function in this module instead.

        Return True if a new marker has been added, False if the marker
        already existed (no-op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            else:
                date = util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Takes care of filtering duplicates.
        Returns the number of new markers."""
        if self._readonly:
            raise util.Abort('creating obsolete markers is not enabled on this '
                             'repo')
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.sopener('obsstore', 'ab')
            try:
                # Whether the file's current position is at the beginning or
                # at the end after opening a file for appending is
                # implementation defined. So we must seek to the end before
                # calling tell(), or we may get a zero offset for non-zero
                # sized files on some platforms (issue3543).
                f.seek(0, _SEEK_END)
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in encodemarkers(new, offset == 0, self._version):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._load(new)
            # new markers *may* have changed several sets. invalidate the
            # caches.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    def _load(self, markers):
        for mark in markers:
            self._all.append(mark)
            pre, sucs = mark[:2]
            self.successors.setdefault(pre, set()).add(mark)
            for suc in sucs:
                self.precursors.setdefault(suc, set()).add(mark)
            parents = mark[5]
            if parents is not None:
                for p in parents:
                    self.children.setdefault(p, set()).add(mark)
        if node.nullid in self.precursors:
            raise util.Abort(_('bad obsolescence marker detected: '
                               'invalid successors nullid'))

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use this changeset as a successor
        - prune markers of direct children of this changeset
        - recursive application of the two rules on precursors of these markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers

def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for highest version known on both side
    for v in versions:
        if v in formats:
            return v
    return None

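# Example: with local formats {0, 1}, the highest shared version wins
# (illustration only):
#
#     assert commonversion([0, 1, 2]) == 1
#     assert commonversion([5]) is None
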
# arbitrarily picked to fit into 8K limit from HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = base85.b85encode(data)
    return keys

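# Sketch: a single marker fits in one part, so it lands in one
# base85-encoded 'dump0' key (fake node id, illustration only):
#
#     keys = _pushkeyescape([('a' * 20, (), 0, (), (0.0, 0), None)])
#     assert sorted(keys) == ['dump0']
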
def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(repo.obsstore)

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()

def getmarkers(repo, nodes=None):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    if nodes is None:
        rawmarkers = repo.obsstore
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for markerdata in rawmarkers:
        yield marker(repo, markerdata)

def relevantmarkers(repo, node):
    """all obsolete markers relevant to some revision"""
    for markerdata in repo.obsstore.relevantmarkers(node):
        yield marker(repo, markerdata)


def precursormarkers(ctx):
    """obsolete marker marking this changeset as a successor"""
    for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def successormarkers(ctx):
    """obsolete marker making this changeset obsolete"""
    for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)

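# Sketch: walking successors through a minimal stand-in for the obsstore;
# 'A' and 'B' are fake ids standing in for 20-byte nodes (illustration only):
#
#     class fakestore(object):
#         successors = {'A': set([('A', ('B',), 0, (), (0.0, 0), None)])}
#     assert set(allsuccessors(fakestore(), ['A'])) == set(['A', 'B'])
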
def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursor of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""

    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)

def foreground(repo, nodes):
    """return all nodes in the "foreground" of other nodes

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycles may result in complex situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)


def successorssets(repo, initialnode, cache=None):
    """Return all sets of successors of the initial node

    The successors set of a changeset A is a group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. The successors set contains non-obsolete changesets only.

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset
    A (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will only contain itself, i.e. [(A,)].

    Finally, successors unknown locally are considered to be pruned (obsoleted
    without any successors).

    The optional `cache` parameter is a dictionary that may contain precomputed
    successors sets. It is meant to reuse the computation of a previous call to
    `successorssets` when multiple calls are made at the same time. The cache
    dictionary is updated in place. The caller is responsible for its life
    span. Code that makes multiple calls to `successorssets` *must* use this
    cache mechanism or suffer terrible performance.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #    successors = directsuccessors(x)
    #    ss = [[]]
    #    for succ in directsuccessors(x):
    #        # product as in itertools cartesian product
    #        ss = product(ss, successorssets(succ))
    #    return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list acts as our call stack. Every node we search
    # successors sets for is stacked there.
    #
    # The `stackedset` is a set version of this stack used to check if a node
    # is already stacked. This check is used to detect cycles and prevent
    # infinite loops.
    #
    # successors sets of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors
    # sets for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors set of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors sets and add them to the
        #       cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid last successor.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successor.
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successor of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrarily set the successors sets
            #     of the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
906 # Having none means pruned node, multiple successors means split,
906 # single successors are standard replacement.
907 # single successors are standard replacement.
907 #
908 #
908 for mark in sorted(succmarkers[current]):
909 for mark in sorted(succmarkers[current]):
909 for suc in mark[1]:
910 for suc in mark[1]:
910 if suc not in cache:
911 if suc not in cache:
911 if suc in stackedset:
912 if suc in stackedset:
912 # cycle breaking
913 # cycle breaking
913 cache[suc] = []
914 cache[suc] = []
914 else:
915 else:
915 # case (3) If we have not computed successors sets
916 # case (3) If we have not computed successors sets
916 # of one of those successors we add it to the
917 # of one of those successors we add it to the
917 # `toproceed` stack and stop all work for this
918 # `toproceed` stack and stop all work for this
918 # iteration.
919 # iteration.
919 toproceed.append(suc)
920 toproceed.append(suc)
920 stackedset.add(suc)
921 stackedset.add(suc)
921 break
922 break
922 else:
923 else:
923 continue
924 continue
924 break
925 break
925 else:
926 else:
926 # case (4): we know all successors sets of all direct
927 # case (4): we know all successors sets of all direct
927 # successors
928 # successors
928 #
929 #
929 # Successors set contributed by each marker depends on the
930 # Successors set contributed by each marker depends on the
930 # successors sets of all its "successors" node.
931 # successors sets of all its "successors" node.
931 #
932 #
932 # Each different marker is a divergence in the obsolescence
933 # Each different marker is a divergence in the obsolescence
933 # history. It contributes successors sets distinct from other
934 # history. It contributes successors sets distinct from other
934 # markers.
935 # markers.
935 #
936 #
936 # Within a marker, a successor may have divergent successors
937 # Within a marker, a successor may have divergent successors
937 # sets. In such a case, the marker will contribute multiple
938 # sets. In such a case, the marker will contribute multiple
938 # divergent successors sets. If multiple successors have
939 # divergent successors sets. If multiple successors have
939 # divergent successors sets, a Cartesian product is used.
940 # divergent successors sets, a Cartesian product is used.
940 #
941 #
941 # At the end we post-process successors sets to remove
942 # At the end we post-process successors sets to remove
942 # duplicated entry and successors set that are strict subset of
943 # duplicated entry and successors set that are strict subset of
943 # another one.
944 # another one.
944 succssets = []
945 succssets = []
945 for mark in sorted(succmarkers[current]):
946 for mark in sorted(succmarkers[current]):
946 # successors sets contributed by this marker
947 # successors sets contributed by this marker
947 markss = [[]]
948 markss = [[]]
948 for suc in mark[1]:
949 for suc in mark[1]:
949 # cardinal product with previous successors
950 # cardinal product with previous successors
950 productresult = []
951 productresult = []
951 for prefix in markss:
952 for prefix in markss:
952 for suffix in cache[suc]:
953 for suffix in cache[suc]:
953 newss = list(prefix)
954 newss = list(prefix)
954 for part in suffix:
955 for part in suffix:
955 # do not duplicated entry in successors set
956 # do not duplicated entry in successors set
956 # first entry wins.
957 # first entry wins.
957 if part not in newss:
958 if part not in newss:
958 newss.append(part)
959 newss.append(part)
959 productresult.append(newss)
960 productresult.append(newss)
960 markss = productresult
961 markss = productresult
961 succssets.extend(markss)
962 succssets.extend(markss)
962 # remove duplicated and subset
963 # remove duplicated and subset
963 seen = []
964 seen = []
964 final = []
965 final = []
965 candidate = sorted(((set(s), s) for s in succssets if s),
966 candidate = sorted(((set(s), s) for s in succssets if s),
966 key=lambda x: len(x[1]), reverse=True)
967 key=lambda x: len(x[1]), reverse=True)
967 for setversion, listversion in candidate:
968 for setversion, listversion in candidate:
968 for seenset in seen:
969 for seenset in seen:
969 if setversion.issubset(seenset):
970 if setversion.issubset(seenset):
970 break
971 break
971 else:
972 else:
972 final.append(listversion)
973 final.append(listversion)
973 seen.append(setversion)
974 seen.append(setversion)
974 final.reverse() # put small successors set first
975 final.reverse() # put small successors set first
975 cache[current] = final
976 cache[current] = final
976 return cache[initialnode]
977 return cache[initialnode]
977
978
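# The following is an illustrative, standalone sketch (not used by Mercurial)
# of the recursive definition that the flattened while loop above implements.
# `markers` stands in for a plain mapping
# {node: [(precursor, (successor, ...)), ...]} mimicking obsstore.successors;
# the `current in repo` check and the duplicate/subset pruning done above are
# simplified away to keep the correspondence with the pseudo-code visible.
def _successorssets_sketch(markers, node, _seen=None):
    """Toy recursive version of the loop above (illustration only)."""
    if _seen is None:
        _seen = set()
    if node in _seen:
        return [] # cycle: treat as pruned, like the cycle breaking above
    if node not in markers:
        return [[node]] # not obsolete: the node is its own successors set
    _seen.add(node)
    result = []
    for mark in sorted(markers[node]):
        # Cartesian product of the successors sets of each successor in
        # this marker, as in the phase 2 computation above.
        markss = [[]]
        for suc in mark[1]:
            markss = [prefix + suffix
                      for prefix in markss
                      for suffix in _successorssets_sketch(markers, suc,
                                                           _seen)]
        result.extend(markss)
    _seen.discard(node)
    return result
# For example, when A is split into B and C, and C is then rewritten into D:
#     _successorssets_sketch({'A': [('A', ('B', 'C'))],
#                             'C': [('C', ('D',))]}, 'A')
# returns [['B', 'D']], while two competing markers for the same node, as in
#     {'A': [('A', ('B',)), ('A', ('C',))]}
# yield the two divergent successors sets [['B'], ['C']].
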
def _knownrevs(repo, nodes):
    """yield revision numbers of known nodes passed as parameters

    Unknown revisions are silently ignored."""
    torev = repo.changelog.nodemap.get
    for n in nodes:
        rev = torev(n)
        if rev is not None:
            yield rev

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return decorator

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]

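# Usage sketch for the two helpers above (`_computemyset` and 'myset' are
# hypothetical names): a new cached set is registered with the decorator and
# then fetched lazily through getrevs(); the result is memoized in
# repo.obsstore.caches until clearobscaches() below wipes it.
#
#     @cachefor('myset')
#     def _computemyset(repo):
#         return frozenset(...)
#
#     revs = getrevs(repo, 'myset')
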
# To keep things simple, we need to invalidate the obsolescence cache when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence-related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists on
    the repo.

    (We could be smarter here given the exact event that triggers the cache
    clearing)"""
    # only clear caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    obs = set()
    getrev = repo.changelog.nodemap.get
    getphase = repo._phasecache.phase
    for n in repo.obsstore.successors:
        rev = getrev(n)
        if rev is not None and getphase(repo, rev):
            obs.add(rev)
    return obs

@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    # revset is not efficient enough here
    # we do (obsolete()::) - obsolete() by hand
    obs = getrevs(repo, 'obsolete')
    if not obs:
        return set()
    cl = repo.changelog
    return set(r for r in cl.descendants(obs) if r not in obs)

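# For instance, if a draft changeset X is obsolete while its child Y has no
# marker of its own, Y is unstable: it sits on a rewritten parent and
# typically needs to be rebased onto X's successor.
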
@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')


@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    obs = getrevs(repo, 'obsolete')
    for rev in repo:
        # We only evaluate mutable, non-obsolete revisions
        if (public < phase(repo, rev)) and (rev not in obs):
            node = cl.node(rev)
            # (future) A cache of precursors may be worth it if split is
            # very common
            for pnode in allprecursors(repo.obsstore, [node],
                                       ignoreflags=bumpedfix):
                prev = torev(pnode) # unfiltered! but so is phasecache
                if (prev is not None) and (phase(repo, prev) <= public):
                    # we have a public precursor
                    bumped.add(rev)
                    break # Next draft!
    return bumped

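# For instance, if changeset P is rewritten into draft P' and P is later made
# public, P' becomes bumped: it tries to obsolete an immutable changeset.
# Markers created to resolve that situation carry the `bumpedfix` flag, which
# is why the precursor walk above ignores it.
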
@cachefor('divergent')
def _computedivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision."""
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        while toprocess:
            prec = toprocess.pop()[0]
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent

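# For instance, if changeset A is rewritten into B locally and into C
# elsewhere, pulling both sides gives A two non-empty successors sets,
# [B] and [C]: B and C then compete to be A's final successor, and both are
# reported as divergent.
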
def createmarkers(repo, relations, flag=0, date=None, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuples. `old` and `news` are changectx. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    tr = repo.transaction('add-obsolescence-marker')
    try:
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise util.Abort("cannot obsolete immutable changeset: %s"
                                 % prec)
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise util.Abort("changeset %s cannot obsolete itself" % prec)
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata)
            repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()

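# Usage sketch for createmarkers() (the changectx names are hypothetical):
# the first relation records a plain rewrite, the second a prune (no
# successors) with an extra per-marker metadata dictionary, as allowed by the
# docstring above.
#
#     createmarkers(repo, [(old, (new,)),
#                          (pruned, (), {'reason': 'example'})])
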
def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    result = set(repo.ui.configlist('experimental', 'evolution'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't
    # been set but _enabled is true.
    if len(result) == 0 and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        createmarkersopt not in result):
        raise util.Abort(_("'createmarkers' obsolete option must be enabled "
                           "if other obsolete options are enabled"))

    return option in result
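# Configuration sketch: the features gated by isenabled() are driven by the
# `experimental.evolution` config list read above. Assuming the option
# constants defined earlier in this module (not shown in this hunk) are the
# strings 'createmarkers', 'allowunstable' and 'exchange', an hgrc could
# enable everything with:
#
#     [experimental]
#     evolution = all
#
# or selectively, keeping in mind that createmarkers must be included when
# any other option is, as enforced above:
#
#     [experimental]
#     evolution = createmarkers, allowunstable, exchange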