readmarkers: combine parent conditionals
Matt Mackall
r23791:2e58b062 default
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "precursor" and possible
replacements are called "successors". Markers that use changeset X as
a precursor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "precursor markers of Y" because they hold
information about the precursors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

    (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. The marker format depends on the
version. See the comment associated with each format for details.

"""
import struct
import util, base85, node
import phases
from i18n import _

_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize

_SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# Options for obsolescence
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'
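
# These names identify the individual obsolescence features a repository can
# opt into. They are plain constants here; checking them against the user's
# configuration is left to the code that consumes them.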

### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it's a successor of a public changeset
#
# o A' (bumped)
# |`:
# | o A
# |/
# o Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
# o Ad
# |`:
# | x A'
# |'|
# o | A
# |/
# o Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker, <A', (Ad,)>.
# This flag means that the successor expresses the changes between the public
# and bumped versions and fixes the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
usingsha256 = 2
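
# Both flag values are distinct bits, so several of them can be combined in a
# marker's flag field and tested independently, e.g. `mark[2] & usingsha256`.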

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)
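
# A quick, illustrative sanity check of the fixed layout above: '>BIB20s' is
# big-endian with no padding, so the fixed part of a version-0 marker is
# 1 + 4 + 1 + 20 = 26 bytes.
#
#   >>> import struct
#   >>> struct.calcsize('>BIB20s')
#   26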

def _fm0readmarkers(data, off=0):
    # Loop on markers
    l = len(data)
    while off + _fm0fsize <= l:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise util.Abort(_('parsing obsolete marker: metadata is too '
                               'short, %d bytes expected, got %d')
                             % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid, drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)

def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise util.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents):
            metadata['p%i' % (i + 1)] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata key")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata value")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            key, value = l.split(':')
            d[key] = value
    return d

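# Illustration (not part of the stored format itself): the two helpers above
# are inverses of each other for well-formed mappings.
#
#   >>> enc = _fm0encodemeta({'user': 'alice', 'note': 'amend'})
#   >>> enc
#   'note:amend\x00user:alice'
#   >>> _fm0decodemeta(enc)['user']
#   'alice'
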
## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: precursor changeset identifier.
#
# - N*(20 or 32) bytes: successors changesets identifiers.
#
# - P*(20 or 32) bytes: parents of the precursors changesets.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize('BB')

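# Illustrative size check: '>IdhHBBB20s' packs to 4 + 8 + 2 + 2 + 1 + 1 + 1 +
# 20 = 39 bytes, so every version-1 marker starts with 39 bytes of fixed data
# before the variable-length nodes and metadata.
#
#   >>> import struct
#   >>> struct.calcsize('>IdhHBBB20s')
#   39
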
def _fm1readmarkers(data, off=0):
    # Loop on markers
    l = len(data)
    while off + _fm1fsize <= l:
        # read fixed part
        fixeddata = _unpack(_fm1fixed, data[off:off + _fm1fsize])
        off += _fm1fsize
        ttsize, seconds, tz, flags, numsuc, numpar, nummeta, prec = fixeddata
        # build the date tuple (upgrade tz minutes to seconds)
        date = (seconds, tz * 60)
        _fm1node = _fm1nodesha1
        fnodesize = _fm1nodesha1size
        if flags & usingsha256:
            _fm1node = _fm1nodesha256
            fnodesize = _fm1nodesha256size
        # read replacement
        sucs = ()
        if numsuc:
            s = (fnodesize * numsuc)
            sucs = _unpack(_fm1node * numsuc, data[off:off + s])
            off += s
        # read parents
        if numpar == _fm1parentnone:
            parents = None
        elif numpar == 0:
            parents = ()
        elif numpar: # neither None nor zero
            s = (fnodesize * numpar)
            parents = _unpack(_fm1node * numpar, data[off:off + s])
            off += s
        # read metadata
        metaformat = '>' + (_fm1metapair * nummeta)
        s = _fm1metapairsize * nummeta
        metapairsize = _unpack(metaformat, data[off:off + s])
        off += s
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            sk = metapairsize[idx]
            sv = metapairsize[idx + 1]
            metadata.append((data[off:off + sk], data[off + sk:off + sk + sv]))
            off += sk + sv
        metadata = tuple(metadata)

        yield (prec, sucs, flags, metadata, date, parents)

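# Illustration of the parent encoding handled above (a hand-built marker, not
# real repository data): a "P" value of 3 (_fm1parentnone) means no parent
# information was recorded and decodes to None, while 0 decodes to an empty
# tuple.
#
#   >>> import struct
#   >>> raw = struct.pack('>IdhHBBB20s', 39, 0.0, 0, 0, 0, 3, 0, 'x' * 20)
#   >>> markers = list(_fm1readmarkers(raw))
#   >>> markers[0][5] is None
#   True
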
def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)

# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}

@util.nogc
def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    off = 0
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion not in formats:
        raise util.Abort(_('parsing obsolete marker: unknown version %r')
                         % diskversion)
    return diskversion, formats[diskversion][0](data, off)

def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield _pack('>B', version)
    for marker in markers:
        yield encodeone(marker)


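# Illustrative round trip (a hand-built prune marker, no metadata): a stream
# produced by encodemarkers() with a header can be read back through
# _readmarkers().
#
#   >>> mark = ('x' * 20, (), 0, (), (0.0, 0), None)
#   >>> blob = ''.join(encodemarkers([mark], addheader=True))
#   >>> version, read = _readmarkers(blob)
#   >>> version, list(read) == [mark]
#   (0, True)
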
class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later versions
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the precursors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x]   -> set(markers on precursors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec:    nodeid, precursor changesets
    # succs:   tuple of nodeid, successor changesets (0-N length)
    # flag:    integer, flag field carrying modifier for the markers (see doc)
    # meta:    binary blob, encoded metadata dictionary
    # date:    (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    #          None is used when no data has been recorded

    def __init__(self, sopener, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related data
        self.caches = {}
        self._all = []
        self.precursors = {}
        self.successors = {}
        self.children = {}
        self.sopener = sopener
        data = sopener.tryread('obsstore')
        self._version = defaultformat
        self._readonly = readonly
        if data:
            self._version, markers = _readmarkers(data)
            self._load(markers)

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        return bool(self._all)

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating markers you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker has been added, False if the marker
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            else:
                date = util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicates.
        Return the number of new markers."""
        if self._readonly:
            raise util.Abort('creating obsolete markers is not enabled on '
                             'this repo')
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.sopener('obsstore', 'ab')
            try:
                # Whether the file's current position is at the beginning or
                # at the end after opening a file for appending is
                # implementation defined. So we must seek to the end before
                # calling tell(), or we may get a zero offset for non-zero
                # sized files on some platforms (issue3543).
                f.seek(0, _SEEK_END)
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in encodemarkers(new, offset == 0, self._version):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore
                # rebuilt. call 'filecacheentry.refresh()' here
                f.close()
            self._load(new)
            # new markers *may* have changed several sets. invalidate the
            # caches.
            self.caches.clear()
        # record the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @util.nogc
    def _load(self, markers):
        for mark in markers:
            self._all.append(mark)
            pre, sucs = mark[:2]
            self.successors.setdefault(pre, set()).add(mark)
            for suc in sucs:
                self.precursors.setdefault(suc, set()).add(mark)
            parents = mark[5]
            if parents is not None:
                for p in parents:
                    self.children.setdefault(p, set()).add(mark)
        if node.nullid in self.precursors:
            raise util.Abort(_('bad obsolescence marker detected: '
                               'invalid successors nullid'))

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use this changeset as a successor
        - prune markers of direct children of this changeset
        - recursive application of the two rules on precursors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers

def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for the highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None

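# Illustrative use: with the formats defined above, versions 0 and 1 are known
# locally, so the highest version shared with the remote side wins.
#
#   >>> commonversion([2, 1, 0])  # remote also knows a (hypothetical) 2
#   1
#   >>> commonversion([5]) is None  # no overlap
#   True
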
# arbitrarily picked to fit into 8K limit from HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = base85.b85encode(data)
    return keys

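# Illustration (hand-built prune markers): small batches fit in a single
# chunk, so only one pushkey entry is produced.
#
#   >>> mark = ('x' * 20, (), 0, (), (0.0, 0), None)
#   >>> sorted(_pushkeyescape([mark] * 3))
#   ['dump0']
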
def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(repo.obsstore)

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()

def getmarkers(repo, nodes=None):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    if nodes is None:
        rawmarkers = repo.obsstore
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for markerdata in rawmarkers:
        yield marker(repo, markerdata)

def relevantmarkers(repo, node):
    """all obsolete markers relevant to some revision"""
    for markerdata in repo.obsstore.relevantmarkers(node):
        yield marker(repo, markerdata)


def precursormarkers(ctx):
    """obsolete markers marking this changeset as a successor"""
    for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def successormarkers(ctx):
    """obsolete markers making this changeset obsolete"""
    for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore markers flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)

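# Illustration with a minimal stand-in for the store (real callers pass a
# repository's obsstore): A was rewritten as B, then B as C.
#
#   >>> class fakestore(object):
#   ...     successors = {'A': set([('A', ('B',), 0, (), (0.0, 0), None)]),
#   ...                   'B': set([('B', ('C',), 0, (), (0.0, 0), None)])}
#   >>> sorted(allsuccessors(fakestore(), ['A']))
#   ['A', 'B', 'C']
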
def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursor of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""

    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            # ignore markers flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)

def foreground(repo, nodes):
    """return all nodes in the "foreground" of other nodes

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycles may result in complex situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)


def successorssets(repo, initialnode, cache=None):
    """Return all sets of successors of initial nodes

    The successors set of a changeset A is a group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. The successors set contains non-obsolete changesets only.

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset
    A (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successors set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successors
    set will only contain itself, i.e. [(A,)].

    Finally, successors unknown locally are considered to be pruned (obsoleted
    without any successors).

    The optional `cache` parameter is a dictionary that may contain precomputed
    successors sets. It is meant to reuse the computation of a previous call to
    `successorssets` when multiple calls are made at the same time. The cache
    dictionary is updated in place. The caller is responsible for its life
    span. Code that makes multiple calls to `successorssets` *must* use this
    cache mechanism or suffer terrible performance.

    """
810
807
811 succmarkers = repo.obsstore.successors
808 succmarkers = repo.obsstore.successors
812
809
813 # Stack of nodes we search successors sets for
810 # Stack of nodes we search successors sets for
814 toproceed = [initialnode]
811 toproceed = [initialnode]
815 # set version of above list for fast loop detection
812 # set version of above list for fast loop detection
816 # element added to "toproceed" must be added here
813 # element added to "toproceed" must be added here
817 stackedset = set(toproceed)
814 stackedset = set(toproceed)
818 if cache is None:
815 if cache is None:
819 cache = {}
816 cache = {}
820
817
821 # This while loop is the flattened version of a recursive search for
818 # This while loop is the flattened version of a recursive search for
822 # successors sets
819 # successors sets
823 #
820 #
824 # def successorssets(x):
821 # def successorssets(x):
825 # successors = directsuccessors(x)
822 # successors = directsuccessors(x)
826 # ss = [[]]
823 # ss = [[]]
827 # for succ in directsuccessors(x):
824 # for succ in directsuccessors(x):
828 # # product as in itertools cartesian product
825 # # product as in itertools cartesian product
829 # ss = product(ss, successorssets(succ))
826 # ss = product(ss, successorssets(succ))
830 # return ss
827 # return ss
831 #
828 #
832 # But we can not use plain recursive calls here:
829 # But we can not use plain recursive calls here:
833 # - that would blow the python call stack
830 # - that would blow the python call stack
834 # - obsolescence markers may have cycles, we need to handle them.
831 # - obsolescence markers may have cycles, we need to handle them.
835 #
832 #
836 # The `toproceed` list act as our call stack. Every node we search
833 # The `toproceed` list act as our call stack. Every node we search
837 # successors set for are stacked there.
834 # successors set for are stacked there.
838 #
835 #
839 # The `stackedset` is set version of this stack used to check if a node is
836 # The `stackedset` is set version of this stack used to check if a node is
840 # already stacked. This check is used to detect cycles and prevent infinite
837 # already stacked. This check is used to detect cycles and prevent infinite
841 # loop.
838 # loop.
842 #
839 #
843 # successors set of all nodes are stored in the `cache` dictionary.
840 # successors set of all nodes are stored in the `cache` dictionary.
844 #
841 #
845 # After this while loop ends we use the cache to return the successors sets
842 # After this while loop ends we use the cache to return the successors sets
846 # for the node requested by the caller.
843 # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know the successors sets of the direct successors of
        #    CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know the successors sets of all direct successors of CURRENT:
        #    -> We can compute the CURRENT successors sets and add them to
        #       the cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid last successor.
                cache[current] = [(current,)]
            else:
                # The final obsolete version is unknown locally.
                # Do not count that as a valid successor.
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successor of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrarily set the successors sets
            #     of the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes the successors sets of CURRENT (case 4); see
            # the details in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   a precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles the successors defined in each marker.
            #
            #   Having none means a pruned node; multiple successors mean a
            #   split; a single successor is a standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed the successors
                            # sets of one of those successors, we add it to
                            # the `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    # the inner loop ran to completion: try the next marker
                    continue
                # the inner loop hit `break` (case 3): propagate it to abort
                # the marker loop as well
                break
            else:
                # case (4): we know the successors sets of all direct
                # successors
                #
                # The successors sets contributed by each marker depend on
                # the successors sets of all of its "successors" nodes.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process the successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
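                # A sketch of that product (with hypothetical nodes): given
                # a marker (A, (B, C)) where cache[B] == [(B,)] and
                # cache[C] == [(C,), (D,)] (C itself diverged), the product
                # contributes two successors sets for A: (B, C) and (B, D).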
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # Cartesian product with the previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicate entries in a successors
                                    # set; the first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated entries and strict subsets
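                # For instance (hypothetical nodes): if succssets is
                # [[B], [B, C]], then {B} is a strict subset of {B, C} and
                # only [B, C] survives the filtering below.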
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse() # put small successors sets first
                cache[current] = final
    return cache[initialnode]

def _knownrevs(repo, nodes):
    """yield revision numbers of known nodes passed in parameters

    Unknown revisions are silently ignored."""
    torev = repo.changelog.nodemap.get
    for n in nodes:
        rev = torev(n)
        if rev is not None:
            yield rev
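
# Usage sketch (n1 and n2 are hypothetical node ids): list(_knownrevs(repo,
# [n1, n2])) yields the revision numbers of whichever of n1, n2 exist in
# repo.changelog, silently skipping the unknown ones.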

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return decorator
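
# A minimal usage sketch (the 'example' name and function below are
# hypothetical, not part of this module): the decorated function receives a
# repo and returns the computed set of revisions.
#
#   @cachefor('example')
#   def _computeexampleset(repo):
#       return set()
#
# After that, getrevs(repo, 'example') computes and memoizes the set.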

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such an access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]

# To keep things simple, we need to invalidate the obsolescence caches when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence-related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists
    on the repo.

    (We could be smarter here given the exact event that triggers the cache
    clearing)"""
    # only clear caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    obs = set()
    getrev = repo.changelog.nodemap.get
    getphase = repo._phasecache.phase
    for n in repo.obsstore.successors:
        rev = getrev(n)
        # a non-zero phase means the revision is mutable; public revisions
        # (phase 0) can never be obsolete
        if rev is not None and getphase(repo, rev):
            obs.add(rev)
    return obs

@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    # revset is not efficient enough here
    # we do (obsolete()::) - obsolete() by hand
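    # a hedged equivalent as a revset sketch (correct but slower):
    #   repo.revs('(obsolete()::) - obsolete()')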
    obs = getrevs(repo, 'obsolete')
    if not obs:
        return set()
    cl = repo.changelog
    return set(r for r in cl.descendants(obs) if r not in obs)

@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')


@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util functions (avoid attribute lookups in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    obs = getrevs(repo, 'obsolete')
    for rev in repo:
        # We only evaluate mutable, non-obsolete revisions
        if (public < phase(repo, rev)) and (rev not in obs):
            node = cl.node(rev)
            # (future) A cache of precursors may be worth it if splits are
            # very common
            for pnode in allprecursors(repo.obsstore, [node],
                                       ignoreflags=bumpedfix):
                prev = torev(pnode) # unfiltered! but so is phasecache
                if (prev is not None) and (phase(repo, prev) <= public):
                    # we have a public precursor
                    bumped.add(rev)
                    break # Next draft!
    return bumped

@cachefor('divergent')
def _computedivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        while toprocess:
            # walk precursor markers transitively: a precursor with more
            # than one non-empty successors set makes this rev divergent
            prec = toprocess.pop()[0]
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent


def createmarkers(repo, relations, flag=0, date=None, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuples. `old` and `new` are changectx objects. metadata is an optional
    dictionary containing metadata for this marker only. It is merged with
    the global metadata specified through the `metadata` argument of this
    function.

    Trying to obsolete a public changeset will raise an exception.

    The current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    tr = repo.transaction('add-obsolescence-marker')
    try:
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise util.Abort("cannot obsolete immutable changeset: %s"
                                 % prec)
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise util.Abort("changeset %s cannot obsolete itself" % prec)
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata)
            repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()

def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    result = set(repo.ui.configlist('experimental', 'evolution'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't
    # been set but _enabled is true.
    if len(result) == 0 and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        createmarkersopt not in result):
        raise util.Abort(_("'createmarkers' obsolete option must be enabled "
                           "if other obsolete options are enabled"))

    return option in result
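
# Hedged configuration sketch, assuming the option constants used above map
# to the strings 'createmarkers', 'allowunstable' and 'exchange':
#
#   [experimental]
#   evolution = all
#
# or a subset such as:
#
#   [experimental]
#   evolution = createmarkers, exchange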