readmarkers: drop a conditional
Matt Mackall
r23793:201e1516 default
@@ -1,1175 +1,1173
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "precursor" and possible
replacements are called "successors". Markers that used changeset X as
a precursor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "precursor markers of Y" because they hold
information about the precursors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

    (A, (A',)) and (A, (A'',))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. Marker formats depend on the version.
See the comment associated with each format for details.

"""
import struct
import util, base85, node
import phases
from i18n import _

_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize

_SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# Options for obsolescence
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'

### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it is a successor of a public changeset
#
#     o A' (bumped)
#     |`:
#     | o A
#     |/
#     o Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
#     o Ad
#     |`:
#     | x A'
#     |'|
#     o | A
#     |/
#     o Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker, <A', (Ad,)>.
# This flag means that the successor expresses the changes between the public
# and bumped versions and fixes the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
usingsha256 = 2
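
# Illustrative sketch (not part of the upstream file): the flags form a bit
# field, so individual properties are tested with a bitwise and.
#
#   >>> flags = bumpedfix | usingsha256
#   >>> bool(flags & bumpedfix), bool(flags & usingsha256)
#   (True, True)
#   >>> bool(bumpedfix & usingsha256)
#   False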

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)
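
# Illustrative sketch (not part of the upstream file): the fixed part of a
# v0 marker is 1 + 4 + 1 + 20 = 26 bytes, and each successor adds another
# 20-byte node.
#
#   >>> import struct
#   >>> struct.calcsize('>BIB20s'), struct.calcsize('20s')
#   (26, 20)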

def _fm0readmarkers(data, off=0):
    # Loop on markers
    l = len(data)
    while off + _fm0fsize <= l:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise util.Abort(_('parsing obsolete marker: metadata is too '
                               'short, %d bytes expected, got %d')
                             % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)

def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise util.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents):
            metadata['p%i' % (i + 1)] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata
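
# Illustrative sketch (not part of the upstream file): a v0 marker built by
# _fm0encodeonemarker can be read back by _fm0readmarkers. The nodes below
# are fake 20-byte identifiers made up for the example.
#
#   >>> prec, suc = 'a' * 20, 'b' * 20
#   >>> m = (prec, (suc,), 0, (), (0.0, 0), None)
#   >>> raw = _fm0encodeonemarker(m)
#   >>> list(_fm0readmarkers(raw))[0][:2] == (prec, (suc,))
#   True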

def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata key")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata value")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            # values may contain ':', so split on the first colon only
            key, value = l.split(':', 1)
            d[key] = value
    return d
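
# Illustrative sketch (not part of the upstream file): metadata round-trips
# through the nul-separated 'key:value' encoding, with keys sorted on encode.
#
#   >>> meta = {'user': 'alice', 'note': 'x'}
#   >>> _fm0encodemeta(meta)
#   'note:x\x00user:alice'
#   >>> _fm0decodemeta(_fm0encodemeta(meta)) == meta
#   True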

## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: precursor changeset identifier.
#
# - N*(20 or 32) bytes: successors changesets identifiers.
#
# - P*(20 or 32) bytes: parents of the precursors changesets.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize('BB')
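
# Illustrative sketch (not part of the upstream file): the v1 fixed part is
# 4 + 8 + 2 + 2 + 1 + 1 + 1 + 20 = 39 bytes, and the parent count occupies
# the top two bits of a 16-bit field.
#
#   >>> import struct
#   >>> struct.calcsize('>IdhHBBB20s')
#   39
#   >>> hex(3 << 14)  # _fm1parentmask
#   '0xc000'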

def _fm1readmarkers(data, off=0):
    # Loop on markers
    l = len(data)
    while off + _fm1fsize <= l:
        # read fixed part
        fixeddata = _unpack(_fm1fixed, data[off:off + _fm1fsize])
        off += _fm1fsize
        ttsize, seconds, tz, flags, numsuc, numpar, nummeta, prec = fixeddata

        # build the date tuple (upgrade tz minutes to seconds)
        date = (seconds, tz * 60)
        _fm1node = _fm1nodesha1
        fnodesize = _fm1nodesha1size
        if flags & usingsha256:
            _fm1node = _fm1nodesha256
            fnodesize = _fm1nodesha256size

-        # read replacement
-        sucs = ()
-        if numsuc:
-            s = (fnodesize * numsuc)
-            sucs = _unpack(_fm1node * numsuc, data[off:off + s])
-            off += s
+        # read 0 or more successors
+        s = (fnodesize * numsuc)
+        sucs = _unpack(_fm1node * numsuc, data[off:off + s])
+        off += s

        # read parents
        if numpar == _fm1parentnone:
            parents = None
        elif numpar == 0:
            parents = ()
        elif numpar: # neither None nor zero
            s = (fnodesize * numpar)
            parents = _unpack(_fm1node * numpar, data[off:off + s])
            off += s

        # read metadata
        metaformat = '>' + (_fm1metapair * nummeta)
        s = _fm1metapairsize * nummeta
        metapairsize = _unpack(metaformat, data[off:off + s])
        off += s
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            sk = metapairsize[idx]
            sv = metapairsize[idx + 1]
            metadata.append((data[off:off + sk], data[off + sk:off + sk + sv]))
            off += sk + sv
        metadata = tuple(metadata)

        yield (prec, sucs, flags, metadata, date, parents)

def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)
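
# Illustrative sketch (not part of the upstream file): v1 markers round-trip
# the same way, including parents and metadata. Fake 20-byte nodes again.
#
#   >>> prec, suc, p1 = 'a' * 20, 'b' * 20, 'c' * 20
#   >>> m = (prec, (suc,), 0, (('user', 'alice'),), (0.0, 0), (p1,))
#   >>> list(_fm1readmarkers(_fm1encodeonemarker(m)))[0] == m
#   True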
372
370
373 # mapping to read/write various marker formats
371 # mapping to read/write various marker formats
374 # <version> -> (decoder, encoder)
372 # <version> -> (decoder, encoder)
375 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
373 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
376 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
374 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
377
375
378 @util.nogc
376 @util.nogc
379 def _readmarkers(data):
377 def _readmarkers(data):
380 """Read and enumerate markers from raw data"""
378 """Read and enumerate markers from raw data"""
381 off = 0
379 off = 0
382 diskversion = _unpack('>B', data[off:off + 1])[0]
380 diskversion = _unpack('>B', data[off:off + 1])[0]
383 off += 1
381 off += 1
384 if diskversion not in formats:
382 if diskversion not in formats:
385 raise util.Abort(_('parsing obsolete marker: unknown version %r')
383 raise util.Abort(_('parsing obsolete marker: unknown version %r')
386 % diskversion)
384 % diskversion)
387 return diskversion, formats[diskversion][0](data, off)
385 return diskversion, formats[diskversion][0](data, off)
388
386
389 def encodemarkers(markers, addheader=False, version=_fm0version):
387 def encodemarkers(markers, addheader=False, version=_fm0version):
390 # Kept separate from flushmarkers(), it will be reused for
388 # Kept separate from flushmarkers(), it will be reused for
391 # markers exchange.
389 # markers exchange.
392 encodeone = formats[version][1]
390 encodeone = formats[version][1]
393 if addheader:
391 if addheader:
394 yield _pack('>B', version)
392 yield _pack('>B', version)
395 for marker in markers:
393 for marker in markers:
396 yield encodeone(marker)
394 yield encodeone(marker)
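
# Illustrative sketch (not part of the upstream file): encodemarkers() with
# addheader=True produces a stream that _readmarkers() accepts.
#
#   >>> m = ('a' * 20, (), 0, (), (0.0, 0), None)
#   >>> data = ''.join(encodemarkers([m], True, _fm1version))
#   >>> version, markers = _readmarkers(data)
#   >>> version, list(markers) == [m]
#   (1, True)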


class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the precursors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on precursors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec:    nodeid, precursor changesets
    # succs:   tuple of nodeid, successor changesets (0-N length)
    # flag:    integer, flag field carrying modifier for the markers (see doc)
    # meta:    binary blob, encoded metadata dictionary
    # date:    (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    #          None is used when no data has been recorded

    def __init__(self, sopener, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related data
        self.caches = {}
        self._all = []
        self.precursors = {}
        self.successors = {}
        self.children = {}
        self.sopener = sopener
        data = sopener.tryread('obsstore')
        self._version = defaultformat
        self._readonly = readonly
        if data:
            self._version, markers = _readmarkers(data)
            self._load(markers)

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        return bool(self._all)

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating markers, you want to use the
        `createmarkers` function in this module instead.

        Return True if a new marker has been added, False if the marker
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            else:
                date = util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicates.
        Return the number of new markers."""
        if self._readonly:
            raise util.Abort('creating obsolete markers is not enabled on this '
                             'repo')
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.sopener('obsstore', 'ab')
            try:
                # Whether the file's current position is at the beginning or
                # at the end after opening a file for appending is
                # implementation defined. So we must seek to the end before
                # calling tell(), or we may get a zero offset for non-zero
                # sized files on some platforms (issue3543).
                f.seek(0, _SEEK_END)
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in encodemarkers(new, offset == 0, self._version):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._load(new)
            # new markers *may* have changed several sets. invalidate the
            # caches.
            self.caches.clear()
        # record the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @util.nogc
    def _load(self, markers):
        for mark in markers:
            self._all.append(mark)
            pre, sucs = mark[:2]
            self.successors.setdefault(pre, set()).add(mark)
            for suc in sucs:
                self.precursors.setdefault(suc, set()).add(mark)
            parents = mark[5]
            if parents is not None:
                for p in parents:
                    self.children.setdefault(p, set()).add(mark)
        if node.nullid in self.precursors:
            raise util.Abort(_('bad obsolescence marker detected: '
                               'invalid successors nullid'))

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use a node of the set as successor
        - prune markers of direct children of a node of the set
        - recursive application of the two rules on precursors of these markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers

def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None
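
# Illustrative sketch (not part of the upstream file): pick the newest
# format version that both sides understand.
#
#   >>> commonversion([2, 1, 0])  # 2 is unknown locally, so 1 wins
#   1
#   >>> commonversion([2, 3]) is None
#   True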

# arbitrarily picked to fit into the 8K limit from HTTP servers
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = base85.b85encode(data)
    return keys
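
# Illustrative sketch (not part of the upstream file): each 'dump%i' value
# is a base85-encoded chunk that starts with the version byte once decoded.
#
#   >>> m = ('a' * 20, (), 0, (), (0.0, 0), None)
#   >>> keys = _pushkeyescape([m])
#   >>> sorted(keys.keys())
#   ['dump0']
#   >>> base85.b85decode(keys['dump0'])[0] == _pack('>B', _fm0version)
#   True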

def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(repo.obsstore)

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()

def getmarkers(repo, nodes=None):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    if nodes is None:
        rawmarkers = repo.obsstore
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for markerdata in rawmarkers:
        yield marker(repo, markerdata)

def relevantmarkers(repo, node):
    """all obsolete markers relevant to some revision"""
    for markerdata in repo.obsstore.relevantmarkers(node):
        yield marker(repo, markerdata)


def precursormarkers(ctx):
    """obsolete marker marking this changeset as a successor"""
    for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def successormarkers(ctx):
    """obsolete marker making this changeset obsolete"""
    for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)
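
# Illustrative sketch (not part of the upstream file): allsuccessors() only
# needs an object with a 'successors' mapping, so a stub is enough to watch
# the walk follow markers transitively (A rewritten as B, B rewritten as C).
#
#   >>> class fakestore(object):
#   ...     successors = {'A': set([('A', ('B',), 0, (), (0.0, 0), None)]),
#   ...                   'B': set([('B', ('C',), 0, (), (0.0, 0), None)])}
#   >>> sorted(allsuccessors(fakestore(), ['A']))
#   ['A', 'B', 'C']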
723
721
724 def allprecursors(obsstore, nodes, ignoreflags=0):
722 def allprecursors(obsstore, nodes, ignoreflags=0):
725 """Yield node for every precursors of <nodes>.
723 """Yield node for every precursors of <nodes>.
726
724
727 Some precursors may be unknown locally.
725 Some precursors may be unknown locally.
728
726
729 This is a linear yield unsuited to detecting folded changesets. It includes
727 This is a linear yield unsuited to detecting folded changesets. It includes
730 initial nodes too."""
728 initial nodes too."""
731
729
732 remaining = set(nodes)
730 remaining = set(nodes)
733 seen = set(remaining)
731 seen = set(remaining)
734 while remaining:
732 while remaining:
735 current = remaining.pop()
733 current = remaining.pop()
736 yield current
734 yield current
737 for mark in obsstore.precursors.get(current, ()):
735 for mark in obsstore.precursors.get(current, ()):
738 # ignore marker flagged with specified flag
736 # ignore marker flagged with specified flag
739 if mark[2] & ignoreflags:
737 if mark[2] & ignoreflags:
740 continue
738 continue
741 suc = mark[0]
739 suc = mark[0]
742 if suc not in seen:
740 if suc not in seen:
743 seen.add(suc)
741 seen.add(suc)
744 remaining.add(suc)
742 remaining.add(suc)
745
743
746 def foreground(repo, nodes):
744 def foreground(repo, nodes):
747 """return all nodes in the "foreground" of other node
745 """return all nodes in the "foreground" of other node
748
746
749 The foreground of a revision is anything reachable using parent -> children
747 The foreground of a revision is anything reachable using parent -> children
750 or precursor -> successor relation. It is very similar to "descendant" but
748 or precursor -> successor relation. It is very similar to "descendant" but
751 augmented with obsolescence information.
749 augmented with obsolescence information.
752
750
753 Beware that possible obsolescence cycle may result if complex situation.
751 Beware that possible obsolescence cycle may result if complex situation.
754 """
752 """
755 repo = repo.unfiltered()
753 repo = repo.unfiltered()
756 foreground = set(repo.set('%ln::', nodes))
754 foreground = set(repo.set('%ln::', nodes))
757 if repo.obsstore:
755 if repo.obsstore:
758 # We only need this complicated logic if there is obsolescence
756 # We only need this complicated logic if there is obsolescence
759 # XXX will probably deserve an optimised revset.
757 # XXX will probably deserve an optimised revset.
760 nm = repo.changelog.nodemap
758 nm = repo.changelog.nodemap
761 plen = -1
759 plen = -1
762 # compute the whole set of successors or descendants
760 # compute the whole set of successors or descendants
763 while len(foreground) != plen:
761 while len(foreground) != plen:
764 plen = len(foreground)
762 plen = len(foreground)
765 succs = set(c.node() for c in foreground)
763 succs = set(c.node() for c in foreground)
766 mutable = [c.node() for c in foreground if c.mutable()]
764 mutable = [c.node() for c in foreground if c.mutable()]
767 succs.update(allsuccessors(repo.obsstore, mutable))
765 succs.update(allsuccessors(repo.obsstore, mutable))
768 known = (n for n in succs if n in nm)
766 known = (n for n in succs if n in nm)
769 foreground = set(repo.set('%ln::', known))
767 foreground = set(repo.set('%ln::', known))
770 return set(c.node() for c in foreground)
768 return set(c.node() for c in foreground)
771
769
772
770
773 def successorssets(repo, initialnode, cache=None):
771 def successorssets(repo, initialnode, cache=None):
774 """Return all set of successors of initial nodes
772 """Return all set of successors of initial nodes
775
773
776 The successors set of a changeset A are a group of revisions that succeed
774 The successors set of a changeset A are a group of revisions that succeed
777 A. It succeeds A as a consistent whole, each revision being only a partial
775 A. It succeeds A as a consistent whole, each revision being only a partial
778 replacement. The successors set contains non-obsolete changesets only.
776 replacement. The successors set contains non-obsolete changesets only.
779
777
780 This function returns the full list of successor sets which is why it
778 This function returns the full list of successor sets which is why it
781 returns a list of tuples and not just a single tuple. Each tuple is a valid
779 returns a list of tuples and not just a single tuple. Each tuple is a valid
782 successors set. Not that (A,) may be a valid successors set for changeset A
780 successors set. Not that (A,) may be a valid successors set for changeset A
783 (see below).
781 (see below).
784
782
785 In most cases, a changeset A will have a single element (e.g. the changeset
783 In most cases, a changeset A will have a single element (e.g. the changeset
786 A is replaced by A') in its successors set. Though, it is also common for a
784 A is replaced by A') in its successors set. Though, it is also common for a
787 changeset A to have no elements in its successor set (e.g. the changeset
785 changeset A to have no elements in its successor set (e.g. the changeset
788 has been pruned). Therefore, the returned list of successors sets will be
786 has been pruned). Therefore, the returned list of successors sets will be
789 [(A',)] or [], respectively.
787 [(A',)] or [], respectively.
790
788
791 When a changeset A is split into A' and B', however, it will result in a
789 When a changeset A is split into A' and B', however, it will result in a
792 successors set containing more than a single element, i.e. [(A',B')].
790 successors set containing more than a single element, i.e. [(A',B')].
793 Divergent changesets will result in multiple successors sets, i.e. [(A',),
791 Divergent changesets will result in multiple successors sets, i.e. [(A',),
794 (A'')].
792 (A'')].
795
793
796 If a changeset A is not obsolete, then it will conceptually have no
794 If a changeset A is not obsolete, then it will conceptually have no
797 successors set. To distinguish this from a pruned changeset, the successor
795 successors set. To distinguish this from a pruned changeset, the successor
798 set will only contain itself, i.e. [(A,)].
796 set will only contain itself, i.e. [(A,)].
799
797
800 Finally, successors unknown locally are considered to be pruned (obsoleted
798 Finally, successors unknown locally are considered to be pruned (obsoleted
801 without any successors).
799 without any successors).
802
800
803 The optional `cache` parameter is a dictionary that may contain precomputed
801 The optional `cache` parameter is a dictionary that may contain precomputed
804 successors sets. It is meant to reuse the computation of a previous call to
802 successors sets. It is meant to reuse the computation of a previous call to
805 `successorssets` when multiple calls are made at the same time. The cache
803 `successorssets` when multiple calls are made at the same time. The cache
806 dictionary is updated in place. The caller is responsible for its live
804 dictionary is updated in place. The caller is responsible for its live
807 spawn. Code that makes multiple calls to `successorssets` *must* use this
805 spawn. Code that makes multiple calls to `successorssets` *must* use this
808 cache mechanism or suffer terrible performances.
806 cache mechanism or suffer terrible performances.
809
807
810 """
808 """
811
809
812 succmarkers = repo.obsstore.successors
810 succmarkers = repo.obsstore.successors
813
811
814 # Stack of nodes we search successors sets for
812 # Stack of nodes we search successors sets for
815 toproceed = [initialnode]
813 toproceed = [initialnode]
816 # set version of above list for fast loop detection
814 # set version of above list for fast loop detection
817 # element added to "toproceed" must be added here
815 # element added to "toproceed" must be added here
818 stackedset = set(toproceed)
816 stackedset = set(toproceed)
819 if cache is None:
817 if cache is None:
820 cache = {}
818 cache = {}
821
819
822 # This while loop is the flattened version of a recursive search for
820 # This while loop is the flattened version of a recursive search for
823 # successors sets
821 # successors sets
824 #
822 #
825 # def successorssets(x):
823 # def successorssets(x):
826 # successors = directsuccessors(x)
824 # successors = directsuccessors(x)
827 # ss = [[]]
825 # ss = [[]]
828 # for succ in directsuccessors(x):
826 # for succ in directsuccessors(x):
829 # # product as in itertools cartesian product
827 # # product as in itertools cartesian product
830 # ss = product(ss, successorssets(succ))
828 # ss = product(ss, successorssets(succ))
831 # return ss
829 # return ss
832 #
830 #
833 # But we can not use plain recursive calls here:
831 # But we can not use plain recursive calls here:
834 # - that would blow the python call stack
832 # - that would blow the python call stack
835 # - obsolescence markers may have cycles, we need to handle them.
833 # - obsolescence markers may have cycles, we need to handle them.
836 #
834 #
837 # The `toproceed` list act as our call stack. Every node we search
835 # The `toproceed` list act as our call stack. Every node we search
838 # successors set for are stacked there.
836 # successors set for are stacked there.
839 #
837 #
840 # The `stackedset` is set version of this stack used to check if a node is
838 # The `stackedset` is set version of this stack used to check if a node is
841 # already stacked. This check is used to detect cycles and prevent infinite
839 # already stacked. This check is used to detect cycles and prevent infinite
842 # loop.
840 # loop.
843 #
841 #
844 # successors set of all nodes are stored in the `cache` dictionary.
842 # successors set of all nodes are stored in the `cache` dictionary.
845 #
843 #
846 # After this while loop ends we use the cache to return the successors sets
844 # After this while loop ends we use the cache to return the successors sets
847 # for the node requested by the caller.
845 # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know the successors sets of some direct successors of
        #    CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know the successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT's successors sets and add them to the
        #       cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid last successor.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successor.
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            # For each direct successor of CURRENT, we check whether its
            # successors sets are known. If they are not, we stack the
            # unknown node and proceed to the next iteration of the while
            # loop. (case 3)
            #
            # During this step, we may detect obsolescence cycles: a node
            # with unknown successors sets that is already in the call stack.
            # In such a situation, we arbitrarily set the successors sets of
            # the node to nothing (node pruned) to break the cycle.
            #
            # If no break was encountered, we proceed to phase 2.
            #
            # Phase 2 computes the successors sets of CURRENT (case 4); see
            # details in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   their precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles the successors defined in each marker.
            #
            #   Having none means the node was pruned, multiple successors
            #   mean a split, and a single successor is a standard
            #   replacement.
            #
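            # Control-flow note (added; `data` and `needs_more_work` are
            # hypothetical names): the for/else/break nesting below is
            # Python's idiom for breaking out of two loops at once:
            #
            #     for outer in data:
            #         for inner in outer:
            #             if needs_more_work(inner):
            #                 break        # leaves the inner loop...
            #         else:
            #             continue         # inner loop finished: next outer
            #         break                # ...and then the outer loop
            #     else:
            #         pass                 # reached only if nothing broke out
            #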
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3): if we have not computed the successors
                            # sets of one of those successors, we add it to
                            # the `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know the successors sets of all direct
                # successors
                #
                # The successors sets contributed by each marker depend on the
                # successors sets of all its "successors" nodes.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from those
                # of other markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process the successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # Cartesian product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicate entries in a successors
                                    # set; the first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
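                # Worked example (added; X, B, C, B1, B2, C1 are hypothetical
                # nodes): for a marker (X, (B, C)) with
                # cache[B] = [(B1,), (B2,)] and cache[C] = [(C1,)], the
                # product above yields markss = [[B1, C1], [B2, C1]], i.e.
                # two divergent successors sets for this marker.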
                # remove duplicates and subsets
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse()  # put small successors sets first
                cache[current] = final
    return cache[initialnode]
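
# Usage sketch (added for illustration; `repo` and `node` stand for a local
# repository object and a binary changeset id):
#
#     cache = {}
#     for ss in successorssets(repo, node, cache):
#         print ss    # each `ss` is a tuple of successor nodes
#
# Reusing one `cache` dict across calls, as _computedivergentset does below,
# avoids recomputing shared parts of the obsolescence graph.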

def _knownrevs(repo, nodes):
    """yield the revision numbers of the known nodes passed as parameters

    Unknown revisions are silently ignored."""
    torev = repo.changelog.nodemap.get
    for n in nodes:
        rev = torev(n)
        if rev is not None:
            yield rev

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return decorator

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such an access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]
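
# Usage sketch (added for illustration): the set names registered below via
# @cachefor can be queried through getrevs, e.g.
#
#     obs = getrevs(repo, 'obsolete')    # computed once, then cached
#     if rev in getrevs(repo, 'unstable'):
#         ...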

# To keep things simple, we need to invalidate the obsolescence caches when:
#
# - a new changeset is added
# - the phase of a changeset is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence-related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists on
    the repo.

    (We could be smarter here given the exact event that triggered the cache
    clearing.)"""
    # only clear the caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    obs = set()
    getrev = repo.changelog.nodemap.get
    getphase = repo._phasecache.phase
    for n in repo.obsstore.successors:
        rev = getrev(n)
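        # note (added): phases.public is 0, so getphase(repo, rev) is truthy
        # exactly for mutable (draft or secret) revisions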
        if rev is not None and getphase(repo, rev):
            obs.add(rev)
    return obs

@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    # revset is not efficient enough here
    # we do (obsolete()::) - obsolete() by hand
    obs = getrevs(repo, 'obsolete')
    if not obs:
        return set()
    cl = repo.changelog
    return set(r for r in cl.descendants(obs) if r not in obs)
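
# For reference (added): a revset form of the computation above would be
# something like
#
#     unstable = repo.revs('(obsolete()::) - obsolete()')
#
# but, as the comment notes, the hand-rolled version is faster here.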

@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')


@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    obs = getrevs(repo, 'obsolete')
    for rev in repo:
        # We only evaluate mutable, non-obsolete revisions
        if (public < phase(repo, rev)) and (rev not in obs):
            node = cl.node(rev)
            # (future) A cache of precursors may be worth it if splits are
            # very common
            for pnode in allprecursors(repo.obsstore, [node],
                                       ignoreflags=bumpedfix):
                prev = torev(pnode) # unfiltered! but so is phasecache
                if (prev is not None) and (phase(repo, prev) <= public):
                    # we have a public precursor
                    bumped.add(rev)
                    break # Next draft!
    return bumped

@cachefor('divergent')
def _computedivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision."""
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        while toprocess:
            prec = toprocess.pop()[0]
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent
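
# Example of divergence (added; A, B and C are hypothetical changesets): two
# markers (A, (B,)) and (A, (C,)) record competing rewrites of A, so
# successorssets for A returns two non-empty sets [(B,), (C,)] and both B and
# C end up in the 'divergent' set computed above.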


def createmarkers(repo, relations, flag=0, date=None, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[, {metadata}])
    tuples. `old` and `news` are changectx. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function.

    Trying to obsolete a public changeset will raise an exception.

    The current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    tr = repo.transaction('add-obsolescence-marker')
    try:
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise util.Abort("cannot obsolete immutable changeset: %s"
                                 % prec)
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise util.Abort("changeset %s cannot obsolete itself" % prec)
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata)
            repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()

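# Usage sketch (added for illustration; `old` and `new` stand for changectx
# objects, e.g. repo['.']):
#
#     createmarkers(repo, [(old, (new,))])   # old is superseded by new
#     createmarkers(repo, [(old, ())])       # old is pruned
#     createmarkers(repo, [(old, (new,), {'note': 'reason'})])
#
# The per-relation metadata dict (a hypothetical 'note' key here) is merged
# over the global `metadata` argument before the marker is created.
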
def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    result = set(repo.ui.configlist('experimental', 'evolution'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't
    # been set but _enabled is true.
    if len(result) == 0 and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        createmarkersopt not in result):
        raise util.Abort(_("'createmarkers' obsolete option must be enabled "
                           "if other obsolete options are enabled"))

    return option in result
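
# Configuration sketch (added for illustration): the options checked above
# live in the [experimental] section of the hgrc, e.g.
#
#     [experimental]
#     evolution = createmarkers, exchange
#
# or `evolution = all` to enable everything.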