obsstore: disable garbage collection during initialization (issue4456)...
Pierre-Yves David
r23497:5817f71c default
@@ -1,1175 +1,1177 @@
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "precursor" and possible
replacements are called "successors". Markers that used changeset X as
a precursor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "precursor markers of Y" because they hold
information about the precursors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

  (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. The marker format depends on the
version. See the comment associated with each format for details.

"""
import struct
import util, base85, node
import phases
from i18n import _

_pack = struct.pack
_unpack = struct.unpack

_SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# Options for obsolescence
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'

### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it's a successor of a public changeset
#
#     o  A' (bumped)
#     |`:
#     | o  A
#     |/
#     o  Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
#     o  Ad
#     |`:
#     | x  A'
#     |'|
#     o |  A
#     |/
#     o  Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker, <A', (Ad,)>.
# This flag means that the successors express the changes between the public
# and bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
usingsha256 = 2

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = struct.calcsize(_fm0fixed)
_fm0fnodesize = struct.calcsize(_fm0node)
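As an illustration of the fixed part described above, a small round-trip sketch using the same struct format (assumes nothing beyond the standard library):

    import struct

    # one version-0 marker with no successors, no metadata and no flags;
    # the precursor id is a hypothetical 20-byte node
    prec = '\x11' * 20
    raw = struct.pack('>BIB20s', 0, 0, 0, prec)
    numsuc, mdsize, flags, pre = struct.unpack('>BIB20s', raw)
    assert (numsuc, mdsize, flags, pre) == (0, 0, 0, prec)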

def _fm0readmarkers(data, off=0):
    # Loop on markers
    l = len(data)
    while off + _fm0fsize <= l:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise util.Abort(_('parsing obsolete marker: metadata is too '
                               'short, %d bytes expected, got %d')
                             % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)

def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise util.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents):
            metadata['p%i' % (i + 1)] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata keys")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata values")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            # values may legitimately contain ':', so split on the first one
            key, value = l.split(':', 1)
            d[key] = value
    return d
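A round trip through this metadata encoding looks like the following sketch (the values here are made up):

    meta = {'user': 'alice', 'note': 'amended'}
    encoded = _fm0encodemeta(meta)  # 'note:amended\0user:alice'
    assert _fm0decodemeta(encoded) == meta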

## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: precursor changeset identifier.
#
# - N*(20 or 32) bytes: successor changeset identifiers.
#
# - P*(20 or 32) bytes: parents of the precursor changeset.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1fsize = struct.calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = struct.calcsize('BB')
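A sketch of the version-1 fixed part, packing the fields in the order listed above (illustrative only):

    import struct

    prec = '\x22' * 20                     # hypothetical sha1 precursor id
    fields = (struct.calcsize(_fm1fixed),  # total size: fixed part only here
              0.0,                         # date: seconds since epoch
              0,                           # timezone offset in minutes
              0,                           # flags (usingsha256 selects 32-byte nodes)
              0,                           # N: number of successors
              _fm1parentnone,              # P: 3 means "no parent data stored"
              0,                           # M: number of metadata entries
              prec)
    raw = struct.pack(_fm1fixed, *fields)
    assert struct.unpack(_fm1fixed, raw) == fields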

def _fm1readmarkers(data, off=0):
    # Loop on markers
    l = len(data)
    while off + _fm1fsize <= l:
        # read fixed part
        cur = data[off:off + _fm1fsize]
        off += _fm1fsize
        fixeddata = _unpack(_fm1fixed, cur)
        ttsize, seconds, tz, flags, numsuc, numpar, nummeta, prec = fixeddata
        # extract the number of parents information
        if numpar == _fm1parentnone:
            numpar = None
        # build the date tuple (upgrade tz minutes to seconds)
        date = (seconds, tz * 60)
        _fm1node = _fm1nodesha1
        if flags & usingsha256:
            _fm1node = _fm1nodesha256
        fnodesize = struct.calcsize(_fm1node)
        # read replacement
        sucs = ()
        if numsuc:
            s = (fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm1node * numsuc, cur)
            off += s
        # read parents
        if numpar is None:
            parents = None
        elif numpar == 0:
            parents = ()
        elif numpar: # neither None nor zero
            s = (fnodesize * numpar)
            cur = data[off:off + s]
            parents = _unpack(_fm1node * numpar, cur)
            off += s
        # read metadata
        metaformat = '>' + (_fm1metapair * nummeta)
        s = _fm1metapairsize * nummeta
        metapairsize = _unpack(metaformat, data[off:off + s])
        off += s
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            sk = metapairsize[idx]
            sv = metapairsize[idx + 1]
            key = data[off:off + sk]
            value = data[off + sk:off + sk + sv]
            assert len(key) == sk
            assert len(value) == sv
            metadata.append((key, value))
            off += sk + sv
        metadata = tuple(metadata)

        yield (prec, sucs, flags, metadata, date, parents)

def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = struct.calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)

# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}

+@util.nogc
def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    off = 0
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion not in formats:
        raise util.Abort(_('parsing obsolete marker: unknown version %r')
                         % diskversion)
    return diskversion, formats[diskversion][0](data, off)
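The `@util.nogc` decorator added above is the substance of this change: loading markers allocates many tuples, and CPython's cyclic collector is triggered by container allocations, so initializing a large obsstore can spend much of its time in the collector (issue4456). A minimal sketch of such a decorator, using only the standard `gc` module (the real implementation lives in Mercurial's `util` module):

    import gc

    def nogc(func):
        """Disable the cyclic garbage collector while func runs (sketch)."""
        def wrapper(*args, **kwargs):
            wasenabled = gc.isenabled()
            gc.disable()
            try:
                return func(*args, **kwargs)
            finally:
                if wasenabled:
                    gc.enable()
        return wrapper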

def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield _pack('>B', version)
    for marker in markers:
        yield encodeone(marker)


class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later versions
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changeset node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the precursors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x]   -> set(markers on precursors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec:    nodeid, precursor changesets
    # succs:   tuple of nodeid, successor changesets (0-N length)
    # flag:    integer, flag field carrying modifier for the markers (see doc)
    # meta:    binary blob, encoded metadata dictionary
    # date:    (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    #          None is used when no data has been recorded

    def __init__(self, sopener, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related computations
        self.caches = {}
        self._all = []
        self.precursors = {}
        self.successors = {}
        self.children = {}
        self.sopener = sopener
        data = sopener.tryread('obsstore')
        self._version = defaultformat
        self._readonly = readonly
        if data:
            self._version, markers = _readmarkers(data)
            self._load(markers)

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        return bool(self._all)

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None):
        """obsolete: add a new obsolete marker

        * ensure it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating markers, you want to use the
        `createmarkers` function in this module instead.

        Returns True if a new marker has been added, False if the marker
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            else:
                date = util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicates.
        Return the number of new markers."""
        if self._readonly:
            raise util.Abort('creating obsolete markers is not enabled on this '
                             'repo')
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.sopener('obsstore', 'ab')
            try:
                # Whether the file's current position is at the beginning or at
                # the end after opening a file for appending is implementation
                # defined. So we must seek to the end before calling tell(),
                # or we may get a zero offset for non-zero sized files on
                # some platforms (issue3543).
                f.seek(0, _SEEK_END)
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in encodemarkers(new, offset == 0, self._version):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._load(new)
            # new markers *may* have changed several sets. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

+    @util.nogc
    def _load(self, markers):
        for mark in markers:
            self._all.append(mark)
            pre, sucs = mark[:2]
            self.successors.setdefault(pre, set()).add(mark)
            for suc in sucs:
                self.precursors.setdefault(suc, set()).add(mark)
            parents = mark[5]
            if parents is not None:
                for p in parents:
                    self.children.setdefault(p, set()).add(mark)
        if node.nullid in self.precursors:
            raise util.Abort(_('bad obsolescence marker detected: '
                               'invalid successors nullid'))
    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use this changeset as successor
        - prune markers of direct children on this changeset
        - recursive application of the two rules on precursors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers

def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for the highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None
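For example, with only formats 0 and 1 implemented locally:

    assert commonversion([0, 1]) == 1       # both sides know fm1
    assert commonversion([42, 1, 0]) == 1   # unknown remote versions are skipped
    assert commonversion([42]) is None      # no common version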

# arbitrarily picked to fit into 8K limit from HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = base85.b85encode(data)
    return keys
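Each resulting chunk is self-contained: it starts with its own version header, so it round-trips through `_readmarkers` (or `obsstore.mergemarkers`) on its own. A usage sketch, with a hypothetical iterable `markers` of fm0 marker tuples:

    keys = _pushkeyescape(markers)          # markers: hypothetical input
    for key in sorted(keys):
        data = base85.b85decode(keys[key])
        version, decoded = _readmarkers(data)   # generator of marker tuples
        assert version == _fm0version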

def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(repo.obsstore)

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()

def getmarkers(repo, nodes=None):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    if nodes is None:
        rawmarkers = repo.obsstore
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for markerdata in rawmarkers:
        yield marker(repo, markerdata)

def relevantmarkers(repo, node):
    """all obsolete markers relevant to some revision"""
    for markerdata in repo.obsstore.relevantmarkers(node):
        yield marker(repo, markerdata)


def precursormarkers(ctx):
    """obsolete markers marking this changeset as a successor"""
    for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def successormarkers(ctx):
    """obsolete markers making this changeset obsolete"""
    for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield every successor node of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore markers flagged with the specified flag
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)

def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield every precursor node of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""

    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            # ignore markers flagged with the specified flag
            if mark[2] & ignoreflags:
                continue
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)

def foreground(repo, nodes):
    """return all nodes in the "foreground" of other nodes

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relations. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that obsolescence cycles may result in complex situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)


def successorssets(repo, initialnode, cache=None):
    """Return all successors sets of the initial node.

    The successors set of a changeset A is a group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. The successors set contains non-obsolete changesets only.

    This function returns the full list of successors sets, which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successors set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successors
    set will only contain itself, i.e. [(A,)].

    Finally, successors unknown locally are considered to be pruned (obsoleted
    without any successors).

    The optional `cache` parameter is a dictionary that may contain precomputed
    successors sets. It is meant to reuse the computation of a previous call to
    `successorssets` when multiple calls are made at the same time. The cache
    dictionary is updated in place. The caller is responsible for its life
    span. Code that makes multiple calls to `successorssets` *must* use this
    cache mechanism or suffer terrible performance.

    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list acts as our call stack. Every node we search
    # successors sets for is stacked there.
    #
    # The `stackedset` is a set version of this stack used to check if a node
    # is already stacked. This check is used to detect cycles and prevent
    # infinite loops.
    #
    # successors sets of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller (see the commented example below).
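    # For example, if A was split into B and C, and the cache already holds
    # cache['B'] == [('B1',)] and cache['C'] == [('C1',), ('C2',)], the
    # product step of the pseudo-code above combines them like this
    # (illustrative sketch with string stand-ins for nodes):
    #
    #   import itertools
    #   ss = [[]]
    #   for succ in ('B', 'C'):
    #       ss = [base + list(s)
    #             for base, s in itertools.product(ss, cache[succ])]
    #   # ss == [['B1', 'C1'], ['B1', 'C2']]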
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors set. Add it to the cache.
        # 3) We do not know the successors sets of some direct successors of
        #    CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know the successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT's successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid final successor.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successor.
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            # For each direct successor of CURRENT, we check whether its
            # successors sets are known. If they are not, we stack the
            # unknown node and proceed to the next iteration of the while
            # loop. (case 3)
            #
            # During this step, we may detect obsolescence cycles: a node
            # with unknown successors sets but already in the call stack.
            # In such a situation, we arbitrarily set the successors sets of
            # the node to nothing (node pruned) to break the cycle.
            #
            # If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see
            # details in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means a pruned node, multiple successors mean a
            #   split, a single successor is a standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors, we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    # all successors of this marker already have known
                    # successors sets: move on to the next marker
                    continue
                # a successor was stacked above: propagate the break to the
                # outer loop so phase 2 is skipped for this iteration
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # The successors set contributed by each marker depends on the
                # successors sets of all its "successors" nodes.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
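                #
                # A worked sketch with hypothetical nodes: if a marker
                # rewrites CURRENT into (A, B), with cache[A] == [[A1], [A2]]
                # and cache[B] == [[B1]], that marker contributes the sets
                # [[A1, B1], [A2, B1]] (one set per combination).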
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # Cartesian product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicate entries in a successors
                                    # set; the first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
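                # For example (hypothetical content): if succssets is
                # [[A], [A, B]], the post-processing below keeps only
                # [A, B], because {A} is a strict subset of {A, B}.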
                # remove duplicates and subsets
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse() # put small successors sets first
                cache[current] = final
    return cache[initialnode]

def _knownrevs(repo, nodes):
    """Yield the revision numbers of the known nodes passed as parameters

    Unknown nodes are silently ignored."""
    torev = repo.changelog.nodemap.get
    for n in nodes:
        rev = torev(n)
        if rev is not None:
            yield rev
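
# A hypothetical usage sketch: collect the local revisions for a mixed bag of
# nodes, some of which may not exist in this repository:
#
#   localrevs = set(_knownrevs(repo, maybeforeignnodes))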

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return decorator

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such an access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]
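
# A minimal sketch of how the two pieces fit together (the 'frobbed' name and
# its compute function are hypothetical):
#
#   @cachefor('frobbed')
#   def _computefrobbedset(repo):
#       return set()  # compute and return the revisions in the set
#
#   frobbed = getrevs(repo, 'frobbed')  # computed once, then cached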

# To be simple we need to invalidate obsolescence caches when:
#
# - a new changeset is added
# - public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence-related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists on
    the repo.

    (We could be smarter here given the exact event that triggers the cache
    clearing)"""
    # only clear caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    obs = set()
    getrev = repo.changelog.nodemap.get
    getphase = repo._phasecache.phase
    for n in repo.obsstore.successors:
        rev = getrev(n)
        # a non-zero phase means the changeset is mutable (not public)
        if rev is not None and getphase(repo, rev):
            obs.add(rev)
    return obs

@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete ancestors"""
    # revset is not efficient enough here
    # we do (obsolete()::) - obsolete() by hand
    obs = getrevs(repo, 'obsolete')
    if not obs:
        return set()
    cl = repo.changelog
    return set(r for r in cl.descendants(obs) if r not in obs)

@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete revisions with non obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete revisions without non obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')


@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # utility functions (avoid attribute lookups in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    obs = getrevs(repo, 'obsolete')
    for rev in repo:
        # We only evaluate mutable, non-obsolete revisions
        if (public < phase(repo, rev)) and (rev not in obs):
            node = cl.node(rev)
            # (future) A cache of precursors may be worth it if split is very
            # common
            for pnode in allprecursors(repo.obsstore, [node],
                                       ignoreflags=bumpedfix):
                prev = torev(pnode) # unfiltered! but so is phasecache
                if (prev is not None) and (phase(repo, prev) <= public):
                    # we have a public precursor
                    bumped.add(rev)
                    break # Next draft!
    return bumped
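
# A hypothetical scenario: a public changeset P is rewritten into a draft D
# (marker (P, (D,))). D "tries to obsolete" the public revision P, so D ends
# up in the bumped set.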

@cachefor('divergent')
def _computedivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        while toprocess:
            prec = toprocess.pop()[0]
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent
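
# A hypothetical scenario: two markers (A, (B,)) and (A, (C,)) give A two
# distinct non-empty successors sets, so both B and C (if mutable) are
# reported as divergent.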


def createmarkers(repo, relations, flag=0, date=None, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuples. `old` and `news` are changectx. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    tr = repo.transaction('add-obsolescence-marker')
    try:
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise util.Abort("cannot obsolete immutable changeset: %s"
                                 % prec)
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise util.Abort("changeset %s cannot obsolete itself" % prec)
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata)
            repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
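
# A minimal usage sketch (`oldctx`, `newctx` and `prunedctx` are hypothetical
# changectx objects): one call can record both a rewrite and a prune:
#
#   createmarkers(repo, [(oldctx, (newctx,)),
#                        (prunedctx, ())])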

def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    result = set(repo.ui.configlist('experimental', 'evolution'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return True if the config hasn't
    # been set but _enabled is true.
    if len(result) == 0 and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        createmarkersopt not in result):
        raise util.Abort(_("'createmarkers' obsolete option must be enabled "
                           "if other obsolete options are enabled"))

    return option in result
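
# A usage sketch (the option constants are defined earlier in this module):
#
#   if isenabled(repo, createmarkersopt):
#       ...  # safe to write new obsolescence markers
#
# typically driven by a configuration entry such as:
#
#   [experimental]
#   evolution = createmarkers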