obsstore: cache size computation for fm1 node...
Pierre-Yves David
r23499:b46876c9 default

# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
build new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "precursor" and possible
replacements are called "successors". Markers that used changeset X as
a precursor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "precursor markers of Y" because they hold
information about the precursors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

    (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. Marker format depends on the version.
See the comment associated with each format for details.

"""
import struct
import util, base85, node
import phases
from i18n import _

_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize

_SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# Options for obsolescence
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'

### obsolescence marker flags

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it is a successor of a public changeset
#
# o A' (bumped)
# |`:
# | o A
# |/
# o Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
# o Ad
# |`:
# | x A'
# |'|
# o | A
# |/
# o Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker: <A', (Ad,)>.
# This flag means that the successor expresses the changes between the public
# and bumped version and fixes the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
usingsha256 = 2

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)

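# Illustrative note (not part of the upstream file): a quick sanity check of
# the fixed-part size implied by the format string above. With big-endian
# packing ('>', no padding), 'B' is 1 byte, 'I' is 4 and '20s' is 20, so:
#
#   >>> import struct
#   >>> struct.calcsize('>BIB20s')   # == _fm0fsize
#   26
#   >>> struct.calcsize('20s')       # == _fm0fnodesize
#   20
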
def _fm0readmarkers(data, off=0):
    # Loop on markers
    l = len(data)
    while off + _fm0fsize <= l:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise util.Abort(_('parsing obsolete marker: metadata is too '
                               'short, %d bytes expected, got %d')
                             % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)

def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise util.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents):
            metadata['p%i' % (i + 1)] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

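# Illustrative sketch (not part of the upstream file): encoding one marker
# with _fm0encodeonemarker and feeding it back through _fm0readmarkers
# round-trips the tuple. The 20-byte node ids here are made-up placeholders:
#
#   >>> m = ('\x11' * 20, ('\x22' * 20,), 0, (), (0.0, 0), None)
#   >>> blob = _fm0encodeonemarker(m)
#   >>> list(_fm0readmarkers(blob)) == [m]
#   True
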
def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in keys and no '\0' in either keys or values."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata key")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata value")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            key, value = l.split(':')
            d[key] = value
    return d

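# Illustrative sketch (not part of the upstream file): the two metadata
# helpers above are simple inverses of each other for well-formed input:
#
#   >>> meta = {'user': 'alice', 'note': 'fold'}
#   >>> _fm0decodemeta(_fm0encodemeta(meta)) == meta
#   True
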
## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: precursor changeset identifier.
#
# - N*(20 or 32) bytes: successors changesets identifiers.
#
# - P*(20 or 32) bytes: parents of the precursors changesets.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize('BB')

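# Illustrative note (not part of the upstream file): the point of the two
# cached node sizes above (this changeset) is to avoid re-running
# struct.calcsize for every marker parsed. The values are constant:
#
#   >>> import struct
#   >>> struct.calcsize('>IdhHBBB20s')   # == _fm1fsize
#   39
#   >>> struct.calcsize('20s'), struct.calcsize('32s')   # sha1, sha256 nodes
#   (20, 32)
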
def _fm1readmarkers(data, off=0):
    # Loop on markers
    l = len(data)
    while off + _fm1fsize <= l:
        # read fixed part
        cur = data[off:off + _fm1fsize]
        off += _fm1fsize
        fixeddata = _unpack(_fm1fixed, cur)
        ttsize, seconds, tz, flags, numsuc, numpar, nummeta, prec = fixeddata
        # extract the number of parents information
        if numpar == _fm1parentnone:
            numpar = None
        # build the date tuple (upgrade tz minutes to seconds)
        date = (seconds, tz * 60)
        _fm1node = _fm1nodesha1
        fnodesize = _fm1nodesha1size
        if flags & usingsha256:
            _fm1node = _fm1nodesha256
            fnodesize = _fm1nodesha256size
        # read replacement
        sucs = ()
        if numsuc:
            s = (fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm1node * numsuc, cur)
            off += s
        # read parents
        if numpar is None:
            parents = None
        elif numpar == 0:
            parents = ()
        elif numpar: # neither None nor zero
            s = (fnodesize * numpar)
            cur = data[off:off + s]
            parents = _unpack(_fm1node * numpar, cur)
            off += s
        # read metadata
        metaformat = '>' + (_fm1metapair * nummeta)
        s = _fm1metapairsize * nummeta
        metapairsize = _unpack(metaformat, data[off:off + s])
        off += s
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            sk = metapairsize[idx]
            sv = metapairsize[idx + 1]
            key = data[off:off + sk]
            value = data[off + sk:off + sk + sv]
            assert len(key) == sk
            assert len(value) == sv
            metadata.append((key, value))
            off += sk + sv
        metadata = tuple(metadata)

        yield (prec, sucs, flags, metadata, date, parents)

def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)

# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}

@util.nogc
def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    off = 0
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion not in formats:
        raise util.Abort(_('parsing obsolete marker: unknown version %r')
                         % diskversion)
    return diskversion, formats[diskversion][0](data, off)

def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield _pack('>B', version)
    for marker in markers:
        yield encodeone(marker)


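# Illustrative sketch (not part of the upstream file): encodemarkers and
# _readmarkers round-trip a marker stream, version header included. The
# node id is a made-up placeholder:
#
#   >>> m = ('\x11' * 20, (), 0, (), (0.0, 0), None)
#   >>> blob = ''.join(encodemarkers([m], addheader=True,
#   ...                              version=_fm1version))
#   >>> version, read = _readmarkers(blob)
#   >>> version == _fm1version and list(read) == [m]
#   True
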
class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later versions
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the precursors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on precursors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec: nodeid, precursor changesets
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    #          None is used when no data has been recorded

    def __init__(self, sopener, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related computations
        self.caches = {}
        self._all = []
        self.precursors = {}
        self.successors = {}
        self.children = {}
        self.sopener = sopener
        data = sopener.tryread('obsstore')
        self._version = defaultformat
        self._readonly = readonly
        if data:
            self._version, markers = _readmarkers(data)
            self._load(markers)

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        return bool(self._all)

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating markers you want to use the
        `createmarkers` function in this module instead.

        Return True if a new marker has been added, False if the marker
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            else:
                date = util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Takes care of filtering out duplicates.
        Returns the number of new markers."""
        if self._readonly:
            raise util.Abort('creating obsolete markers is not enabled on this '
                             'repo')
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.sopener('obsstore', 'ab')
            try:
                # Whether the file's current position is at the beginning or
                # at the end after opening a file for appending is
                # implementation defined. So we must seek to the end before
                # calling tell(), or we may get a zero offset for non-zero
                # sized files on some platforms (issue3543).
                f.seek(0, _SEEK_END)
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in encodemarkers(new, offset == 0, self._version):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._load(new)
            # new markers *may* have changed several sets. invalidate the
            # cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @util.nogc
    def _load(self, markers):
        for mark in markers:
            self._all.append(mark)
            pre, sucs = mark[:2]
            self.successors.setdefault(pre, set()).add(mark)
            for suc in sucs:
                self.precursors.setdefault(suc, set()).add(mark)
            parents = mark[5]
            if parents is not None:
                for p in parents:
                    self.children.setdefault(p, set()).add(mark)
        if node.nullid in self.precursors:
            raise util.Abort(_('bad obsolescence marker detected: '
                               'invalid successors nullid'))

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use these changesets as successors
        - prune markers of direct children of these changesets
        - recursive application of the two rules on precursors of these markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers

def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None

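# Illustrative sketch (not part of the upstream file): with formats 0 and 1
# known locally, the highest version shared with the remote wins:
#
#   >>> commonversion([0])
#   0
#   >>> commonversion([2, 1, 0])
#   1
#   >>> commonversion([42]) is None
#   True
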
# arbitrarily picked to fit into the 8K limit from HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2 # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = base85.b85encode(data)
    return keys

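# Illustrative sketch (not part of the upstream file): a handful of small
# markers fits in a single chunk, so one 'dump0' key comes back:
#
#   >>> markers = [('\x11' * 20, (), 0, (), (0.0, 0), None)]
#   >>> sorted(_pushkeyescape(markers).keys())
#   ['dump0']
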
def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(repo.obsstore)

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()

def getmarkers(repo, nodes=None):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    if nodes is None:
        rawmarkers = repo.obsstore
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for markerdata in rawmarkers:
        yield marker(repo, markerdata)

def relevantmarkers(repo, node):
    """all obsolete markers relevant to some revision"""
    for markerdata in repo.obsstore.relevantmarkers(node):
        yield marker(repo, markerdata)


def precursormarkers(ctx):
    """obsolete marker marking this changeset as a successor"""
    for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def successormarkers(ctx):
    """obsolete marker making this changeset obsolete"""
    for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)

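# Illustrative sketch (not part of the upstream file): allsuccessors only
# reads obsstore.successors, so a stand-in object with that mapping is enough
# to watch the walk. Real node ids are 20-byte binaries; single letters are
# used here for readability. Assuming A was rewritten as B, then B as C:
#
#   >>> class fakestore(object):
#   ...     successors = {'A': set([('A', ('B',), 0, (), (0.0, 0), None)]),
#   ...                   'B': set([('B', ('C',), 0, (), (0.0, 0), None)])}
#   >>> sorted(allsuccessors(fakestore(), ['A']))
#   ['A', 'B', 'C']
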
def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursor of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""

    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)

def foreground(repo, nodes):
    """return all nodes in the "foreground" of other nodes

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that obsolescence cycles may arise in complex situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)


776 def successorssets(repo, initialnode, cache=None):
779 def successorssets(repo, initialnode, cache=None):
777 """Return all set of successors of initial nodes
780 """Return all set of successors of initial nodes
778
781
779 The successors set of a changeset A are a group of revisions that succeed
782 The successors set of a changeset A are a group of revisions that succeed
780 A. It succeeds A as a consistent whole, each revision being only a partial
783 A. It succeeds A as a consistent whole, each revision being only a partial
781 replacement. The successors set contains non-obsolete changesets only.
784 replacement. The successors set contains non-obsolete changesets only.
782
785
783 This function returns the full list of successor sets which is why it
786 This function returns the full list of successor sets which is why it
784 returns a list of tuples and not just a single tuple. Each tuple is a valid
787 returns a list of tuples and not just a single tuple. Each tuple is a valid
785 successors set. Not that (A,) may be a valid successors set for changeset A
788 successors set. Not that (A,) may be a valid successors set for changeset A
786 (see below).
789 (see below).
787
790
788 In most cases, a changeset A will have a single element (e.g. the changeset
791 In most cases, a changeset A will have a single element (e.g. the changeset
789 A is replaced by A') in its successors set. Though, it is also common for a
792 A is replaced by A') in its successors set. Though, it is also common for a
790 changeset A to have no elements in its successor set (e.g. the changeset
793 changeset A to have no elements in its successor set (e.g. the changeset
791 has been pruned). Therefore, the returned list of successors sets will be
794 has been pruned). Therefore, the returned list of successors sets will be
792 [(A',)] or [], respectively.
795 [(A',)] or [], respectively.
793
796
794 When a changeset A is split into A' and B', however, it will result in a
797 When a changeset A is split into A' and B', however, it will result in a
795 successors set containing more than a single element, i.e. [(A',B')].
798 successors set containing more than a single element, i.e. [(A',B')].
796 Divergent changesets will result in multiple successors sets, i.e. [(A',),
799 Divergent changesets will result in multiple successors sets, i.e. [(A',),
797 (A'')].
800 (A'')].
798
801
799 If a changeset A is not obsolete, then it will conceptually have no
802 If a changeset A is not obsolete, then it will conceptually have no
800 successors set. To distinguish this from a pruned changeset, the successor
803 successors set. To distinguish this from a pruned changeset, the successor
801 set will only contain itself, i.e. [(A,)].
804 set will only contain itself, i.e. [(A,)].
802
805
803 Finally, successors unknown locally are considered to be pruned (obsoleted
806 Finally, successors unknown locally are considered to be pruned (obsoleted
804 without any successors).
807 without any successors).
805
808
806 The optional `cache` parameter is a dictionary that may contain precomputed
809 The optional `cache` parameter is a dictionary that may contain precomputed
807 successors sets. It is meant to reuse the computation of a previous call to
810 successors sets. It is meant to reuse the computation of a previous call to
808 `successorssets` when multiple calls are made at the same time. The cache
811 `successorssets` when multiple calls are made at the same time. The cache
809 dictionary is updated in place. The caller is responsible for its live
812 dictionary is updated in place. The caller is responsible for its live
810 spawn. Code that makes multiple calls to `successorssets` *must* use this
813 spawn. Code that makes multiple calls to `successorssets` *must* use this
811 cache mechanism or suffer terrible performances.
814 cache mechanism or suffer terrible performances.
812
815
813 """
816 """
814
817
815 succmarkers = repo.obsstore.successors
818 succmarkers = repo.obsstore.successors
816
819
817 # Stack of nodes we search successors sets for
820 # Stack of nodes we search successors sets for
818 toproceed = [initialnode]
821 toproceed = [initialnode]
819 # set version of above list for fast loop detection
822 # set version of above list for fast loop detection
820 # element added to "toproceed" must be added here
823 # element added to "toproceed" must be added here
821 stackedset = set(toproceed)
824 stackedset = set(toproceed)
822 if cache is None:
825 if cache is None:
823 cache = {}
826 cache = {}
824
827
825 # This while loop is the flattened version of a recursive search for
828 # This while loop is the flattened version of a recursive search for
826 # successors sets
829 # successors sets
827 #
830 #
828 # def successorssets(x):
831 # def successorssets(x):
829 # successors = directsuccessors(x)
832 # successors = directsuccessors(x)
830 # ss = [[]]
833 # ss = [[]]
831 # for succ in directsuccessors(x):
834 # for succ in directsuccessors(x):
832 # # product as in itertools cartesian product
835 # # product as in itertools cartesian product
833 # ss = product(ss, successorssets(succ))
836 # ss = product(ss, successorssets(succ))
834 # return ss
837 # return ss
835 #
838 #
836 # But we can not use plain recursive calls here:
839 # But we can not use plain recursive calls here:
837 # - that would blow the python call stack
840 # - that would blow the python call stack
838 # - obsolescence markers may have cycles, we need to handle them.
841 # - obsolescence markers may have cycles, we need to handle them.
839 #
842 #
840 # The `toproceed` list act as our call stack. Every node we search
843 # The `toproceed` list act as our call stack. Every node we search
841 # successors set for are stacked there.
844 # successors set for are stacked there.
842 #
845 #
843 # The `stackedset` is set version of this stack used to check if a node is
846 # The `stackedset` is set version of this stack used to check if a node is
844 # already stacked. This check is used to detect cycles and prevent infinite
847 # already stacked. This check is used to detect cycles and prevent infinite
845 # loop.
848 # loop.
846 #
849 #
847 # successors set of all nodes are stored in the `cache` dictionary.
850 # successors set of all nodes are stored in the `cache` dictionary.
848 #
851 #
849 # After this while loop ends we use the cache to return the successors sets
852 # After this while loop ends we use the cache to return the successors sets
850 # for the node requested by the caller.
853 # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors set. Add it to the cache.
        # 3) We do not know the successors sets of some direct successors of
        #    CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know the successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT's successors sets and add them to the
        #       cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid last successor.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successor.
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successor of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrarily set the successors sets
            #     of the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes the successors sets of CURRENT (case 4); see
            # details in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means a pruned node, multiple successors mean a
            #   split, a single successor is a standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed the successors
                            # sets of one of those successors, we add it to
                            # the `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
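            # Python's `for ... else`: the `else` branch below runs only when
            # the loop above finished without `break`, i.e. when the
            # successors sets of every direct successor of CURRENT are
            # already in the cache.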
            else:
                # case (4): we know the successors sets of all direct
                # successors
                #
                # The successors set contributed by each marker depends on
                # the successors sets of all its "successors" nodes.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
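                #
                # A small worked example (hypothetical nodes): for a marker
                # (A, (B, C)) with cache[B] = [[B1], [B2]] (B is divergent)
                # and cache[C] = [[C]], the Cartesian product yields the two
                # successors sets [B1, C] and [B2, C] for A.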
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # cartesian product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicate entries in a successors
                                    # set; the first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset successors sets
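                # For example (hypothetical nodes), succssets [[A], [A, B]]
                # reduces to [[A, B]]: candidates are visited largest first,
                # so [A, B] is kept and [A], a subset of it, is dropped.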
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse() # put small successors sets first
                cache[current] = final
    return cache[initialnode]

def _knownrevs(repo, nodes):
    """yield revision numbers of known nodes passed in parameters

    Unknown revisions are silently ignored."""
    torev = repo.changelog.nodemap.get
    for n in nodes:
        rev = torev(n)
        if rev is not None:
            yield rev
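
# A minimal usage sketch for _knownrevs (`somenode` is a hypothetical node id
# present locally, `missingnode` one that is not): only known nodes yield
# revision numbers.
#
#     revs = list(_knownrevs(repo, [somenode, missingnode]))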

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return decorator
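
# A minimal registration sketch (the 'frobbed' set name is hypothetical; the
# registered function must accept a repo and return a set of revisions):
#
#     @cachefor('frobbed')
#     def _computefrobbedset(repo):
#         return set()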

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]
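
# For example, the set of obsolete revisions registered below can be fetched
# (and lazily computed) with:
#
#     obs = getrevs(repo, 'obsolete')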

# To keep things simple we need to invalidate obsolescence caches when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence-related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists on
    the repo.

    (We could be smarter here given the exact event that triggered the cache
    clearing)"""
    # only clear caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    obs = set()
    getrev = repo.changelog.nodemap.get
    getphase = repo._phasecache.phase
    for n in repo.obsstore.successors:
        rev = getrev(n)
        if rev is not None and getphase(repo, rev):
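            # getphase() returns phases.public (0) for public changesets, so
            # this truth test keeps only mutable (draft or secret) obsolete
            # revisions.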
            obs.add(rev)
    return obs

@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    # revset is not efficient enough here
    # we do (obsolete()::) - obsolete() by hand
    obs = getrevs(repo, 'obsolete')
    if not obs:
        return set()
    cl = repo.changelog
    return set(r for r in cl.descendants(obs) if r not in obs)

@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')


@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    obs = getrevs(repo, 'obsolete')
    for rev in repo:
        # We only evaluate mutable, non-obsolete revisions
        if (public < phase(repo, rev)) and (rev not in obs):
            node = cl.node(rev)
            # (future) A cache of precursors may be worth it if splits are
            # very common
            for pnode in allprecursors(repo.obsstore, [node],
                                       ignoreflags=bumpedfix):
                prev = torev(pnode) # unfiltered! but so is phasecache
                if (prev is not None) and (phase(repo, prev) <= public):
                    # we have a public precursor
                    bumped.add(rev)
                    break # Next draft!
    return bumped

@cachefor('divergent')
def _computedivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        while toprocess:
            prec = toprocess.pop()[0]
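            # each entry popped here is a marker tuple; its first element
            # is that marker's precursor node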
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent


def createmarkers(repo, relations, flag=0, date=None, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuples. `old` and `news` are changectx. metadata is an optional
    dictionary containing metadata for this marker only. It is merged with
    the global metadata specified through the `metadata` argument of this
    function.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    tr = repo.transaction('add-obsolescence-marker')
    try:
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise util.Abort("cannot obsolete immutable changeset: %s"
                                 % prec)
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise util.Abort("changeset %s cannot obsolete itself" % prec)
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata)
            repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()

def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    result = set(repo.ui.configlist('experimental', 'evolution'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't
    # been set but _enabled is true.
    if len(result) == 0 and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        createmarkersopt not in result):
        raise util.Abort(_("'createmarkers' obsolete option must be enabled "
                           "if other obsolete options are enabled"))

    return option in result
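
# A minimal configuration sketch: enabling marker creation in a repo's hgrc,
# then checking it from code (createmarkersopt, allowunstableopt and
# exchangeopt are the module-level option names referenced above):
#
#     [experimental]
#     evolution = createmarkers
#
#     if obsolete.isenabled(repo, obsolete.createmarkersopt):
#         ...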