obsstore: make the readonly attribute accessible...
Pierre-Yves David
r26684:74ff350c default
@@ -1,1256 +1,1263 @@
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "precursor" and possible
replacements are called "successors". Markers that use changeset X as
a precursor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "precursor markers of Y" because they hold
information about the precursors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

    (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. Marker formats depend on the
version. See the comment associated with each format for details.

"""
import errno, struct
import util, base85, node, parsers, error
import phases
from i18n import _

_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# Options for obsolescence
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'

### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it's a successor of a public changeset
#
#     o A' (bumped)
#     |`:
#     | o A
#     |/
#     o Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
#     o Ad
#     |`:
#     | x A'
#     |'|
#     o | A
#     |/
#     o Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker, <A', (Ad,)>.
# This flag means that the successors express the changes between the public
# and bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
usingsha256 = 2

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)
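
# An editor's sketch, not part of the original change: how one version-0
# fixed part packs and unpacks with the formats above.  The 20-byte node
# below is a dummy value used purely for illustration.
def _fm0fixedexample():
    prec = '\x11' * 20
    raw = _pack(_fm0fixed, 1, 0, 0, prec)  # numsuc, mdsize, flags, node
    assert len(raw) == _fm0fsize            # 1 + 4 + 1 + 20 = 26 bytes
    assert _unpack(_fm0fixed, raw) == (1, 0, 0, prec)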

def _fm0readmarkers(data, off):
    # Loop on markers
    l = len(data)
    while off + _fm0fsize <= l:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)

def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents):
            metadata['p%i' % (i + 1)] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata key")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata value")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            key, value = l.split(':')
            d[key] = value
    return d
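
# Editor's sketch, again not in the original file: the two helpers above
# are inverses for well-formed metadata (illustrative values only).
def _fm0metaexample():
    meta = {'user': 'alice', 'note': 'amended'}
    data = _fm0encodemeta(meta)  # 'note:amended\0user:alice'
    assert _fm0decodemeta(data) == meta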

## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: precursor changeset identifier.
#
# - N*(20 or 32) bytes: successor changeset identifiers.
#
# - P*(20 or 32) bytes: parents of the precursor changeset.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize('BB')
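
# An editor's sketch of the version-1 fixed part just described; the
# field values and the dummy node are assumptions for illustration.
def _fm1fixedexample():
    prec = '\x22' * 20
    raw = _pack(_fm1fixed, _fm1fsize, 0.0, 0, 0, 1, _fm1parentnone, 0, prec)
    size, secs, tz, flags, numsuc, numpar, nummeta, n = _unpack(_fm1fixed, raw)
    # one successor, "no parent data stored", no metadata entries
    assert (numsuc, numpar, nummeta) == (1, _fm1parentnone, 0)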

def _fm1purereadmarkers(data, off):
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    stop = len(data) - _fm1fsize
    ufixed = struct.Struct(_fm1fixed).unpack

    while off <= stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)

def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)
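
# Editor's sketch: a round trip through the two version-1 helpers above,
# with dummy nodes and no parent information (illustrative values only).
def _fm1roundtripexample():
    m = ('\x00' * 20, ('\x01' * 20,), 0, (('user', 'alice'),), (0.0, 0), None)
    assert list(_fm1purereadmarkers(_fm1encodeonemarker(m), 0)) == [m]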

def _fm1readmarkers(data, off):
    native = getattr(parsers, 'fm1readmarkers', None)
    if not native:
        return _fm1purereadmarkers(data, off)
    stop = len(data) - _fm1fsize
    return native(data, off, stop)

# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}

@util.nogc
def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    off = 0
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion not in formats:
        raise error.Abort(_('parsing obsolete marker: unknown version %r')
                          % diskversion)
    return diskversion, formats[diskversion][0](data, off)

def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield _pack('>B', version)
    for marker in markers:
        yield encodeone(marker)
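
# Editor's sketch: with the version header included, encodemarkers() and
# _readmarkers() are inverses (dummy prune marker, illustration only).
def _streamexample():
    m = ('\x00' * 20, (), 0, (), (0.0, 0), None)
    data = ''.join(encodemarkers([m], True, _fm1version))
    version, read = _readmarkers(data)
    assert version == _fm1version and list(read) == [m]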


class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in a later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the precursors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]

@util.nogc
def _addsuccessors(successors, markers):
    for mark in markers:
        successors.setdefault(mark[0], set()).add(mark)

@util.nogc
def _addprecursors(precursors, markers):
    for mark in markers:
        for suc in mark[1]:
            precursors.setdefault(suc, set()).add(mark)

@util.nogc
def _addchildren(children, markers):
    for mark in markers:
        parents = mark[5]
        if parents is not None:
            for p in parents:
                children.setdefault(p, set()).add(mark)

def _checkinvalidmarkers(markers):
    """search for markers with invalid data and raise an error if needed

    Exists as a separate function to allow the evolve extension a more
    subtle handling.
    """
    for mark in markers:
        if node.nullid in mark[1]:
            raise error.Abort(_('bad obsolescence marker detected: '
                                'invalid successors nullid'))

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x]   -> set(markers on precursors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec:    nodeid, precursor changesets
    # succs:   tuple of nodeid, successor changesets (0-N length)
    # flag:    integer, flag field carrying modifier for the markers (see doc)
    # meta:    binary blob, encoded metadata dictionary
    # date:    (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    #          None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related caches
        self.caches = {}
        self.svfs = svfs
        self._version = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        if not self._cached('_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
                pass
        return bool(self._all)

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

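    # Editor's note on the readonly property above (not part of the
    # original change): callers can now test the attribute instead of
    # reaching for the private field; a hypothetical caller might do:
    #
    #   if repo.obsstore.readonly:
    #       raise error.Abort(_('obsolescence markers are read-only here'))
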
    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None):
        """obsolete: add a new obsolete marker

        * ensure it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating markers you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker has been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            else:
                date = util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicates.
        Return the number of new markers."""
        if self._readonly:
            raise error.Abort('creating obsolete markers is not enabled on '
                              'this repo')
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in encodemarkers(new, offset == 0, self._version):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._addmarkers(new)
            # new markers *may* have changed several sets. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _all(self):
        data = self.svfs.tryread('obsstore')
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def precursors(self):
        precursors = {}
        _addprecursors(precursors, self._all)
        return precursors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        return attr in self.__dict__

    def _addmarkers(self, markers):
        markers = list(markers) # to allow repeated iteration
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('precursors'):
            _addprecursors(self.precursors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use this changeset as a successor
        - prune markers of direct children of this changeset
        - recursive application of the two rules on precursors of these markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers

def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None
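
# Editor's illustration (not in the original file): with local formats
# {0, 1}, the highest shared version wins and unknown versions are skipped.
def _commonversionexample():
    assert commonversion([0, 1, 2]) == 1  # 2 is unknown locally
    assert commonversion([2]) is None     # nothing in common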

# arbitrarily picked to fit into the 8K limit from HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = base85.b85encode(data)
    return keys
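
# Editor's sketch: each 'dump%i' key decodes back to a version-0 marker
# stream (one dummy prune marker here, for illustration only).
def _pushkeyexample():
    m = ('\x00' * 20, (), 0, (), (0.0, 0), None)
    keys = _pushkeyescape([m])
    data = base85.b85decode(keys['dump0'])
    assert _readmarkers(data)[0] == _fm0version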

def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(sorted(repo.obsstore))

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()

def getmarkers(repo, nodes=None):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    if nodes is None:
        rawmarkers = repo.obsstore
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for markerdata in rawmarkers:
        yield marker(repo, markerdata)

def relevantmarkers(repo, node):
    """all obsolete markers relevant to some revision"""
    for markerdata in repo.obsstore.relevantmarkers(node):
        yield marker(repo, markerdata)


def precursormarkers(ctx):
    """obsolete marker marking this changeset as a successor"""
    for data in ctx.repo().obsstore.precursors.get(ctx.node(), ()):
        yield marker(ctx.repo(), data)

def successormarkers(ctx):
    """obsolete marker making this changeset obsolete"""
    for data in ctx.repo().obsstore.successors.get(ctx.node(), ()):
        yield marker(ctx.repo(), data)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)
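
# Editor's sketch: allsuccessors() only needs an object exposing a
# 'successors' mapping, so a tiny hypothetical stand-in shows the walk
# (A rewritten as B, then B as C; single-letter "nodes" for brevity).
def _allsuccessorsexample():
    class fakestore(object):
        successors = {'A': set([('A', ('B',), 0, (), (0.0, 0), None)]),
                      'B': set([('B', ('C',), 0, (), (0.0, 0), None)])}
    assert set(allsuccessors(fakestore(), ['A'])) == set('ABC')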
807
814
808 def allprecursors(obsstore, nodes, ignoreflags=0):
815 def allprecursors(obsstore, nodes, ignoreflags=0):
809 """Yield node for every precursors of <nodes>.
816 """Yield node for every precursors of <nodes>.
810
817
811 Some precursors may be unknown locally.
818 Some precursors may be unknown locally.
812
819
813 This is a linear yield unsuited to detecting folded changesets. It includes
820 This is a linear yield unsuited to detecting folded changesets. It includes
814 initial nodes too."""
821 initial nodes too."""
815
822
816 remaining = set(nodes)
823 remaining = set(nodes)
817 seen = set(remaining)
824 seen = set(remaining)
818 while remaining:
825 while remaining:
819 current = remaining.pop()
826 current = remaining.pop()
820 yield current
827 yield current
821 for mark in obsstore.precursors.get(current, ()):
828 for mark in obsstore.precursors.get(current, ()):
822 # ignore marker flagged with specified flag
829 # ignore marker flagged with specified flag
823 if mark[2] & ignoreflags:
830 if mark[2] & ignoreflags:
824 continue
831 continue
825 suc = mark[0]
832 suc = mark[0]
826 if suc not in seen:
833 if suc not in seen:
827 seen.add(suc)
834 seen.add(suc)
828 remaining.add(suc)
835 remaining.add(suc)
829
836
830 def foreground(repo, nodes):
837 def foreground(repo, nodes):
831 """return all nodes in the "foreground" of other node
838 """return all nodes in the "foreground" of other node
832
839
833 The foreground of a revision is anything reachable using parent -> children
840 The foreground of a revision is anything reachable using parent -> children
834 or precursor -> successor relation. It is very similar to "descendant" but
841 or precursor -> successor relation. It is very similar to "descendant" but
835 augmented with obsolescence information.
842 augmented with obsolescence information.
836
843
837 Beware that possible obsolescence cycle may result if complex situation.
844 Beware that possible obsolescence cycle may result if complex situation.
838 """
845 """
839 repo = repo.unfiltered()
846 repo = repo.unfiltered()
840 foreground = set(repo.set('%ln::', nodes))
847 foreground = set(repo.set('%ln::', nodes))
841 if repo.obsstore:
848 if repo.obsstore:
842 # We only need this complicated logic if there is obsolescence
849 # We only need this complicated logic if there is obsolescence
843 # XXX will probably deserve an optimised revset.
850 # XXX will probably deserve an optimised revset.
844 nm = repo.changelog.nodemap
851 nm = repo.changelog.nodemap
845 plen = -1
852 plen = -1
846 # compute the whole set of successors or descendants
853 # compute the whole set of successors or descendants
847 while len(foreground) != plen:
854 while len(foreground) != plen:
848 plen = len(foreground)
855 plen = len(foreground)
849 succs = set(c.node() for c in foreground)
856 succs = set(c.node() for c in foreground)
850 mutable = [c.node() for c in foreground if c.mutable()]
857 mutable = [c.node() for c in foreground if c.mutable()]
851 succs.update(allsuccessors(repo.obsstore, mutable))
858 succs.update(allsuccessors(repo.obsstore, mutable))
852 known = (n for n in succs if n in nm)
859 known = (n for n in succs if n in nm)
853 foreground = set(repo.set('%ln::', known))
860 foreground = set(repo.set('%ln::', known))
854 return set(c.node() for c in foreground)
861 return set(c.node() for c in foreground)
855
862
856
863
857 def successorssets(repo, initialnode, cache=None):
864 def successorssets(repo, initialnode, cache=None):
858 """Return set of all latest successors of initial nodes
865 """Return set of all latest successors of initial nodes
859
866
    The successors set of a changeset A is the group of revisions that
    succeed A as a consistent whole, each revision being only a partial
    replacement. The successors set contains non-obsolete changesets only.

    This function returns the full list of successors sets, which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a
    valid successors set. Note that (A,) may be a valid successors set for
    changeset A (see below).

    In most cases, a changeset A will have a single element (e.g. the
    changeset A is replaced by A') in its successors set. Though, it is also
    common for a changeset A to have no elements in its successors set (e.g.
    the changeset has been pruned). Therefore, the returned list of
    successors sets will be [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e.
    [(A',), (A'',)].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the
    successors set will contain itself only, i.e. [(A,)].

    Finally, successors unknown locally are considered to be pruned
    (obsoleted without any successors).

    The optional `cache` parameter is a dictionary that may contain
    precomputed successors sets. It is meant to reuse the computation of a
    previous call to `successorssets` when multiple calls are made at the
    same time. The cache dictionary is updated in place. The caller is
    responsible for its life span. Code that makes multiple calls to
    `successorssets` *must* use this cache mechanism or suffer terrible
    performance.
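
    For illustration (hypothetical changesets, not a doctest; actual results
    depend on the markers present in the repository):

        cache = {}
        successorssets(repo, A, cache)  # A rewritten as A'  -> [(A',)]
        successorssets(repo, B, cache)  # B pruned           -> []
        successorssets(repo, C, cache)  # C split in C1, C2  -> [(C1, C2)]

    Reusing `cache` across the three calls avoids recomputing successors
    sets shared between the queried nodes.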
893 """
900 """
894
901
895 succmarkers = repo.obsstore.successors
902 succmarkers = repo.obsstore.successors
896
903
897 # Stack of nodes we search successors sets for
904 # Stack of nodes we search successors sets for
898 toproceed = [initialnode]
905 toproceed = [initialnode]
899 # set version of above list for fast loop detection
906 # set version of above list for fast loop detection
900 # element added to "toproceed" must be added here
907 # element added to "toproceed" must be added here
901 stackedset = set(toproceed)
908 stackedset = set(toproceed)
902 if cache is None:
909 if cache is None:
903 cache = {}
910 cache = {}
904
911
    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in successors:
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list acts as our call stack. Every node we search
    # successors sets for is stacked there.
    #
    # The `stackedset` is the set version of this stack, used to check if a
    # node is already stacked. This check is used to detect cycles and
    # prevent infinite loops.
    #
    # The successors sets of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors
    # sets for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the
        # topmost node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors set. Add it to the cache.
        # 3) We do not know the successors sets of the direct successors of
        #    CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know the successors sets of all direct successors of CURRENT:
        #    -> We can compute the CURRENT successors sets and add them to
        #       the cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid last successor.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successor.
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            # For each direct successor of CURRENT, we check whether its
            # successors sets are known. If they are not, we stack the
            # unknown node and proceed to the next iteration of the while
            # loop. (case 3)
            #
            # During this step, we may detect obsolescence cycles: a node
            # with unknown successors sets but already in the call stack.
            # In such a situation, we arbitrarily set the successors sets of
            # the node to nothing (node pruned) to break the cycle.
            #
            # If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes the successors sets of CURRENT (case 4); see
            # details in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means a pruned node; multiple successors mean a
            #   split; a single successor is a standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors
                            # sets of one of those successors, we add it to
                            # the `toproceed` stack and stop all work for
                            # this iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know the successors sets of all direct
                # successors
                #
                # The successors set contributed by each marker depends on
                # the successors sets of all its "successors" nodes.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from
                # other markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # cartesian product with previous successors
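                        # Sketch of one product step (hypothetical values):
                        # with markss == [[X]] and cache[suc] == [(Y,), (Z,)],
                        # the loop below yields productresult == [[X, Y],
                        # [X, Z]].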
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicate entries in a
                                    # successors set; the first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated entries and successors sets that are
                # subsets of others
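                # For example (hypothetical), if succssets contains both
                # [A'] and [A', B'], the smaller one is a strict subset of
                # the larger and is dropped by the loop below.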
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse() # put small successors sets first
                cache[current] = final
    return cache[initialnode]

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return decorator

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]
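
# Usage sketch for the cache registry above (illustrative only; the
# concrete set names registered below are 'obsolete', 'unstable',
# 'suspended', 'extinct', 'bumped' and 'divergent'):
#
#     obs = getrevs(repo, 'obsolete')  # computed once, then memoized
#     obs = getrevs(repo, 'obsolete')  # second call hits obsstore.caches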

# To keep things simple we need to invalidate the obsolescence cache when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence-related caches from a repo

    This removes all caches from the obsstore if the obsstore already
    exists on the repo.

    (We could be smarter here given the exact event that triggered the
    cache clearing)"""
    # only clear the cache if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    obs = set()
    getrev = repo.changelog.nodemap.get
    getphase = repo._phasecache.phase
    for n in repo.obsstore.successors:
        rev = getrev(n)
        if rev is not None and getphase(repo, rev):
            obs.add(rev)
    return obs

@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non-obsolete revisions with obsolete parents"""
    revs = [(ctx.rev(), ctx) for ctx in
            repo.set('(not public()) and (not obsolete())')]
    revs.sort(key=lambda x: x[0])
    unstable = set()
    for rev, ctx in revs:
        # A rev is unstable if one of its parents is obsolete or unstable;
        # this works since we traverse in growing rev order
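        # e.g. (hypothetical) with obsolete A and children A <- B <- C:
        # B is unstable (obsolete parent), then C is unstable too
        # (unstable parent), because B was processed first.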
        if any((x.obsolete() or (x.rev() in unstable))
               for x in ctx.parents()):
            unstable.add(rev)
    return unstable

@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete revisions with non-obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete revisions without non-obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')


@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    for ctx in repo.set('(not public()) and (not obsolete())'):
        rev = ctx.rev()
        # We only evaluate mutable, non-obsolete revisions
        node = ctx.node()
        # (future) A cache of precursors may be worth it if splits are very
        # common
        for pnode in allprecursors(repo.obsstore, [node],
                                   ignoreflags=bumpedfix):
            prev = torev(pnode) # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public precursor
                bumped.add(rev)
                break # Next draft!
    return bumped

@cachefor('divergent')
def _computedivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent


def createmarkers(repo, relations, flag=0, date=None, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuples. `old` and `news` are changectx objects. metadata is an optional
    dictionary containing metadata for this marker only. It is merged with
    the global metadata specified through the `metadata` argument of this
    function.

    Trying to obsolete a public changeset will raise an exception.

    The current user and date are used except if specified otherwise in the
    metadata argument.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
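
    Sketch of typical calls (assuming `old`, `new1` and `new2` are
    changectx objects; the 'note' metadata key is only illustrative):

        createmarkers(repo, [(old, (new1,))])            # plain rewrite
        createmarkers(repo, [(old, ())])                 # prune
        createmarkers(repo, [(old, (new1, new2),
                              {'note': 'split'})])       # with metadata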
1204 """
1211 """
1205 # prepare metadata
1212 # prepare metadata
1206 if metadata is None:
1213 if metadata is None:
1207 metadata = {}
1214 metadata = {}
1208 if 'user' not in metadata:
1215 if 'user' not in metadata:
1209 metadata['user'] = repo.ui.username()
1216 metadata['user'] = repo.ui.username()
1210 tr = repo.transaction('add-obsolescence-marker')
1217 tr = repo.transaction('add-obsolescence-marker')
1211 try:
1218 try:
1212 for rel in relations:
1219 for rel in relations:
1213 prec = rel[0]
1220 prec = rel[0]
1214 sucs = rel[1]
1221 sucs = rel[1]
1215 localmetadata = metadata.copy()
1222 localmetadata = metadata.copy()
1216 if 2 < len(rel):
1223 if 2 < len(rel):
1217 localmetadata.update(rel[2])
1224 localmetadata.update(rel[2])
1218
1225
1219 if not prec.mutable():
1226 if not prec.mutable():
1220 raise error.Abort("cannot obsolete public changeset: %s"
1227 raise error.Abort("cannot obsolete public changeset: %s"
1221 % prec,
1228 % prec,
1222 hint='see "hg help phases" for details')
1229 hint='see "hg help phases" for details')
1223 nprec = prec.node()
1230 nprec = prec.node()
1224 nsucs = tuple(s.node() for s in sucs)
1231 nsucs = tuple(s.node() for s in sucs)
1225 npare = None
1232 npare = None
1226 if not nsucs:
1233 if not nsucs:
1227 npare = tuple(p.node() for p in prec.parents())
1234 npare = tuple(p.node() for p in prec.parents())
1228 if nprec in nsucs:
1235 if nprec in nsucs:
1229 raise error.Abort("changeset %s cannot obsolete itself" % prec)
1236 raise error.Abort("changeset %s cannot obsolete itself" % prec)
1230 repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
1237 repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
1231 date=date, metadata=localmetadata)
1238 date=date, metadata=localmetadata)
1232 repo.filteredrevcache.clear()
1239 repo.filteredrevcache.clear()
1233 tr.close()
1240 tr.close()
1234 finally:
1241 finally:
1235 tr.release()
1242 tr.release()

def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
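
    For instance (sketch based on the option names defined elsewhere in
    this module), with 'experimental.evolution = createmarkers' in the
    configuration, 'isenabled(repo, createmarkersopt)' would return True
    while 'isenabled(repo, exchangeopt)' would return False.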
1240 """
1247 """
1241 result = set(repo.ui.configlist('experimental', 'evolution'))
1248 result = set(repo.ui.configlist('experimental', 'evolution'))
1242 if 'all' in result:
1249 if 'all' in result:
1243 return True
1250 return True
1244
1251
1245 # For migration purposes, temporarily return true if the config hasn't been
1252 # For migration purposes, temporarily return true if the config hasn't been
1246 # set but _enabled is true.
1253 # set but _enabled is true.
1247 if len(result) == 0 and _enabled:
1254 if len(result) == 0 and _enabled:
1248 return True
1255 return True
1249
1256
1250 # createmarkers must be enabled if other options are enabled
1257 # createmarkers must be enabled if other options are enabled
1251 if ((allowunstableopt in result or exchangeopt in result) and
1258 if ((allowunstableopt in result or exchangeopt in result) and
1252 not createmarkersopt in result):
1259 not createmarkersopt in result):
1253 raise error.Abort(_("'createmarkers' obsolete option must be enabled "
1260 raise error.Abort(_("'createmarkers' obsolete option must be enabled "
1254 "if other obsolete options are enabled"))
1261 "if other obsolete options are enabled"))
1255
1262
1256 return option in result
1263 return option in result