##// END OF EJS Templates
effectflag: store an empty effect flag for the moment...
Boris Feld -
r34414:014d467f default
parent child Browse files
Show More
@@ -1,1083 +1,1095
1 # obsolete.py - obsolete markers handling
1 # obsolete.py - obsolete markers handling
2 #
2 #
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 # Logilab SA <contact@logilab.fr>
4 # Logilab SA <contact@logilab.fr>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 """Obsolete marker handling
9 """Obsolete marker handling
10
10
11 An obsolete marker maps an old changeset to a list of new
11 An obsolete marker maps an old changeset to a list of new
12 changesets. If the list of new changesets is empty, the old changeset
12 changesets. If the list of new changesets is empty, the old changeset
13 is said to be "killed". Otherwise, the old changeset is being
13 is said to be "killed". Otherwise, the old changeset is being
14 "replaced" by the new changesets.
14 "replaced" by the new changesets.
15
15
16 Obsolete markers can be used to record and distribute changeset graph
16 Obsolete markers can be used to record and distribute changeset graph
17 transformations performed by history rewrite operations, and help
17 transformations performed by history rewrite operations, and help
18 building new tools to reconcile conflicting rewrite actions. To
18 building new tools to reconcile conflicting rewrite actions. To
19 facilitate conflict resolution, markers include various annotations
19 facilitate conflict resolution, markers include various annotations
20 besides old and new changeset identifiers, such as creation date or
20 besides old and new changeset identifiers, such as creation date or
21 author name.
21 author name.
22
22
23 The old obsoleted changeset is called a "predecessor" and possible
23 The old obsoleted changeset is called a "predecessor" and possible
24 replacements are called "successors". Markers that used changeset X as
24 replacements are called "successors". Markers that used changeset X as
25 a predecessor are called "successor markers of X" because they hold
25 a predecessor are called "successor markers of X" because they hold
26 information about the successors of X. Markers that use changeset Y as
26 information about the successors of X. Markers that use changeset Y as
27 a successor are called "predecessor markers of Y" because they hold
27 a successor are called "predecessor markers of Y" because they hold
28 information about the predecessors of Y.
28 information about the predecessors of Y.
29
29
30 Examples:
30 Examples:
31
31
32 - When changeset A is replaced by changeset A', one marker is stored:
32 - When changeset A is replaced by changeset A', one marker is stored:
33
33
34 (A, (A',))
34 (A, (A',))
35
35
36 - When changesets A and B are folded into a new changeset C, two markers are
36 - When changesets A and B are folded into a new changeset C, two markers are
37 stored:
37 stored:
38
38
39 (A, (C,)) and (B, (C,))
39 (A, (C,)) and (B, (C,))
40
40
41 - When changeset A is simply "pruned" from the graph, a marker is created:
41 - When changeset A is simply "pruned" from the graph, a marker is created:
42
42
43 (A, ())
43 (A, ())
44
44
45 - When changeset A is split into B and C, a single marker is used:
45 - When changeset A is split into B and C, a single marker is used:
46
46
47 (A, (B, C))
47 (A, (B, C))
48
48
49 We use a single marker to distinguish the "split" case from the "divergence"
49 We use a single marker to distinguish the "split" case from the "divergence"
50 case. If two independent operations rewrite the same changeset A in to A' and
50 case. If two independent operations rewrite the same changeset A in to A' and
51 A'', we have an error case: divergent rewriting. We can detect it because
51 A'', we have an error case: divergent rewriting. We can detect it because
52 two markers will be created independently:
52 two markers will be created independently:
53
53
54 (A, (B,)) and (A, (C,))
54 (A, (B,)) and (A, (C,))
55
55
56 Format
56 Format
57 ------
57 ------
58
58
59 Markers are stored in an append-only file stored in
59 Markers are stored in an append-only file stored in
60 '.hg/store/obsstore'.
60 '.hg/store/obsstore'.
61
61
62 The file starts with a version header:
62 The file starts with a version header:
63
63
64 - 1 unsigned byte: version number, starting at zero.
64 - 1 unsigned byte: version number, starting at zero.
65
65
66 The header is followed by the markers. Marker format depends on the version. See
66 The header is followed by the markers. Marker format depends on the version. See
67 comment associated with each format for details.
67 comment associated with each format for details.
68
68
69 """
69 """
70 from __future__ import absolute_import
70 from __future__ import absolute_import
71
71
72 import errno
72 import errno
73 import struct
73 import struct
74
74
75 from .i18n import _
75 from .i18n import _
76 from . import (
76 from . import (
77 error,
77 error,
78 node,
78 node,
79 obsutil,
79 obsutil,
80 phases,
80 phases,
81 policy,
81 policy,
82 util,
82 util,
83 )
83 )
84
84
parsers = policy.importmod(r'parsers')

# short aliases: these are called from the tight marker encode/decode loops
_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# Options for obsolescence
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'
def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.

    Raises error.Abort when dependent options ('allowunstable', 'exchange')
    are enabled without 'createmarkers'.
    """
    result = set(repo.ui.configlist('experimental', 'stabilization'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't been
    # set but _enabled is true.
    if not result and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        createmarkersopt not in result):
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return option in result
121
121
### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds to a changeset A which became public, we call A'
# "bumped" because it's a successor of a public changeset
#
#     o    A' (bumped)
#     |`:
#     | o  A
#     |/
#     o    Z
#
# The way to solve this situation is to create a new changeset Ad as children
# of A. This changeset have the same content than A'. So the diff from A to A'
# is the same than the diff from A to Ad. Ad is marked as a successors of A'
#
#     o   Ad
#     |`:
#     | x A'
#     |'|
#     o | A
#     |/
#     o Z
#
# But by transitivity Ad is also a successors of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
# This flag mean that the successors express the changes between the public and
# bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
# marker uses 32-byte (sha256) node identifiers instead of 20-byte sha1
usingsha256 = 2
154
154
## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
# fixed part: numsuc (uint8), metadata size (uint32), flags (uint8), prec node
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)
180
180
def _fm0readmarkers(data, off, stop):
    """Decode version-0 markers from ``data[off:stop]``.

    Yields ``(prec, sucs, flags, metadata, date, parents)`` tuples, one per
    marker (see ``obsstore.fields`` for the meaning of each entry).
    """
    # Loop on markers
    while off < stop:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            # fm0 has no dedicated date field: it travels inside the
            # metadata as a "seconds offset" string
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            # 'p0' marks an explicitly recorded empty set of parents
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)
231
231
def _fm0encodeonemarker(marker):
    """Return the version-0 binary encoding of one marker tuple.

    Aborts when the marker carries the ``usingsha256`` flag: fm0 only has
    room for 20-byte (sha1) node identifiers.
    """
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    # fm0 has no dedicated date field: smuggle the date through the metadata
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents, 1):
            metadata['p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata
251
251
252 def _fm0encodemeta(meta):
252 def _fm0encodemeta(meta):
253 """Return encoded metadata string to string mapping.
253 """Return encoded metadata string to string mapping.
254
254
255 Assume no ':' in key and no '\0' in both key and value."""
255 Assume no ':' in key and no '\0' in both key and value."""
256 for key, value in meta.iteritems():
256 for key, value in meta.iteritems():
257 if ':' in key or '\0' in key:
257 if ':' in key or '\0' in key:
258 raise ValueError("':' and '\0' are forbidden in metadata key'")
258 raise ValueError("':' and '\0' are forbidden in metadata key'")
259 if '\0' in value:
259 if '\0' in value:
260 raise ValueError("':' is forbidden in metadata value'")
260 raise ValueError("':' is forbidden in metadata value'")
261 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
261 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
262
262
263 def _fm0decodemeta(data):
263 def _fm0decodemeta(data):
264 """Return string to string dictionary from encoded version."""
264 """Return string to string dictionary from encoded version."""
265 d = {}
265 d = {}
266 for l in data.split('\0'):
266 for l in data.split('\0'):
267 if l:
267 if l:
268 key, value = l.split(':')
268 key, value = l.split(':')
269 d[key] = value
269 d[key] = value
270 return d
270 return d
271
271
## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: predecessor changeset identifier.
#
# - N*(20 or 32) bytes: successors changesets identifiers.
#
# - P*(20 or 32) bytes: parents of the predecessors changesets.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
# fixed part: size, date, tz, flags, numsuc, numpar, nummeta, prec node
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
# parent-count sentinel meaning "no parent data stored" (see table above)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize(_fm1metapair)
318
318
def _fm1purereadmarkers(data, off, stop):
    """Decode version-1 markers from ``data[off:stop]`` (pure python).

    Fallback used when the compiled parser is unavailable; yields the same
    ``(prec, sucs, flags, metadata, date, parents)`` tuples.
    """
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    ufixed = struct.Struct(_fm1fixed).unpack

    while off < stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata
        # the (key size, value size) pairs come first, then the raw bytes
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        # the on-disk tz field is in minutes, the in-memory one in seconds
        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
392
392
def _fm1encodeonemarker(marker):
    """Return the version-1 binary encoding of one marker tuple.

    Raises error.ProgrammingError when a metadata key or value exceeds 255
    bytes (their sizes are stored as uint8 on disk).
    """
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    # data[0] is a placeholder for the total size, filled in below
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        # metadata sizes are stored as uint8, hence the 255-byte ceiling
        if lk > 255:
            msg = ('obsstore metadata key cannot be longer than 255 bytes'
                   ' (key "%s" is %u bytes)') % (key, lk)
            raise error.ProgrammingError(msg)
        if lv > 255:
            msg = ('obsstore metadata value cannot be longer than 255 bytes'
                   ' (value "%s" for key "%s" is %u bytes)') % (value, key, lv)
            raise error.ProgrammingError(msg)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)
436
436
def _fm1readmarkers(data, off, stop):
    """Decode version-1 markers from ``data[off:stop]``.

    Prefers the compiled parser when available, falling back to the pure
    python implementation otherwise.
    """
    reader = getattr(parsers, 'fm1readmarkers', None)
    if reader:
        return reader(data, off, stop)
    # no compiled parser available: use the slow pure-python path
    return _fm1purereadmarkers(data, off, stop)
442
442
# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
#
# decoder: callable(data, off, stop) yielding marker tuples
# encoder: callable(marker) returning the binary encoding of one marker
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
447
447
def _readmarkerversion(data):
    """Return the on-disk format version stored in ``data``'s first byte."""
    (diskversion,) = _unpack('>B', data[0:1])
    return diskversion
450
450
@util.nogc
def _readmarkers(data, off=None, stop=None):
    """Read and enumerate markers from raw data

    Returns a ``(version, markers)`` pair where ``markers`` iterates over
    the decoded marker tuples. Raises error.UnknownVersion for versions
    this code cannot parse.
    """
    diskversion = _readmarkerversion(data)
    if diskversion not in formats:
        msg = _('parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    if not off:
        off = 1  # skip 1 byte version number
    if stop is None:
        stop = len(data)
    decoder = formats[diskversion][0]
    return diskversion, decoder(data, off, stop)
463
463
def encodeheader(version=_fm0version):
    """Return the one-byte obsstore file header encoding ``version``."""
    header = _pack('>B', version)
    return header
466
466
def encodemarkers(markers, addheader=False, version=_fm0version):
    """Yield the binary encoding of each marker, optionally preceded by a
    version header.

    Kept separate from flushmarkers(), it will be reused for
    markers exchange.
    """
    # resolve the encoder up front so an unknown version fails immediately
    encode = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for m in markers:
        yield encode(m)
475
475
@util.nogc
def _addsuccessors(successors, markers):
    """Index ``markers`` into the ``successors`` mapping, keyed by the
    predecessor node of each marker."""
    for m in markers:
        successors.setdefault(m[0], set()).add(m)
480
480
def _addprecursors(*args, **kwargs):
    """Deprecated alias kept for compatibility; forwards to
    ``_addpredecessors``."""
    util.nouideprecwarn("'obsolete._addprecursors' is deprecated, "
                        "use 'obsolete._addpredecessors'", '4.4')
    return _addpredecessors(*args, **kwargs)
487
487
@util.nogc
def _addpredecessors(predecessors, markers):
    """Index ``markers`` into the ``predecessors`` mapping, keyed by each
    successor node of each marker."""
    for m in markers:
        for successor in m[1]:
            predecessors.setdefault(successor, set()).add(m)
493
493
@util.nogc
def _addchildren(children, markers):
    """Index ``markers`` into the ``children`` mapping, keyed by each
    recorded parent of the predecessor (markers without parent data are
    skipped)."""
    for m in markers:
        recordedparents = m[5]
        if recordedparents is None:
            continue
        for p in recordedparents:
            children.setdefault(p, set()).add(m)
501
501
def _checkinvalidmarkers(markers):
    """Search ``markers`` for invalid data and abort if any is found.

    Kept as a separate function so the evolve extension can substitute a
    more subtle handling.
    """
    nullid = node.nullid
    for m in markers:
        if nullid not in m[1]:
            continue
        raise error.Abort(_('bad obsolescence marker detected: '
                            'invalid successors nullid'))
512
512
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with two mappings:
    - predecessors[x] -> set(markers on predecessors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on predecessors edges of children(x)
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec: nodeid, predecessors changesets
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of predecessors
    # None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related cache
        self.caches = {}
        self.svfs = svfs
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        # iterate over every marker known to the store
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        # answer "does the store hold at least one marker?" cheaply: when
        # the markers have not been loaded yet, a single stat() suffices
        if not self._cached('_all'):
            try:
                # NOTE(review): "> 1" presumably means "more than just the
                # version header" -- confirm against the format encoders
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating marker you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker have been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = util.makedate()
            else:
                date = util.makedate()
        # nodeids are always 20 raw bytes
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        # sorted tuple so that equal metadata dicts hash/compare equal
        metadata = tuple(sorted(metadata.iteritems()))

        marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicate.
        Return the number of new marker."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            # skip markers already on disk and duplicates within this batch
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                data = b''.join(encodemarkers(new, offset == 0, self._version))
                f.write(data)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            addedmarkers = transaction.changes.get('obsmarkers')
            if addedmarkers is not None:
                addedmarkers.update(new)
            self._addmarkers(new, data)
            # new marker *may* have changed several set. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        # raw bytes of the on-disk obsstore (empty string when missing)
        return self.svfs.tryread('obsstore')

    @propertycache
    def _version(self):
        # on-disk format version; fall back to the configured default
        # when the store is empty
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        # list of every marker, decoded from disk on first access
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        # predecessor node -> set of markers rewriting it
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @property
    def precursors(self):
        # deprecated spelling kept for compatibility; use 'predecessors'
        msg = ("'obsstore.precursors' is deprecated, "
               "use 'obsstore.predecessors'")
        util.nouideprecwarn(msg, '4.4')

        return self.predecessors

    @propertycache
    def predecessors(self):
        # successor node -> set of markers producing it
        predecessors = {}
        _addpredecessors(predecessors, self._all)
        return predecessors

    @propertycache
    def children(self):
        # parent node -> set of markers whose predecessor is a child of it
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        """Return True if the propertycache ``attr`` has been computed."""
        return attr in self.__dict__

    def _addmarkers(self, markers, rawdata):
        """Incrementally update the in-memory state with new markers.

        Only the index mappings that have already been computed are
        updated; uncomputed ones will include the new markers when first
        built from ``_all``.
        """
        markers = list(markers) # to allow repeated iteration
        self._data = self._data + rawdata
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('predecessors'):
            _addpredecessors(self.predecessors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes mean:

        - marker that use this changeset as successor
        - prune marker of direct children on this changeset
        - recursive application of the two rules on predecessors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.predecessors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                # prune markers have an empty successor tuple (mark[1])
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            # walk up: predecessors of the markers found become pending
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers
742
742
def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    kwargs = {}
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    defaultformat = ui.configint('format', 'obsstore-version')
    # rely on the obsstore class default unless the config overrides it
    if defaultformat is not None:
        kwargs['defaultformat'] = defaultformat
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **kwargs)
    if store and readonly:
        # markers exist on disk but the feature is off: warn loudly
        ui.warn(_('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
    return store
758
758
def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    # sort in place, highest first, so the first shared entry is the newest
    versions.sort(reverse=True)
    return next((v for v in versions if v in formats), None)
770
770
# Arbitrarily picked to fit into the 8K limit enforced by some HTTP servers;
# you have to take into account:
# - the version header
# - the base85 encoding overhead
_maxpayload = 5300
776
776
def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    # start above the limit so the first marker always opens a new part
    # (this also guarantees 'currentpart' is bound before first use)
    currentlen = _maxpayload * 2 # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    # each part becomes one 'dumpN' key, prefixed with the format version;
    # NOTE(review): parts are numbered in reverse creation order -- confirm
    # whether consumers rely on that ordering
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(data)
    return keys
797
797
def listmarkers(repo):
    """List markers over pushkey"""
    store = repo.obsstore
    if not store:
        return {}
    return _pushkeyescape(sorted(store))
803
803
def pushmarker(repo, key, old, new):
    """Push markers over pushkey

    ``key`` must look like 'dumpN' (as produced by _pushkeyescape) and
    ``new`` holds the base85-encoded binary marker payload.  ``old`` must
    be empty -- presumably because marker exchange only ever appends and
    never replaces a previous value (confirm against pushkey protocol).
    Returns True on success, False when the input is rejected.
    """
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return False
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return False
    data = util.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            # new markers may change obsolete/orphan/... computations
            repo.invalidatevolatilesets()
            tr.close()
            return True
        finally:
            tr.release()
    finally:
        lock.release()
825
825
# keep compatibility for the 4.3 cycle
def allprecursors(obsstore, nodes, ignoreflags=0):
    """Deprecated compatibility alias; see ``obsutil.allprecursors``."""
    util.nouideprecwarn(
        'obsolete.allprecursors moved to obsutil.allprecursors', '4.3')
    return obsutil.allprecursors(obsstore, nodes, ignoreflags)
831
831
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Deprecated compatibility alias; see ``obsutil.allsuccessors``."""
    util.nouideprecwarn(
        'obsolete.allsuccessors moved to obsutil.allsuccessors', '4.3')
    return obsutil.allsuccessors(obsstore, nodes, ignoreflags)
836
836
def marker(repo, data):
    """Deprecated compatibility alias; see ``obsutil.marker``."""
    repo.ui.deprecwarn('obsolete.marker moved to obsutil.marker', '4.3')
    return obsutil.marker(repo, data)
841
841
def getmarkers(repo, nodes=None, exclusive=False):
    """Deprecated compatibility alias; see ``obsutil.getmarkers``."""
    repo.ui.deprecwarn('obsolete.getmarkers moved to obsutil.getmarkers',
                       '4.3')
    return obsutil.getmarkers(repo, nodes=nodes, exclusive=exclusive)
846
846
def exclusivemarkers(repo, nodes):
    """Deprecated compatibility alias; see ``obsutil.exclusivemarkers``."""
    repo.ui.deprecwarn(
        'obsolete.exclusivemarkers moved to obsutil.exclusivemarkers', '4.3')
    return obsutil.exclusivemarkers(repo, nodes)
851
851
def foreground(repo, nodes):
    """Deprecated compatibility alias; see ``obsutil.foreground``."""
    repo.ui.deprecwarn('obsolete.foreground moved to obsutil.foreground',
                       '4.3')
    return obsutil.foreground(repo, nodes)
856
856
def successorssets(repo, initialnode, cache=None):
    """Deprecated compatibility alias; see ``obsutil.successorssets``."""
    repo.ui.deprecwarn(
        'obsolete.successorssets moved to obsutil.successorssets', '4.3')
    return obsutil.successorssets(repo, initialnode, cache=cache)
861
861
# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}

def cachefor(name):
    """Decorator registering a function as the computer of volatile set
    ``name``.

    Registering two computers for the same name is a programming error.
    """
    def register(func):
        if name in cachefuncs:
            template = ("duplicated registration for volatileset '%s'"
                        " (existing: %r)")
            raise error.ProgrammingError(template % (name, cachefuncs[name]))
        cachefuncs[name] = func
        return func
    return register
873
873
def getrevs(repo, name):
    """Return the set of revision that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    caches = repo.obsstore.caches
    if name not in caches:
        # compute lazily and memoize on the obsstore
        caches[name] = cachefuncs[name](repo)
    return caches[name]
884
884
# To be simple we need to invalidate obsolescence cache when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence related cache from a repo

    This removes all caches in obsstore if the obsstore already exists on
    the repo.

    (We could be smarter here given the exact event that trigger the cache
    clearing)"""
    # only clear caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()
902
902
def _mutablerevs(repo):
    """the set of mutable revision in the repository

    Mutable means draft or secret phase.
    """
    return repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
906
906
@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    tonode = repo.changelog.node
    successors = repo.obsstore.successors
    # a mutable revision is obsolete when some marker rewrites it, i.e.
    # when its node appears as a predecessor in the successors index
    return set(r for r in _mutablerevs(repo) if tonode(r) in successors)
915
915
@cachefor('unstable')
def _computeunstableset(repo):
    """Deprecated alias: the 'unstable' set was renamed to 'orphan'."""
    repo.ui.deprecwarn(
        "'unstable' volatile set is deprecated, use 'orphan'", '4.4')

    return _computeorphanset(repo)
923
923
@cachefor('orphan')
def _computeorphanset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    pfunc = repo.changelog.parentrevs
    mutable = _mutablerevs(repo)
    obsolete = getrevs(repo, 'obsolete')
    others = mutable - obsolete
    unstable = set()
    for r in sorted(others):
        # A rev is unstable if one of its parent is obsolete or unstable
        # this works since we traverse following growing rev order
        # (a parent always has a lower rev number, so its status is
        # already settled by the time we reach the child)
        for p in pfunc(r):
            if p in obsolete or p in unstable:
                unstable.add(r)
                break
    return unstable
940
940
@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    ancestors = repo.changelog.ancestors(getrevs(repo, 'orphan'))
    # an obsolete revision with a live (orphan) descendant is "suspended"
    return set(r for r in getrevs(repo, 'obsolete') if r in ancestors)
946
946
@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    obsolete = getrevs(repo, 'obsolete')
    suspended = getrevs(repo, 'suspended')
    # whatever is obsolete but not suspended is gone for good
    return obsolete - suspended
951
951
@cachefor('bumped')
def _computebumpedset(repo):
    """Deprecated alias: the 'bumped' set was renamed to 'phasedivergent'."""
    repo.ui.deprecwarn(
        "'bumped' volatile set is deprecated, use 'phasedivergent'", '4.4')

    return _computephasedivergentset(repo)
959
959
@cachefor('phasedivergent')
def _computephasedivergentset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    for ctx in repo.set('(not public()) and (not obsolete())'):
        rev = ctx.rev()
        # We only evaluate mutable, non-obsolete revision
        node = ctx.node()
        # (future) A cache of predecessors may worth if split is very common
        # bumpedfix-flagged markers are ignored: they are the fix for this
        # very situation and must not retrigger it
        for pnode in obsutil.allpredecessors(repo.obsstore, [node],
                                             ignoreflags=bumpedfix):
            prev = torev(pnode) # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public predecessor
                bumped.add(rev)
                break # Next draft!
    return bumped
982
982
@cachefor('divergent')
def _computedivergentset(repo):
    """Deprecated alias: the 'divergent' set was renamed to
    'contentdivergent'."""
    repo.ui.deprecwarn(
        "'divergent' volatile set is deprecated, use 'contentdivergent'",
        '4.4')

    return _computecontentdivergentset(repo)
990
990
@cachefor('contentdivergent')
def _computecontentdivergentset(repo):
    """the set of rev that compete to be the final successors of some revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        # walk the predecessor markers of each live mutable revision
        mark = obsstore.predecessors.get(ctx.node(), ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                # populates newermap[prec] as a side effect
                obsutil.successorssets(repo, prec, cache=newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                # more than one non-empty successors set: the predecessor
                # was rewritten in competing ways -> content-divergent
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.predecessors.get(prec, ()))
    return divergent
1015
1015
1016
1016
1017 def createmarkers(repo, relations, flag=0, date=None, metadata=None,
1017 def createmarkers(repo, relations, flag=0, date=None, metadata=None,
1018 operation=None):
1018 operation=None):
1019 """Add obsolete markers between changesets in a repo
1019 """Add obsolete markers between changesets in a repo
1020
1020
1021 <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
1021 <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
1022 tuple. `old` and `news` are changectx. metadata is an optional dictionary
1022 tuple. `old` and `news` are changectx. metadata is an optional dictionary
1023 containing metadata for this marker only. It is merged with the global
1023 containing metadata for this marker only. It is merged with the global
1024 metadata specified through the `metadata` argument of this function,
1024 metadata specified through the `metadata` argument of this function,
1025
1025
1026 Trying to obsolete a public changeset will raise an exception.
1026 Trying to obsolete a public changeset will raise an exception.
1027
1027
1028 Current user and date are used except if specified otherwise in the
1028 Current user and date are used except if specified otherwise in the
1029 metadata attribute.
1029 metadata attribute.
1030
1030
1031 This function operates within a transaction of its own, but does
1031 This function operates within a transaction of its own, but does
1032 not take any lock on the repo.
1032 not take any lock on the repo.
1033 """
1033 """
1034 # prepare metadata
1034 # prepare metadata
1035 if metadata is None:
1035 if metadata is None:
1036 metadata = {}
1036 metadata = {}
1037 if 'user' not in metadata:
1037 if 'user' not in metadata:
1038 metadata['user'] = repo.ui.username()
1038 metadata['user'] = repo.ui.username()
1039
1039
1040 # Operation metadata handling
1040 # Operation metadata handling
1041 useoperation = repo.ui.configbool('experimental',
1041 useoperation = repo.ui.configbool('experimental',
1042 'stabilization.track-operation')
1042 'stabilization.track-operation')
1043 if useoperation and operation:
1043 if useoperation and operation:
1044 metadata['operation'] = operation
1044 metadata['operation'] = operation
1045
1045
1046 # Effect flag metadata handling
1047 saveeffectflag = repo.ui.configbool('experimental',
1048 'effect-flags',
1049 False)
1050
1046 tr = repo.transaction('add-obsolescence-marker')
1051 tr = repo.transaction('add-obsolescence-marker')
1047 try:
1052 try:
1048 markerargs = []
1053 markerargs = []
1049 for rel in relations:
1054 for rel in relations:
1050 prec = rel[0]
1055 prec = rel[0]
1051 sucs = rel[1]
1056 sucs = rel[1]
1052 localmetadata = metadata.copy()
1057 localmetadata = metadata.copy()
1053 if 2 < len(rel):
1058 if 2 < len(rel):
1054 localmetadata.update(rel[2])
1059 localmetadata.update(rel[2])
1055
1060
1056 if not prec.mutable():
1061 if not prec.mutable():
1057 raise error.Abort(_("cannot obsolete public changeset: %s")
1062 raise error.Abort(_("cannot obsolete public changeset: %s")
1058 % prec,
1063 % prec,
1059 hint="see 'hg help phases' for details")
1064 hint="see 'hg help phases' for details")
1060 nprec = prec.node()
1065 nprec = prec.node()
1061 nsucs = tuple(s.node() for s in sucs)
1066 nsucs = tuple(s.node() for s in sucs)
1062 npare = None
1067 npare = None
1063 if not nsucs:
1068 if not nsucs:
1064 npare = tuple(p.node() for p in prec.parents())
1069 npare = tuple(p.node() for p in prec.parents())
1065 if nprec in nsucs:
1070 if nprec in nsucs:
1066 raise error.Abort(_("changeset %s cannot obsolete itself")
1071 raise error.Abort(_("changeset %s cannot obsolete itself")
1067 % prec)
1072 % prec)
1068
1073
1074 # Effect flag can be different by relation
1075 if saveeffectflag:
1076 # The effect flag is saved in a versioned field name for future
1077 # evolution
1078 effectflag = obsutil.geteffectflag(rel)
1079 localmetadata[obsutil.EFFECTFLAGFIELD] = "%d" % effectflag
1080
1069 # Creating the marker causes the hidden cache to become invalid,
1081 # Creating the marker causes the hidden cache to become invalid,
1070 # which causes recomputation when we ask for prec.parents() above.
1082 # which causes recomputation when we ask for prec.parents() above.
1071 # Resulting in n^2 behavior. So let's prepare all of the args
1083 # Resulting in n^2 behavior. So let's prepare all of the args
1072 # first, then create the markers.
1084 # first, then create the markers.
1073 markerargs.append((nprec, nsucs, npare, localmetadata))
1085 markerargs.append((nprec, nsucs, npare, localmetadata))
1074
1086
1075 for args in markerargs:
1087 for args in markerargs:
1076 nprec, nsucs, npare, localmetadata = args
1088 nprec, nsucs, npare, localmetadata = args
1077 repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
1089 repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
1078 date=date, metadata=localmetadata,
1090 date=date, metadata=localmetadata,
1079 ui=repo.ui)
1091 ui=repo.ui)
1080 repo.filteredrevcache.clear()
1092 repo.filteredrevcache.clear()
1081 tr.close()
1093 tr.close()
1082 finally:
1094 finally:
1083 tr.release()
1095 tr.release()
@@ -1,657 +1,670
1 # obsutil.py - utility functions for obsolescence
1 # obsutil.py - utility functions for obsolescence
2 #
2 #
3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from . import (
10 from . import (
11 phases,
11 phases,
12 util
12 util
13 )
13 )
14
14
15 class marker(object):
15 class marker(object):
16 """Wrap obsolete marker raw data"""
16 """Wrap obsolete marker raw data"""
17
17
18 def __init__(self, repo, data):
18 def __init__(self, repo, data):
19 # the repo argument will be used to create changectx in later version
19 # the repo argument will be used to create changectx in later version
20 self._repo = repo
20 self._repo = repo
21 self._data = data
21 self._data = data
22 self._decodedmeta = None
22 self._decodedmeta = None
23
23
24 def __hash__(self):
24 def __hash__(self):
25 return hash(self._data)
25 return hash(self._data)
26
26
27 def __eq__(self, other):
27 def __eq__(self, other):
28 if type(other) != type(self):
28 if type(other) != type(self):
29 return False
29 return False
30 return self._data == other._data
30 return self._data == other._data
31
31
32 def precnode(self):
32 def precnode(self):
33 msg = ("'marker.precnode' is deprecated, "
33 msg = ("'marker.precnode' is deprecated, "
34 "use 'marker.prednode'")
34 "use 'marker.prednode'")
35 util.nouideprecwarn(msg, '4.4')
35 util.nouideprecwarn(msg, '4.4')
36 return self.prednode()
36 return self.prednode()
37
37
38 def prednode(self):
38 def prednode(self):
39 """Predecessor changeset node identifier"""
39 """Predecessor changeset node identifier"""
40 return self._data[0]
40 return self._data[0]
41
41
42 def succnodes(self):
42 def succnodes(self):
43 """List of successor changesets node identifiers"""
43 """List of successor changesets node identifiers"""
44 return self._data[1]
44 return self._data[1]
45
45
46 def parentnodes(self):
46 def parentnodes(self):
47 """Parents of the predecessors (None if not recorded)"""
47 """Parents of the predecessors (None if not recorded)"""
48 return self._data[5]
48 return self._data[5]
49
49
50 def metadata(self):
50 def metadata(self):
51 """Decoded metadata dictionary"""
51 """Decoded metadata dictionary"""
52 return dict(self._data[3])
52 return dict(self._data[3])
53
53
54 def date(self):
54 def date(self):
55 """Creation date as (unixtime, offset)"""
55 """Creation date as (unixtime, offset)"""
56 return self._data[4]
56 return self._data[4]
57
57
58 def flags(self):
58 def flags(self):
59 """The flags field of the marker"""
59 """The flags field of the marker"""
60 return self._data[2]
60 return self._data[2]
61
61
62 def getmarkers(repo, nodes=None, exclusive=False):
62 def getmarkers(repo, nodes=None, exclusive=False):
63 """returns markers known in a repository
63 """returns markers known in a repository
64
64
65 If <nodes> is specified, only markers "relevant" to those nodes are are
65 If <nodes> is specified, only markers "relevant" to those nodes are are
66 returned"""
66 returned"""
67 if nodes is None:
67 if nodes is None:
68 rawmarkers = repo.obsstore
68 rawmarkers = repo.obsstore
69 elif exclusive:
69 elif exclusive:
70 rawmarkers = exclusivemarkers(repo, nodes)
70 rawmarkers = exclusivemarkers(repo, nodes)
71 else:
71 else:
72 rawmarkers = repo.obsstore.relevantmarkers(nodes)
72 rawmarkers = repo.obsstore.relevantmarkers(nodes)
73
73
74 for markerdata in rawmarkers:
74 for markerdata in rawmarkers:
75 yield marker(repo, markerdata)
75 yield marker(repo, markerdata)
76
76
77 def closestpredecessors(repo, nodeid):
77 def closestpredecessors(repo, nodeid):
78 """yield the list of next predecessors pointing on visible changectx nodes
78 """yield the list of next predecessors pointing on visible changectx nodes
79
79
80 This function respect the repoview filtering, filtered revision will be
80 This function respect the repoview filtering, filtered revision will be
81 considered missing.
81 considered missing.
82 """
82 """
83
83
84 precursors = repo.obsstore.predecessors
84 precursors = repo.obsstore.predecessors
85 stack = [nodeid]
85 stack = [nodeid]
86 seen = set(stack)
86 seen = set(stack)
87
87
88 while stack:
88 while stack:
89 current = stack.pop()
89 current = stack.pop()
90 currentpreccs = precursors.get(current, ())
90 currentpreccs = precursors.get(current, ())
91
91
92 for prec in currentpreccs:
92 for prec in currentpreccs:
93 precnodeid = prec[0]
93 precnodeid = prec[0]
94
94
95 # Basic cycle protection
95 # Basic cycle protection
96 if precnodeid in seen:
96 if precnodeid in seen:
97 continue
97 continue
98 seen.add(precnodeid)
98 seen.add(precnodeid)
99
99
100 if precnodeid in repo:
100 if precnodeid in repo:
101 yield precnodeid
101 yield precnodeid
102 else:
102 else:
103 stack.append(precnodeid)
103 stack.append(precnodeid)
104
104
105 def allprecursors(*args, **kwargs):
105 def allprecursors(*args, **kwargs):
106 """ (DEPRECATED)
106 """ (DEPRECATED)
107 """
107 """
108 msg = ("'obsutil.allprecursors' is deprecated, "
108 msg = ("'obsutil.allprecursors' is deprecated, "
109 "use 'obsutil.allpredecessors'")
109 "use 'obsutil.allpredecessors'")
110 util.nouideprecwarn(msg, '4.4')
110 util.nouideprecwarn(msg, '4.4')
111
111
112 return allpredecessors(*args, **kwargs)
112 return allpredecessors(*args, **kwargs)
113
113
114 def allpredecessors(obsstore, nodes, ignoreflags=0):
114 def allpredecessors(obsstore, nodes, ignoreflags=0):
115 """Yield node for every precursors of <nodes>.
115 """Yield node for every precursors of <nodes>.
116
116
117 Some precursors may be unknown locally.
117 Some precursors may be unknown locally.
118
118
119 This is a linear yield unsuited to detecting folded changesets. It includes
119 This is a linear yield unsuited to detecting folded changesets. It includes
120 initial nodes too."""
120 initial nodes too."""
121
121
122 remaining = set(nodes)
122 remaining = set(nodes)
123 seen = set(remaining)
123 seen = set(remaining)
124 while remaining:
124 while remaining:
125 current = remaining.pop()
125 current = remaining.pop()
126 yield current
126 yield current
127 for mark in obsstore.predecessors.get(current, ()):
127 for mark in obsstore.predecessors.get(current, ()):
128 # ignore marker flagged with specified flag
128 # ignore marker flagged with specified flag
129 if mark[2] & ignoreflags:
129 if mark[2] & ignoreflags:
130 continue
130 continue
131 suc = mark[0]
131 suc = mark[0]
132 if suc not in seen:
132 if suc not in seen:
133 seen.add(suc)
133 seen.add(suc)
134 remaining.add(suc)
134 remaining.add(suc)
135
135
136 def allsuccessors(obsstore, nodes, ignoreflags=0):
136 def allsuccessors(obsstore, nodes, ignoreflags=0):
137 """Yield node for every successor of <nodes>.
137 """Yield node for every successor of <nodes>.
138
138
139 Some successors may be unknown locally.
139 Some successors may be unknown locally.
140
140
141 This is a linear yield unsuited to detecting split changesets. It includes
141 This is a linear yield unsuited to detecting split changesets. It includes
142 initial nodes too."""
142 initial nodes too."""
143 remaining = set(nodes)
143 remaining = set(nodes)
144 seen = set(remaining)
144 seen = set(remaining)
145 while remaining:
145 while remaining:
146 current = remaining.pop()
146 current = remaining.pop()
147 yield current
147 yield current
148 for mark in obsstore.successors.get(current, ()):
148 for mark in obsstore.successors.get(current, ()):
149 # ignore marker flagged with specified flag
149 # ignore marker flagged with specified flag
150 if mark[2] & ignoreflags:
150 if mark[2] & ignoreflags:
151 continue
151 continue
152 for suc in mark[1]:
152 for suc in mark[1]:
153 if suc not in seen:
153 if suc not in seen:
154 seen.add(suc)
154 seen.add(suc)
155 remaining.add(suc)
155 remaining.add(suc)
156
156
157 def _filterprunes(markers):
157 def _filterprunes(markers):
158 """return a set with no prune markers"""
158 """return a set with no prune markers"""
159 return set(m for m in markers if m[1])
159 return set(m for m in markers if m[1])
160
160
161 def exclusivemarkers(repo, nodes):
161 def exclusivemarkers(repo, nodes):
162 """set of markers relevant to "nodes" but no other locally-known nodes
162 """set of markers relevant to "nodes" but no other locally-known nodes
163
163
164 This function compute the set of markers "exclusive" to a locally-known
164 This function compute the set of markers "exclusive" to a locally-known
165 node. This means we walk the markers starting from <nodes> until we reach a
165 node. This means we walk the markers starting from <nodes> until we reach a
166 locally-known precursors outside of <nodes>. Element of <nodes> with
166 locally-known precursors outside of <nodes>. Element of <nodes> with
167 locally-known successors outside of <nodes> are ignored (since their
167 locally-known successors outside of <nodes> are ignored (since their
168 precursors markers are also relevant to these successors).
168 precursors markers are also relevant to these successors).
169
169
170 For example:
170 For example:
171
171
172 # (A0 rewritten as A1)
172 # (A0 rewritten as A1)
173 #
173 #
174 # A0 <-1- A1 # Marker "1" is exclusive to A1
174 # A0 <-1- A1 # Marker "1" is exclusive to A1
175
175
176 or
176 or
177
177
178 # (A0 rewritten as AX; AX rewritten as A1; AX is unkown locally)
178 # (A0 rewritten as AX; AX rewritten as A1; AX is unkown locally)
179 #
179 #
180 # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1
180 # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1
181
181
182 or
182 or
183
183
184 # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
184 # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
185 #
185 #
186 # <-2- A1 # Marker "2" is exclusive to A0,A1
186 # <-2- A1 # Marker "2" is exclusive to A0,A1
187 # /
187 # /
188 # <-1- A0
188 # <-1- A0
189 # \
189 # \
190 # <-3- A2 # Marker "3" is exclusive to A0,A2
190 # <-3- A2 # Marker "3" is exclusive to A0,A2
191 #
191 #
192 # in addition:
192 # in addition:
193 #
193 #
194 # Markers "2,3" are exclusive to A1,A2
194 # Markers "2,3" are exclusive to A1,A2
195 # Markers "1,2,3" are exclusive to A0,A1,A2
195 # Markers "1,2,3" are exclusive to A0,A1,A2
196
196
197 See test/test-obsolete-bundle-strip.t for more examples.
197 See test/test-obsolete-bundle-strip.t for more examples.
198
198
199 An example usage is strip. When stripping a changeset, we also want to
199 An example usage is strip. When stripping a changeset, we also want to
200 strip the markers exclusive to this changeset. Otherwise we would have
200 strip the markers exclusive to this changeset. Otherwise we would have
201 "dangling"" obsolescence markers from its precursors: Obsolescence markers
201 "dangling"" obsolescence markers from its precursors: Obsolescence markers
202 marking a node as obsolete without any successors available locally.
202 marking a node as obsolete without any successors available locally.
203
203
204 As for relevant markers, the prune markers for children will be followed.
204 As for relevant markers, the prune markers for children will be followed.
205 Of course, they will only be followed if the pruned children is
205 Of course, they will only be followed if the pruned children is
206 locally-known. Since the prune markers are relevant to the pruned node.
206 locally-known. Since the prune markers are relevant to the pruned node.
207 However, while prune markers are considered relevant to the parent of the
207 However, while prune markers are considered relevant to the parent of the
208 pruned changesets, prune markers for locally-known changeset (with no
208 pruned changesets, prune markers for locally-known changeset (with no
209 successors) are considered exclusive to the pruned nodes. This allows
209 successors) are considered exclusive to the pruned nodes. This allows
210 to strip the prune markers (with the rest of the exclusive chain) alongside
210 to strip the prune markers (with the rest of the exclusive chain) alongside
211 the pruned changesets.
211 the pruned changesets.
212 """
212 """
213 # running on a filtered repository would be dangerous as markers could be
213 # running on a filtered repository would be dangerous as markers could be
214 # reported as exclusive when they are relevant for other filtered nodes.
214 # reported as exclusive when they are relevant for other filtered nodes.
215 unfi = repo.unfiltered()
215 unfi = repo.unfiltered()
216
216
217 # shortcut to various useful item
217 # shortcut to various useful item
218 nm = unfi.changelog.nodemap
218 nm = unfi.changelog.nodemap
219 precursorsmarkers = unfi.obsstore.predecessors
219 precursorsmarkers = unfi.obsstore.predecessors
220 successormarkers = unfi.obsstore.successors
220 successormarkers = unfi.obsstore.successors
221 childrenmarkers = unfi.obsstore.children
221 childrenmarkers = unfi.obsstore.children
222
222
223 # exclusive markers (return of the function)
223 # exclusive markers (return of the function)
224 exclmarkers = set()
224 exclmarkers = set()
225 # we need fast membership testing
225 # we need fast membership testing
226 nodes = set(nodes)
226 nodes = set(nodes)
227 # looking for head in the obshistory
227 # looking for head in the obshistory
228 #
228 #
229 # XXX we are ignoring all issues in regard with cycle for now.
229 # XXX we are ignoring all issues in regard with cycle for now.
230 stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
230 stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
231 stack.sort()
231 stack.sort()
232 # nodes already stacked
232 # nodes already stacked
233 seennodes = set(stack)
233 seennodes = set(stack)
234 while stack:
234 while stack:
235 current = stack.pop()
235 current = stack.pop()
236 # fetch precursors markers
236 # fetch precursors markers
237 markers = list(precursorsmarkers.get(current, ()))
237 markers = list(precursorsmarkers.get(current, ()))
238 # extend the list with prune markers
238 # extend the list with prune markers
239 for mark in successormarkers.get(current, ()):
239 for mark in successormarkers.get(current, ()):
240 if not mark[1]:
240 if not mark[1]:
241 markers.append(mark)
241 markers.append(mark)
242 # and markers from children (looking for prune)
242 # and markers from children (looking for prune)
243 for mark in childrenmarkers.get(current, ()):
243 for mark in childrenmarkers.get(current, ()):
244 if not mark[1]:
244 if not mark[1]:
245 markers.append(mark)
245 markers.append(mark)
246 # traverse the markers
246 # traverse the markers
247 for mark in markers:
247 for mark in markers:
248 if mark in exclmarkers:
248 if mark in exclmarkers:
249 # markers already selected
249 # markers already selected
250 continue
250 continue
251
251
252 # If the markers is about the current node, select it
252 # If the markers is about the current node, select it
253 #
253 #
254 # (this delay the addition of markers from children)
254 # (this delay the addition of markers from children)
255 if mark[1] or mark[0] == current:
255 if mark[1] or mark[0] == current:
256 exclmarkers.add(mark)
256 exclmarkers.add(mark)
257
257
258 # should we keep traversing through the precursors?
258 # should we keep traversing through the precursors?
259 prec = mark[0]
259 prec = mark[0]
260
260
261 # nodes in the stack or already processed
261 # nodes in the stack or already processed
262 if prec in seennodes:
262 if prec in seennodes:
263 continue
263 continue
264
264
265 # is this a locally known node ?
265 # is this a locally known node ?
266 known = prec in nm
266 known = prec in nm
267 # if locally-known and not in the <nodes> set the traversal
267 # if locally-known and not in the <nodes> set the traversal
268 # stop here.
268 # stop here.
269 if known and prec not in nodes:
269 if known and prec not in nodes:
270 continue
270 continue
271
271
272 # do not keep going if there are unselected markers pointing to this
272 # do not keep going if there are unselected markers pointing to this
273 # nodes. If we end up traversing these unselected markers later the
273 # nodes. If we end up traversing these unselected markers later the
274 # node will be taken care of at that point.
274 # node will be taken care of at that point.
275 precmarkers = _filterprunes(successormarkers.get(prec))
275 precmarkers = _filterprunes(successormarkers.get(prec))
276 if precmarkers.issubset(exclmarkers):
276 if precmarkers.issubset(exclmarkers):
277 seennodes.add(prec)
277 seennodes.add(prec)
278 stack.append(prec)
278 stack.append(prec)
279
279
280 return exclmarkers
280 return exclmarkers
281
281
282 def foreground(repo, nodes):
282 def foreground(repo, nodes):
283 """return all nodes in the "foreground" of other node
283 """return all nodes in the "foreground" of other node
284
284
285 The foreground of a revision is anything reachable using parent -> children
285 The foreground of a revision is anything reachable using parent -> children
286 or precursor -> successor relation. It is very similar to "descendant" but
286 or precursor -> successor relation. It is very similar to "descendant" but
287 augmented with obsolescence information.
287 augmented with obsolescence information.
288
288
289 Beware that possible obsolescence cycle may result if complex situation.
289 Beware that possible obsolescence cycle may result if complex situation.
290 """
290 """
291 repo = repo.unfiltered()
291 repo = repo.unfiltered()
292 foreground = set(repo.set('%ln::', nodes))
292 foreground = set(repo.set('%ln::', nodes))
293 if repo.obsstore:
293 if repo.obsstore:
294 # We only need this complicated logic if there is obsolescence
294 # We only need this complicated logic if there is obsolescence
295 # XXX will probably deserve an optimised revset.
295 # XXX will probably deserve an optimised revset.
296 nm = repo.changelog.nodemap
296 nm = repo.changelog.nodemap
297 plen = -1
297 plen = -1
298 # compute the whole set of successors or descendants
298 # compute the whole set of successors or descendants
299 while len(foreground) != plen:
299 while len(foreground) != plen:
300 plen = len(foreground)
300 plen = len(foreground)
301 succs = set(c.node() for c in foreground)
301 succs = set(c.node() for c in foreground)
302 mutable = [c.node() for c in foreground if c.mutable()]
302 mutable = [c.node() for c in foreground if c.mutable()]
303 succs.update(allsuccessors(repo.obsstore, mutable))
303 succs.update(allsuccessors(repo.obsstore, mutable))
304 known = (n for n in succs if n in nm)
304 known = (n for n in succs if n in nm)
305 foreground = set(repo.set('%ln::', known))
305 foreground = set(repo.set('%ln::', known))
306 return set(c.node() for c in foreground)
306 return set(c.node() for c in foreground)
307
307
308 # logic around storing and using effect flags
309 EFFECTFLAGFIELD = "ef1"
310
311 def geteffectflag(relation):
312 """ From an obs-marker relation, compute what changed between the
313 predecessor and the successor.
314 """
315 effects = 0
316
317 source = relation[0]
318
319 return effects
320
308 def getobsoleted(repo, tr):
321 def getobsoleted(repo, tr):
309 """return the set of pre-existing revisions obsoleted by a transaction"""
322 """return the set of pre-existing revisions obsoleted by a transaction"""
310 torev = repo.unfiltered().changelog.nodemap.get
323 torev = repo.unfiltered().changelog.nodemap.get
311 phase = repo._phasecache.phase
324 phase = repo._phasecache.phase
312 succsmarkers = repo.obsstore.successors.get
325 succsmarkers = repo.obsstore.successors.get
313 public = phases.public
326 public = phases.public
314 addedmarkers = tr.changes.get('obsmarkers')
327 addedmarkers = tr.changes.get('obsmarkers')
315 addedrevs = tr.changes.get('revs')
328 addedrevs = tr.changes.get('revs')
316 seenrevs = set(addedrevs)
329 seenrevs = set(addedrevs)
317 obsoleted = set()
330 obsoleted = set()
318 for mark in addedmarkers:
331 for mark in addedmarkers:
319 node = mark[0]
332 node = mark[0]
320 rev = torev(node)
333 rev = torev(node)
321 if rev is None or rev in seenrevs:
334 if rev is None or rev in seenrevs:
322 continue
335 continue
323 seenrevs.add(rev)
336 seenrevs.add(rev)
324 if phase(repo, rev) == public:
337 if phase(repo, rev) == public:
325 continue
338 continue
326 if set(succsmarkers(node) or []).issubset(addedmarkers):
339 if set(succsmarkers(node) or []).issubset(addedmarkers):
327 obsoleted.add(rev)
340 obsoleted.add(rev)
328 return obsoleted
341 return obsoleted
329
342
330 class _succs(list):
343 class _succs(list):
331 """small class to represent a successors with some metadata about it"""
344 """small class to represent a successors with some metadata about it"""
332
345
333 def __init__(self, *args, **kwargs):
346 def __init__(self, *args, **kwargs):
334 super(_succs, self).__init__(*args, **kwargs)
347 super(_succs, self).__init__(*args, **kwargs)
335 self.markers = set()
348 self.markers = set()
336
349
337 def copy(self):
350 def copy(self):
338 new = _succs(self)
351 new = _succs(self)
339 new.markers = self.markers.copy()
352 new.markers = self.markers.copy()
340 return new
353 return new
341
354
342 @util.propertycache
355 @util.propertycache
343 def _set(self):
356 def _set(self):
344 # immutable
357 # immutable
345 return set(self)
358 return set(self)
346
359
347 def canmerge(self, other):
360 def canmerge(self, other):
348 return self._set.issubset(other._set)
361 return self._set.issubset(other._set)
349
362
350 def successorssets(repo, initialnode, closest=False, cache=None):
363 def successorssets(repo, initialnode, closest=False, cache=None):
351 """Return set of all latest successors of initial nodes
364 """Return set of all latest successors of initial nodes
352
365
353 The successors set of a changeset A are the group of revisions that succeed
366 The successors set of a changeset A are the group of revisions that succeed
354 A. It succeeds A as a consistent whole, each revision being only a partial
367 A. It succeeds A as a consistent whole, each revision being only a partial
355 replacement. By default, the successors set contains non-obsolete
368 replacement. By default, the successors set contains non-obsolete
356 changesets only, walking the obsolescence graph until reaching a leaf. If
369 changesets only, walking the obsolescence graph until reaching a leaf. If
357 'closest' is set to True, closest successors-sets are return (the
370 'closest' is set to True, closest successors-sets are return (the
358 obsolescence walk stops on known changesets).
371 obsolescence walk stops on known changesets).
359
372
360 This function returns the full list of successor sets which is why it
373 This function returns the full list of successor sets which is why it
361 returns a list of tuples and not just a single tuple. Each tuple is a valid
374 returns a list of tuples and not just a single tuple. Each tuple is a valid
362 successors set. Note that (A,) may be a valid successors set for changeset A
375 successors set. Note that (A,) may be a valid successors set for changeset A
363 (see below).
376 (see below).
364
377
365 In most cases, a changeset A will have a single element (e.g. the changeset
378 In most cases, a changeset A will have a single element (e.g. the changeset
366 A is replaced by A') in its successors set. Though, it is also common for a
379 A is replaced by A') in its successors set. Though, it is also common for a
367 changeset A to have no elements in its successor set (e.g. the changeset
380 changeset A to have no elements in its successor set (e.g. the changeset
368 has been pruned). Therefore, the returned list of successors sets will be
381 has been pruned). Therefore, the returned list of successors sets will be
369 [(A',)] or [], respectively.
382 [(A',)] or [], respectively.
370
383
371 When a changeset A is split into A' and B', however, it will result in a
384 When a changeset A is split into A' and B', however, it will result in a
372 successors set containing more than a single element, i.e. [(A',B')].
385 successors set containing more than a single element, i.e. [(A',B')].
373 Divergent changesets will result in multiple successors sets, i.e. [(A',),
386 Divergent changesets will result in multiple successors sets, i.e. [(A',),
374 (A'')].
387 (A'')].
375
388
376 If a changeset A is not obsolete, then it will conceptually have no
389 If a changeset A is not obsolete, then it will conceptually have no
377 successors set. To distinguish this from a pruned changeset, the successor
390 successors set. To distinguish this from a pruned changeset, the successor
378 set will contain itself only, i.e. [(A,)].
391 set will contain itself only, i.e. [(A,)].
379
392
380 Finally, final successors unknown locally are considered to be pruned
393 Finally, final successors unknown locally are considered to be pruned
381 (pruned: obsoleted without any successors). (Final: successors not affected
394 (pruned: obsoleted without any successors). (Final: successors not affected
382 by markers).
395 by markers).
383
396
384 The 'closest' mode respect the repoview filtering. For example, without
397 The 'closest' mode respect the repoview filtering. For example, without
385 filter it will stop at the first locally known changeset, with 'visible'
398 filter it will stop at the first locally known changeset, with 'visible'
386 filter it will stop on visible changesets).
399 filter it will stop on visible changesets).
387
400
388 The optional `cache` parameter is a dictionary that may contains
401 The optional `cache` parameter is a dictionary that may contains
389 precomputed successors sets. It is meant to reuse the computation of a
402 precomputed successors sets. It is meant to reuse the computation of a
390 previous call to `successorssets` when multiple calls are made at the same
403 previous call to `successorssets` when multiple calls are made at the same
391 time. The cache dictionary is updated in place. The caller is responsible
404 time. The cache dictionary is updated in place. The caller is responsible
392 for its life span. Code that makes multiple calls to `successorssets`
405 for its life span. Code that makes multiple calls to `successorssets`
393 *should* use this cache mechanism or risk a performance hit.
406 *should* use this cache mechanism or risk a performance hit.
394
407
395 Since results are different depending of the 'closest' most, the same cache
408 Since results are different depending of the 'closest' most, the same cache
396 cannot be reused for both mode.
409 cannot be reused for both mode.
397 """
410 """
398
411
399 succmarkers = repo.obsstore.successors
412 succmarkers = repo.obsstore.successors
400
413
401 # Stack of nodes we search successors sets for
414 # Stack of nodes we search successors sets for
402 toproceed = [initialnode]
415 toproceed = [initialnode]
403 # set version of above list for fast loop detection
416 # set version of above list for fast loop detection
404 # element added to "toproceed" must be added here
417 # element added to "toproceed" must be added here
405 stackedset = set(toproceed)
418 stackedset = set(toproceed)
406 if cache is None:
419 if cache is None:
407 cache = {}
420 cache = {}
408
421
409 # This while loop is the flattened version of a recursive search for
422 # This while loop is the flattened version of a recursive search for
410 # successors sets
423 # successors sets
411 #
424 #
412 # def successorssets(x):
425 # def successorssets(x):
413 # successors = directsuccessors(x)
426 # successors = directsuccessors(x)
414 # ss = [[]]
427 # ss = [[]]
415 # for succ in directsuccessors(x):
428 # for succ in directsuccessors(x):
416 # # product as in itertools cartesian product
429 # # product as in itertools cartesian product
417 # ss = product(ss, successorssets(succ))
430 # ss = product(ss, successorssets(succ))
418 # return ss
431 # return ss
419 #
432 #
420 # But we can not use plain recursive calls here:
433 # But we can not use plain recursive calls here:
421 # - that would blow the python call stack
434 # - that would blow the python call stack
422 # - obsolescence markers may have cycles, we need to handle them.
435 # - obsolescence markers may have cycles, we need to handle them.
423 #
436 #
424 # The `toproceed` list act as our call stack. Every node we search
437 # The `toproceed` list act as our call stack. Every node we search
425 # successors set for are stacked there.
438 # successors set for are stacked there.
426 #
439 #
427 # The `stackedset` is set version of this stack used to check if a node is
440 # The `stackedset` is set version of this stack used to check if a node is
428 # already stacked. This check is used to detect cycles and prevent infinite
441 # already stacked. This check is used to detect cycles and prevent infinite
429 # loop.
442 # loop.
430 #
443 #
431 # successors set of all nodes are stored in the `cache` dictionary.
444 # successors set of all nodes are stored in the `cache` dictionary.
432 #
445 #
433 # After this while loop ends we use the cache to return the successors sets
446 # After this while loop ends we use the cache to return the successors sets
434 # for the node requested by the caller.
447 # for the node requested by the caller.
435 while toproceed:
448 while toproceed:
436 # Every iteration tries to compute the successors sets of the topmost
449 # Every iteration tries to compute the successors sets of the topmost
437 # node of the stack: CURRENT.
450 # node of the stack: CURRENT.
438 #
451 #
439 # There are four possible outcomes:
452 # There are four possible outcomes:
440 #
453 #
441 # 1) We already know the successors sets of CURRENT:
454 # 1) We already know the successors sets of CURRENT:
442 # -> mission accomplished, pop it from the stack.
455 # -> mission accomplished, pop it from the stack.
443 # 2) Stop the walk:
456 # 2) Stop the walk:
444 # default case: Node is not obsolete
457 # default case: Node is not obsolete
445 # closest case: Node is known at this repo filter level
458 # closest case: Node is known at this repo filter level
446 # -> the node is its own successors sets. Add it to the cache.
459 # -> the node is its own successors sets. Add it to the cache.
447 # 3) We do not know successors set of direct successors of CURRENT:
460 # 3) We do not know successors set of direct successors of CURRENT:
448 # -> We add those successors to the stack.
461 # -> We add those successors to the stack.
449 # 4) We know successors sets of all direct successors of CURRENT:
462 # 4) We know successors sets of all direct successors of CURRENT:
450 # -> We can compute CURRENT successors set and add it to the
463 # -> We can compute CURRENT successors set and add it to the
451 # cache.
464 # cache.
452 #
465 #
453 current = toproceed[-1]
466 current = toproceed[-1]
454
467
455 # case 2 condition is a bit hairy because of closest,
468 # case 2 condition is a bit hairy because of closest,
456 # we compute it on its own
469 # we compute it on its own
457 case2condition = ((current not in succmarkers)
470 case2condition = ((current not in succmarkers)
458 or (closest and current != initialnode
471 or (closest and current != initialnode
459 and current in repo))
472 and current in repo))
460
473
461 if current in cache:
474 if current in cache:
462 # case (1): We already know the successors sets
475 # case (1): We already know the successors sets
463 stackedset.remove(toproceed.pop())
476 stackedset.remove(toproceed.pop())
464 elif case2condition:
477 elif case2condition:
465 # case (2): end of walk.
478 # case (2): end of walk.
466 if current in repo:
479 if current in repo:
467 # We have a valid successors.
480 # We have a valid successors.
468 cache[current] = [_succs((current,))]
481 cache[current] = [_succs((current,))]
469 else:
482 else:
470 # Final obsolete version is unknown locally.
483 # Final obsolete version is unknown locally.
471 # Do not count that as a valid successors
484 # Do not count that as a valid successors
472 cache[current] = []
485 cache[current] = []
473 else:
486 else:
474 # cases (3) and (4)
487 # cases (3) and (4)
475 #
488 #
476 # We proceed in two phases. Phase 1 aims to distinguish case (3)
489 # We proceed in two phases. Phase 1 aims to distinguish case (3)
477 # from case (4):
490 # from case (4):
478 #
491 #
479 # For each direct successors of CURRENT, we check whether its
492 # For each direct successors of CURRENT, we check whether its
480 # successors sets are known. If they are not, we stack the
493 # successors sets are known. If they are not, we stack the
481 # unknown node and proceed to the next iteration of the while
494 # unknown node and proceed to the next iteration of the while
482 # loop. (case 3)
495 # loop. (case 3)
483 #
496 #
484 # During this step, we may detect obsolescence cycles: a node
497 # During this step, we may detect obsolescence cycles: a node
485 # with unknown successors sets but already in the call stack.
498 # with unknown successors sets but already in the call stack.
486 # In such a situation, we arbitrary set the successors sets of
499 # In such a situation, we arbitrary set the successors sets of
487 # the node to nothing (node pruned) to break the cycle.
500 # the node to nothing (node pruned) to break the cycle.
488 #
501 #
489 # If no break was encountered we proceed to phase 2.
502 # If no break was encountered we proceed to phase 2.
490 #
503 #
491 # Phase 2 computes successors sets of CURRENT (case 4); see details
504 # Phase 2 computes successors sets of CURRENT (case 4); see details
492 # in phase 2 itself.
505 # in phase 2 itself.
493 #
506 #
494 # Note the two levels of iteration in each phase.
507 # Note the two levels of iteration in each phase.
495 # - The first one handles obsolescence markers using CURRENT as
508 # - The first one handles obsolescence markers using CURRENT as
496 # precursor (successors markers of CURRENT).
509 # precursor (successors markers of CURRENT).
497 #
510 #
498 # Having multiple entry here means divergence.
511 # Having multiple entry here means divergence.
499 #
512 #
500 # - The second one handles successors defined in each marker.
513 # - The second one handles successors defined in each marker.
501 #
514 #
502 # Having none means pruned node, multiple successors means split,
515 # Having none means pruned node, multiple successors means split,
503 # single successors are standard replacement.
516 # single successors are standard replacement.
504 #
517 #
505 for mark in sorted(succmarkers[current]):
518 for mark in sorted(succmarkers[current]):
506 for suc in mark[1]:
519 for suc in mark[1]:
507 if suc not in cache:
520 if suc not in cache:
508 if suc in stackedset:
521 if suc in stackedset:
509 # cycle breaking
522 # cycle breaking
510 cache[suc] = []
523 cache[suc] = []
511 else:
524 else:
512 # case (3) If we have not computed successors sets
525 # case (3) If we have not computed successors sets
513 # of one of those successors we add it to the
526 # of one of those successors we add it to the
514 # `toproceed` stack and stop all work for this
527 # `toproceed` stack and stop all work for this
515 # iteration.
528 # iteration.
516 toproceed.append(suc)
529 toproceed.append(suc)
517 stackedset.add(suc)
530 stackedset.add(suc)
518 break
531 break
519 else:
532 else:
520 continue
533 continue
521 break
534 break
522 else:
535 else:
523 # case (4): we know all successors sets of all direct
536 # case (4): we know all successors sets of all direct
524 # successors
537 # successors
525 #
538 #
526 # Successors set contributed by each marker depends on the
539 # Successors set contributed by each marker depends on the
527 # successors sets of all its "successors" node.
540 # successors sets of all its "successors" node.
528 #
541 #
529 # Each different marker is a divergence in the obsolescence
542 # Each different marker is a divergence in the obsolescence
530 # history. It contributes successors sets distinct from other
543 # history. It contributes successors sets distinct from other
531 # markers.
544 # markers.
532 #
545 #
533 # Within a marker, a successor may have divergent successors
546 # Within a marker, a successor may have divergent successors
534 # sets. In such a case, the marker will contribute multiple
547 # sets. In such a case, the marker will contribute multiple
535 # divergent successors sets. If multiple successors have
548 # divergent successors sets. If multiple successors have
536 # divergent successors sets, a Cartesian product is used.
549 # divergent successors sets, a Cartesian product is used.
537 #
550 #
538 # At the end we post-process successors sets to remove
551 # At the end we post-process successors sets to remove
539 # duplicated entry and successors set that are strict subset of
552 # duplicated entry and successors set that are strict subset of
540 # another one.
553 # another one.
541 succssets = []
554 succssets = []
542 for mark in sorted(succmarkers[current]):
555 for mark in sorted(succmarkers[current]):
543 # successors sets contributed by this marker
556 # successors sets contributed by this marker
544 base = _succs()
557 base = _succs()
545 base.markers.add(mark)
558 base.markers.add(mark)
546 markss = [base]
559 markss = [base]
547 for suc in mark[1]:
560 for suc in mark[1]:
548 # cardinal product with previous successors
561 # cardinal product with previous successors
549 productresult = []
562 productresult = []
550 for prefix in markss:
563 for prefix in markss:
551 for suffix in cache[suc]:
564 for suffix in cache[suc]:
552 newss = prefix.copy()
565 newss = prefix.copy()
553 newss.markers.update(suffix.markers)
566 newss.markers.update(suffix.markers)
554 for part in suffix:
567 for part in suffix:
555 # do not duplicated entry in successors set
568 # do not duplicated entry in successors set
556 # first entry wins.
569 # first entry wins.
557 if part not in newss:
570 if part not in newss:
558 newss.append(part)
571 newss.append(part)
559 productresult.append(newss)
572 productresult.append(newss)
560 markss = productresult
573 markss = productresult
561 succssets.extend(markss)
574 succssets.extend(markss)
562 # remove duplicated and subset
575 # remove duplicated and subset
563 seen = []
576 seen = []
564 final = []
577 final = []
565 candidates = sorted((s for s in succssets if s),
578 candidates = sorted((s for s in succssets if s),
566 key=len, reverse=True)
579 key=len, reverse=True)
567 for cand in candidates:
580 for cand in candidates:
568 for seensuccs in seen:
581 for seensuccs in seen:
569 if cand.canmerge(seensuccs):
582 if cand.canmerge(seensuccs):
570 seensuccs.markers.update(cand.markers)
583 seensuccs.markers.update(cand.markers)
571 break
584 break
572 else:
585 else:
573 final.append(cand)
586 final.append(cand)
574 seen.append(cand)
587 seen.append(cand)
575 final.reverse() # put small successors set first
588 final.reverse() # put small successors set first
576 cache[current] = final
589 cache[current] = final
577 return cache[initialnode]
590 return cache[initialnode]
578
591
def successorsandmarkers(repo, ctx):
    """compute the raw data needed for computing obsfate

    Returns a list of dict, one dict per successors set, or None when the
    changeset is not obsolete.
    """
    if not ctx.obsolete():
        return None

    closestsets = successorssets(repo, ctx.node(), closest=True)

    # successorssets returns an empty list for pruned revisions; remap it
    # into a list containing an empty list so the loop below still
    # processes the pruned case
    if closestsets == []:
        closestsets = [[]]

    # Try to recover pruned markers
    succsmap = repo.obsstore.successors
    fullsets = []  # successor set + markers
    for closestset in closestsets:
        if closestset:
            fullsets.append(closestset)
            continue
        # successorssets returns an empty set() when ctx or one of its
        # successors is pruned.
        # In this case, walk the obs-markers tree again starting with ctx
        # and find the relevant pruning obs-makers, the ones without
        # successors.
        # Having these markers allow us to compute some information about
        # its fate, like who pruned this changeset and when.

        # XXX we do not catch all prune markers (eg rewritten then pruned)
        # (fix me later)
        recovered = False
        for marker in succsmap.get(ctx.node(), ()):
            if not marker[1]:
                recovered = True
                prunedset = _succs()
                prunedset.markers.add(marker)
                fullsets.append(prunedset)
        if not recovered:
            fullsets.append(_succs())

    return [{'successors': s, 'markers': s.markers} for s in fullsets]
625
638
def successorsetverb(successorset):
    """ Return the verb summarizing the successorset
    """
    # no successors -> pruned; one -> plain rewrite; several -> split
    if not successorset:
        return 'pruned'
    if len(successorset) == 1:
        return 'rewritten'
    return 'split'
636
649
def markersdates(markers):
    """returns the list of dates for a list of markers
    """
    # a marker's date lives at index 4 of the marker tuple
    return [marker[4] for marker in markers]
641
654
def markersusers(markers):
    """ Returns a sorted list of markers users without duplicates
    """
    # metadata is stored at index 3 as a tuple of (key, value) pairs;
    # markers without a 'user' entry are skipped
    users = set()
    for marker in markers:
        user = dict(marker[3]).get('user')
        if user:
            users.add(user)
    return sorted(users)
649
662
def markersoperations(markers):
    """ Returns a sorted list of markers operations without duplicates
    """
    # metadata is stored at index 3 as a tuple of (key, value) pairs;
    # markers without an 'operation' entry are skipped
    operations = set()
    for marker in markers:
        operation = dict(marker[3]).get('operation')
        if operation:
            operations.add(operation)
    return sorted(operations)
General Comments 0
You need to be logged in to leave comments. Login now