obsstore: minor optimization for the obsolete revset...
Jun Wu - r32688:2c1400d4 default
@@ -1,1428 +1,1426 @@
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "precursor" and possible
replacements are called "successors". Markers that use changeset X as
a precursor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "precursor markers of Y" because they hold
information about the precursors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

  (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. Marker format depends on the version.
See comments associated with each format for details.

"""
from __future__ import absolute_import

import errno
import struct

from .i18n import _
from . import (
    error,
    node,
    phases,
    policy,
    util,
)

parsers = policy.importmod(r'parsers')

_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on third party extensions to enable this.
_enabled = False

# Options for obsolescence
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'

def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    result = set(repo.ui.configlist('experimental', 'evolution'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't
    # been set but _enabled is true.
    if len(result) == 0 and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        not createmarkersopt in result):
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return option in result
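
# Example (illustrative): the options above map onto the
# 'experimental.evolution' config read by isenabled(). A repository could
# enable marker creation and exchange with an hgrc snippet such as:
#
#   [experimental]
#   evolution = createmarkers, exchange
#
# after which isenabled(repo, createmarkersopt) returns True.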

### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it's a successor of a public changeset
#
# o  A' (bumped)
# |`:
# | o  A
# |/
# o  Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
# o  Ad
# |`:
# | x  A'
# |'|
# o |  A
# |/
# o  Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker, <A', (Ad,)>.
# This flag means that the successors express the changes between the public
# and bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
usingsha256 = 2

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)
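
# Illustrative note (not part of the original source): with the
# big-endian layout '>BIB20s' the fixed part of a version-0 marker is
# 1 + 4 + 1 + 20 = 26 bytes, so _fm0fsize == 26, and each successor node
# adds another _fm0fnodesize == 20 bytes.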

def _fm0readmarkers(data, off):
    # Loop on markers
    l = len(data)
    while off + _fm0fsize <= l:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)

def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents, 1):
            metadata['p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata key")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata value")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            key, value = l.split(':')
            d[key] = value
    return d
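
# Example (illustrative sketch, not part of the module): the two helpers
# above round-trip a simple mapping.
#
#   meta = {'user': 'alice', 'note': 'amended'}
#   blob = _fm0encodemeta(meta)    # 'note:amended\0user:alice'
#   assert _fm0decodemeta(blob) == meta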

## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#   0: parents data stored but no parent,
#   1: one parent stored,
#   2: two parents stored,
#   3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: precursor changeset identifier.
#
# - N*(20 or 32) bytes: successor changesets identifiers.
#
# - P*(20 or 32) bytes: parents of the precursor changeset.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize('BB')
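
# Illustrative note (not part of the original source): '>IdhHBBB20s'
# packs 4 + 8 + 2 + 2 + 1 + 1 + 1 + 20 = 39 bytes, so _fm1fsize == 39.
# The leading uint32 holds the total marker size, which lets a reader
# skip over a marker without decoding its variable-length tail.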

def _fm1purereadmarkers(data, off):
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    stop = len(data) - _fm1fsize
    ufixed = struct.Struct(_fm1fixed).unpack

    while off <= stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)

def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)

def _fm1readmarkers(data, off):
    native = getattr(parsers, 'fm1readmarkers', None)
    if not native:
        return _fm1purereadmarkers(data, off)
    stop = len(data) - _fm1fsize
    return native(data, off, stop)

# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}

@util.nogc
def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    off = 0
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion not in formats:
        msg = _('parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    return diskversion, formats[diskversion][0](data, off)
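
# Example (illustrative sketch): decoding a raw obsstore file directly.
# 'repo' is assumed to be a localrepository; real code goes through the
# obsstore class below instead.
#
#   data = repo.svfs.tryread('obsstore')
#   if data:
#       version, markers = _readmarkers(data)
#       for prec, sucs, flags, meta, date, parents in markers:
#           pass  # each marker is a 6-tuple, see obsstore.fields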

def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield _pack('>B', version)
    for marker in markers:
        yield encodeone(marker)
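
# Example (illustrative sketch): a header-prefixed stream produced by
# encodemarkers() can be parsed back with _readmarkers().
#
#   stream = ''.join(encodemarkers(markers, addheader=True,
#                                  version=_fm1version))
#   version, decoded = _readmarkers(stream)   # version == _fm1version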


class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later versions
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the precursors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]
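
# Example (illustrative sketch): wrapping one raw tuple from the store.
# 'repo' and 'rawdata' (a 6-tuple as stored in repo.obsstore) are assumed
# to exist.
#
#   m = marker(repo, rawdata)
#   m.precnode()    # the node this marker obsoletes
#   m.succnodes()   # its replacements; () means the node was pruned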

@util.nogc
def _addsuccessors(successors, markers):
    for mark in markers:
        successors.setdefault(mark[0], set()).add(mark)

@util.nogc
def _addprecursors(precursors, markers):
    for mark in markers:
        for suc in mark[1]:
            precursors.setdefault(suc, set()).add(mark)

@util.nogc
def _addchildren(children, markers):
    for mark in markers:
        parents = mark[5]
        if parents is not None:
            for p in parents:
                children.setdefault(p, set()).add(mark)

def _checkinvalidmarkers(markers):
    """search for markers with invalid data and raise an error if needed

    Exists as a separate function to allow the evolve extension a more
    subtle handling.
    """
    for mark in markers:
        if node.nullid in mark[1]:
            raise error.Abort(_('bad obsolescence marker detected: '
                                'invalid successors nullid'))

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x]   -> set(markers on precursors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec:    nodeid, precursor changesets
    # succs:   tuple of nodeid, successor changesets (0-N length)
    # flag:    integer, flag field carrying modifier for the markers (see doc)
    # meta:    binary blob, encoded metadata dictionary
    # date:    (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    #          None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related data
        self.caches = {}
        self.svfs = svfs
        self._version = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        if not self._cached('_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
                pass
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating markers you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker has been added, False if the marker
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = util.makedate()
            else:
                date = util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))
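
    # Example (illustrative sketch): recording that node 'a' was rewritten
    # into node 'b'; 'repo', 'a' and 'b' are assumed to exist and the repo
    # is assumed to be locked. Human-written code should prefer the
    # module-level createmarkers() helper.
    #
    #   tr = repo.transaction('add-obsmarker')
    #   try:
    #       repo.obsstore.create(tr, a, (b,), ui=repo.ui)
    #       tr.close()
    #   finally:
    #       tr.release()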

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicates.
        Return the number of new markers."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in encodemarkers(new, offset == 0, self._version):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._addmarkers(new)
            # new markers *may* have changed several sets. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _all(self):
        data = self.svfs.tryread('obsstore')
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def precursors(self):
        precursors = {}
        _addprecursors(precursors, self._all)
        return precursors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        return attr in self.__dict__

    def _addmarkers(self, markers):
        markers = list(markers) # to allow repeated iteration
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('precursors'):
            _addprecursors(self.precursors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use this changeset as a successor
        - prune markers of direct children of this changeset
        - recursive application of the two rules on precursors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers

def _filterprunes(markers):
    """return a set with no prune markers"""
    return set(m for m in markers if m[1])
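
# Illustrative note (not part of the original source): a prune marker has
# an empty successor tuple (m[1] == ()), so _filterprunes() keeps only
# markers that rewrite their precursor into at least one successor.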

def exclusivemarkers(repo, nodes):
    """set of markers relevant to "nodes" but no other locally-known nodes

    This function computes the set of markers "exclusive" to a locally-known
    node. This means we walk the markers starting from <nodes> until we reach
    a locally-known precursor outside of <nodes>. Elements of <nodes> with
    locally-known successors outside of <nodes> are ignored (since their
    precursors markers are also relevant to these successors).

    For example:

    # (A0 rewritten as A1)
    #
    # A0 <-1- A1 # Marker "1" is exclusive to A1

    or

    # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
    #
    # <-1- A0 <-2- AX <-3- A1 # Markers "2,3" are exclusive to A1

    or

    # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
    #
    #          <-2- A1 # Marker "2" is exclusive to A0,A1
    #         /
    # <-1- A0
    #         \
    #          <-3- A2 # Marker "3" is exclusive to A0,A2
    #
    # in addition:
    #
    # Markers "2,3" are exclusive to A1,A2
    # Markers "1,2,3" are exclusive to A0,A1,A2

    See test/test-obsolete-bundle-strip.t for more examples.

    An example usage is strip. When stripping a changeset, we also want to
    strip the markers exclusive to this changeset. Otherwise we would have
    "dangling" obsolescence markers from its precursors: obsolescence markers
    marking a node as obsolete without any successors available locally.

    As for relevant markers, the prune markers for children will be followed.
    Of course, they will only be followed if the pruned child is
    locally-known, since the prune markers are relevant to the pruned node.
    However, while prune markers are considered relevant to the parent of the
    pruned changesets, prune markers for locally-known changesets (with no
    successors) are considered exclusive to the pruned nodes. This allows
    stripping the prune markers (with the rest of the exclusive chain)
    alongside the pruned changesets.
    """
    # running on a filtered repository would be dangerous as markers could be
    # reported as exclusive when they are relevant for other filtered nodes.
    unfi = repo.unfiltered()

    # shortcut to various useful items
    nm = unfi.changelog.nodemap
    precursorsmarkers = unfi.obsstore.precursors
    successormarkers = unfi.obsstore.successors
    childrenmarkers = unfi.obsstore.children

    # exclusive markers (return of the function)
    exclmarkers = set()
    # we need fast membership testing
    nodes = set(nodes)
    # looking for head in the obshistory
    #
    # XXX we are ignoring all issues in regard with cycle for now.
    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
    stack.sort()
    # nodes already stacked
    seennodes = set(stack)
    while stack:
        current = stack.pop()
        # fetch precursors markers
        markers = list(precursorsmarkers.get(current, ()))
        # extend the list with prune markers
        for mark in successormarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # and markers from children (looking for prune)
        for mark in childrenmarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # traverse the markers
        for mark in markers:
            if mark in exclmarkers:
                # markers already selected
                continue

            # If the marker is about the current node, select it
            #
            # (this delays the addition of markers from children)
            if mark[1] or mark[0] == current:
                exclmarkers.add(mark)

            # should we keep traversing through the precursors?
            prec = mark[0]

            # nodes in the stack or already processed
            if prec in seennodes:
                continue

            # is this a locally known node ?
            known = prec in nm
            # if locally-known and not in the <nodes> set, the traversal
            # stops here.
            if known and prec not in nodes:
                continue

            # do not keep going if there are unselected markers pointing to
            # this node. If we end up traversing these unselected markers
            # later the node will be taken care of at that point.
            precmarkers = _filterprunes(successormarkers.get(prec))
            if precmarkers.issubset(exclmarkers):
                seennodes.add(prec)
                stack.append(prec)

    return exclmarkers

def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None
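
# Example (illustrative): with the formats mapping above (versions 0 and
# 1), commonversion([4, 1, 0]) == 1 and commonversion([4, 3]) is None.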

# arbitrarily picked to fit into the 8K limit from HTTP servers
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300
882
882
def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(data)
    return keys
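
# A standalone sketch of the greedy chunking above, with a hypothetical
# 10-byte limit standing in for _maxpayload: a blob that would overflow the
# current chunk starts a new one, and each finished chunk becomes one pushkey
# value ('dump0', 'dump1', ...).
#
#   >>> blobs = ['aaaa', 'bbbb', 'cccc']    # stand-ins for encoded markers
#   >>> parts, cur, curlen = [], None, 20   # 20 > limit forces a first part
#   >>> for blob in blobs:
#   ...     if len(blob) + curlen > 10:
#   ...         cur, curlen = [], 0
#   ...         parts.append(cur)
#   ...     cur.append(blob)
#   ...     curlen += len(blob)
#   >>> [''.join(p) for p in parts]
#   ['aaaabbbb', 'cccc']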

def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(sorted(repo.obsstore))

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = util.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            repo.invalidatevolatilesets()
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()
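
# A rough sketch of the round trip these two functions implement (the
# receiving repository name is hypothetical): the sender publishes its
# markers as 'dump*' keys, the receiver replays each base85 chunk through
# pushmarker, which merges it under lock within a transaction.
#
#   >>> keys = listmarkers(repo)                   # {'dump0': <base85 data>}
#   >>> for key, value in sorted(keys.items()):
#   ...     pushmarker(otherrepo, key, '', value)  # hypothetical peer repo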

def getmarkers(repo, nodes=None, exclusive=False):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    if nodes is None:
        rawmarkers = repo.obsstore
    elif exclusive:
        rawmarkers = exclusivemarkers(repo, nodes)
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for markerdata in rawmarkers:
        yield marker(repo, markerdata)

def relevantmarkers(repo, node):
    """all obsolete markers relevant to some revision"""
    for markerdata in repo.obsstore.relevantmarkers(node):
        yield marker(repo, markerdata)


def precursormarkers(ctx):
    """obsolete markers marking this changeset as a successor"""
    for data in ctx.repo().obsstore.precursors.get(ctx.node(), ()):
        yield marker(ctx.repo(), data)

def successormarkers(ctx):
    """obsolete markers making this changeset obsolete"""
    for data in ctx.repo().obsstore.successors.get(ctx.node(), ()):
        yield marker(ctx.repo(), data)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore markers flagged with the specified flag
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)
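
# A minimal sketch of the traversal above on fake string "nodes", using a
# stand-in for the obsstore (markers are (precursor, successors, flags, ...)
# tuples, so mark[1] is the successors tuple and mark[2] the flags field):
#
#   >>> class fakestore(object):
#   ...     successors = {'A': [('A', ('B',), 0)],
#   ...                   'B': [('B', ('C', 'D'), 0)]}
#   >>> sorted(allsuccessors(fakestore(), ['A']))
#   ['A', 'B', 'C', 'D']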

def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursor of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""

    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            # ignore markers flagged with the specified flag
            if mark[2] & ignoreflags:
                continue
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)

def foreground(repo, nodes):
    """return all nodes in the "foreground" of other nodes

    The foreground of a revision is anything reachable using the parent ->
    children or precursor -> successor relations. It is very similar to
    "descendant" but augmented with obsolescence information.

    Beware that obsolescence cycles may appear in complex situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)
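
# The loop above is a fixed-point iteration: each pass first widens the set
# with obsolescence successors, then closes it again under descendants via the
# '%ln::' revset, and stops once a pass adds nothing. A toy trace with
# hypothetical nodes, where A was rewritten into A' which has a child B':
#
#   pass 1: {A}          -> successors add A'  -> descendants give {A, A', B'}
#   pass 2: {A, A', B'}  -> no new successors  -> same set, loop terminates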


def successorssets(repo, initialnode, cache=None):
    """Return the set of all latest successors of initial nodes

    The successors set of a changeset A is the group of revisions that
    succeed A. It succeeds A as a consistent whole, each revision being only a
    partial replacement. The successors set contains non-obsolete changesets
    only.

    This function returns the full list of successor sets, which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a
    valid successors set. Note that (A,) may be a valid successors set for
    changeset A (see below).

    In most cases, a changeset A will have a single element (e.g. the
    changeset A is replaced by A') in its successors set. Though, it is also
    common for a changeset A to have no elements in its successors set (e.g.
    the changeset has been pruned). Therefore, the returned list of successors
    sets will be [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e.
    [(A',), (A'',)].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successors
    set will contain itself only, i.e. [(A,)].

    Finally, successors unknown locally are considered to be pruned (obsoleted
    without any successors).

    The optional `cache` parameter is a dictionary that may contain
    precomputed successors sets. It is meant to reuse the computation of a
    previous call to `successorssets` when multiple calls are made at the same
    time. The cache dictionary is updated in place. The caller is responsible
    for its life span. Code that makes multiple calls to `successorssets`
    *must* use this cache mechanism or suffer terrible performance.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of the above list for fast loop detection
    # elements added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list acts as our call stack. Every node we search
    # successors sets for is stacked there.
    #
    # The `stackedset` is the set version of this stack, used to check if a
    # node is already stacked. This check is used to detect cycles and prevent
    # infinite loops.
    #
    # successors sets of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know the successors sets of the direct successors of
        #    CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know the successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid last successor.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successor
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            # For each direct successor of CURRENT, we check whether its
            # successors sets are known. If they are not, we stack the
            # unknown node and proceed to the next iteration of the while
            # loop. (case 3)
            #
            # During this step, we may detect obsolescence cycles: a node
            # with unknown successors sets but already in the call stack.
            # In such a situation, we arbitrarily set the successors sets of
            # the node to nothing (node pruned) to break the cycle.
            #
            # If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   a single successor is a standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed the successors
                            # sets of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # The successors set contributed by each marker depends on the
                # successors sets of all its "successors" nodes.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # cartesian product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicate entries in a successors
                                    # set; the first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicates and subsets
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse()  # put small successors sets first
                cache[current] = final
    return cache[initialnode]
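
# A standalone sketch of the product step above: if a split marker has two
# successors and each successor has its own successors sets, every combination
# becomes a candidate set, with later duplicates dropped ("first entry wins").
# The cache contents are hypothetical:
#
#   >>> cache = {'B': [['B1'], ['B2']], 'C': [['C1']]}
#   >>> markss = [[]]
#   >>> for suc in ('B', 'C'):
#   ...     productresult = []
#   ...     for prefix in markss:
#   ...         for suffix in cache[suc]:
#   ...             newss = list(prefix)
#   ...             for part in suffix:
#   ...                 if part not in newss:
#   ...                     newss.append(part)
#   ...             productresult.append(newss)
#   ...     markss = productresult
#   >>> markss
#   [['B1', 'C1'], ['B2', 'C1']]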

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return decorator

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]
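
# How the registry is used, as a sketch: a function registered under a name
# is looked up lazily by getrevs and its result memoized on
# repo.obsstore.caches. The 'example' name below is hypothetical, not one of
# the real sets defined further down ('obsolete', 'unstable', 'suspended',
# 'extinct', 'bumped', 'divergent').
#
#   >>> @cachefor('example')
#   ... def _computeexampleset(repo):
#   ...     return frozenset()
#   >>> getrevs(repo, 'example')   # computed once, then served from the cache
#   frozenset([])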

# To be simple we need to invalidate obsolescence caches when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists on
    the repo.

    (We could be smarter here given the exact event that triggered the cache
    clearing)"""
    # only clear caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
-    obs = set()
     getnode = repo.changelog.node
     notpublic = repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
-    for r in notpublic:
-        if getnode(r) in repo.obsstore.successors:
-            obs.add(r)
+    isobs = repo.obsstore.successors.__contains__
+    obs = set(r for r in notpublic if isobs(getnode(r)))
     return obs

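# This hunk is the optimization named in the commit message: the successors
# membership test is bound once (isobs) instead of re-resolving
# repo.obsstore.successors on every iteration, and the filtered set is built
# by the set() constructor from a generator rather than by repeated obs.add()
# calls in interpreted bytecode. A standalone sketch of the two shapes
# (stand-in data, Python 2 as in Mercurial of this era):
#
#   >>> succ = dict.fromkeys(xrange(1000))
#   >>> isobs = succ.__contains__
#   >>> new = set(r for r in xrange(2000) if isobs(r))    # new shape
#   >>> old = set()
#   >>> for r in xrange(2000):                            # old shape
#   ...     if r in succ:
#   ...         old.add(r)
#   >>> new == old
#   True
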
@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    revs = [(ctx.rev(), ctx) for ctx in
            repo.set('(not public()) and (not obsolete())')]
    revs.sort(key=lambda x: x[0])
    unstable = set()
    for rev, ctx in revs:
        # A rev is unstable if one of its parents is obsolete or unstable;
        # this works since we traverse following growing rev order
        if any((x.obsolete() or (x.rev() in unstable))
               for x in ctx.parents()):
            unstable.add(rev)
    return unstable

@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')

@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase  # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    for ctx in repo.set('(not public()) and (not obsolete())'):
        rev = ctx.rev()
        # We only evaluate mutable, non-obsolete revisions
        node = ctx.node()
        # (future) A cache of precursors may be worth it if split is very
        # common
        for pnode in allprecursors(repo.obsstore, [node],
                                   ignoreflags=bumpedfix):
            prev = torev(pnode)  # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public precursor
                bumped.add(rev)
                break  # Next draft!
    return bumped

@cachefor('divergent')
def _computedivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue  # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent


def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                  operation=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[, {metadata}])
    tuples. `old` and `news` are changectx. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function.

    Trying to obsolete a public changeset will raise an exception.

    The current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    useoperation = repo.ui.configbool('experimental',
                                      'evolution.track-operation',
                                      False)
    if useoperation and operation:
        metadata['operation'] = operation
    tr = repo.transaction('add-obsolescence-marker')
    try:
        markerargs = []
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise error.Abort(_("cannot obsolete public changeset: %s")
                                  % prec,
                                  hint="see 'hg help phases' for details")
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise error.Abort(_("changeset %s cannot obsolete itself")
                                  % prec)

            # Creating the marker causes the hidden cache to become invalid,
            # which causes recomputation when we ask for prec.parents() above.
            # Resulting in n^2 behavior. So let's prepare all of the args
            # first, then create the markers.
            markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata,
                                 ui=repo.ui)
        repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
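
# A usage sketch (the contexts are hypothetical): record that `old` was
# rewritten into `new`, with per-marker metadata merged over the global
# metadata; the operation name is only stored when
# experimental.evolution.track-operation is enabled.
#
#   >>> old = repo['badc0ffee']    # hypothetical changectx
#   >>> new = repo['deadbeef']
#   >>> createmarkers(repo, [(old, (new,), {'note': 'fixed a typo'})],
#   ...               operation='amend')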