obsstore: separate marker parsing from obsstore reading...
Jun Wu
r32690:3fad59fa default
@@ -1,1428 +1,1432 @@
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "precursor" and possible
replacements are called "successors". Markers that used changeset X as
a precursor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "precursor markers of Y" because they hold
information about the precursors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

    (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. Marker formats depend on the version.
See the comment associated with each format for details.

"""
from __future__ import absolute_import

import errno
import struct

from .i18n import _
from . import (
    error,
    node,
    phases,
    policy,
    util,
)

parsers = policy.importmod(r'parsers')

_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# Options for obsolescence
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'

def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    result = set(repo.ui.configlist('experimental', 'evolution'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't
    # been set but _enabled is true.
    if len(result) == 0 and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        not createmarkersopt in result):
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return option in result

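# Illustrative usage (a sketch, not part of the original file): callers
# typically gate marker creation on the relevant option, e.g.
#
#     if obsolete.isenabled(repo, obsolete.createmarkersopt):
#         repo.obsstore.create(tr, prec, succs=(succ,))
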
### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it's a successor of a public changeset
#
# o A' (bumped)
# |`:
# | o A
# |/
# o Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
# o Ad
# |`:
# | x A'
# |'|
# o | A
# |/
# o Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker, <A', (Ad,)>.
# This flag means that the successors express the changes between the public
# and bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
usingsha256 = 2

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)

def _fm0readmarkers(data, off):
    # Loop on markers
    l = len(data)
    while off + _fm0fsize <= l:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)

def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents, 1):
            metadata['p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata key")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata value")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            key, value = l.split(':')
            d[key] = value
    return d

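# A minimal round-trip sketch (not part of the original module): encode one
# made-up marker with the version-0 encoder above and parse it back with
# _fm0readmarkers. The node values are fabricated 20-byte strings.
def _fm0exampleroundtrip():
    prec = '\x11' * 20
    succ = '\x22' * 20
    mark = (prec, (succ,), 0, (('user', 'alice'),), (0.0, 0), None)
    data = _fm0encodeonemarker(mark)
    # the parser yields the same tuple back (date is re-read from metadata)
    return list(_fm0readmarkers(data, 0))
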
## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#   0: parents data stored but no parent,
#   1: one parent stored,
#   2: two parents stored,
#   3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: precursor changeset identifier.
#
# - N*(20 or 32) bytes: successors changesets identifiers.
#
# - P*(20 or 32) bytes: parents of the precursor changeset.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize('BB')

def _fm1purereadmarkers(data, off):
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    stop = len(data) - _fm1fsize
    ufixed = struct.Struct(_fm1fixed).unpack

    while off <= stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)

def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)

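# A sketch (not in the original file): round-trip a fabricated marker through
# the pure-Python version-1 encoder/parser pair above. The timezone offset
# must be a multiple of 60 seconds to survive the minutes-granularity field.
def _fm1exampleroundtrip():
    prec = '\x11' * 20
    succ = '\x22' * 20
    mark = (prec, (succ,), 0, (('user', 'alice'),), (0.0, 3600), None)
    data = _fm1encodeonemarker(mark)
    return list(_fm1purereadmarkers(data, 0))
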
def _fm1readmarkers(data, off):
    native = getattr(parsers, 'fm1readmarkers', None)
    if not native:
        return _fm1purereadmarkers(data, off)
    stop = len(data) - _fm1fsize
    return native(data, off, stop)

# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}

def _readmarkerversion(data):
    return _unpack('>B', data[0:1])[0]

@util.nogc
def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    diskversion = _readmarkerversion(data)
    off = 1
    if diskversion not in formats:
        msg = _('parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    return diskversion, formats[diskversion][0](data, off)

def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(); it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield _pack('>B', version)
    for marker in markers:
        yield encodeone(marker)


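# Sketch (not in the original module): serialize markers with a version
# header, as the obsstore file does, then parse them back. The marker here
# is a fabricated prune marker (empty successors tuple).
def _examplestreamroundtrip():
    mark = ('\x11' * 20, (), 0, (), (0.0, 0), None)
    data = ''.join(encodemarkers([mark], addheader=True,
                                 version=_fm1version))
    version, markers = _readmarkers(data)
    return version, list(markers)

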
class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in a later
        # version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the precursors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]

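# Sketch (not part of upstream): wrapping a raw marker tuple. The repo
# argument is only stored for now (see __init__ above), so None suffices
# for a standalone illustration with a fabricated node.
def _examplemarkerwrapper():
    raw = ('\x11' * 20, (), 0, (('user', 'alice'),), (0.0, 0), None)
    m = marker(None, raw)
    return m.precnode(), m.succnodes(), m.metadata(), m.date()
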
@util.nogc
def _addsuccessors(successors, markers):
    for mark in markers:
        successors.setdefault(mark[0], set()).add(mark)

@util.nogc
def _addprecursors(precursors, markers):
    for mark in markers:
        for suc in mark[1]:
            precursors.setdefault(suc, set()).add(mark)

@util.nogc
def _addchildren(children, markers):
    for mark in markers:
        parents = mark[5]
        if parents is not None:
            for p in parents:
                children.setdefault(p, set()).add(mark)

def _checkinvalidmarkers(markers):
    """search for markers with invalid data and raise an error if needed

    Exists as a separate function to allow the evolve extension to do more
    subtle handling.
    """
    for mark in markers:
        if node.nullid in mark[1]:
            raise error.Abort(_('bad obsolescence marker detected: '
                                'invalid successors nullid'))

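# Sketch (fabricated nodes, not in the original file): how one marker lands
# in the three mappings built by the helpers above.
def _exampleindexes():
    a, b, p = '\x11' * 20, '\x22' * 20, '\x33' * 20
    mark = (a, (b,), 0, (), (0.0, 0), (p,))
    succs, precs, childs = {}, {}, {}
    _addsuccessors(succs, [mark])  # keyed by precursor: succs[a]
    _addprecursors(precs, [mark])  # keyed by successor: precs[b]
    _addchildren(childs, [mark])   # keyed by parent of precursor: childs[p]
    return succs, precs, childs
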
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with several mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on precursors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec: nodeid, precursor changesets
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    #          None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related data
        self.caches = {}
        self.svfs = svfs
        self._version = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        if not self._cached('_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
                pass
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
        """obsolete: add a new obsolete marker

        * ensure it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating markers, you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker has been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = util.makedate()
            else:
                date = util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicates.
        Return the number of new markers."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in encodemarkers(new, offset == 0, self._version):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._addmarkers(new)
            # new markers *may* have changed several sets. invalidate the
            # caches.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        return self.svfs.tryread('obsstore')

    @propertycache
    def _all(self):
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def precursors(self):
        precursors = {}
        _addprecursors(precursors, self._all)
        return precursors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        return attr in self.__dict__

    def _addmarkers(self, markers):
        markers = list(markers)  # to allow repeated iteration
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('precursors'):
            _addprecursors(self.precursors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use these changesets as successors
        - prune markers of direct children of these changesets
        - recursive application of the two rules on precursors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers

def _filterprunes(markers):
    """return a set with no prune markers"""
    return set(m for m in markers if m[1])

def exclusivemarkers(repo, nodes):
    """set of markers relevant to "nodes" but no other locally-known nodes

    This function computes the set of markers "exclusive" to a locally-known
    node. This means we walk the markers starting from <nodes> until we reach
    a locally-known precursor outside of <nodes>. Elements of <nodes> with
    locally-known successors outside of <nodes> are ignored (since their
    precursors markers are also relevant to these successors).

    For example:

        # (A0 rewritten as A1)
        #
        # A0 <-1- A1 # Marker "1" is exclusive to A1

    or

        # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
        #
        # <-1- A0 <-2- AX <-3- A1 # Markers "2,3" are exclusive to A1

    or

        # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
        #
        #      <-2- A1 # Marker "2" is exclusive to A0,A1
        #     /
        # <-1- A0
        #     \
        #      <-3- A2 # Marker "3" is exclusive to A0,A2
        #
        # in addition:
        #
        # Markers "2,3" are exclusive to A1,A2
        # Markers "1,2,3" are exclusive to A0,A1,A2

    See test/test-obsolete-bundle-strip.t for more examples.

    An example usage is strip. When stripping a changeset, we also want to
    strip the markers exclusive to this changeset. Otherwise we would have
    "dangling" obsolescence markers from its precursors: obsolescence markers
    marking a node as obsolete without any successors available locally.

    As for relevant markers, the prune markers for children will be followed.
    Of course, they will only be followed if the pruned child is
    locally-known, since the prune markers are relevant to the pruned node.
    However, while prune markers are considered relevant to the parent of the
    pruned changesets, prune markers for locally-known changesets (with no
    successors) are considered exclusive to the pruned nodes. This allows
    stripping the prune markers (with the rest of the exclusive chain)
    alongside the pruned changesets.
    """
    # running on a filtered repository would be dangerous as markers could be
    # reported as exclusive when they are relevant for other filtered nodes.
    unfi = repo.unfiltered()

    # shortcut to various useful items
    nm = unfi.changelog.nodemap
    precursorsmarkers = unfi.obsstore.precursors
    successormarkers = unfi.obsstore.successors
    childrenmarkers = unfi.obsstore.children

    # exclusive markers (return of the function)
    exclmarkers = set()
    # we need fast membership testing
    nodes = set(nodes)
    # looking for head in the obshistory
    #
    # XXX we are ignoring all issues in regard to cycles for now.
    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
    stack.sort()
    # nodes already stacked
    seennodes = set(stack)
    while stack:
        current = stack.pop()
        # fetch precursors markers
        markers = list(precursorsmarkers.get(current, ()))
        # extend the list with prune markers
        for mark in successormarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # and markers from children (looking for prune)
        for mark in childrenmarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # traverse the markers
        for mark in markers:
            if mark in exclmarkers:
                # markers already selected
                continue

            # If the marker is about the current node, select it
            #
            # (this delays the addition of markers from children)
            if mark[1] or mark[0] == current:
                exclmarkers.add(mark)

            # should we keep traversing through the precursors?
            prec = mark[0]

            # nodes in the stack or already processed
            if prec in seennodes:
                continue

            # is this a locally known node?
            known = prec in nm
            # if locally-known and not in the <nodes> set, the traversal
            # stops here.
            if known and prec not in nodes:
                continue

            # do not keep going if there are unselected markers pointing to
            # this node. If we end up traversing these unselected markers
            # later the node will be taken care of at that point.
            precmarkers = _filterprunes(successormarkers.get(prec))
            if precmarkers.issubset(exclmarkers):
                seennodes.add(prec)
                stack.append(prec)

    return exclmarkers

def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None

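# Sketch: with local formats {0, 1}, commonversion([_fm1version, 5]) picks
# _fm1version, while commonversion([5, 6]) returns None (the peer version
# numbers here are made up).
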
879 # arbitrary picked to fit into 8K limit from HTTP server
883 # arbitrary picked to fit into 8K limit from HTTP server
880 # you have to take in account:
884 # you have to take in account:
881 # - the version header
885 # - the version header
882 # - the base85 encoding
886 # - the base85 encoding
883 _maxpayload = 5300
887 _maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(data)
    return keys
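
# A minimal standalone sketch of the same chunking strategy (the helper name
# is hypothetical), using plain byte strings in place of encoded markers:
#
#     def _chunk(blobs, maxpayload=5300):
#         parts, current, currentlen = [], [], maxpayload * 2
#         for blob in blobs:
#             if len(blob) + currentlen > maxpayload:
#                 current = []
#                 currentlen = 0
#                 parts.append(current)
#             current.append(blob)
#             currentlen += len(blob)
#         return parts
#
# Each part stays under the payload limit as long as no single blob exceeds
# it, matching the behavior of ``_pushkeyescape`` above.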

def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(sorted(repo.obsstore))

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = util.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            repo.invalidatevolatilesets()
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()

def getmarkers(repo, nodes=None, exclusive=False):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    if nodes is None:
        rawmarkers = repo.obsstore
    elif exclusive:
        rawmarkers = exclusivemarkers(repo, nodes)
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for markerdata in rawmarkers:
        yield marker(repo, markerdata)
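
# Illustrative usage sketch (``repo`` and ``nodes`` are assumed to exist):
#
#     for m in getmarkers(repo):                         # every known marker
#         ...
#     for m in getmarkers(repo, nodes):                  # relevant markers
#         ...
#     for m in getmarkers(repo, nodes, exclusive=True):  # exclusive markers
#         ...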

def relevantmarkers(repo, node):
    """all obsolete markers relevant to some revision"""
    for markerdata in repo.obsstore.relevantmarkers(node):
        yield marker(repo, markerdata)


def precursormarkers(ctx):
    """obsolete marker marking this changeset as a successor"""
    for data in ctx.repo().obsstore.precursors.get(ctx.node(), ()):
        yield marker(ctx.repo(), data)

def successormarkers(ctx):
    """obsolete marker making this changeset obsolete"""
    for data in ctx.repo().obsstore.successors.get(ctx.node(), ()):
        yield marker(ctx.repo(), data)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore markers flagged with the specified flag
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)
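
# Illustrative sketch: ``allsuccessors`` only needs an object exposing a
# ``successors`` mapping from node to markers, so a toy stand-in is enough
# to show the traversal (the class name and single-letter "nodes" below are
# hypothetical):
#
#     class fakestore(object):
#         # A rewritten into B, then B split into C and D
#         successors = {'A': [('A', ('B',), 0)],
#                       'B': [('B', ('C', 'D'), 0)]}
#
#     sorted(allsuccessors(fakestore(), ['A']))
#     # -> ['A', 'B', 'C', 'D']   (initial nodes are included)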

def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursor of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""

    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            # ignore markers flagged with the specified flag
            if mark[2] & ignoreflags:
                continue
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)

def foreground(repo, nodes):
    """return all nodes in the "foreground" of other nodes

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relations. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that obsolescence cycles may result in complex situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)
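
# Illustrative usage sketch (``repo`` and ``ctx`` are assumed to exist): the
# foreground of a changeset covers its descendants plus the descendants of
# its successors, so a caller interested in "everything built on top of this
# change, including its rewrites" could do:
#
#     nodes = foreground(repo, [ctx.node()])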


def successorssets(repo, initialnode, cache=None):
    """Return the set of all latest successors of the initial node

    The successors set of a changeset A is the group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. The successors set contains non-obsolete changesets only.

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successors set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successors
    set will contain itself only, i.e. [(A,)].

    Finally, successors unknown locally are considered to be pruned (obsoleted
    without any successors).

    The optional `cache` parameter is a dictionary that may contain precomputed
    successors sets. It is meant to reuse the computation of a previous call to
    `successorssets` when multiple calls are made at the same time. The cache
    dictionary is updated in place. The caller is responsible for its life
    span. Code that makes multiple calls to `successorssets` *must* use this
    cache mechanism or suffer terrible performance.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # elements added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list acts as our call stack. Every node we search
    # successors sets for is stacked there.
    #
    # The `stackedset` is a set version of this stack used to check if a node
    # is already stacked. This check is used to detect cycles and prevent
    # infinite loops.
    #
    # successors sets of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors
    # sets for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors set of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid last successor.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successor.
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successor of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrarily set the successors sets
            #     of the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            #
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   a single successor is a standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # The successors set contributed by each marker depends on the
                # successors sets of all its "successors" nodes.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicate entries in a successors
                                    # set; the first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse()  # put small successors sets first
                cache[current] = final
    return cache[initialnode]
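
# A worked example (hypothetical toy history): suppose A was split into
# (B, C) by one marker and independently rewritten into D by another, and D
# was later pruned. The markers are:
#
#     (A, (B, C)), (A, (D,)), (D, ())
#
# Then, reusing one cache across calls as the docstring recommends:
#
#     cache = {}
#     successorssets(repo, A, cache)   # -> [(B, C)]
#
# The D branch contributes nothing because D's own successors set is empty
# (pruned), so its Cartesian product with the marker's prefix is empty.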

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return decorator
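
# Illustrative sketch (the set name below is hypothetical): any function
# registered this way is invoked lazily as ``cachefuncs[name](repo)`` by
# ``getrevs`` below, and its result is cached on the obsstore.
#
#     @cachefor('example')
#     def _computeexampleset(repo):
#         """the set of revisions in the hypothetical 'example' set"""
#         return frozenset()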

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]
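
# Illustrative usage sketch (``repo`` assumed): the first call computes and
# caches the set; later calls reuse it until ``clearobscaches`` runs.
#
#     obs = getrevs(repo, 'obsolete')        # set of obsolete revisions
#     unstable = getrevs(repo, 'unstable')   # set of unstable revisions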

# To be simple we need to invalidate obsolescence caches when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence-related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists on
    the repo.

    (We could be smarter here given the exact event that triggered the cache
    clearing)"""
    # only clear caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    getnode = repo.changelog.node
    notpublic = repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
    isobs = repo.obsstore.successors.__contains__
    obs = set(r for r in notpublic if isobs(getnode(r)))
    return obs

@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    revs = [(ctx.rev(), ctx) for ctx in
            repo.set('(not public()) and (not obsolete())')]
    revs.sort(key=lambda x: x[0])
    unstable = set()
    for rev, ctx in revs:
        # A rev is unstable if one of its parents is obsolete or unstable.
        # This works since we traverse in growing rev order.
        if any((x.obsolete() or (x.rev() in unstable))
               for x in ctx.parents()):
            unstable.add(rev)
    return unstable

@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')


@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase  # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    for ctx in repo.set('(not public()) and (not obsolete())'):
        rev = ctx.rev()
        # We only evaluate mutable, non-obsolete revisions
        node = ctx.node()
        # (future) A cache of precursors may be worth it if splits are very
        # common
        for pnode in allprecursors(repo.obsstore, [node],
                                   ignoreflags=bumpedfix):
            prev = torev(pnode)  # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public precursor
                bumped.add(rev)
                break  # Next draft!
    return bumped

@cachefor('divergent')
def _computedivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision."""
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue  # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent


def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                  operation=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuples. `old` and `news` are changectx objects. metadata is an optional
    dictionary containing metadata for this marker only. It is merged with
    the global metadata specified through the `metadata` argument of this
    function.

    Trying to obsolete a public changeset will raise an exception.

    The current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    useoperation = repo.ui.configbool('experimental',
                                      'evolution.track-operation',
                                      False)
    if useoperation and operation:
        metadata['operation'] = operation
    tr = repo.transaction('add-obsolescence-marker')
    try:
        markerargs = []
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise error.Abort(_("cannot obsolete public changeset: %s")
                                  % prec,
                                  hint="see 'hg help phases' for details")
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise error.Abort(_("changeset %s cannot obsolete itself")
                                  % prec)

            # Creating the marker causes the hidden cache to become invalid,
            # which causes recomputation when we ask for prec.parents() above.
            # Resulting in n^2 behavior. So let's prepare all of the args
            # first, then create the markers.
            markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata,
                                 ui=repo.ui)
        repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
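
# Illustrative usage sketch (``repo``, ``old`` and ``new`` are assumed to be
# changectx objects; the 'note' metadata key is hypothetical): record that
# ``old`` was rewritten into ``new``, with a per-marker note. The operation
# name is only stored when experimental.evolution.track-operation is set.
#
#     createmarkers(repo, [(old, (new,), {'note': 'amended'})],
#                   operation='amend')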