obsolete: refactor function for getting obsolete options...
Gregory Szorc
r37149:d30810d0 default
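This changeset extracts the option-resolution logic out of isenabled() into a
new getoptions() helper, so the state of all three obsolescence options
(createmarkers, allowunstable, exchange) is computed, and their consistency
checked, in one place; isenabled() becomes a thin lookup into the resulting
dict. A minimal usage sketch of the refactored API (describe_evolution is a
hypothetical caller, not part of the change):

    from mercurial import obsolete

    def describe_evolution(repo):
        # getoptions() resolves every obsolescence option in one pass and
        # aborts if 'createmarkers' is off while another option is on.
        opts = obsolete.getoptions(repo)
        for name in (obsolete.createmarkersopt,
                     obsolete.allowunstableopt,
                     obsolete.exchangeopt):
            print('%s: %s' % (name, opts[name]))
        # isenabled() is now a thin wrapper over the same dict.
        assert (obsolete.isenabled(repo, obsolete.exchangeopt) ==
                opts[obsolete.exchangeopt])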
@@ -1,1014 +1,1023 @@
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "predecessor" and possible
replacements are called "successors". Markers that use changeset X as
a predecessor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "predecessor markers of Y" because they hold
information about the predecessors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

  (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. Marker formats depend on the version.
See the comment associated with each format for details.

"""
from __future__ import absolute_import

import errno
import struct

from .i18n import _
from . import (
    error,
    node,
    obsutil,
    phases,
    policy,
    util,
)
from .utils import dateutil

parsers = policy.importmod(r'parsers')

_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on third party extensions to enable this.
_enabled = False

# Options for obsolescence
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'

def _getoptionvalue(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    configkey = 'evolution.%s' % option
    newconfig = repo.ui.configbool('experimental', configkey)

    # Return the value only if defined
    if newconfig is not None:
        return newconfig

    # Fallback on generic option
    try:
        return repo.ui.configbool('experimental', 'evolution')
    except (error.ConfigError, AttributeError):
        # Fallback on old-fashioned config
        # inconsistent config: experimental.evolution
        result = set(repo.ui.configlist('experimental', 'evolution'))

        if 'all' in result:
            return True

        # For migration purposes, temporarily return true if the config hasn't
        # been set but _enabled is true.
        if len(result) == 0 and _enabled:
            return True

        # Temporary hack for next check
        newconfig = repo.ui.config('experimental', 'evolution.createmarkers')
        if newconfig:
            result.add('createmarkers')

        return option in result
+def getoptions(repo):
+    """Returns dicts showing state of obsolescence features."""
+
+    createmarkersvalue = _getoptionvalue(repo, createmarkersopt)
+    unstablevalue = _getoptionvalue(repo, allowunstableopt)
+    exchangevalue = _getoptionvalue(repo, exchangeopt)
+
+    # createmarkers must be enabled if other options are enabled
+    if ((unstablevalue or exchangevalue) and not createmarkersvalue):
+        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
+                            "if other obsolete options are enabled"))
+
+    return {
+        createmarkersopt: createmarkersvalue,
+        allowunstableopt: unstablevalue,
+        exchangeopt: exchangevalue,
+    }
+
def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
-    createmarkersvalue = _getoptionvalue(repo, createmarkersopt)
-    unstabluevalue = _getoptionvalue(repo, allowunstableopt)
-    exchangevalue = _getoptionvalue(repo, exchangeopt)
-
-    # createmarkers must be enabled if other options are enabled
-    if ((unstabluevalue or exchangevalue) and not createmarkersvalue):
-        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
-                            "if other obsolete options are enabled"))
-
-    return _getoptionvalue(repo, option)
+    return getoptions(repo)[option]

# Creating aliases for marker flags because evolve extension looks for
# bumpedfix in obsolete.py
bumpedfix = obsutil.bumpedfix
usingsha256 = obsutil.usingsha256

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
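#
# An illustrative size calculation (not part of the original source): the
# fixed portion below packs as '>BIB20s', i.e.
#   1 (N, uint8) + 4 (M, uint32) + 1 (flags) + 20 (predecessor) = 26 bytes,
# so a v0 marker with one successor and the metadata string 'date:0 0'
# occupies 26 + 1*20 + 8 = 54 bytes on disk, after the one-byte file header.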
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)

def _fm0readmarkers(data, off, stop):
    # Loop on markers
    while off < stop:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid, drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)

def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents, 1):
            metadata['p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata keys")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata values")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            key, value = l.split(':')
            d[key] = value
    return d

## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: predecessor changeset identifier.
#
# - N*(20 or 32) bytes: successors changesets identifiers.
#
# - P*(20 or 32) bytes: parents of the predecessors changesets.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
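#
# An illustrative size calculation (not part of the original source): the
# fixed portion below packs as '>IdhHBBB20s', i.e.
#   4 (size) + 8 (date) + 2 (tz) + 2 (flags) + 1 (N) + 1 (P) + 1 (M)
#   + 20 (predecessor) = 39 bytes,
# so a SHA-1 marker with one successor, no parent data (P == 3) and a single
# 4-byte key / 7-byte value metadata pair totals 39 + 20 + 2 + 4 + 7 = 72
# bytes, the value recorded in the leading uint32.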
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize(_fm1metapair)

def _fm1purereadmarkers(data, off, stop):
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    ufixed = struct.Struct(_fm1fixed).unpack

    while off < stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)

def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        if lk > 255:
            msg = ('obsstore metadata key cannot be longer than 255 bytes'
                   ' (key "%s" is %u bytes)') % (key, lk)
            raise error.ProgrammingError(msg)
        if lv > 255:
            msg = ('obsstore metadata value cannot be longer than 255 bytes'
                   ' (value "%s" for key "%s" is %u bytes)') % (value, key, lv)
            raise error.ProgrammingError(msg)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)

def _fm1readmarkers(data, off, stop):
    native = getattr(parsers, 'fm1readmarkers', None)
    if not native:
        return _fm1purereadmarkers(data, off, stop)
    return native(data, off, stop)

# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}

def _readmarkerversion(data):
    return _unpack('>B', data[0:1])[0]

@util.nogc
def _readmarkers(data, off=None, stop=None):
    """Read and enumerate markers from raw data"""
    diskversion = _readmarkerversion(data)
    if not off:
        off = 1  # skip 1 byte version number
    if stop is None:
        stop = len(data)
    if diskversion not in formats:
        msg = _('parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    return diskversion, formats[diskversion][0](data, off, stop)

def encodeheader(version=_fm0version):
    return _pack('>B', version)

def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for marker in markers:
        yield encodeone(marker)

@util.nogc
def _addsuccessors(successors, markers):
    for mark in markers:
        successors.setdefault(mark[0], set()).add(mark)

@util.nogc
def _addpredecessors(predecessors, markers):
    for mark in markers:
        for suc in mark[1]:
            predecessors.setdefault(suc, set()).add(mark)

@util.nogc
def _addchildren(children, markers):
    for mark in markers:
        parents = mark[5]
        if parents is not None:
            for p in parents:
                children.setdefault(p, set()).add(mark)
def _checkinvalidmarkers(markers):
    """search for markers with invalid data and raise error if needed

    Exists as a separate function to allow the evolve extension a more
    subtle handling.
    """
    for mark in markers:
        if node.nullid in mark[1]:
            raise error.Abort(_('bad obsolescence marker detected: '
                                'invalid successors nullid'))

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - predecessors[x] -> set(markers on predecessors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on predecessors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec:    nodeid, predecessors changesets
    # succs:   tuple of nodeid, successor changesets (0-N length)
    # flag:    integer, flag field carrying modifier for the markers (see doc)
    # meta:    binary blob, encoded metadata dictionary
    # date:    (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of predecessors
    #          None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence-related data
        self.caches = {}
        self.svfs = svfs
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        if not self._cached(r'_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating markers you want to use the
        `createmarkers` function in this module instead.

        Return True if a new marker has been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = dateutil.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = dateutil.makedate()
            else:
                date = dateutil.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicates.
        Return the number of new markers."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                data = b''.join(encodemarkers(new, offset == 0, self._version))
                f.write(data)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            addedmarkers = transaction.changes.get('obsmarkers')
            if addedmarkers is not None:
                addedmarkers.update(new)
            self._addmarkers(new, data)
            # new markers *may* have changed several sets. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = '%d' % (previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        return self.svfs.tryread('obsstore')

    @propertycache
    def _version(self):
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def predecessors(self):
        predecessors = {}
        _addpredecessors(predecessors, self._all)
        return predecessors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        return attr in self.__dict__

    def _addmarkers(self, markers, rawdata):
        markers = list(markers)  # to allow repeated iteration
        self._data = self._data + rawdata
        self._all.extend(markers)
        if self._cached(r'successors'):
            _addsuccessors(self.successors, markers)
        if self._cached(r'predecessors'):
            _addpredecessors(self.predecessors, markers)
        if self._cached(r'children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use this changeset as a successor
        - prune markers of direct children of this changeset
        - recursive application of the two rules on predecessors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.predecessors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers

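# An illustrative trace of relevantmarkers() (not part of the original
# source): with markers (A, (B,)) and (B, (C,)), relevantmarkers([C]) walks
# predecessor markers backwards: C pulls in (B, (C,)), whose predecessor B
# then pulls in (A, (B,)), so both markers are returned; a prune marker
# (X, ()) on a direct child of C would be picked up by the children rule.
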
def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    defaultformat = ui.configint('format', 'obsstore-version')
    # rely on obsstore class default when possible.
    kwargs = {}
    if defaultformat is not None:
        kwargs[r'defaultformat'] = defaultformat
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **kwargs)
    if store and readonly:
        ui.warn(_('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
    return store

def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for highest version known on both side
    for v in versions:
        if v in formats:
            return v
    return None

# arbitrarily picked to fit into the 8K limit from HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(data)
    return keys

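# An illustrative result shape (not part of the original source):
# _pushkeyescape() returns a mapping like
#     {'dump0': '<base85 chunk>', 'dump1': '<base85 chunk>', ...}
# where each decoded chunk is a self-contained v0 stream (version byte plus
# whole markers), capped near _maxpayload bytes before base85 encoding so a
# chunk fits within the 8K HTTP limit mentioned above.
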
784 def listmarkers(repo):
793 def listmarkers(repo):
785 """List markers over pushkey"""
794 """List markers over pushkey"""
786 if not repo.obsstore:
795 if not repo.obsstore:
787 return {}
796 return {}
788 return _pushkeyescape(sorted(repo.obsstore))
797 return _pushkeyescape(sorted(repo.obsstore))
789
798
790 def pushmarker(repo, key, old, new):
799 def pushmarker(repo, key, old, new):
791 """Push markers over pushkey"""
800 """Push markers over pushkey"""
792 if not key.startswith('dump'):
801 if not key.startswith('dump'):
793 repo.ui.warn(_('unknown key: %r') % key)
802 repo.ui.warn(_('unknown key: %r') % key)
794 return False
803 return False
795 if old:
804 if old:
796 repo.ui.warn(_('unexpected old value for %r') % key)
805 repo.ui.warn(_('unexpected old value for %r') % key)
797 return False
806 return False
798 data = util.b85decode(new)
807 data = util.b85decode(new)
799 with repo.lock(), repo.transaction('pushkey: obsolete markers') as tr:
808 with repo.lock(), repo.transaction('pushkey: obsolete markers') as tr:
800 repo.obsstore.mergemarkers(tr, data)
809 repo.obsstore.mergemarkers(tr, data)
801 repo.invalidatevolatilesets()
810 repo.invalidatevolatilesets()
802 return True
811 return True
803
812
804 # mapping of 'set-name' -> <function to compute this set>
813 # mapping of 'set-name' -> <function to compute this set>
805 cachefuncs = {}
814 cachefuncs = {}
806 def cachefor(name):
815 def cachefor(name):
807 """Decorator to register a function as computing the cache for a set"""
816 """Decorator to register a function as computing the cache for a set"""
808 def decorator(func):
817 def decorator(func):
809 if name in cachefuncs:
818 if name in cachefuncs:
810 msg = "duplicated registration for volatileset '%s' (existing: %r)"
819 msg = "duplicated registration for volatileset '%s' (existing: %r)"
811 raise error.ProgrammingError(msg % (name, cachefuncs[name]))
820 raise error.ProgrammingError(msg % (name, cachefuncs[name]))
812 cachefuncs[name] = func
821 cachefuncs[name] = func
813 return func
822 return func
814 return decorator
823 return decorator
815
824
816 def getrevs(repo, name):
825 def getrevs(repo, name):
817 """Return the set of revision that belong to the <name> set
826 """Return the set of revision that belong to the <name> set
818
827
819 Such access may compute the set and cache it for future use"""
828 Such access may compute the set and cache it for future use"""
820 repo = repo.unfiltered()
829 repo = repo.unfiltered()
821 if not repo.obsstore:
830 if not repo.obsstore:
822 return frozenset()
831 return frozenset()
823 if name not in repo.obsstore.caches:
832 if name not in repo.obsstore.caches:
824 repo.obsstore.caches[name] = cachefuncs[name](repo)
833 repo.obsstore.caches[name] = cachefuncs[name](repo)
825 return repo.obsstore.caches[name]
834 return repo.obsstore.caches[name]
826
835
827 # To be simple we need to invalidate obsolescence cache when:
836 # To be simple we need to invalidate obsolescence cache when:
828 #
837 #
829 # - new changeset is added:
838 # - new changeset is added:
830 # - public phase is changed
839 # - public phase is changed
831 # - obsolescence marker are added
840 # - obsolescence marker are added
832 # - strip is used a repo
841 # - strip is used a repo
833 def clearobscaches(repo):
842 def clearobscaches(repo):
834 """Remove all obsolescence related cache from a repo
843 """Remove all obsolescence related cache from a repo
835
844
836 This remove all cache in obsstore is the obsstore already exist on the
845 This remove all cache in obsstore is the obsstore already exist on the
837 repo.
846 repo.
838
847
839 (We could be smarter here given the exact event that trigger the cache
848 (We could be smarter here given the exact event that trigger the cache
840 clearing)"""
849 clearing)"""
841 # only clear cache is there is obsstore data in this repo
850 # only clear cache is there is obsstore data in this repo
842 if 'obsstore' in repo._filecache:
851 if 'obsstore' in repo._filecache:
843 repo.obsstore.caches.clear()
852 repo.obsstore.caches.clear()
844
853
845 def _mutablerevs(repo):
854 def _mutablerevs(repo):
846 """the set of mutable revision in the repository"""
855 """the set of mutable revision in the repository"""
847 return repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
856 return repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
848
857
849 @cachefor('obsolete')
858 @cachefor('obsolete')
850 def _computeobsoleteset(repo):
859 def _computeobsoleteset(repo):
851 """the set of obsolete revisions"""
860 """the set of obsolete revisions"""
852 getnode = repo.changelog.node
861 getnode = repo.changelog.node
853 notpublic = _mutablerevs(repo)
862 notpublic = _mutablerevs(repo)
854 isobs = repo.obsstore.successors.__contains__
863 isobs = repo.obsstore.successors.__contains__
855 obs = set(r for r in notpublic if isobs(getnode(r)))
864 obs = set(r for r in notpublic if isobs(getnode(r)))
856 return obs
865 return obs
857
866
858 @cachefor('orphan')
867 @cachefor('orphan')
859 def _computeorphanset(repo):
868 def _computeorphanset(repo):
860 """the set of non obsolete revisions with obsolete parents"""
869 """the set of non obsolete revisions with obsolete parents"""
861 pfunc = repo.changelog.parentrevs
870 pfunc = repo.changelog.parentrevs
862 mutable = _mutablerevs(repo)
871 mutable = _mutablerevs(repo)
863 obsolete = getrevs(repo, 'obsolete')
872 obsolete = getrevs(repo, 'obsolete')
864 others = mutable - obsolete
873 others = mutable - obsolete
865 unstable = set()
874 unstable = set()
866 for r in sorted(others):
875 for r in sorted(others):
867 # A rev is unstable if one of its parent is obsolete or unstable
876 # A rev is unstable if one of its parent is obsolete or unstable
868 # this works since we traverse following growing rev order
877 # this works since we traverse following growing rev order
869 for p in pfunc(r):
878 for p in pfunc(r):
870 if p in obsolete or p in unstable:
879 if p in obsolete or p in unstable:
871 unstable.add(r)
880 unstable.add(r)
872 break
881 break
873 return unstable
882 return unstable
874
883
875 @cachefor('suspended')
884 @cachefor('suspended')
876 def _computesuspendedset(repo):
885 def _computesuspendedset(repo):
877 """the set of obsolete parents with non obsolete descendants"""
886 """the set of obsolete parents with non obsolete descendants"""
878 suspended = repo.changelog.ancestors(getrevs(repo, 'orphan'))
887 suspended = repo.changelog.ancestors(getrevs(repo, 'orphan'))
879 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
888 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
880
889
@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete revisions without non obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')

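# Illustration (not part of the original module): 'suspended' and 'extinct'
# partition the obsolete set. An obsolete rev with a live (orphan)
# descendant is suspended and must be kept around; one without is extinct
# and can be hidden safely. Toy sets:
def _demopartition():
    obsolete = {1, 5}
    orphanancestors = {0, 1}  # revs reachable backwards from some orphan
    suspended = obsolete & orphanancestors  # -> {1}
    extinct = obsolete - suspended          # -> {5}
    return suspended, extinct
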
@cachefor('phasedivergent')
def _computephasedivergentset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # utility function (avoids attribute lookups in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    tonode = cl.node
    for rev in repo.revs('(not public()) and (not obsolete())'):
        # We only evaluate mutable, non-obsolete revisions
        node = tonode(rev)
        # (future) A cache of predecessors may be worth it if splits are
        # very common
        for pnode in obsutil.allpredecessors(repo.obsstore, [node],
                                             ignoreflags=bumpedfix):
            prev = torev(pnode) # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public predecessor
                bumped.add(rev)
                break # Next draft!
    return bumped

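# Illustration (not part of the original module): a rev is phase-divergent
# ("bumped") when any of its predecessors, transitively, is public, since a
# public changeset can never actually be obsoleted. A toy predecessor walk
# over hypothetical data (the real code also guards against cycles):
def _demophasedivergent():
    predecessors = {'n2': ['n1'], 'n1': ['n0']}  # successor -> predecessors
    publicnodes = {'n0'}
    bumped = set()
    for node in ['n2']:  # toy draft, non-obsolete candidates
        stack = list(predecessors.get(node, ()))
        while stack:
            prec = stack.pop()
            if prec in publicnodes:
                bumped.add(node)
                break
            stack.extend(predecessors.get(prec, ()))
    return bumped  # -> {'n2'}: it descends from a rewrite of public 'n0'
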
@cachefor('contentdivergent')
def _computecontentdivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    tonode = repo.changelog.node
    for rev in repo.revs('(not public()) - obsolete()'):
        node = tonode(rev)
        mark = obsstore.predecessors.get(node, ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                obsutil.successorssets(repo, prec, cache=newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(rev)
                break
            toprocess.update(obsstore.predecessors.get(prec, ()))
    return divergent

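# Illustration (not part of the original module): content divergence arises
# when one predecessor ends up with more than one non-empty successors set,
# e.g. the same changeset was amended independently in two clones. With a
# hypothetical successors-sets mapping:
def _democontentdivergent():
    # predecessor 'n0' was rewritten both into 'n1' and into 'n2'
    successorssets = {'n0': [['n1'], ['n2']]}
    newer = [s for s in successorssets['n0'] if s]
    return len(newer) > 1  # -> True: 'n1' and 'n2' compete
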
def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                  operation=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuples. `old` is a changectx and `news` is a sequence of changectx.
    `metadata` is an optional dictionary containing metadata for this marker
    only; it is merged with the global metadata specified through the
    `metadata` argument of this function.

    Trying to obsolete a public changeset will raise an exception.

    The current user and date are used unless specified otherwise in the
    metadata argument.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        develuser = repo.ui.config('devel', 'user.obsmarker')
        if develuser:
            metadata['user'] = develuser
        else:
            metadata['user'] = repo.ui.username()

    # Operation metadata handling
    useoperation = repo.ui.configbool('experimental',
                                      'evolution.track-operation')
    if useoperation and operation:
        metadata['operation'] = operation

    # Effect flag metadata handling
    saveeffectflag = repo.ui.configbool('experimental',
                                        'evolution.effect-flags')

    with repo.transaction('add-obsolescence-marker') as tr:
        markerargs = []
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise error.Abort(_("cannot obsolete public changeset: %s")
                                  % prec,
                                  hint="see 'hg help phases' for details")
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise error.Abort(_("changeset %s cannot obsolete itself")
                                  % prec)

            # The effect flag can differ from one relation to another
            if saveeffectflag:
                # The effect flag is saved in a versioned field name for
                # future evolution
                effectflag = obsutil.geteffectflag(rel)
                localmetadata[obsutil.EFFECTFLAGFIELD] = "%d" % effectflag

            # Creating the marker causes the hidden cache to become
            # invalid, which causes recomputation when we ask for
            # prec.parents() above, resulting in n^2 behavior. So let's
            # prepare all of the args first, then create the markers.
            markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata,
                                 ui=repo.ui)
            repo.filteredrevcache.clear()
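
# Illustration (not part of the original module): how a caller might record
# that ``old`` was rewritten into ``new`` (both changectx) and that ``dead``
# was pruned. The function name and operation string are hypothetical;
# callers are expected to hold the repo lock themselves, since
# createmarkers() opens its own transaction but takes no lock.
def _demorewrite(repo, old, new, dead):
    createmarkers(repo, [(old, (new,))], operation='demo-rewrite')
    createmarkers(repo, [(dead, ())])  # empty successor tuple == prune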