obsolete: add a note that explains creating aliases for marker flags
av6
r37035:98c14e85 default
@@ -1,1012 +1,1014 @@
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "predecessor" and possible
replacements are called "successors". Markers that use changeset X as
a predecessor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "predecessor markers of Y" because they hold
information about the predecessors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the
  "divergence" case. If two independent operations rewrite the same
  changeset A into A' and A'', we have an error case: divergent rewriting.
  We can detect it because two markers will be created independently:

    (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file in '.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. The marker format depends on the
version; see the comment associated with each format for details.

"""
from __future__ import absolute_import

import errno
import struct

from .i18n import _
from . import (
    error,
    node,
    obsutil,
    phases,
    policy,
    util,
)
from .utils import dateutil

parsers = policy.importmod(r'parsers')

_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on third party extensions to enable this.
_enabled = False

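# Illustrative sketch (not part of the original module): read the version
# header byte documented in the module docstring directly from an obsstore
# file. The helper name and the default path argument are assumptions made
# for this example.
def _examplepeekversion(path='.hg/store/obsstore'):
    with open(path, 'rb') as f:
        header = f.read(1)
    if not header:
        return None  # empty store: no version byte written yet
    return _unpack('>B', header)[0]  # 0 or 1, matching the formats below
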
# Options for obsolescence
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'

def _getoptionvalue(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    configkey = 'evolution.%s' % option
    newconfig = repo.ui.configbool('experimental', configkey)

    # Return the value only if defined
    if newconfig is not None:
        return newconfig

    # Fallback on generic option
    try:
        return repo.ui.configbool('experimental', 'evolution')
    except (error.ConfigError, AttributeError):
        # Fallback on old-fashioned config
        # inconsistent config: experimental.evolution
        result = set(repo.ui.configlist('experimental', 'evolution'))

        if 'all' in result:
            return True

        # For migration purposes, temporarily return true if the config hasn't
        # been set but _enabled is true.
        if len(result) == 0 and _enabled:
            return True

        # Temporary hack for next check
        newconfig = repo.ui.config('experimental', 'evolution.createmarkers')
        if newconfig:
            result.add('createmarkers')

        return option in result

def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    createmarkersvalue = _getoptionvalue(repo, createmarkersopt)
    unstablevalue = _getoptionvalue(repo, allowunstableopt)
    exchangevalue = _getoptionvalue(repo, exchangeopt)

    # createmarkers must be enabled if other options are enabled
    if ((unstablevalue or exchangevalue) and not createmarkersvalue):
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return _getoptionvalue(repo, option)
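
# For example (settings assumed, for illustration only), a repository that
# creates and exchanges markers would carry in its hgrc:
#
#   [experimental]
#   evolution.createmarkers = yes
#   evolution.exchange = yes
#
# and code would gate obsolescence logic on it with:
#
#   if isenabled(repo, exchangeopt):
#       ...  # safe to exchange markers with peers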

# Creating aliases for marker flags because evolve extension looks for
# bumpedfix in obsolete.py
bumpedfix = obsutil.bumpedfix
usingsha256 = obsutil.usingsha256

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)

def _fm0readmarkers(data, off, stop):
    # Loop on markers
    while off < stop:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)

def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents, 1):
            metadata['p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata key")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata value")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            key, value = l.split(':')
            d[key] = value
    return d

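# Illustrative round-trip through the v0 codec above. The helper name and
# the node values are made up for this example and are not part of the
# original module.
def _examplefm0roundtrip():
    marker = (b'\x11' * 20,          # predecessor (fake 20-byte node)
              (b'\x22' * 20,),       # one successor
              0,                     # flags
              (('user', 'alice'),),  # metadata as sorted (key, value) pairs
              (0.0, 0),              # (timestamp, timezone offset)
              None)                  # parents were not recorded
    data = _fm0encodeonemarker(marker)
    # decoding what was just encoded yields the same marker tuple back
    return list(_fm0readmarkers(data, 0, len(data)))[0] == marker
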
## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: predecessor changeset identifier.
#
# - N*(20 or 32) bytes: successor changeset identifiers.
#
# - P*(20 or 32) bytes: parents of the predecessor changeset.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize(_fm1metapair)

def _fm1purereadmarkers(data, off, stop):
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    ufixed = struct.Struct(_fm1fixed).unpack

    while off < stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)

def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        if lk > 255:
            msg = ('obsstore metadata key cannot be longer than 255 bytes'
                   ' (key "%s" is %u bytes)') % (key, lk)
            raise error.ProgrammingError(msg)
        if lv > 255:
            msg = ('obsstore metadata value cannot be longer than 255 bytes'
                   ' (value "%s" for key "%s" is %u bytes)') % (value, key, lv)
            raise error.ProgrammingError(msg)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)

def _fm1readmarkers(data, off, stop):
    native = getattr(parsers, 'fm1readmarkers', None)
    if not native:
        return _fm1purereadmarkers(data, off, stop)
    return native(data, off, stop)

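# Sanity check of the fixed v1 layout above (illustrative, not part of the
# original module): '>IdhHBBB20s' packs 4 + 8 + 2 + 2 + 1 + 1 + 1 + 20
# bytes, so every v1 marker occupies at least 39 bytes on disk.
assert _fm1fsize == 39
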
# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}

def _readmarkerversion(data):
    return _unpack('>B', data[0:1])[0]

@util.nogc
def _readmarkers(data, off=None, stop=None):
    """Read and enumerate markers from raw data"""
    diskversion = _readmarkerversion(data)
    if not off:
        off = 1  # skip 1 byte version number
    if stop is None:
        stop = len(data)
    if diskversion not in formats:
        msg = _('parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    return diskversion, formats[diskversion][0](data, off, stop)

def encodeheader(version=_fm0version):
    return _pack('>B', version)

def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for marker in markers:
        yield encodeone(marker)

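# Illustrative: a complete binary stream is the header followed by each
# encoded marker, e.g.
#
#   stream = b''.join(encodemarkers(markers, addheader=True, version=1))
#
# which is what obsstore.add() below writes when creating a brand new file.
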
@util.nogc
def _addsuccessors(successors, markers):
    for mark in markers:
        successors.setdefault(mark[0], set()).add(mark)

@util.nogc
def _addpredecessors(predecessors, markers):
    for mark in markers:
        for suc in mark[1]:
            predecessors.setdefault(suc, set()).add(mark)

@util.nogc
def _addchildren(children, markers):
    for mark in markers:
        parents = mark[5]
        if parents is not None:
            for p in parents:
                children.setdefault(p, set()).add(mark)

def _checkinvalidmarkers(markers):
    """search for markers with invalid data and raise an error if needed

    Exists as a separate function to allow the evolve extension a more
    subtle handling.
    """
    for mark in markers:
        if node.nullid in mark[1]:
            raise error.Abort(_('bad obsolescence marker detected: '
                                'invalid successors nullid'))

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - predecessors[x] -> set(markers on predecessors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on predecessors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec:    nodeid, predecessors changesets
    # succs:   tuple of nodeid, successor changesets (0-N length)
    # flag:    integer, flag field carrying modifier for the markers (see doc)
    # meta:    binary blob, encoded metadata dictionary
    # date:    (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of predecessors
    #          None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related caches
        self.caches = {}
        self.svfs = svfs
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        if not self._cached(r'_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating markers you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker has been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = dateutil.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = dateutil.makedate()
            else:
                date = dateutil.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicates.
        Return the number of new markers."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                data = b''.join(encodemarkers(new, offset == 0, self._version))
                f.write(data)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            addedmarkers = transaction.changes.get('obsmarkers')
            if addedmarkers is not None:
                addedmarkers.update(new)
            self._addmarkers(new, data)
            # new markers *may* have changed several sets. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = '%d' % (previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        return self.svfs.tryread('obsstore')

    @propertycache
    def _version(self):
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def predecessors(self):
        predecessors = {}
        _addpredecessors(predecessors, self._all)
        return predecessors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        return attr in self.__dict__

    def _addmarkers(self, markers, rawdata):
        markers = list(markers)  # to allow repeated iteration
        self._data = self._data + rawdata
        self._all.extend(markers)
        if self._cached(r'successors'):
            _addsuccessors(self.successors, markers)
        if self._cached(r'predecessors'):
            _addpredecessors(self.predecessors, markers)
        if self._cached(r'children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use this changeset as a successor
        - prune markers of direct children on this changeset
        - recursive application of the two rules on predecessors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.predecessors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers

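# Typical access (illustrative): repo.obsstore is an instance of the class
# above, created by makestore() below, so for example
#
#   markers = repo.obsstore.relevantmarkers([ctx.node()])
#
# collects the markers relevant to a node as described in its docstring.
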
def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    defaultformat = ui.configint('format', 'obsstore-version')
    # rely on obsstore class default when possible.
    kwargs = {}
    if defaultformat is not None:
        kwargs[r'defaultformat'] = defaultformat
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **kwargs)
    if store and readonly:
        ui.warn(_('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
    return store

def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None
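
# For instance (illustrative): with the local formats above (0 and 1),
# commonversion([5, 1, 0]) returns 1, while commonversion([5, 3]) returns
# None because none of the listed versions is known locally.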

# arbitrarily picked to fit into 8K limit from HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(data)
    return keys

def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(sorted(repo.obsstore))

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return False
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return False
    data = util.b85decode(new)
    with repo.lock(), repo.transaction('pushkey: obsolete markers') as tr:
        repo.obsstore.mergemarkers(tr, data)
        repo.invalidatevolatilesets()
        return True

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        if name in cachefuncs:
            msg = "duplicated registration for volatileset '%s' (existing: %r)"
            raise error.ProgrammingError(msg % (name, cachefuncs[name]))
        cachefuncs[name] = func
        return func
    return decorator

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]
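
# Typical use (illustrative): getrevs(repo, 'orphan') returns the cached
# set of orphan revisions, computing it on first access through the
# @cachefor('orphan') function below.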
824
826
825 # To be simple we need to invalidate obsolescence cache when:
827 # To be simple we need to invalidate obsolescence cache when:
826 #
828 #
827 # - new changeset is added:
829 # - new changeset is added:
828 # - public phase is changed
830 # - public phase is changed
829 # - obsolescence marker are added
831 # - obsolescence marker are added
830 # - strip is used a repo
832 # - strip is used a repo
831 def clearobscaches(repo):
833 def clearobscaches(repo):
832 """Remove all obsolescence related cache from a repo
834 """Remove all obsolescence related cache from a repo
833
835
834 This remove all cache in obsstore is the obsstore already exist on the
836 This remove all cache in obsstore is the obsstore already exist on the
835 repo.
837 repo.
836
838
837 (We could be smarter here given the exact event that trigger the cache
839 (We could be smarter here given the exact event that trigger the cache
838 clearing)"""
840 clearing)"""
839 # only clear cache is there is obsstore data in this repo
841 # only clear cache is there is obsstore data in this repo
840 if 'obsstore' in repo._filecache:
842 if 'obsstore' in repo._filecache:
841 repo.obsstore.caches.clear()
843 repo.obsstore.caches.clear()
842
844
843 def _mutablerevs(repo):
845 def _mutablerevs(repo):
844 """the set of mutable revision in the repository"""
846 """the set of mutable revision in the repository"""
845 return repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
847 return repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
846
848
847 @cachefor('obsolete')
849 @cachefor('obsolete')
848 def _computeobsoleteset(repo):
850 def _computeobsoleteset(repo):
849 """the set of obsolete revisions"""
851 """the set of obsolete revisions"""
850 getnode = repo.changelog.node
852 getnode = repo.changelog.node
851 notpublic = _mutablerevs(repo)
853 notpublic = _mutablerevs(repo)
852 isobs = repo.obsstore.successors.__contains__
854 isobs = repo.obsstore.successors.__contains__
853 obs = set(r for r in notpublic if isobs(getnode(r)))
855 obs = set(r for r in notpublic if isobs(getnode(r)))
854 return obs
856 return obs
855
857
856 @cachefor('orphan')
858 @cachefor('orphan')
857 def _computeorphanset(repo):
859 def _computeorphanset(repo):
858 """the set of non obsolete revisions with obsolete parents"""
860 """the set of non obsolete revisions with obsolete parents"""
859 pfunc = repo.changelog.parentrevs
861 pfunc = repo.changelog.parentrevs
860 mutable = _mutablerevs(repo)
862 mutable = _mutablerevs(repo)
861 obsolete = getrevs(repo, 'obsolete')
863 obsolete = getrevs(repo, 'obsolete')
862 others = mutable - obsolete
864 others = mutable - obsolete
863 unstable = set()
865 unstable = set()
864 for r in sorted(others):
866 for r in sorted(others):
865 # A rev is unstable if one of its parent is obsolete or unstable
867 # A rev is unstable if one of its parent is obsolete or unstable
866 # this works since we traverse following growing rev order
868 # this works since we traverse following growing rev order
867 for p in pfunc(r):
869 for p in pfunc(r):
868 if p in obsolete or p in unstable:
870 if p in obsolete or p in unstable:
869 unstable.add(r)
871 unstable.add(r)
870 break
872 break
871 return unstable
873 return unstable
872
874
873 @cachefor('suspended')
875 @cachefor('suspended')
874 def _computesuspendedset(repo):
876 def _computesuspendedset(repo):
875 """the set of obsolete parents with non obsolete descendants"""
877 """the set of obsolete parents with non obsolete descendants"""
876 suspended = repo.changelog.ancestors(getrevs(repo, 'orphan'))
878 suspended = repo.changelog.ancestors(getrevs(repo, 'orphan'))
877 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
879 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
878
880
879 @cachefor('extinct')
881 @cachefor('extinct')
880 def _computeextinctset(repo):
882 def _computeextinctset(repo):
881 """the set of obsolete parents without non obsolete descendants"""
883 """the set of obsolete parents without non obsolete descendants"""
882 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
884 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
883
885
@cachefor('phasedivergent')
def _computephasedivergentset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # utility function (avoids attribute lookups in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    tonode = cl.node
    for rev in repo.revs('(not public()) and (not obsolete())'):
        # We only evaluate mutable, non-obsolete revisions
        node = tonode(rev)
        # (future) a cache of predecessors may be worth it if splits are
        # very common
        for pnode in obsutil.allpredecessors(repo.obsstore, [node],
                                             ignoreflags=bumpedfix):
            prev = torev(pnode) # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public predecessor
                bumped.add(rev)
                break # Next draft!
    return bumped

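# Scenario sketch (hypothetical): draft A is amended into A', then A is
# pushed elsewhere and later pulled back as public. A' now has a public
# predecessor, so the loop above flags A' as phase-divergent (the
# historical name "bumped" survives in the variable and flag names).
#
#   repo.revs('phasedivergent()')   # revset backed by the set above
#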
@cachefor('contentdivergent')
def _computecontentdivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    tonode = repo.changelog.node
    for rev in repo.revs('(not public()) - obsolete()'):
        node = tonode(rev)
        mark = obsstore.predecessors.get(node, ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                obsutil.successorssets(repo, prec, cache=newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(rev)
                break
            toprocess.update(obsstore.predecessors.get(prec, ()))
    return divergent

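# Scenario sketch (hypothetical nodes): changeset A is rewritten into A1
# on one machine and independently into A2 on another; once the markers
# are exchanged, successorssets() reports two competing outcomes for A,
# so the loop above puts both A1 and A2 in the divergent set.
#
#   cache = {}
#   obsutil.successorssets(repo, a_node, cache=cache)
#   # cache[a_node] == [[a1_node], [a2_node]] -> content divergence
#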
def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                  operation=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[, {metadata}])
    tuples. `old` and `news` are changectx. metadata is an optional
    dictionary containing metadata for this marker only. It is merged with
    the global metadata specified through the `metadata` argument of this
    function.

    Trying to obsolete a public changeset will raise an exception.

    The current user and date are used unless specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        develuser = repo.ui.config('devel', 'user.obsmarker')
        if develuser:
            metadata['user'] = develuser
        else:
            metadata['user'] = repo.ui.username()

    # Operation metadata handling
    useoperation = repo.ui.configbool('experimental',
                                      'evolution.track-operation')
    if useoperation and operation:
        metadata['operation'] = operation

    # Effect flag metadata handling
    saveeffectflag = repo.ui.configbool('experimental',
                                        'evolution.effect-flags')

    with repo.transaction('add-obsolescence-marker') as tr:
        markerargs = []
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise error.Abort(_("cannot obsolete public changeset: %s")
                                  % prec,
                                  hint="see 'hg help phases' for details")
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise error.Abort(_("changeset %s cannot obsolete itself")
                                  % prec)

            # Effect flag can differ between relations
            if saveeffectflag:
                # The effect flag is saved in a versioned field name for
                # future evolution
                effectflag = obsutil.geteffectflag(rel)
                localmetadata[obsutil.EFFECTFLAGFIELD] = "%d" % effectflag

            # Creating the marker causes the hidden cache to become
            # invalid, which causes recomputation when we ask for
            # prec.parents() above, resulting in n^2 behavior. So let's
            # prepare all of the args first, then create the markers.
            markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata,
                                 ui=repo.ui)
    repo.filteredrevcache.clear()
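
# Usage sketch (illustrative; `repo` is a locked localrepository and
# `old`/`new` are changectx objects, names assumed for the example):
#
#   # record that `old` was rewritten into `new`, e.g. by an amend
#   createmarkers(repo, [(old, (new,))], operation='amend')
#
#   # prune `old` outright; with no successors, the parents of `old` are
#   # recorded on the marker so the prune stays anchored in the graph
#   createmarkers(repo, [(old, ())])
#
# Callers are expected to hold the repo lock; the function opens its own
# transaction but takes no lock itself (see the docstring above).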