##// END OF EJS Templates
obsolete: convert error string to a sysstr...
Augie Fackler -
r40200:fee61693 default
parent child Browse files
Show More
@@ -1,1058 +1,1059 b''
1 # obsolete.py - obsolete markers handling
1 # obsolete.py - obsolete markers handling
2 #
2 #
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 # Logilab SA <contact@logilab.fr>
4 # Logilab SA <contact@logilab.fr>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 """Obsolete marker handling
9 """Obsolete marker handling
10
10
11 An obsolete marker maps an old changeset to a list of new
11 An obsolete marker maps an old changeset to a list of new
12 changesets. If the list of new changesets is empty, the old changeset
12 changesets. If the list of new changesets is empty, the old changeset
13 is said to be "killed". Otherwise, the old changeset is being
13 is said to be "killed". Otherwise, the old changeset is being
14 "replaced" by the new changesets.
14 "replaced" by the new changesets.
15
15
16 Obsolete markers can be used to record and distribute changeset graph
16 Obsolete markers can be used to record and distribute changeset graph
17 transformations performed by history rewrite operations, and help
17 transformations performed by history rewrite operations, and help
18 building new tools to reconcile conflicting rewrite actions. To
18 building new tools to reconcile conflicting rewrite actions. To
19 facilitate conflict resolution, markers include various annotations
19 facilitate conflict resolution, markers include various annotations
20 besides old and new changeset identifiers, such as creation date or
20 besides old and new changeset identifiers, such as creation date or
21 author name.
21 author name.
22
22
23 The old obsoleted changeset is called a "predecessor" and possible
23 The old obsoleted changeset is called a "predecessor" and possible
24 replacements are called "successors". Markers that used changeset X as
24 replacements are called "successors". Markers that used changeset X as
25 a predecessor are called "successor markers of X" because they hold
25 a predecessor are called "successor markers of X" because they hold
26 information about the successors of X. Markers that use changeset Y as
26 information about the successors of X. Markers that use changeset Y as
27 a successor are called "predecessor markers of Y" because they hold
27 a successor are called "predecessor markers of Y" because they hold
28 information about the predecessors of Y.
28 information about the predecessors of Y.
29
29
30 Examples:
30 Examples:
31
31
32 - When changeset A is replaced by changeset A', one marker is stored:
32 - When changeset A is replaced by changeset A', one marker is stored:
33
33
34 (A, (A',))
34 (A, (A',))
35
35
36 - When changesets A and B are folded into a new changeset C, two markers are
36 - When changesets A and B are folded into a new changeset C, two markers are
37 stored:
37 stored:
38
38
39 (A, (C,)) and (B, (C,))
39 (A, (C,)) and (B, (C,))
40
40
41 - When changeset A is simply "pruned" from the graph, a marker is created:
41 - When changeset A is simply "pruned" from the graph, a marker is created:
42
42
43 (A, ())
43 (A, ())
44
44
45 - When changeset A is split into B and C, a single marker is used:
45 - When changeset A is split into B and C, a single marker is used:
46
46
47 (A, (B, C))
47 (A, (B, C))
48
48
49 We use a single marker to distinguish the "split" case from the "divergence"
49 We use a single marker to distinguish the "split" case from the "divergence"
50 case. If two independent operations rewrite the same changeset A in to A' and
50 case. If two independent operations rewrite the same changeset A in to A' and
51 A'', we have an error case: divergent rewriting. We can detect it because
51 A'', we have an error case: divergent rewriting. We can detect it because
52 two markers will be created independently:
52 two markers will be created independently:
53
53
54 (A, (B,)) and (A, (C,))
54 (A, (B,)) and (A, (C,))
55
55
56 Format
56 Format
57 ------
57 ------
58
58
59 Markers are stored in an append-only file stored in
59 Markers are stored in an append-only file stored in
60 '.hg/store/obsstore'.
60 '.hg/store/obsstore'.
61
61
62 The file starts with a version header:
62 The file starts with a version header:
63
63
64 - 1 unsigned byte: version number, starting at zero.
64 - 1 unsigned byte: version number, starting at zero.
65
65
66 The header is followed by the markers. Marker format depend of the version. See
66 The header is followed by the markers. Marker format depend of the version. See
67 comment associated with each format for details.
67 comment associated with each format for details.
68
68
69 """
69 """
70 from __future__ import absolute_import
70 from __future__ import absolute_import
71
71
72 import errno
72 import errno
73 import hashlib
73 import hashlib
74 import struct
74 import struct
75
75
76 from .i18n import _
76 from .i18n import _
77 from . import (
77 from . import (
78 encoding,
78 encoding,
79 error,
79 error,
80 node,
80 node,
81 obsutil,
81 obsutil,
82 phases,
82 phases,
83 policy,
83 policy,
84 pycompat,
84 pycompat,
85 util,
85 util,
86 )
86 )
87 from .utils import dateutil
87 from .utils import dateutil
88
88
# C extension (or pure-python fallback) providing fm1readmarkers
parsers = policy.importmod(r'parsers')

# local aliases to avoid repeated attribute lookups
_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# Options for obsolescence; keys of the dict returned by getoptions()
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'
104
104
def _getoptionvalue(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    # A dedicated per-option config knob takes precedence when set.
    dedicated = repo.ui.configbool('experimental', 'evolution.%s' % option)
    if dedicated is not None:
        return dedicated

    # Fall back on the generic 'experimental.evolution' boolean.
    try:
        return repo.ui.configbool('experimental', 'evolution')
    except (error.ConfigError, AttributeError):
        # Fall back on the old-fashioned config, where
        # 'experimental.evolution' holds a list of enabled features.
        enabled = set(repo.ui.configlist('experimental', 'evolution'))

        if 'all' in enabled:
            return True

        # For migration purposes, temporarily return true if the config hasn't
        # been set but _enabled is true.
        if not enabled and _enabled:
            return True

        # Temporary hack for next check: honor the dedicated
        # createmarkers knob even in the old-fashioned scheme.
        if repo.ui.config('experimental', 'evolution.createmarkers'):
            enabled.add('createmarkers')

        return option in enabled
138
138
def getoptions(repo):
    """Returns dicts showing state of obsolescence features."""
    options = {
        createmarkersopt: _getoptionvalue(repo, createmarkersopt),
        allowunstableopt: _getoptionvalue(repo, allowunstableopt),
        exchangeopt: _getoptionvalue(repo, exchangeopt),
    }

    # createmarkers is a prerequisite for every other obsolescence feature
    anyother = options[allowunstableopt] or options[exchangeopt]
    if anyother and not options[createmarkersopt]:
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return options
156
156
def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    options = getoptions(repo)
    return options[option]
162
162
# Creating aliases for marker flags because evolve extension looks for
# bumpedfix in obsolete.py
bumpedfix = obsutil.bumpedfix
usingsha256 = obsutil.usingsha256

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
# struct format of the fixed part: numsuc, mdsize, flags, prec (see above)
_fm0fixed = '>BIB20s'
# struct format of one 20-byte (sha1) node
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)
193
193
def _fm0readmarkers(data, off, stop):
    """Decode version 0 markers from ``data[off:stop]``.

    Yields (pre, sucs, flags, metadata, date, parents) tuples, one per
    marker.  In format 0, date and parents are not dedicated fields: they
    are extracted from the metadata dictionary when present.
    """
    # Loop on markers
    while off < stop:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement (the successors nodes, numsuc of them)
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            # truncated store: refuse to return a partially-decoded marker
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        # 'date' travels inside metadata as "<float-seconds> <int-offset>"
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            # malformed date entry: fall back to the epoch
            date = (0., 0)
        # parents travel as hex nodes in keys p1/p2; 'p0' alone means
        # "parents were recorded and there are none"
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        # remaining metadata is exposed as a sorted tuple of (key, value)
        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)
244
244
def _fm0encodeonemarker(marker):
    """Serialize one marker tuple using the version 0 binary format."""
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        # format 0 only has room for 20-byte (sha1) nodes
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    # date and parents have no dedicated field in format 0; they are
    # smuggled through the metadata dictionary
    meta = dict(metadata)
    when, tz = date
    meta['date'] = '%r %i' % (when, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            meta['p0'] = ''
        for idx, parent in enumerate(parents, 1):
            meta['p%i' % idx] = node.hex(parent)
    encodedmeta = _fm0encodemeta(meta)
    numsuc = len(sucs)
    fmt = _fm0fixed + (_fm0node * numsuc)
    fields = [numsuc, len(encodedmeta), flags, pre] + list(sucs)
    return _pack(fmt, *fields) + encodedmeta
264
264
265 def _fm0encodemeta(meta):
265 def _fm0encodemeta(meta):
266 """Return encoded metadata string to string mapping.
266 """Return encoded metadata string to string mapping.
267
267
268 Assume no ':' in key and no '\0' in both key and value."""
268 Assume no ':' in key and no '\0' in both key and value."""
269 for key, value in meta.iteritems():
269 for key, value in meta.iteritems():
270 if ':' in key or '\0' in key:
270 if ':' in key or '\0' in key:
271 raise ValueError("':' and '\0' are forbidden in metadata key'")
271 raise ValueError("':' and '\0' are forbidden in metadata key'")
272 if '\0' in value:
272 if '\0' in value:
273 raise ValueError("':' is forbidden in metadata value'")
273 raise ValueError("':' is forbidden in metadata value'")
274 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
274 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
275
275
276 def _fm0decodemeta(data):
276 def _fm0decodemeta(data):
277 """Return string to string dictionary from encoded version."""
277 """Return string to string dictionary from encoded version."""
278 d = {}
278 d = {}
279 for l in data.split('\0'):
279 for l in data.split('\0'):
280 if l:
280 if l:
281 key, value = l.split(':', 1)
281 key, value = l.split(':', 1)
282 d[key] = value
282 d[key] = value
283 return d
283 return d
284
284
## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: predecessor changeset identifier.
#
# - N*(20 or 32) bytes: successors changesets identifiers.
#
# - P*(20 or 32) bytes: parents of the predecessors changesets.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
# fixed part: size, date, tz, flags, numsuc, numpar, nummeta, prec
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
# parent-count value meaning "no parent data stored"
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
# one metadata entry is described by two uint8: key size, value size
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize(_fm1metapair)
331
331
def _fm1purereadmarkers(data, off, stop):
    """Pure-python decoder for version 1 markers.

    Yields (prec, sucs, flags, metadata, date, parents) tuples parsed
    from ``data[off:stop]``.  Used when the C parser is unavailable
    (see _fm1readmarkers).
    """
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    ufixed = struct.Struct(_fm1fixed).unpack

    while off < stop:
        # read fixed part
        # t is the total marker size; it is not needed while walking
        # offsets field by field
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                # fast path: avoid a struct call for the common single case
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata: first the (keysize, valuesize) pair table, then
        # the concatenated key/value payloads
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in pycompat.xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        # tz is stored in minutes on disk, exposed in seconds
        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
405
405
def _fm1encodeonemarker(marker):
    """Serialize one marker tuple using the version 1 binary format.

    Raises ProgrammingError when a metadata key or value exceeds the
    255-byte limit imposed by the uint8 size fields.
    """
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    # extra nodes = successors, plus parents when they are recorded
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    # data[0] is a placeholder for the total size, patched below
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        # sizes are stored as uint8, hence the 255-byte cap
        if lk > 255:
            msg = ('obsstore metadata key cannot be longer than 255 bytes'
                   ' (key "%s" is %u bytes)') % (key, lk)
            raise error.ProgrammingError(msg)
        if lv > 255:
            msg = ('obsstore metadata value cannot be longer than 255 bytes'
                   ' (value "%s" for key "%s" is %u bytes)') % (value, key, lv)
            raise error.ProgrammingError(msg)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    # metadata payloads follow the packed fixed part as raw key/value bytes
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)
449
449
def _fm1readmarkers(data, off, stop):
    """Decode fm1 markers, preferring the C parser when it is available."""
    native = getattr(parsers, 'fm1readmarkers', None)
    if native:
        return native(data, off, stop)
    return _fm1purereadmarkers(data, off, stop)
455
455
# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
# decoder(data, off, stop) yields marker tuples; encoder(marker) -> bytes
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
460
460
def _readmarkerversion(data):
    """Return the format version encoded in the first byte of *data*."""
    (version,) = struct.unpack('>B', data[0:1])
    return version
463
463
@util.nogc
def _readmarkers(data, off=None, stop=None):
    """Read and enumerate markers from raw data.

    Returns a (version, iterable-of-markers) pair covering
    ``data[off:stop]``.
    """
    diskversion = _readmarkerversion(data)
    if diskversion not in formats:
        msg = _('parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    if not off:
        off = 1  # skip 1 byte version number
    if stop is None:
        stop = len(data)
    decoder = formats[diskversion][0]
    return diskversion, decoder(data, off, stop)
476
476
def encodeheader(version=_fm0version):
    """Return the single version byte that heads an obsstore file."""
    return struct.pack('>B', version)
479
479
def encodemarkers(markers, addheader=False, version=_fm0version):
    """Yield binary chunks encoding *markers* in the given format version.

    Kept separate from flushmarkers(), it will be reused for
    markers exchange.
    """
    encode = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for marker in markers:
        yield encode(marker)
488
488
@util.nogc
def _addsuccessors(successors, markers):
    """Index each marker in *successors*, keyed by its predecessor node."""
    for marker in markers:
        predecessor = marker[0]
        successors.setdefault(predecessor, set()).add(marker)
493
493
@util.nogc
def _addpredecessors(predecessors, markers):
    """Index each marker in *predecessors*, keyed by each successor node."""
    for marker in markers:
        for successor in marker[1]:
            predecessors.setdefault(successor, set()).add(marker)
499
499
@util.nogc
def _addchildren(children, markers):
    """Index each marker in *children*, keyed by each recorded parent.

    Markers whose parent information was not recorded (field 5 is None)
    are skipped.
    """
    for marker in markers:
        parents = marker[5]
        if parents is None:
            continue
        for parent in parents:
            children.setdefault(parent, set()).add(marker)
507
507
def _checkinvalidmarkers(markers):
    """Search for markers with invalid data and raise an error if needed.

    Exists as a separate function to allow the evolve extension a more
    subtle handling.
    """
    for marker in markers:
        if node.nullid not in marker[1]:
            continue
        raise error.Abort(_('bad obsolescence marker detected: '
                            'invalid successors nullid'))
518
518
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with two mappings:
    - predecessors[x] -> set(markers on predecessors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on predecessors edges of children(x)
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec: nodeid, predecessors changesets
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob in UTF-8, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of predecessors
    # None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related cache
        self.caches = {}
        self.svfs = svfs
        # format used when writing a brand new obsstore file
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        # fast path: if markers are not loaded yet, a stat() call is cheaper
        # than parsing the whole file
        if not self._cached(r'_all'):
            try:
                # > 1 because a file holding only the 1-byte version header
                # contains no markers
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating marker you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker have been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = dateutil.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = dateutil.makedate()
            else:
                date = dateutil.makedate()
        # nodeids are always 20-byte binary hashes
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            # ValueError wants a native str message, hence sysstr()
            raise ValueError(
                pycompat.sysstr(_('in-marker cycle with %s') % node.hex(prec)))

        metadata = tuple(sorted(metadata.iteritems()))
        for k, v in metadata:
            try:
                # might be better to reject non-ASCII keys
                k.decode('utf-8')
                v.decode('utf-8')
            except UnicodeDecodeError:
                raise error.ProgrammingError(
                    'obsstore metadata must be valid UTF-8 sequence '
                    '(key = %r, value = %r)'
                    % (pycompat.bytestr(k), pycompat.bytestr(v)))

        marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicate.
        Return the number of new marker."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        # drop markers already present in the store (indexed by predecessor)
        # or duplicated within this batch
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                data = b''.join(encodemarkers(new, offset == 0, self._version))
                f.write(data)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()'  here
                f.close()
            addedmarkers = transaction.changes.get('obsmarkers')
            if addedmarkers is not None:
                addedmarkers.update(new)
            self._addmarkers(new, data)
            # new marker *may* have changed several set. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = '%d' % (previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        # raw bytes of the obsstore file (empty bytes if missing)
        return self.svfs.tryread('obsstore')

    @propertycache
    def _version(self):
        # version stored on disk wins; fall back to the configured default
        # for an empty/missing obsstore
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def predecessors(self):
        predecessors = {}
        _addpredecessors(predecessors, self._all)
        return predecessors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        # propertycache stores computed values in the instance __dict__
        return attr in self.__dict__

    def _addmarkers(self, markers, rawdata):
        """Append markers (and their raw encoding) to the in-memory state,
        keeping any already-computed index mapping up to date."""
        markers = list(markers) # to allow repeated iteration
        self._data = self._data + rawdata
        self._all.extend(markers)
        # only update the mappings that have actually been computed;
        # uncomputed ones will be built from _all on first access
        if self._cached(r'successors'):
            _addsuccessors(self.successors, markers)
        if self._cached(r'predecessors'):
            _addpredecessors(self.predecessors, markers)
        if self._cached(r'children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes mean:

        - marker that use this changeset as successor
        - prune marker of direct children on this changeset
        - recursive application of the two rules on predecessors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.predecessors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                # prune markers are those with an empty successors tuple
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            # recurse on the predecessors of the newly found markers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers
750
751
def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    # default format for a new obsstore; rely on the obsstore class
    # default when unset.
    # developer config: format.obsstore-version
    kwargs = {}
    version = ui.configint('format', 'obsstore-version')
    if version is not None:
        kwargs[r'defaultformat'] = version
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **kwargs)
    if store and readonly:
        # markers exist on disk but the feature is disabled: warn the user
        ui.warn(_('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
    return store
766
767
def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    # note: sorts the caller's list in place, newest first
    versions.sort(reverse=True)
    # first hit is the highest version known on both sides
    return next((v for v in versions if v in formats), None)
778
779
779 # arbitrary picked to fit into 8K limit from HTTP server
780 # arbitrary picked to fit into 8K limit from HTTP server
780 # you have to take in account:
781 # you have to take in account:
781 # - the version header
782 # - the version header
782 # - the base85 encoding
783 # - the base85 encoding
783 _maxpayload = 5300
784 _maxpayload = 5300
784
785
def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    parts = []
    currentpart = None
    currentlen = _maxpayload * 2 # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            # start a fresh chunk once the payload budget is exceeded
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    keys = {}
    # each chunk is prefixed with the fm0 version header before encoding
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(data)
    return keys
805
806
def listmarkers(repo):
    """List markers over pushkey"""
    store = repo.obsstore
    if not store:
        return {}
    return _pushkeyescape(sorted(store))
811
812
def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    ui = repo.ui
    if not key.startswith('dump'):
        ui.warn(_('unknown key: %r') % key)
        return False
    if old:
        ui.warn(_('unexpected old value for %r') % key)
        return False
    rawdata = util.b85decode(new)
    with repo.lock(), repo.transaction('pushkey: obsolete markers') as tr:
        repo.obsstore.mergemarkers(tr, rawdata)
        repo.invalidatevolatilesets()
        return True
825
826
# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}

def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def _register(func):
        if name in cachefuncs:
            msg = "duplicated registration for volatileset '%s' (existing: %r)"
            raise error.ProgrammingError(msg % (name, cachefuncs[name]))
        cachefuncs[name] = func
        return func
    return _register
837
838
def getrevs(repo, name):
    """Return the set of revision that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    caches = repo.obsstore.caches
    if name not in caches:
        # compute lazily and memoize on the obsstore
        caches[name] = cachefuncs[name](repo)
    return caches[name]
848
849
# To be simple we need to invalidate obsolescence cache when:
#
# - new changeset is added:
# - public phase is changed
# - obsolescence marker are added
# - strip is used a repo
def clearobscaches(repo):
    """Remove all obsolescence related cache from a repo

    This remove all cache in obsstore is the obsstore already exist on the
    repo.

    (We could be smarter here given the exact event that trigger the cache
    clearing)"""
    # only clear cache is there is obsstore data in this repo
    if 'obsstore' not in repo._filecache:
        return
    repo.obsstore.caches.clear()
866
867
def _mutablerevs(repo):
    """the set of mutable revision in the repository"""
    phasecache = repo._phasecache
    return phasecache.getrevset(repo, phases.mutablephases)
870
871
@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    tonode = repo.changelog.node
    isobs = repo.obsstore.successors.__contains__
    # a mutable revision is obsolete when some marker lists it as predecessor
    return set(r for r in _mutablerevs(repo) if isobs(tonode(r)))
879
880
@cachefor('orphan')
def _computeorphanset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    parentrevs = repo.changelog.parentrevs
    obsolete = getrevs(repo, 'obsolete')
    candidates = _mutablerevs(repo) - obsolete
    unstable = set()
    # ascending order guarantees a parent is classified before its children,
    # so instability propagates down the graph in a single pass
    for r in sorted(candidates):
        # a rev is unstable if one of its parents is obsolete or unstable
        for p in parentrevs(r):
            if p in obsolete or p in unstable:
                unstable.add(r)
                break
    return unstable
896
897
@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'orphan'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
902
903
@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    obsolete = getrevs(repo, 'obsolete')
    suspended = getrevs(repo, 'suspended')
    return obsolete - suspended
907
908
@cachefor('phasedivergent')
def _computephasedivergentset(repo):
    """the set of revs trying to obsolete public revisions"""
    divergent = set()
    # hoist attribute lookups out of the loop
    phaseof = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    tonode = cl.node
    # we only evaluate mutable, non-obsolete revisions
    for rev in repo.revs('(not public()) and (not obsolete())'):
        current = tonode(rev)
        # (future) a cache of predecessors may be worth it if split is common
        predecessors = obsutil.allpredecessors(repo.obsstore, [current],
                                               ignoreflags=bumpedfix)
        for pnode in predecessors:
            prev = torev(pnode) # unfiltered! but so is phasecache
            if prev is None:
                continue
            if phaseof(repo, prev) <= public:
                # we have a public predecessor: this rev is phase-divergent
                divergent.add(rev)
                break # next draft!
    return divergent
930
931
@cachefor('contentdivergent')
def _computecontentdivergentset(repo):
    """the set of rev that compete to be the final successors of some revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    # cache for obsutil.successorssets results, keyed by predecessor node
    newermap = {}
    tonode = repo.changelog.node
    for rev in repo.revs('(not public()) - obsolete()'):
        node = tonode(rev)
        mark = obsstore.predecessors.get(node, ())
        toprocess = set(mark)
        seen = set()
        # walk the predecessors transitively: if any predecessor has more
        # than one non-empty successors set, this rev competes with another
        # successor and is content-divergent
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                obsutil.successorssets(repo, prec, cache=newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(rev)
                break
            toprocess.update(obsstore.predecessors.get(prec, ()))
    return divergent
957
958
def makefoldid(relation, user):
    """Return a short identifier for a fold operation.

    The digest mixes the user with the revs and nodes on both sides of
    the relation.
    """
    digest = hashlib.sha1(user)
    for ctx in relation[0] + relation[1]:
        digest.update('%d' % ctx.rev())
        digest.update(ctx.node())
    # Since fold only has to compete against fold for the same successors, it
    # seems fine to use a small ID. Smaller ID save space.
    return node.hex(digest.digest())[:8]
967
968
def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                  operation=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of ((<old>,...), (<new>, ...)[,{metadata}])
    tuple. `old` and `news` are changectx. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function.
    Any string values in metadata must be UTF-8 bytes.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        # allow tests to pin the recorded user via devel.user.obsmarker;
        # markers store the user in UTF-8, hence encoding.fromlocal()
        luser = repo.ui.config('devel', 'user.obsmarker') or repo.ui.username()
        metadata['user'] = encoding.fromlocal(luser)

    # Operation metadata handling
    useoperation = repo.ui.configbool('experimental',
                                      'evolution.track-operation')
    if useoperation and operation:
        metadata['operation'] = operation

    # Effect flag metadata handling
    saveeffectflag = repo.ui.configbool('experimental',
                                        'evolution.effect-flags')

    with repo.transaction('add-obsolescence-marker') as tr:
        markerargs = []
        for rel in relations:
            predecessors = rel[0]
            if not isinstance(predecessors, tuple):
                # preserve compat with old API until all caller are migrated
                predecessors = (predecessors,)
            if len(predecessors) > 1 and len(rel[1]) != 1:
                # a "fold" (several predecessors) must map to exactly one
                # successor; anything else is a caller bug
                msg = 'Fold markers can only have 1 successor, not %d'
                raise error.ProgrammingError(msg % len(rel[1]))
            foldid = None
            foldsize = len(predecessors)
            if 1 < foldsize:
                # all markers of a fold share one id so readers can group them
                foldid = makefoldid(rel, metadata['user'])
            for foldidx, prec in enumerate(predecessors, 1):
                sucs = rel[1]
                localmetadata = metadata.copy()
                if len(rel) > 2:
                    # per-relation metadata overrides the global metadata
                    localmetadata.update(rel[2])
                if foldid is not None:
                    # fold-idx is 1-based (see enumerate(..., 1) above)
                    localmetadata['fold-id'] = foldid
                    localmetadata['fold-idx'] = '%d' % foldidx
                    localmetadata['fold-size'] = '%d' % foldsize

                if not prec.mutable():
                    raise error.Abort(_("cannot obsolete public changeset: %s")
                                      % prec,
                                      hint="see 'hg help phases' for details")
                nprec = prec.node()
                nsucs = tuple(s.node() for s in sucs)
                npare = None
                if not nsucs:
                    # a marker with no successor ("prune") records the
                    # pruned changeset's parents instead
                    npare = tuple(p.node() for p in prec.parents())
                if nprec in nsucs:
                    raise error.Abort(_("changeset %s cannot obsolete itself")
                                      % prec)

                # Effect flag can be different by relation
                if saveeffectflag:
                    # The effect flag is saved in a versioned field name for
                    # future evolution
                    effectflag = obsutil.geteffectflag(prec, sucs)
                    localmetadata[obsutil.EFFECTFLAGFIELD] = "%d" % effectflag

                # Creating the marker causes the hidden cache to become
                # invalid, which causes recomputation when we ask for
                # prec.parents() above. Resulting in n^2 behavior. So let's
                # prepare all of the args first, then create the markers.
                markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata,
                                 ui=repo.ui)
        repo.filteredrevcache.clear()
General Comments 0
You need to be logged in to leave comments. Login now