obsolete: prefetch the repo.obsstore used in phasedivergence loop...
Boris Feld
r40498:6e2a2455 default
@@ -1,1059 +1,1060 @@
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

9 """Obsolete marker handling
9 """Obsolete marker handling
10
10
11 An obsolete marker maps an old changeset to a list of new
11 An obsolete marker maps an old changeset to a list of new
12 changesets. If the list of new changesets is empty, the old changeset
12 changesets. If the list of new changesets is empty, the old changeset
13 is said to be "killed". Otherwise, the old changeset is being
13 is said to be "killed". Otherwise, the old changeset is being
14 "replaced" by the new changesets.
14 "replaced" by the new changesets.
15
15
16 Obsolete markers can be used to record and distribute changeset graph
16 Obsolete markers can be used to record and distribute changeset graph
17 transformations performed by history rewrite operations, and help
17 transformations performed by history rewrite operations, and help
18 building new tools to reconcile conflicting rewrite actions. To
18 building new tools to reconcile conflicting rewrite actions. To
19 facilitate conflict resolution, markers include various annotations
19 facilitate conflict resolution, markers include various annotations
20 besides old and news changeset identifiers, such as creation date or
20 besides old and news changeset identifiers, such as creation date or
21 author name.
21 author name.
22
22
23 The old obsoleted changeset is called a "predecessor" and possible
23 The old obsoleted changeset is called a "predecessor" and possible
24 replacements are called "successors". Markers that used changeset X as
24 replacements are called "successors". Markers that used changeset X as
25 a predecessor are called "successor markers of X" because they hold
25 a predecessor are called "successor markers of X" because they hold
26 information about the successors of X. Markers that use changeset Y as
26 information about the successors of X. Markers that use changeset Y as
27 a successors are call "predecessor markers of Y" because they hold
27 a successors are call "predecessor markers of Y" because they hold
28 information about the predecessors of Y.
28 information about the predecessors of Y.
29
29
30 Examples:
30 Examples:
31
31
32 - When changeset A is replaced by changeset A', one marker is stored:
32 - When changeset A is replaced by changeset A', one marker is stored:
33
33
34 (A, (A',))
34 (A, (A',))
35
35
36 - When changesets A and B are folded into a new changeset C, two markers are
36 - When changesets A and B are folded into a new changeset C, two markers are
37 stored:
37 stored:
38
38
39 (A, (C,)) and (B, (C,))
39 (A, (C,)) and (B, (C,))
40
40
41 - When changeset A is simply "pruned" from the graph, a marker is created:
41 - When changeset A is simply "pruned" from the graph, a marker is created:
42
42
43 (A, ())
43 (A, ())
44
44
45 - When changeset A is split into B and C, a single marker is used:
45 - When changeset A is split into B and C, a single marker is used:
46
46
47 (A, (B, C))
47 (A, (B, C))
48
48
49 We use a single marker to distinguish the "split" case from the "divergence"
49 We use a single marker to distinguish the "split" case from the "divergence"
50 case. If two independent operations rewrite the same changeset A in to A' and
50 case. If two independent operations rewrite the same changeset A in to A' and
51 A'', we have an error case: divergent rewriting. We can detect it because
51 A'', we have an error case: divergent rewriting. We can detect it because
52 two markers will be created independently:
52 two markers will be created independently:
53
53
54 (A, (B,)) and (A, (C,))
54 (A, (B,)) and (A, (C,))
55
55
56 Format
56 Format
57 ------
57 ------
58
58
59 Markers are stored in an append-only file stored in
59 Markers are stored in an append-only file stored in
60 '.hg/store/obsstore'.
60 '.hg/store/obsstore'.
61
61
62 The file starts with a version header:
62 The file starts with a version header:
63
63
64 - 1 unsigned byte: version number, starting at zero.
64 - 1 unsigned byte: version number, starting at zero.
65
65
66 The header is followed by the markers. Marker format depend of the version. See
66 The header is followed by the markers. Marker format depend of the version. See
67 comment associated with each format for details.
67 comment associated with each format for details.
68
68
69 """
69 """
from __future__ import absolute_import

import errno
import hashlib
import struct

from .i18n import _
from . import (
    encoding,
    error,
    node,
    obsutil,
    phases,
    policy,
    pycompat,
    util,
)
from .utils import dateutil

parsers = policy.importmod(r'parsers')

_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# Options for obsolescence
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'

def _getoptionvalue(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    configkey = 'evolution.%s' % option
    newconfig = repo.ui.configbool('experimental', configkey)

    # Return the value only if defined
    if newconfig is not None:
        return newconfig

    # Fallback on generic option
    try:
        return repo.ui.configbool('experimental', 'evolution')
    except (error.ConfigError, AttributeError):
        # Fallback on old-fashioned config
        # inconsistent config: experimental.evolution
        result = set(repo.ui.configlist('experimental', 'evolution'))

        if 'all' in result:
            return True

        # For migration purposes, temporarily return true if the config hasn't
        # been set but _enabled is true.
        if len(result) == 0 and _enabled:
            return True

        # Temporary hack for next check
        newconfig = repo.ui.config('experimental', 'evolution.createmarkers')
        if newconfig:
            result.add('createmarkers')

        return option in result

def getoptions(repo):
    """Returns dicts showing state of obsolescence features."""

    createmarkersvalue = _getoptionvalue(repo, createmarkersopt)
    unstablevalue = _getoptionvalue(repo, allowunstableopt)
    exchangevalue = _getoptionvalue(repo, exchangeopt)

    # createmarkers must be enabled if other options are enabled
    if ((unstablevalue or exchangevalue) and not createmarkersvalue):
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return {
        createmarkersopt: createmarkersvalue,
        allowunstableopt: unstablevalue,
        exchangeopt: exchangevalue,
    }

def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    return getoptions(repo)[option]

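# Illustrative example (not part of the original file): the option
# constants above map to 'experimental.evolution.*' settings, e.g. in
# an hgrc:
#
#   [experimental]
#   evolution.createmarkers = yes
#   evolution.exchange = yes
#
# after which isenabled(repo, createmarkersopt) and
# isenabled(repo, exchangeopt) both return True.
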
# Creating aliases for marker flags because evolve extension looks for
# bumpedfix in obsolete.py
bumpedfix = obsutil.bumpedfix
usingsha256 = obsutil.usingsha256

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)

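# Illustrative sketch (not part of the original file): packing and
# unpacking one minimal version-0 marker by hand with the structures
# above. The node values are made up.
def _fm0demo():
    pre = b'\x11' * 20               # obsoleted changeset (fake id)
    suc = b'\x22' * 20               # one successor (fake id)
    meta = b'user:alice'             # one 'key:value' metadata string
    raw = _pack(_fm0fixed, 1, len(meta), 0, pre) + suc + meta
    numsuc, mdsize, flags, prec = _unpack(_fm0fixed, raw[:_fm0fsize])
    assert (numsuc, mdsize, flags, prec) == (1, len(meta), 0, pre)
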
def _fm0readmarkers(data, off, stop):
    # Loop on markers
    while off < stop:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid, drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)

def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents, 1):
            metadata['p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata keys")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata values")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            key, value = l.split(':', 1)
            d[key] = value
    return d

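# Illustrative sketch (not part of the original file): the fm0 metadata
# encoding is a flat '\0'-separated list of 'key:value' strings, so a
# round trip through the two helpers above is lossless.
def _fm0metademo():
    meta = {'user': 'alice', 'note': 'amended'}
    blob = _fm0encodemeta(meta)        # 'note:amended\0user:alice'
    assert _fm0decodemeta(blob) == meta
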
## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: predecessor changeset identifier.
#
# - N*(20 or 32) bytes: successors changesets identifiers.
#
# - P*(20 or 32) bytes: parents of the predecessors changesets.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize(_fm1metapair)

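# Illustrative sketch (not part of the original file): one pack/unpack
# round trip of the fm1 fixed part ('>IdhHBBB20s'). The values are made
# up; numpar uses the "no parent data stored" sentinel defined above.
def _fm1demo():
    prec = b'\x33' * 20
    fixed = _pack(_fm1fixed, _fm1fsize, 0.0, 0, 0, 0, _fm1parentnone,
                  0, prec)
    size, secs, tz, flags, numsuc, numpar, nummeta, node20 = _unpack(
        _fm1fixed, fixed)
    assert (size, numsuc, numpar, node20) == (_fm1fsize, 0,
                                              _fm1parentnone, prec)
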
def _fm1purereadmarkers(data, off, stop):
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    ufixed = struct.Struct(_fm1fixed).unpack

    while off < stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in pycompat.xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)

def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        if lk > 255:
            msg = ('obsstore metadata key cannot be longer than 255 bytes'
                   ' (key "%s" is %u bytes)') % (key, lk)
            raise error.ProgrammingError(msg)
        if lv > 255:
            msg = ('obsstore metadata value cannot be longer than 255 bytes'
                   ' (value "%s" for key "%s" is %u bytes)') % (value, key, lv)
            raise error.ProgrammingError(msg)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)

def _fm1readmarkers(data, off, stop):
    native = getattr(parsers, 'fm1readmarkers', None)
    if not native:
        return _fm1purereadmarkers(data, off, stop)
    return native(data, off, stop)

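# Illustrative sketch (not part of the original file): round-tripping
# one marker through the fm1 encoder and the pure-Python reader. The
# node ids and date are made up.
def _fm1roundtripdemo():
    marker = (b'\x11' * 20, (b'\x22' * 20,), 0,
              ((b'user', b'alice'),), (0.0, 0), None)
    data = _fm1encodeonemarker(marker)
    assert list(_fm1purereadmarkers(data, 0, len(data))) == [marker]
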
# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}

def _readmarkerversion(data):
    return _unpack('>B', data[0:1])[0]

@util.nogc
def _readmarkers(data, off=None, stop=None):
    """Read and enumerate markers from raw data"""
    diskversion = _readmarkerversion(data)
    if not off:
        off = 1  # skip 1 byte version number
    if stop is None:
        stop = len(data)
    if diskversion not in formats:
        msg = _('parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    return diskversion, formats[diskversion][0](data, off, stop)

def encodeheader(version=_fm0version):
    return _pack('>B', version)

def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for marker in markers:
        yield encodeone(marker)

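# Illustrative sketch (not part of the original file): with
# addheader=True, 'encodemarkers' produces exactly the byte stream that
# '_readmarkers' consumes. The marker is a made-up prune marker.
def _streamdemo():
    marker = (b'\x11' * 20, (), 0, (), (0.0, 0), None)
    data = b''.join(encodemarkers([marker], True, _fm1version))
    version, markers = _readmarkers(data)
    assert version == _fm1version and list(markers) == [marker]
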
@util.nogc
def _addsuccessors(successors, markers):
    for mark in markers:
        successors.setdefault(mark[0], set()).add(mark)

@util.nogc
def _addpredecessors(predecessors, markers):
    for mark in markers:
        for suc in mark[1]:
            predecessors.setdefault(suc, set()).add(mark)

@util.nogc
def _addchildren(children, markers):
    for mark in markers:
        parents = mark[5]
        if parents is not None:
            for p in parents:
                children.setdefault(p, set()).add(mark)

def _checkinvalidmarkers(markers):
    """search for markers with invalid data and raise an error if needed

    Exists as a separate function to allow the evolve extension a more
    subtle handling.
    """
    for mark in markers:
        if node.nullid in mark[1]:
            raise error.Abort(_('bad obsolescence marker detected: '
                                'invalid successors nullid'))

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - predecessors[x] -> set(markers on predecessors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on predecessors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec:    nodeid, predecessor changeset
    # succs:   tuple of nodeid, successor changesets (0-N length)
    # flag:    integer, flag field carrying modifier for the markers (see doc)
    # meta:    binary blob in UTF-8, encoded metadata dictionary
    # date:    (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of predecessors
    #          None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related data
        self.caches = {}
        self.svfs = svfs
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        if not self._cached(r'_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating markers you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker has been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = dateutil.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = dateutil.makedate()
            else:
                date = dateutil.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(
                r'in-marker cycle with %s' % pycompat.sysstr(node.hex(prec)))

        metadata = tuple(sorted(metadata.iteritems()))
        for k, v in metadata:
            try:
                # might be better to reject non-ASCII keys
                k.decode('utf-8')
                v.decode('utf-8')
            except UnicodeDecodeError:
                raise error.ProgrammingError(
                    'obsstore metadata must be valid UTF-8 sequence '
                    '(key = %r, value = %r)'
                    % (pycompat.bytestr(k), pycompat.bytestr(v)))

        marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicates.
        Return the number of new markers."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                data = b''.join(encodemarkers(new, offset == 0, self._version))
                f.write(data)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            addedmarkers = transaction.changes.get('obsmarkers')
            if addedmarkers is not None:
                addedmarkers.update(new)
            self._addmarkers(new, data)
            # new markers *may* have changed several sets. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = '%d' % (previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        return self.svfs.tryread('obsstore')

    @propertycache
    def _version(self):
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def predecessors(self):
        predecessors = {}
        _addpredecessors(predecessors, self._all)
        return predecessors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        return attr in self.__dict__

    def _addmarkers(self, markers, rawdata):
        markers = list(markers)  # to allow repeated iteration
        self._data = self._data + rawdata
        self._all.extend(markers)
        if self._cached(r'successors'):
            _addsuccessors(self.successors, markers)
        if self._cached(r'predecessors'):
            _addpredecessors(self.predecessors, markers)
        if self._cached(r'children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use one of these nodes as successor
        - prune markers of direct children of these nodes
        - recursive application of the two rules on predecessors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.predecessors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers
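
    # Worked example (illustrative, not part of the original file):
    # with markers (A, (B,)) and (B, (C,)) in the store,
    # relevantmarkers([C]) walks C -> B -> A through 'predecessors' and
    # returns both markers, so exchanging the successors of C also
    # carries its full predecessor chain.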

def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    defaultformat = ui.configint('format', 'obsstore-version')
    # rely on obsstore class default when possible.
    kwargs = {}
    if defaultformat is not None:
        kwargs[r'defaultformat'] = defaultformat
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **kwargs)
    if store and readonly:
        ui.warn(_('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
    return store

def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for the highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None

# arbitrarily picked to fit into 8K limit from HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(data)
    return keys

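# Illustrative sketch (not part of the original file): a single small
# marker fits in one chunk, stored under the 'dump0' pushkey as a
# base85-encoded version-0 stream. The marker below is made up.
def _pushkeydemo():
    marker = (b'\x11' * 20, (), 0, (), (0.0, 0), None)
    keys = _pushkeyescape([marker])
    data = util.b85decode(keys['dump0'])
    assert _readmarkerversion(data) == _fm0version
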
def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(sorted(repo.obsstore))

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return False
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return False
    data = util.b85decode(new)
    with repo.lock(), repo.transaction('pushkey: obsolete markers') as tr:
        repo.obsstore.mergemarkers(tr, data)
        repo.invalidatevolatilesets()
        return True

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        if name in cachefuncs:
            msg = "duplicated registration for volatileset '%s' (existing: %r)"
            raise error.ProgrammingError(msg % (name, cachefuncs[name]))
        cachefuncs[name] = func
        return func
    return decorator

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]
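
# Illustrative sketch (not part of the original file): how an extension
# could register and query an extra volatile set through the decorator
# above. The set name 'demoset' is made up.
#
#     @cachefor('demoset')
#     def _computedemoset(repo):
#         return set()
#
#     getrevs(repo, 'demoset')  # computed once, then cached on obsstore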

# To be simple we need to invalidate obsolescence caches when:
#
# - a new changeset is added
# - public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence related caches from a repo

    This removes all caches in the obsstore if the obsstore already
    exists on the repo.

    (We could be smarter here given the exact event that triggered the
    cache clearing)"""
    # only clear caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

def _mutablerevs(repo):
    """the set of mutable revisions in the repository"""
    return repo._phasecache.getrevset(repo, phases.mutablephases)

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    getnode = repo.changelog.node
    notpublic = _mutablerevs(repo)
    isobs = repo.obsstore.successors.__contains__
    obs = set(r for r in notpublic if isobs(getnode(r)))
    return obs

@cachefor('orphan')
def _computeorphanset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    pfunc = repo.changelog.parentrevs
    mutable = _mutablerevs(repo)
    obsolete = getrevs(repo, 'obsolete')
    others = mutable - obsolete
    unstable = set()
    for r in sorted(others):
        # A rev is unstable if one of its parents is obsolete or unstable;
        # this works since we traverse in growing rev order
        for p in pfunc(r):
            if p in obsolete or p in unstable:
                unstable.add(r)
                break
    return unstable
897
897
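# Hedged standalone sketch (hypothetical helper, not repo-aware): the same
# single-pass propagation on a toy DAG. Revision numbers are assumed
# topologically sorted (every parent numbered below its children), so one
# increasing-order scan is enough to mark all transitive descendants.
def _example_propagate(parents, obsolete):
    # parents: {rev: tuple of parent revs}; obsolete: set of revs
    unstable = set()
    for r in sorted(parents):
        if r in obsolete:
            continue
        if any(p in obsolete or p in unstable for p in parents[r]):
            unstable.add(r)
    return unstable

# e.g. _example_propagate({1: (), 2: (1,), 3: (2,)}, {1}) == {2, 3}
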
@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'orphan'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')

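# Illustrative identity (hedged): 'suspended' and 'extinct' partition the
# obsolete set, i.e.
#
#     getrevs(repo, 'obsolete') ==
#         getrevs(repo, 'suspended') | getrevs(repo, 'extinct')
#
# An obsolete changeset is either still pinned by a live orphan descendant
# (suspended) or safe to hide entirely (extinct).
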
@cachefor('phasedivergent')
def _computephasedivergentset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    tonode = cl.node
    obsstore = repo.obsstore
    for rev in repo.revs('(not public()) and (not obsolete())'):
        # We only evaluate mutable, non-obsolete revisions
        node = tonode(rev)
        # (future) a cache of predecessors may be worth it if split is
        # very common
        for pnode in obsutil.allpredecessors(obsstore, [node],
                                             ignoreflags=bumpedfix):
            prev = torev(pnode) # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public predecessor
                bumped.add(rev)
                break # Next draft!
    return bumped

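# Hedged sketch of the optimization pattern used just above: hoisting
# attribute lookups out of a hot loop. The helper and its names are
# hypothetical; only the technique mirrors the code.
def _example_hoisting(repo, revs):
    obsstore = repo.obsstore          # fetched once, before the loop
    successors = obsstore.successors  # ditto for the nested attribute
    tonode = repo.changelog.node
    hits = 0
    for rev in revs:
        # only local-name lookups remain inside the loop; no repeated
        # repo.obsstore property access per iteration
        if tonode(rev) in successors:
            hits += 1
    return hits
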
@cachefor('contentdivergent')
def _computecontentdivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision."""
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    tonode = repo.changelog.node
    for rev in repo.revs('(not public()) - obsolete()'):
        node = tonode(rev)
        mark = obsstore.predecessors.get(node, ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                obsutil.successorssets(repo, prec, cache=newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(rev)
                break
            toprocess.update(obsstore.predecessors.get(prec, ()))
    return divergent

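# Illustrative example (hedged): if changeset X was rewritten independently
# as Y on one machine and Z on another, two markers exist:
#
#     (X, (Y,)) and (X, (Z,))
#
# obsutil.successorssets() then reports two competing non-empty sets,
# roughly [[Y], [Z]], so the loop above flags both Y and Z as
# content-divergent.
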
def makefoldid(relation, user):

    folddigest = hashlib.sha1(user)
    for p in relation[0] + relation[1]:
        folddigest.update('%d' % p.rev())
        folddigest.update(p.node())
    # Since a fold only has to compete against folds for the same
    # successors, it seems fine to use a small ID. Smaller IDs save space.
    return node.hex(folddigest.digest())[:8]

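# Hedged sketch (hypothetical values): every marker of a single fold shares
# one fold-id, letting readers reassemble the fold. With predecessors (A, B)
# folded into C, the two markers would carry something like:
#
#     fold-id=1f0dee64, fold-size=2, fold-idx=1   on (A, (C,))
#     fold-id=1f0dee64, fold-size=2, fold-idx=2   on (B, (C,))
#
# The 8-hex-digit truncation only needs to avoid collisions among folds
# targeting the same successors, hence the short ID.
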
def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                  operation=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of ((<old>,...), (<new>, ...)[,{metadata}])
    tuples. `old` and `new` are changectx objects. metadata is an optional
    dictionary containing metadata for this marker only. It is merged with
    the global metadata specified through the `metadata` argument of this
    function. Any string values in metadata must be UTF-8 bytes.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        luser = repo.ui.config('devel', 'user.obsmarker') or repo.ui.username()
        metadata['user'] = encoding.fromlocal(luser)

    # Operation metadata handling
    useoperation = repo.ui.configbool('experimental',
                                      'evolution.track-operation')
    if useoperation and operation:
        metadata['operation'] = operation

    # Effect flag metadata handling
    saveeffectflag = repo.ui.configbool('experimental',
                                        'evolution.effect-flags')

    with repo.transaction('add-obsolescence-marker') as tr:
        markerargs = []
        for rel in relations:
            predecessors = rel[0]
            if not isinstance(predecessors, tuple):
                # preserve compat with old API until all callers are migrated
                predecessors = (predecessors,)
            if len(predecessors) > 1 and len(rel[1]) != 1:
                msg = 'Fold markers can only have 1 successor, not %d'
                raise error.ProgrammingError(msg % len(rel[1]))
            foldid = None
            foldsize = len(predecessors)
            if 1 < foldsize:
                foldid = makefoldid(rel, metadata['user'])
            for foldidx, prec in enumerate(predecessors, 1):
                sucs = rel[1]
                localmetadata = metadata.copy()
                if len(rel) > 2:
                    localmetadata.update(rel[2])
                if foldid is not None:
                    localmetadata['fold-id'] = foldid
                    localmetadata['fold-idx'] = '%d' % foldidx
                    localmetadata['fold-size'] = '%d' % foldsize

                if not prec.mutable():
                    raise error.Abort(_("cannot obsolete public changeset: %s")
                                      % prec,
                                      hint="see 'hg help phases' for details")
                nprec = prec.node()
                nsucs = tuple(s.node() for s in sucs)
                npare = None
                if not nsucs:
                    npare = tuple(p.node() for p in prec.parents())
                if nprec in nsucs:
                    raise error.Abort(_("changeset %s cannot obsolete itself")
                                      % prec)

                # The effect flag can differ by relation
                if saveeffectflag:
                    # The effect flag is saved in a versioned field name for
                    # future evolution
                    effectflag = obsutil.geteffectflag(prec, sucs)
                    localmetadata[obsutil.EFFECTFLAGFIELD] = "%d" % effectflag

                # Creating the marker causes the hidden cache to become
                # invalid, which causes recomputation when we ask for
                # prec.parents() above, resulting in n^2 behavior. So let's
                # prepare all of the args first, then create the markers.
                markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata,
                                 ui=repo.ui)
            repo.filteredrevcache.clear()
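
# Hedged usage sketch of the `relations` format described in the docstring
# above. The changeset hashes and the 'note' metadata key are hypothetical;
# createmarkers() manages its own transaction, but callers normally hold
# the repo lock.
#
#     old, new = repo['123abc'], repo['456def']
#     createmarkers(repo, [((old,), (new,))], operation='amend')
#
#     a, b, c = repo['aaa111'], repo['bbb222'], repo['ccc333']
#     createmarkers(repo, [((a, b), (c,))], metadata={'note': b'folded'})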