obsolete: allow multiple predecessors in createmarkers...
Boris Feld
r39958:6335c0de default

# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "predecessor" and possible
replacements are called "successors". Markers that use changeset X as
a predecessor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "predecessor markers of Y" because they hold
information about the predecessors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

  (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. The marker format depends on the
version. See the comment associated with each format for details.

"""
from __future__ import absolute_import

import errno
import struct

from .i18n import _
from . import (
    encoding,
    error,
    node,
    obsutil,
    phases,
    policy,
    pycompat,
    util,
)
from .utils import dateutil

parsers = policy.importmod(r'parsers')

_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third-party extension to enable this.
_enabled = False

# Options for obsolescence
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'

def _getoptionvalue(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    configkey = 'evolution.%s' % option
    newconfig = repo.ui.configbool('experimental', configkey)

    # Return the value only if defined
    if newconfig is not None:
        return newconfig

    # Fallback on generic option
    try:
        return repo.ui.configbool('experimental', 'evolution')
    except (error.ConfigError, AttributeError):
        # Fallback on the old-fashioned config
        # inconsistent config: experimental.evolution
        result = set(repo.ui.configlist('experimental', 'evolution'))

        if 'all' in result:
            return True

        # For migration purposes, temporarily return true if the config hasn't
        # been set but _enabled is true.
        if len(result) == 0 and _enabled:
            return True

        # Temporary hack for next check
        newconfig = repo.ui.config('experimental', 'evolution.createmarkers')
        if newconfig:
            result.add('createmarkers')

        return option in result

def getoptions(repo):
    """Returns dicts showing state of obsolescence features."""

    createmarkersvalue = _getoptionvalue(repo, createmarkersopt)
    unstablevalue = _getoptionvalue(repo, allowunstableopt)
    exchangevalue = _getoptionvalue(repo, exchangeopt)

    # createmarkers must be enabled if other options are enabled
    if ((unstablevalue or exchangevalue) and not createmarkersvalue):
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return {
        createmarkersopt: createmarkersvalue,
        allowunstableopt: unstablevalue,
        exchangeopt: exchangevalue,
    }

def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    return getoptions(repo)[option]
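
# A minimal configuration sketch (illustrative, not part of this module):
# the 'experimental.evolution*' keys below are the ones _getoptionvalue
# reads; the exact hgrc content is an assumption for the example.
#
#   [experimental]
#   evolution.createmarkers = true
#   evolution.allowunstable = true
#   evolution.exchange = true
#
# With this in place, isenabled(repo, createmarkersopt) returns True and
# marker creation is permitted.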

# Creating aliases for marker flags because evolve extension looks for
# bumpedfix in obsolete.py
bumpedfix = obsutil.bumpedfix
usingsha256 = obsutil.usingsha256

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)
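
# A small worked check of the fm0 layout above (illustrative, not part of
# the original module): the fixed part packs uint8 + uint32 + one flag byte
# + a 20-byte node, big-endian, so _fm0fsize is 26 bytes:
#
#   >>> struct.calcsize('>BIB20s')
#   26
#
# Each successor then adds another _fm0fnodesize (20) bytes before the
# metadata blob.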

def _fm0readmarkers(data, off, stop):
    # Loop on markers
    while off < stop:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)

def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents, 1):
            metadata['p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

def _fm0encodemeta(meta):
265 """Return encoded metadata string to string mapping.
265 """Return encoded metadata string to string mapping.
266
266
267 Assume no ':' in key and no '\0' in both key and value."""
267 Assume no ':' in key and no '\0' in both key and value."""
268 for key, value in meta.iteritems():
268 for key, value in meta.iteritems():
269 if ':' in key or '\0' in key:
269 if ':' in key or '\0' in key:
270 raise ValueError("':' and '\0' are forbidden in metadata key'")
270 raise ValueError("':' and '\0' are forbidden in metadata key'")
271 if '\0' in value:
271 if '\0' in value:
272 raise ValueError("':' is forbidden in metadata value'")
272 raise ValueError("':' is forbidden in metadata value'")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            key, value = l.split(':')
            d[key] = value
    return d
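
# A minimal round-trip sketch for the fm0 metadata encoding (illustrative,
# not part of the original module; the dictionary content is hypothetical):
#
#   >>> meta = {'user': 'alice', 'note': 'amended'}
#   >>> _fm0decodemeta(_fm0encodemeta(meta)) == meta
#   True
#
# Keys containing ':' or '\0', and values containing '\0', are rejected with
# ValueError because they would corrupt the encoding.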

## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: predecessor changeset identifier.
#
# - N*(20 or 32) bytes: successors changesets identifiers.
#
# - P*(20 or 32) bytes: parents of the predecessors changesets.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize(_fm1metapair)
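
# A small worked check of the fm1 layout above (illustrative, not part of
# the original module): uint32 + float64 + int16 + uint16 + three uint8
# fields + a 20-byte node gives a 39-byte fixed part:
#
#   >>> struct.calcsize('>IdhHBBB20s')  # == _fm1fsize
#   39
#
# A marker with no parent data stored encodes numpar == _fm1parentnone (3).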

def _fm1purereadmarkers(data, off, stop):
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    ufixed = struct.Struct(_fm1fixed).unpack

    while off < stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in pycompat.xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)

def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        if lk > 255:
            msg = ('obsstore metadata key cannot be longer than 255 bytes'
                   ' (key "%s" is %u bytes)') % (key, lk)
            raise error.ProgrammingError(msg)
        if lv > 255:
            msg = ('obsstore metadata value cannot be longer than 255 bytes'
                   ' (value "%s" for key "%s" is %u bytes)') % (value, key, lv)
            raise error.ProgrammingError(msg)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)
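
# A minimal encode/decode round-trip sketch (illustrative, not part of the
# original module; the 20-byte node values are hypothetical placeholders):
#
#   >>> m = (b'\x11' * 20, (b'\x22' * 20,), 0, ((b'user', b'alice'),),
#   ...      (0.0, 0), None)
#   >>> data = _fm1encodeonemarker(m)
#   >>> list(_fm1purereadmarkers(data, 0, len(data))) == [m]
#   True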

def _fm1readmarkers(data, off, stop):
    native = getattr(parsers, 'fm1readmarkers', None)
    if not native:
        return _fm1purereadmarkers(data, off, stop)
    return native(data, off, stop)

# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}

def _readmarkerversion(data):
    return _unpack('>B', data[0:1])[0]

@util.nogc
def _readmarkers(data, off=None, stop=None):
    """Read and enumerate markers from raw data"""
    diskversion = _readmarkerversion(data)
    if not off:
        off = 1  # skip 1 byte version number
    if stop is None:
        stop = len(data)
    if diskversion not in formats:
        msg = _('parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    return diskversion, formats[diskversion][0](data, off, stop)

def encodeheader(version=_fm0version):
    return _pack('>B', version)

def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for marker in markers:
        yield encodeone(marker)
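
# Sketch of a full-stream round-trip (illustrative, not part of the original
# module), reusing the hypothetical marker `m` from the example above:
#
#   >>> stream = b''.join(encodemarkers([m], addheader=True,
#   ...                                 version=_fm1version))
#   >>> version, markers = _readmarkers(stream)
#   >>> version == _fm1version and list(markers) == [m]
#   True
#
# The first byte of `stream` is the version header written by encodeheader().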

@util.nogc
def _addsuccessors(successors, markers):
    for mark in markers:
        successors.setdefault(mark[0], set()).add(mark)

@util.nogc
def _addpredecessors(predecessors, markers):
    for mark in markers:
        for suc in mark[1]:
            predecessors.setdefault(suc, set()).add(mark)

@util.nogc
def _addchildren(children, markers):
    for mark in markers:
        parents = mark[5]
        if parents is not None:
            for p in parents:
                children.setdefault(p, set()).add(mark)
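
# Illustrative use of the index helpers above (not part of the original
# module), with a single hypothetical prune marker of B that records its
# parent A:
#
#   >>> A, B = b'\x01' * 20, b'\x02' * 20
#   >>> m = (B, (), 0, (), (0.0, 0), (A,))
#   >>> succs, children = {}, {}
#   >>> _addsuccessors(succs, [m])
#   >>> _addchildren(children, [m])
#   >>> B in succs and A in children
#   True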

def _checkinvalidmarkers(markers):
    """search for markers with invalid data and raise an error if needed

    Exists as a separate function to allow the evolve extension more
    subtle handling.
    """
    for mark in markers:
        if node.nullid in mark[1]:
            raise error.Abort(_('bad obsolescence marker detected: '
                                'invalid successors nullid'))

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - predecessors[x] -> set(markers on predecessors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on predecessors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec: nodeid, predecessor changeset
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob in UTF-8, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of predecessors
    #          None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # holds the various obsolescence-related caches
        self.caches = {}
        self.svfs = svfs
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        if not self._cached(r'_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
571 """obsolete: add a new obsolete marker
571 """obsolete: add a new obsolete marker
572
572
573 * ensuring it is hashable
573 * ensuring it is hashable
574 * check mandatory metadata
574 * check mandatory metadata
575 * encode metadata
575 * encode metadata
576
576
577 If you are a human writing code creating marker you want to use the
577 If you are a human writing code creating marker you want to use the
578 `createmarkers` function in this module instead.
578 `createmarkers` function in this module instead.
579
579
580 return True if a new marker have been added, False if the markers
580 return True if a new marker have been added, False if the markers
581 already existed (no op).
581 already existed (no op).
582 """
582 """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = dateutil.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = dateutil.makedate()
            else:
                date = dateutil.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))
        for k, v in metadata:
            try:
                # might be better to reject non-ASCII keys
                k.decode('utf-8')
                v.decode('utf-8')
            except UnicodeDecodeError:
                raise error.ProgrammingError(
                    'obsstore metadata must be valid UTF-8 sequence '
                    '(key = %r, value = %r)'
                    % (pycompat.bytestr(k), pycompat.bytestr(v)))

        marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Takes care of filtering duplicates.
        Returns the number of new markers."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                data = b''.join(encodemarkers(new, offset == 0, self._version))
                f.write(data)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            addedmarkers = transaction.changes.get('obsmarkers')
            if addedmarkers is not None:
                addedmarkers.update(new)
            self._addmarkers(new, data)
            # new markers *may* have changed several sets. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = '%d' % (previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        return self.svfs.tryread('obsstore')

    @propertycache
    def _version(self):
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def predecessors(self):
        predecessors = {}
        _addpredecessors(predecessors, self._all)
        return predecessors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        return attr in self.__dict__

    def _addmarkers(self, markers, rawdata):
        markers = list(markers)  # to allow repeated iteration
        self._data = self._data + rawdata
        self._all.extend(markers)
        if self._cached(r'successors'):
            _addsuccessors(self.successors, markers)
        if self._cached(r'predecessors'):
            _addpredecessors(self.predecessors, markers)
        if self._cached(r'children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

720 "relevant" to a set of nodes mean:
720 "relevant" to a set of nodes mean:
721
721
722 - marker that use this changeset as successor
722 - marker that use this changeset as successor
723 - prune marker of direct children on this changeset
723 - prune marker of direct children on this changeset
724 - recursive application of the two rules on predecessors of these
724 - recursive application of the two rules on predecessors of these
725 markers
725 markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.predecessors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers

def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    defaultformat = ui.configint('format', 'obsstore-version')
    # rely on obsstore class default when possible.
    kwargs = {}
    if defaultformat is not None:
        kwargs[r'defaultformat'] = defaultformat
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **kwargs)
    if store and readonly:
        ui.warn(_('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
    return store

def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for the highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None
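
# Illustrative behaviour (not part of the original module): with local
# formats {0, 1}, the highest shared version wins, and a list of unknown
# versions yields None:
#
#   >>> commonversion([2, 1, 0])
#   1
#   >>> commonversion([2, 3]) is None
#   True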

# arbitrarily picked to fit into the 8K limit from HTTP servers
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(data)
    return keys
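
# A minimal sketch (illustrative, not part of the original module), reusing
# the hypothetical prune marker `m` from the index example above:
#
#   >>> keys = _pushkeyescape([m])
#   >>> sorted(keys)
#   ['dump0']
#   >>> util.b85decode(keys['dump0'])[0:1] == _pack('>B', _fm0version)
#   True
#
# Each 'dump%i' value stays under _maxpayload bytes before base85 encoding.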

def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(sorted(repo.obsstore))

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return False
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return False
    data = util.b85decode(new)
    with repo.lock(), repo.transaction('pushkey: obsolete markers') as tr:
        repo.obsstore.mergemarkers(tr, data)
        repo.invalidatevolatilesets()
        return True

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        if name in cachefuncs:
            msg = "duplicated registration for volatileset '%s' (existing: %r)"
            raise error.ProgrammingError(msg % (name, cachefuncs[name]))
        cachefuncs[name] = func
        return func
    return decorator

def getrevs(repo, name):
838 """Return the set of revision that belong to the <name> set
838 """Return the set of revision that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]

# To be simple we need to invalidate the obsolescence cache when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence-related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists on
    the repo.

    (We could be smarter here given the exact event that triggered the cache
    clearing)"""
    # only clear caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

def _mutablerevs(repo):
    """the set of mutable revisions in the repository"""
    return repo._phasecache.getrevset(repo, phases.mutablephases)

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    getnode = repo.changelog.node
    notpublic = _mutablerevs(repo)
    isobs = repo.obsstore.successors.__contains__
    obs = set(r for r in notpublic if isobs(getnode(r)))
    return obs

@cachefor('orphan')
def _computeorphanset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    pfunc = repo.changelog.parentrevs
    mutable = _mutablerevs(repo)
    obsolete = getrevs(repo, 'obsolete')
    others = mutable - obsolete
    unstable = set()
    for r in sorted(others):
        # A rev is unstable if one of its parents is obsolete or unstable;
        # this works since we traverse following growing rev order
890 for p in pfunc(r):
890 for p in pfunc(r):
891 if p in obsolete or p in unstable:
891 if p in obsolete or p in unstable:
892 unstable.add(r)
892 unstable.add(r)
893 break
893 break
894 return unstable
894 return unstable
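
# Worked example (hypothetical revs): given the graph
#
#   0 (public) --- 1 (obsolete) --- 2 --- 3
#
# rev 2 is flagged through its obsolete parent 1, and rev 3 through its
# already-unstable parent 2; visiting revs in increasing order guarantees
# that 2 is decided before 3 is examined.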

@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete revisions with non-obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'orphan'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete revisions without non-obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
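
# Worked example (hypothetical revs): if rev 2 is obsolete but its child 3 is
# not, 2 is 'suspended': it cannot be hidden while an orphan descendant keeps
# it reachable. Once 3 is rewritten as well and no non-obsolete descendant
# remains, both revisions become 'extinct' and can be hidden entirely.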

@cachefor('phasedivergent')
def _computephasedivergentset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    tonode = cl.node
    for rev in repo.revs('(not public()) and (not obsolete())'):
        # We only evaluate mutable, non-obsolete revisions
        node = tonode(rev)
        # (future) a cache of predecessors may be worth it if splits are
        # very common
        for pnode in obsutil.allpredecessors(repo.obsstore, [node],
                                             ignoreflags=bumpedfix):
            prev = torev(pnode) # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public predecessor
                bumped.add(rev)
                break # Next draft!
    return bumped
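
# Worked example (hypothetical changesets): if a draft X' was created by
# amending X, and X is later published by someone else, the marker X -> X'
# now points at a public predecessor, so X' lands in this set (historically
# called 'bumped').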

@cachefor('contentdivergent')
def _computecontentdivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    tonode = repo.changelog.node
    for rev in repo.revs('(not public()) - obsolete()'):
        node = tonode(rev)
        mark = obsstore.predecessors.get(node, ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                obsutil.successorssets(repo, prec, cache=newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(rev)
                break
            toprocess.update(obsstore.predecessors.get(prec, ()))
    return divergent
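
# Worked example (hypothetical changesets): if two users independently amend
# X into Y and Z, successorssets() for X yields two non-empty sets, so both
# Y and Z are reported as content-divergent by the loop above.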

def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                  operation=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of ((<old>, ...), (<new>, ...)[, {metadata}])
    tuples. `old` and `news` are changectx objects. metadata is an optional
    dictionary containing metadata for this marker only. It is merged with
    the global metadata specified through the `metadata` argument of this
    function. Any string values in metadata must be UTF-8 bytes.
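
    For example, folding B and C into D could be expressed as (an
    illustrative sketch; `bctx`, `cctx` and `dctx` are hypothetical
    changectx objects):

        createmarkers(repo, [((bctx, cctx), (dctx,))], operation='fold')

    while the pre-existing single-predecessor form remains valid:

        createmarkers(repo, [(bctx, (dctx,))])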

    Trying to obsolete a public changeset will raise an exception.

    The current user and date are used unless specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        luser = repo.ui.config('devel', 'user.obsmarker') or repo.ui.username()
        metadata['user'] = encoding.fromlocal(luser)

    # Operation metadata handling
    useoperation = repo.ui.configbool('experimental',
                                      'evolution.track-operation')
    if useoperation and operation:
        metadata['operation'] = operation

    # Effect flag metadata handling
    saveeffectflag = repo.ui.configbool('experimental',
                                        'evolution.effect-flags')

    with repo.transaction('add-obsolescence-marker') as tr:
        markerargs = []
        for rel in relations:
            predecessors = rel[0]
            if not isinstance(predecessors, tuple):
                # preserve compat with old API until all callers are migrated
                predecessors = (predecessors,)
            if 1 < len(predecessors) and len(rel[1]) != 1:
                msg = 'Fold markers can only have 1 successor, not %d'
                raise error.ProgrammingError(msg % len(rel[1]))
            for prec in predecessors:
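                # Illustrative note: after the normalization above, a legacy
                # relation such as (bctx, (dctx,)) behaves like
                # ((bctx,), (dctx,)), while a fold such as
                # ((actx, bctx), (dctx,)) produces one marker per predecessor.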
                sucs = rel[1]
                localmetadata = metadata.copy()
                if 2 < len(rel):
                    localmetadata.update(rel[2])

                if not prec.mutable():
                    raise error.Abort(_("cannot obsolete public changeset: %s")
                                      % prec,
                                      hint="see 'hg help phases' for details")
                nprec = prec.node()
                nsucs = tuple(s.node() for s in sucs)
                npare = None
                if not nsucs:
                    npare = tuple(p.node() for p in prec.parents())
                if nprec in nsucs:
                    raise error.Abort(_("changeset %s cannot obsolete itself")
                                      % prec)

                # The effect flag can differ from one relation to another
                if saveeffectflag:
                    # The effect flag is saved in a versioned field name for
                    # future evolution
                    effectflag = obsutil.geteffectflag(prec, sucs)
                    localmetadata[obsutil.EFFECTFLAGFIELD] = "%d" % effectflag

                # Creating the marker causes the hidden cache to become
                # invalid, which causes recomputation when we ask for
                # prec.parents() above, resulting in n^2 behavior. So let's
                # prepare all of the args first, then create the markers.
                markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata,
                                 ui=repo.ui)
        repo.filteredrevcache.clear()