##// END OF EJS Templates
obsstore: let read marker API take a range of offsets...
Jun Wu -
r33504:5d3ba439 default
parent child Browse files
Show More
@@ -1,1034 +1,1034 b''
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and news changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "precursor" and possible
replacements are called "successors". Markers that used changeset X as
a precursor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successors are call "precursor markers of Y" because they hold
information about the precursors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A in to A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

  (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. Marker format depend of the version. See
comment associated with each format for details.

"""
from __future__ import absolute_import

import errno
import struct

from .i18n import _
from . import (
    error,
    node,
    obsutil,
    phases,
    policy,
    util,
)

# C implementation of the parsers when available (falls back to pure Python)
parsers = policy.importmod(r'parsers')

# local aliases for the struct helpers used throughout this module
_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on third party extension extension to enable this.
_enabled = False

# Options for obsolescence
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'
100
100
def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    result = set(repo.ui.configlist('experimental', 'evolution'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't been
    # set but _enabled is true.
    if len(result) == 0 and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        not createmarkersopt in result):
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return option in result
121
121
### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeed to a changeset A which became public, we call A'
# "bumped" because it's a successors of a public changesets
#
# o    A' (bumped)
# |`:
# | o  A
# |/
# o    Z
#
# The way to solve this situation is to create a new changeset Ad as children
# of A. This changeset have the same content than A'. So the diff from A to A'
# is the same than the diff from A to Ad. Ad is marked as a successors of A'
#
# o   Ad
# |`:
# | x A'
# |'|
# o | A
# |/
# o   Z
#
# But by transitivity Ad is also a successors of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
# This flag mean that the successors express the changes between the public and
# bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
usingsha256 = 2

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)
180
180
def _fm0readmarkers(data, off, stop):
    """Yield markers parsed from fm0-encoded ``data[off:stop]``.

    Each yielded marker is a ``(prec, sucs, flags, metadata, date, parents)``
    tuple, matching ``obsstore.fields``.
    """
    # Loop on markers
    while off < stop:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)
232
231
def _fm0encodeonemarker(marker):
    """Return the fm0 binary encoding of a single marker tuple."""
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents, 1):
            metadata['p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata
252
251
def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata key'")
        if '\0' in value:
            raise ValueError("':' is forbidden in metadata value'")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
263
262
264 def _fm0decodemeta(data):
263 def _fm0decodemeta(data):
265 """Return string to string dictionary from encoded version."""
264 """Return string to string dictionary from encoded version."""
266 d = {}
265 d = {}
267 for l in data.split('\0'):
266 for l in data.split('\0'):
268 if l:
267 if l:
269 key, value = l.split(':')
268 key, value = l.split(':')
270 d[key] = value
269 d[key] = value
271 return d
270 return d
272
271
## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: precursor changeset identifier.
#
# - N*(20 or 32) bytes: successors changesets identifiers.
#
# - P*(20 or 32) bytes: parents of the precursors changesets.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize('BB')
319
318
def _fm1purereadmarkers(data, off, stop):
    """Yield markers parsed from fm1-encoded ``data[off:stop]``.

    Pure Python fallback used when the C parser is unavailable; see
    ``_fm1readmarkers``.
    """
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    ufixed = struct.Struct(_fm1fixed).unpack

    while off < stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
394
392
def _fm1encodeonemarker(marker):
    """Return the fm1 binary encoding of a single marker tuple."""
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)
430
428
def _fm1readmarkers(data, off, stop):
    """Read fm1 markers from ``data[off:stop]``.

    Dispatches to the C parser when available, otherwise falls back to the
    pure Python implementation.
    """
    native = getattr(parsers, 'fm1readmarkers', None)
    if not native:
        return _fm1purereadmarkers(data, off, stop)
    return native(data, off, stop)
437
434
# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
442
439
def _readmarkerversion(data):
    """Return the format version from the obsstore header (first byte)."""
    return _unpack('>B', data[0:1])[0]
445
442
@util.nogc
def _readmarkers(data, off=None, stop=None):
    """Read and enumerate markers from raw data"""
    diskversion = _readmarkerversion(data)
    if not off:
        off = 1  # skip 1 byte version number
    if stop is None:
        stop = len(data)
    if diskversion not in formats:
        msg = _('parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    return diskversion, formats[diskversion][0](data, off, stop)
455
455
def encodeheader(version=_fm0version):
    """Return the one-byte obsstore header for the given format version."""
    return _pack('>B', version)
458
458
def encodemarkers(markers, addheader=False, version=_fm0version):
    """Yield binary chunks encoding ``markers`` in the given format.

    When ``addheader`` is true, the version header chunk is yielded first.
    """
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for marker in markers:
        yield encodeone(marker)
467
467
@util.nogc
def _addsuccessors(successors, markers):
    """Index ``markers`` into the ``successors`` mapping, keyed by precursor."""
    for mark in markers:
        successors.setdefault(mark[0], set()).add(mark)
472
472
@util.nogc
def _addprecursors(precursors, markers):
    """Index ``markers`` into the ``precursors`` mapping, keyed by successor."""
    for mark in markers:
        for suc in mark[1]:
            precursors.setdefault(suc, set()).add(mark)
478
478
@util.nogc
def _addchildren(children, markers):
    """Index ``markers`` into the ``children`` mapping, keyed by parent.

    Markers without recorded parent data (``parents is None``) are skipped.
    """
    for mark in markers:
        parents = mark[5]
        if parents is not None:
            for p in parents:
                children.setdefault(p, set()).add(mark)
486
486
def _checkinvalidmarkers(markers):
    """search for marker with invalid data and raise error if needed

    Exist as a separated function to allow the evolve extension for a more
    subtle handling.
    """
    for mark in markers:
        if node.nullid in mark[1]:
            raise error.Abort(_('bad obsolescence marker detected: '
                                'invalid successors nullid'))
497
497
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on precursors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec: nodeid, precursor changesets
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    #          None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related cache
        self.caches = {}
        self.svfs = svfs
        # format version used when writing a brand new obsstore file
        self._defaultformat = defaultformat
        # when True, add()/create() raise instead of writing
        self._readonly = readonly

    def __iter__(self):
        # iterate over every marker currently known to the store
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        # Fast path: when markers are not loaded yet, answer from the file
        # size alone.  A store holding only the one-byte version header
        # (size <= 1) counts as empty; a missing file falls through to the
        # (empty) in-memory list, avoiding further stat() syscalls.
        if not self._cached('_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
                pass
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating marker you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker have been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = util.makedate()
            else:
                date = util.makedate()
        # precursor and successors must be 20-byte binary node ids
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        # sorted tuple keeps the marker hashable and the encoding stable
        metadata = tuple(sorted(metadata.iteritems()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicate.
        Return the number of new marker."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        # filter markers already in the store or duplicated in the input
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                data = b''.join(encodemarkers(new, offset == 0, self._version))
                f.write(data)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()'  here
                f.close()
            addedmarkers = transaction.changes.get('obsmarkers')
            if addedmarkers is not None:
                addedmarkers.update(new)
            self._addmarkers(new, data)
            # new marker *may* have changed several set. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        # raw binary content of the 'obsstore' file (empty string if absent)
        return self.svfs.tryread('obsstore')

    @propertycache
    def _version(self):
        # version byte from the on-disk data, or the configured default for
        # a store that has no data yet
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        # list of every decoded marker, in on-disk order
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        # precursor node -> set of markers using it as precursor
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def precursors(self):
        # successor node -> set of markers producing it
        precursors = {}
        _addprecursors(precursors, self._all)
        return precursors

    @propertycache
    def children(self):
        # parent node -> set of markers whose precursor is a child of it
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        # True when the propertycache <attr> has already been computed
        return attr in self.__dict__

    def _addmarkers(self, markers, rawdata):
        # incrementally update in-memory state after <rawdata> was appended
        # on disk, keeping only the already-computed caches in sync
        markers = list(markers) # to allow repeated iteration
        self._data = self._data + rawdata
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('precursors'):
            _addprecursors(self.precursors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes mean:

        - marker that use this changeset as successor
        - prune marker of direct children on this changeset
        - recursive application of the two rules on precursors of these markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                # a marker with an empty successors tuple (m[1]) is a prune
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            # recurse on the precursors of the newly found markers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers
719
719
def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    version = ui.configint('format', 'obsstore-version')
    # rely on obsstore class default when possible.
    extraargs = {} if version is None else {'defaultformat': version}
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **extraargs)
    # note: bool(store) may stat the obsstore file, so keep it first
    if store and readonly:
        ui.warn(_('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
    return store
735
735
def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # highest version known on both sides, if any
    return next((v for v in versions if v in formats), None)
747
747
# arbitrarily picked to fit into the 8K limit from the HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300
753
753
def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    chunks = []
    chunk = None
    chunklen = _maxpayload * 2  # force the creation of a first chunk
    for marker in markers:
        encoded = _fm0encodeonemarker(marker)
        if chunklen + len(encoded) > _maxpayload:
            # current chunk is full, open a fresh one
            chunk = []
            chunklen = 0
            chunks.append(chunk)
        chunk.append(encoded)
        chunklen += len(encoded)
    keys = {}
    header = _pack('>B', _fm0version)
    for idx, chunk in enumerate(reversed(chunks)):
        keys['dump%i' % idx] = util.b85encode(''.join([header] + chunk))
    return keys
774
774
def listmarkers(repo):
    """List markers over pushkey"""
    store = repo.obsstore
    if not store:
        # nothing to exchange
        return {}
    return _pushkeyescape(sorted(store))
780
780
def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    # only 'dump*' keys are meaningful for obsolescence markers
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return False
    # pushkey semantics: markers are append-only, no previous value expected
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return False
    rawdata = util.b85decode(new)
    repolock = repo.lock()
    try:
        txn = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(txn, rawdata)
            repo.invalidatevolatilesets()
            txn.close()
            return True
        finally:
            txn.release()
    finally:
        repolock.release()
802
802
# keep compatibility for the 4.3 cycle
def allprecursors(obsstore, nodes, ignoreflags=0):
    # deprecated thin wrapper: the implementation now lives in obsutil
    util.nouideprecwarn(
        'obsolete.allprecursors moved to obsutil.allprecursors', '4.3')
    return obsutil.allprecursors(obsstore, nodes, ignoreflags)
808
808
def allsuccessors(obsstore, nodes, ignoreflags=0):
    # deprecated thin wrapper: the implementation now lives in obsutil
    util.nouideprecwarn(
        'obsolete.allsuccessors moved to obsutil.allsuccessors', '4.3')
    return obsutil.allsuccessors(obsstore, nodes, ignoreflags)
813
813
def marker(repo, data):
    # deprecated thin wrapper: the implementation now lives in obsutil
    repo.ui.deprecwarn('obsolete.marker moved to obsutil.marker', '4.3')
    return obsutil.marker(repo, data)
818
818
def getmarkers(repo, nodes=None, exclusive=False):
    # deprecated thin wrapper: the implementation now lives in obsutil
    repo.ui.deprecwarn('obsolete.getmarkers moved to obsutil.getmarkers',
                       '4.3')
    return obsutil.getmarkers(repo, nodes=nodes, exclusive=exclusive)
823
823
def exclusivemarkers(repo, nodes):
    # deprecated thin wrapper: the implementation now lives in obsutil
    repo.ui.deprecwarn(
        'obsolete.exclusivemarkers moved to obsutil.exclusivemarkers', '4.3')
    return obsutil.exclusivemarkers(repo, nodes)
828
828
def foreground(repo, nodes):
    # deprecated thin wrapper: the implementation now lives in obsutil
    repo.ui.deprecwarn('obsolete.foreground moved to obsutil.foreground',
                       '4.3')
    return obsutil.foreground(repo, nodes)
833
833
def successorssets(repo, initialnode, cache=None):
    # deprecated thin wrapper: the implementation now lives in obsutil
    repo.ui.deprecwarn(
        'obsolete.successorssets moved to obsutil.successorssets', '4.3')
    return obsutil.successorssets(repo, initialnode, cache=cache)
838
838
# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}

def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def register(computefunc):
        if name in cachefuncs:
            msg = "duplicated registration for volatileset '%s' (existing: %r)"
            raise error.ProgrammingError(msg % (name, cachefuncs[name]))
        cachefuncs[name] = computefunc
        return computefunc
    return register
850
850
def getrevs(repo, name):
    """Return the set of revision that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    store = repo.obsstore
    if not store:
        # no markers at all: every named set is empty
        return frozenset()
    caches = store.caches
    if name not in caches:
        caches[name] = cachefuncs[name](repo)
    return caches[name]
861
861
862 # To be simple we need to invalidate obsolescence cache when:
862 # To be simple we need to invalidate obsolescence cache when:
863 #
863 #
864 # - new changeset is added:
864 # - new changeset is added:
865 # - public phase is changed
865 # - public phase is changed
866 # - obsolescence marker are added
866 # - obsolescence marker are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove every obsolescence-related cache from a repo.

    Caches are only cleared when an obsstore has already been loaded for
    the repo; otherwise there is nothing to invalidate.

    (We could be smarter here given the exact event that triggered the
    cache clearing.)
    """
    # accessing repo.obsstore would instantiate the store; only clear when
    # the filecache already holds one
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()
879
879
def _mutablerevs(repo):
    """the set of mutable revision in the repository"""
    mutablephases = (phases.draft, phases.secret)
    return repo._phasecache.getrevset(repo, mutablephases)
883
883
@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    tonode = repo.changelog.node
    successors = repo.obsstore.successors
    # a mutable revision is obsolete when some marker uses it as precursor
    return set(rev for rev in _mutablerevs(repo) if tonode(rev) in successors)
892
892
@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    parentrevs = repo.changelog.parentrevs
    obsolete = getrevs(repo, 'obsolete')
    candidates = _mutablerevs(repo) - obsolete
    unstable = set()
    # ascending order guarantees every parent is classified before its
    # children, so one pass suffices
    for rev in sorted(candidates):
        # a rev is unstable if one of its parents is obsolete or unstable
        for parent in parentrevs(rev):
            if parent in obsolete or parent in unstable:
                unstable.add(rev)
                break
    return unstable
909
909
@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    unstableancestors = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(rev for rev in getrevs(repo, 'obsolete')
               if rev in unstableancestors)
915
915
@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    obsolete = getrevs(repo, 'obsolete')
    suspended = getrevs(repo, 'suspended')
    return obsolete - suspended
920
920
921
921
@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # hoist attribute lookups out of the loop
    getphase = repo._phasecache.phase  # would be faster to grab the full list
    publicphase = phases.public
    noderev = repo.changelog.nodemap.get
    # only mutable, non-obsolete revisions are candidates
    for ctx in repo.set('(not public()) and (not obsolete())'):
        rev = ctx.rev()
        # (future) a cache of precursors may be worth it if split is common
        for precnode in obsutil.allprecursors(repo.obsstore, [ctx.node()],
                                              ignoreflags=bumpedfix):
            precrev = noderev(precnode)  # unfiltered! but so is phasecache
            if precrev is not None and getphase(repo, precrev) <= publicphase:
                # this draft tries to obsolete a public precursor: bumped
                bumped.add(rev)
                break  # move on to the next draft
    return bumped
944
944
@cachefor('divergent')
def _computedivergentset(repo):
    """the set of rev that compete to be the final successors of some revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    # shared successorssets cache, filled lazily across iterations
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        # walk the precursor chain of this revision; if any precursor has
        # more than one non-empty set of successors, this rev is divergent
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                obsutil.successorssets(repo, prec, cache=newermap)
            # drop empty sets (prunes) before counting competitors
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            # continue walking further back in history
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent
969
969
970
970
def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                  operation=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuple. `old` and `news` are changectx. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function,

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # Build the metadata shared by every marker created below.
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    trackop = repo.ui.configbool('experimental',
                                 'evolution.track-operation')
    if trackop and operation:
        metadata['operation'] = operation
    tr = repo.transaction('add-obsolescence-marker')
    try:
        # Precompute the arguments of every marker before creating any of
        # them: each creation invalidates the hidden cache, and asking for
        # prec.parents() after an invalidation forces a recomputation,
        # which would make interleaved prepare/create n^2 overall.
        pending = []
        for rel in relations:
            prec, sucs = rel[0], rel[1]
            relmeta = metadata.copy()
            if len(rel) > 2:
                # per-relation metadata overrides the shared values
                relmeta.update(rel[2])

            if not prec.mutable():
                raise error.Abort(_("cannot obsolete public changeset: %s")
                                  % prec,
                                  hint="see 'hg help phases' for details")
            precnode = prec.node()
            sucnodes = tuple(s.node() for s in sucs)
            parnodes = None
            if not sucnodes:
                # no successors (prune): record the parents on the marker
                parnodes = tuple(p.node() for p in prec.parents())
            if precnode in sucnodes:
                raise error.Abort(_("changeset %s cannot obsolete itself")
                                  % prec)

            pending.append((precnode, sucnodes, parnodes, relmeta))

        for precnode, sucnodes, parnodes, relmeta in pending:
            repo.obsstore.create(tr, precnode, sucnodes, flag,
                                 parents=parnodes, date=date,
                                 metadata=relmeta, ui=repo.ui)
        # new markers may hide changesets; drop stale filtered-rev caches
        repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
General Comments 0
You need to be logged in to leave comments. Login now