obsolete: remove superfluous pass statements
Augie Fackler
r34378:6b724caa default
@@ -1,1073 +1,1072 @@
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "predecessor" and possible
replacements are called "successors". Markers that use changeset X as
a predecessor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "predecessor markers of Y" because they hold
information about the predecessors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

  (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. Marker formats depend on the version.
See the comment associated with each format for details.

"""
from __future__ import absolute_import

import errno
import struct

from .i18n import _
from . import (
    error,
    node,
    obsutil,
    phases,
    policy,
    util,
)

parsers = policy.importmod(r'parsers')

_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# Options for obsolescence
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'

def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    result = set(repo.ui.configlist('experimental', 'stabilization'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't
    # been set but _enabled is true.
    if len(result) == 0 and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        createmarkersopt not in result):
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return option in result
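
# Usage sketch (hypothetical helper, not part of the original module):
# callers typically gate marker creation on the 'createmarkers' option.
def _sketchcancreatemarkers(repo):
    # True when this repo may record new obsolescence markers
    return isenabled(repo, createmarkersopt)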

### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it's a successor of a public changeset
#
#     o    A' (bumped)
#     |`:
#     | o  A
#     |/
#     o    Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
#     o   Ad
#     |`:
#     | x A'
#     |'|
#     o | A
#     |/
#     o Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker, <A', (Ad,)>.
# This flag means that the successors express the changes between the public
# and bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
usingsha256 = 2
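
# Illustrative sketch (hypothetical helper, not part of the original
# module): flags travel in the third field of a marker tuple and are
# tested bitwise.
def _sketchisbumpedfix(marker):
    # marker is (prec, succs, flags, meta, date, parents)
    return bool(marker[2] & bumpedfix)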

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)
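
# Illustrative sketch (hypothetical helper, not part of the original
# module): packing the fixed part of a version-0 marker with the formats
# defined above; 'prec' is assumed to be a 20-byte binary node id.
def _sketchfm0fixedpart(numsuc, mdsize, flags, prec):
    # '>BIB20s': uint8 successor count, uint32 metadata size,
    # uint8 flag field, 20-byte predecessor node
    return _pack(_fm0fixed, numsuc, mdsize, flags, prec)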

def _fm0readmarkers(data, off, stop):
    # Loop on markers
    while off < stop:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)

def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents, 1):
            metadata['p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in keys and no '\0' in keys or values."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata keys")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata values")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            # values may contain ':', so only split on the first one
            key, value = l.split(':', 1)
            d[key] = value
    return d
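
# Round-trip sketch (hypothetical helper, not part of the original
# module): for well-formed metadata, _fm0decodemeta inverts _fm0encodemeta.
def _sketchfm0roundtrip():
    meta = {'user': 'alice <alice@example.org>', 'operation': 'amend'}
    assert _fm0decodemeta(_fm0encodemeta(meta)) == meta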

## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: predecessor changeset identifier.
#
# - N*(20 or 32) bytes: successor changeset identifiers.
#
# - P*(20 or 32) bytes: parents of the predecessor changeset.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize(_fm1metapair)
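
# Illustrative sketch (hypothetical helper, not part of the original
# module): the fixed part of a version-1 marker, as laid out by _fm1fixed
# ('>IdhHBBB20s').
def _sketchfm1fixedfields(data, off):
    # total size, float64 date, int16 timezone, uint16 flags,
    # successor count, parent count, metadata count, predecessor node
    return struct.Struct(_fm1fixed).unpack(data[off:off + _fm1fsize])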

def _fm1purereadmarkers(data, off, stop):
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    ufixed = struct.Struct(_fm1fixed).unpack

    while off < stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)

def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1] // 60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)

def _fm1readmarkers(data, off, stop):
    native = getattr(parsers, 'fm1readmarkers', None)
    if not native:
        return _fm1purereadmarkers(data, off, stop)
    return native(data, off, stop)

# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}

def _readmarkerversion(data):
    return _unpack('>B', data[0:1])[0]

@util.nogc
def _readmarkers(data, off=None, stop=None):
    """Read and enumerate markers from raw data"""
    diskversion = _readmarkerversion(data)
    if not off:
        off = 1  # skip 1 byte version number
    if stop is None:
        stop = len(data)
    if diskversion not in formats:
        msg = _('parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    return diskversion, formats[diskversion][0](data, off, stop)

def encodeheader(version=_fm0version):
    return _pack('>B', version)

def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for marker in markers:
        yield encodeone(marker)
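
# Usage sketch (hypothetical helper, not part of the original module):
# serializing markers the way obsstore.add does below, with the version
# header included for a brand-new file.
def _sketchserialize(markers, version=_fm1version):
    return b''.join(encodemarkers(markers, addheader=True, version=version))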

@util.nogc
def _addsuccessors(successors, markers):
    for mark in markers:
        successors.setdefault(mark[0], set()).add(mark)

def _addprecursors(*args, **kwargs):
    msg = ("'obsolete._addprecursors' is deprecated, "
           "use 'obsolete._addpredecessors'")
    util.nouideprecwarn(msg, '4.4')

    return _addpredecessors(*args, **kwargs)

@util.nogc
def _addpredecessors(predecessors, markers):
    for mark in markers:
        for suc in mark[1]:
            predecessors.setdefault(suc, set()).add(mark)

@util.nogc
def _addchildren(children, markers):
    for mark in markers:
        parents = mark[5]
        if parents is not None:
            for p in parents:
                children.setdefault(p, set()).add(mark)

def _checkinvalidmarkers(markers):
    """search for markers with invalid data and raise an error if needed

    Exists as a separate function to allow the evolve extension a more
    subtle handling.
    """
    for mark in markers:
        if node.nullid in mark[1]:
            raise error.Abort(_('bad obsolescence marker detected: '
                                'invalid successors nullid'))

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - predecessors[x] -> set(markers on predecessors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on predecessors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec:    nodeid, predecessor changeset
    # succs:   tuple of nodeid, successor changesets (0-N length)
    # flag:    integer, flag field carrying modifier for the markers (see doc)
    # meta:    binary blob, encoded metadata dictionary
    # date:    (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of predecessors
    #          None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related data
        self.caches = {}
        self.svfs = svfs
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        if not self._cached('_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
-                pass
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
        """obsolete: add a new obsolete marker

        * ensure it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating markers you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker has been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = util.makedate()
            else:
                date = util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicates.
        Return the number of new markers."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                data = b''.join(encodemarkers(new, offset == 0, self._version))
                f.write(data)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            addedmarkers = transaction.changes.get('obsmarkers')
            if addedmarkers is not None:
                addedmarkers.update(new)
            self._addmarkers(new, data)
            # new markers *may* have changed several sets. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        return self.svfs.tryread('obsstore')

    @propertycache
    def _version(self):
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @property
    def precursors(self):
        msg = ("'obsstore.precursors' is deprecated, "
               "use 'obsstore.predecessors'")
        util.nouideprecwarn(msg, '4.4')

        return self.predecessors

    @propertycache
    def predecessors(self):
        predecessors = {}
        _addpredecessors(predecessors, self._all)
        return predecessors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        return attr in self.__dict__

    def _addmarkers(self, markers, rawdata):
        markers = list(markers)  # to allow repeated iteration
        self._data = self._data + rawdata
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('predecessors'):
            _addpredecessors(self.predecessors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use this changeset as successor
        - prune markers of direct children of this changeset
        - recursive application of the two rules on predecessors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.predecessors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers

def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    defaultformat = ui.configint('format', 'obsstore-version')
    # rely on obsstore class default when possible.
    kwargs = {}
    if defaultformat is not None:
        kwargs['defaultformat'] = defaultformat
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **kwargs)
    if store and readonly:
        ui.warn(_('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
    return store

def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None

# arbitrarily picked to fit into 8K limit from HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(data)
    return keys
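
# Decoding sketch (hypothetical helper, not part of the original module):
# each 'dump%i' value produced above is a base85-encoded version-0 marker
# stream, decodable the same way pushmarker does below.
def _sketchdecodedump(value):
    data = util.b85decode(value)
    version, markers = _readmarkers(data)
    return list(markers)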

def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(sorted(repo.obsstore))

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return False
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return False
    data = util.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            repo.invalidatevolatilesets()
            tr.close()
            return True
        finally:
            tr.release()
    finally:
        lock.release()

# keep compatibility for the 4.3 cycle
def allprecursors(obsstore, nodes, ignoreflags=0):
    movemsg = 'obsolete.allprecursors moved to obsutil.allprecursors'
    util.nouideprecwarn(movemsg, '4.3')
    return obsutil.allprecursors(obsstore, nodes, ignoreflags)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    movemsg = 'obsolete.allsuccessors moved to obsutil.allsuccessors'
    util.nouideprecwarn(movemsg, '4.3')
    return obsutil.allsuccessors(obsstore, nodes, ignoreflags)

def marker(repo, data):
    movemsg = 'obsolete.marker moved to obsutil.marker'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.marker(repo, data)

def getmarkers(repo, nodes=None, exclusive=False):
    movemsg = 'obsolete.getmarkers moved to obsutil.getmarkers'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.getmarkers(repo, nodes=nodes, exclusive=exclusive)

def exclusivemarkers(repo, nodes):
    movemsg = 'obsolete.exclusivemarkers moved to obsutil.exclusivemarkers'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.exclusivemarkers(repo, nodes)

def foreground(repo, nodes):
    movemsg = 'obsolete.foreground moved to obsutil.foreground'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.foreground(repo, nodes)

def successorssets(repo, initialnode, cache=None):
    movemsg = 'obsolete.successorssets moved to obsutil.successorssets'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.successorssets(repo, initialnode, cache=cache)

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        if name in cachefuncs:
            msg = "duplicated registration for volatileset '%s' (existing: %r)"
            raise error.ProgrammingError(msg % (name, cachefuncs[name]))
        cachefuncs[name] = func
        return func
    return decorator
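
# Usage sketch: the registrations below use this decorator, one per
# volatile set; getrevs() computes each set once and memoizes it in
# obsstore.caches, e.g.:
#
#   @cachefor('example')
#   def _computeexampleset(repo):
#       return frozenset()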

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]
877
876
# To be simple we need to invalidate obsolescence cache when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence-related caches from a repo

    This removes all caches in obsstore if the obsstore already exists on the
    repo.

    (We could be smarter here given the exact event that triggered the cache
    clearing)"""
    # only clear caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

def _mutablerevs(repo):
    """the set of mutable revisions in the repository"""
    return repo._phasecache.getrevset(repo, (phases.draft, phases.secret))

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    getnode = repo.changelog.node
    notpublic = _mutablerevs(repo)
    isobs = repo.obsstore.successors.__contains__
    obs = set(r for r in notpublic if isobs(getnode(r)))
    return obs

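# Self-contained sketch (toy data, assumed names) of the rule above: a mutable
# revision is obsolete exactly when its node appears as a key of
# obsstore.successors, i.e. when at least one marker uses it as predecessor.
_toysucc = {'a': [('a', ('b',))]}    # node 'a' has a successor marker
_toynode = {0: 'a', 1: 'b'}.get      # rev -> node, like changelog.node
_toymutable = {0, 1}
assert set(r for r in _toymutable if _toynode(r) in _toysucc) == {0}
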
@cachefor('unstable')
def _computeunstableset(repo):
    msg = ("'unstable' volatile set is deprecated, "
           "use 'orphan'")
    repo.ui.deprecwarn(msg, '4.4')

    return _computeorphanset(repo)

@cachefor('orphan')
def _computeorphanset(repo):
    """the set of non-obsolete revisions with obsolete parents"""
    pfunc = repo.changelog.parentrevs
    mutable = _mutablerevs(repo)
    obsolete = getrevs(repo, 'obsolete')
    others = mutable - obsolete
    unstable = set()
    for r in sorted(others):
        # A rev is unstable if one of its parents is obsolete or unstable;
        # this works because we traverse in increasing rev order
        for p in pfunc(r):
            if p in obsolete or p in unstable:
                unstable.add(r)
                break
    return unstable

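# The single pass above relies on children always having higher revs than
# their parents. A self-contained sketch (toy DAG, assumed names): rev 1 is
# obsolete, so its descendants 2 and 3 both become orphans in one
# increasing-rev sweep.
_toyparents = {2: (1,), 3: (2,)}     # rev -> parent revs
_toyobsolete = {1}
_toyorphans = set()
for _r in sorted({2, 3}):            # the mutable, non-obsolete revs
    for _p in _toyparents.get(_r, ()):
        if _p in _toyobsolete or _p in _toyorphans:
            _toyorphans.add(_r)
            break
assert _toyorphans == {2, 3}
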
@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete revisions with non-obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'orphan'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete revisions without non-obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')

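# 'suspended' and 'extinct' partition the obsolete set, as the set difference
# above shows. A toy sketch (assumed data): obsolete revs that are ancestors
# of an orphan stay suspended, the rest are extinct.
_toyobs = {1, 2}
_toysuspended = {1}                  # obsolete ancestors of orphans
_toyextinct = _toyobs - _toysuspended
assert _toyextinct == {2}
assert not (_toysuspended & _toyextinct)    # disjoint by construction
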
@cachefor('bumped')
def _computebumpedset(repo):
    msg = ("'bumped' volatile set is deprecated, "
           "use 'phasedivergent'")
    repo.ui.deprecwarn(msg, '4.4')

    return _computephasedivergentset(repo)

@cachefor('phasedivergent')
def _computephasedivergentset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoids attribute lookup in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    for ctx in repo.set('(not public()) and (not obsolete())'):
        rev = ctx.rev()
        # We only evaluate mutable, non-obsolete revisions
        node = ctx.node()
        # (future) A cache of predecessors may be worth it if splits are
        # very common
        for pnode in obsutil.allpredecessors(repo.obsstore, [node],
                                             ignoreflags=bumpedfix):
            prev = torev(pnode) # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public predecessor
                bumped.add(rev)
                break # Next draft!
    return bumped

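# Sketch (toy data, assumed names) of the phase-divergence test above: a
# draft revision is phase-divergent when walking its predecessor markers
# reaches a revision that has since become public.
_toypublic = {0}                     # rev 0 was published after being rewritten
_toypreds = {'b': ('a',)}            # node 'b' is a rewrite of node 'a'
_toytorev = {'a': 0, 'b': 1}.get
_toybumped = set()
for _node in ('b',):                 # the mutable, non-obsolete candidates
    for _pnode in _toypreds.get(_node, ()):
        if _toytorev(_pnode) in _toypublic:
            _toybumped.add(_toytorev(_node))
            break
assert _toybumped == {1}
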
@cachefor('divergent')
def _computedivergentset(repo):
    msg = ("'divergent' volatile set is deprecated, "
           "use 'contentdivergent'")
    repo.ui.deprecwarn(msg, '4.4')

    return _computecontentdivergentset(repo)

@cachefor('contentdivergent')
def _computecontentdivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.predecessors.get(ctx.node(), ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                obsutil.successorssets(repo, prec, cache=newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.predecessors.get(prec, ()))
    return divergent

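# Sketch (toy data) of the divergence test above: a changeset is
# content-divergent when one of its predecessors has more than one surviving
# (non-empty) successors set, e.g. 'a' was rewritten independently into 'b'
# and into 'c'.
_toynewermap = {'a': [('b',), ('c',)]}
_toynewer = [s for s in _toynewermap['a'] if s]
assert len(_toynewer) > 1            # 'b' and 'c' compete: both divergent
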
def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                  operation=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuples, where `old` is a changectx and `news` is a sequence of changectx.
    metadata is an optional dictionary containing metadata for this marker
    only. It is merged with the global metadata specified through the
    `metadata` argument of this function.

    Trying to obsolete a public changeset will raise an exception.

    The current user and date are used unless specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    useoperation = repo.ui.configbool('experimental',
                                      'stabilization.track-operation')
    if useoperation and operation:
        metadata['operation'] = operation
    tr = repo.transaction('add-obsolescence-marker')
    try:
        markerargs = []
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise error.Abort(_("cannot obsolete public changeset: %s")
                                  % prec,
                                  hint="see 'hg help phases' for details")
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise error.Abort(_("changeset %s cannot obsolete itself")
                                  % prec)

            # Creating the marker causes the hidden cache to become invalid,
            # which causes recomputation when we ask for prec.parents() above.
            # Resulting in n^2 behavior. So let's prepare all of the args
            # first, then create the markers.
            markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata,
                                 ui=repo.ui)
        repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
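
# Sketch (toy data, assumed names) of how each relation is unpacked above: a
# 2-tuple gives (predecessor, successors), and an optional third item carries
# per-marker metadata that is merged over the global metadata.
_toyglobal = {'user': 'alice'}
_toyrel = ('old', ('new1', 'new2'), {'operation': 'split'})
_toylocal = _toyglobal.copy()
if 2 < len(_toyrel):
    _toylocal.update(_toyrel[2])
assert _toylocal == {'user': 'alice', 'operation': 'split'}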