##// END OF EJS Templates
obsolete: drop usage of changectx in '_computephasedivergentset'...
Boris Feld -
r35134:82680919 default
parent child Browse files
Show More
@@ -1,1126 +1,1126 b''
1 # obsolete.py - obsolete markers handling
1 # obsolete.py - obsolete markers handling
2 #
2 #
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 # Logilab SA <contact@logilab.fr>
4 # Logilab SA <contact@logilab.fr>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 """Obsolete marker handling
9 """Obsolete marker handling
10
10
11 An obsolete marker maps an old changeset to a list of new
11 An obsolete marker maps an old changeset to a list of new
12 changesets. If the list of new changesets is empty, the old changeset
12 changesets. If the list of new changesets is empty, the old changeset
13 is said to be "killed". Otherwise, the old changeset is being
13 is said to be "killed". Otherwise, the old changeset is being
14 "replaced" by the new changesets.
14 "replaced" by the new changesets.
15
15
16 Obsolete markers can be used to record and distribute changeset graph
16 Obsolete markers can be used to record and distribute changeset graph
17 transformations performed by history rewrite operations, and help
17 transformations performed by history rewrite operations, and help
18 building new tools to reconcile conflicting rewrite actions. To
18 building new tools to reconcile conflicting rewrite actions. To
19 facilitate conflict resolution, markers include various annotations
19 facilitate conflict resolution, markers include various annotations
20 besides old and new changeset identifiers, such as creation date or
20 besides old and new changeset identifiers, such as creation date or
21 author name.
21 author name.
22
22
23 The old obsoleted changeset is called a "predecessor" and possible
23 The old obsoleted changeset is called a "predecessor" and possible
24 replacements are called "successors". Markers that used changeset X as
24 replacements are called "successors". Markers that used changeset X as
25 a predecessor are called "successor markers of X" because they hold
25 a predecessor are called "successor markers of X" because they hold
26 information about the successors of X. Markers that use changeset Y as
26 information about the successors of X. Markers that use changeset Y as
27 a successor are called "predecessor markers of Y" because they hold
27 a successor are called "predecessor markers of Y" because they hold
28 information about the predecessors of Y.
28 information about the predecessors of Y.
29
29
30 Examples:
30 Examples:
31
31
32 - When changeset A is replaced by changeset A', one marker is stored:
32 - When changeset A is replaced by changeset A', one marker is stored:
33
33
34 (A, (A',))
34 (A, (A',))
35
35
36 - When changesets A and B are folded into a new changeset C, two markers are
36 - When changesets A and B are folded into a new changeset C, two markers are
37 stored:
37 stored:
38
38
39 (A, (C,)) and (B, (C,))
39 (A, (C,)) and (B, (C,))
40
40
41 - When changeset A is simply "pruned" from the graph, a marker is created:
41 - When changeset A is simply "pruned" from the graph, a marker is created:
42
42
43 (A, ())
43 (A, ())
44
44
45 - When changeset A is split into B and C, a single marker is used:
45 - When changeset A is split into B and C, a single marker is used:
46
46
47 (A, (B, C))
47 (A, (B, C))
48
48
49 We use a single marker to distinguish the "split" case from the "divergence"
49 We use a single marker to distinguish the "split" case from the "divergence"
50 case. If two independent operations rewrite the same changeset A in to A' and
50 case. If two independent operations rewrite the same changeset A in to A' and
51 A'', we have an error case: divergent rewriting. We can detect it because
51 A'', we have an error case: divergent rewriting. We can detect it because
52 two markers will be created independently:
52 two markers will be created independently:
53
53
54 (A, (B,)) and (A, (C,))
54 (A, (B,)) and (A, (C,))
55
55
56 Format
56 Format
57 ------
57 ------
58
58
59 Markers are stored in an append-only file stored in
59 Markers are stored in an append-only file stored in
60 '.hg/store/obsstore'.
60 '.hg/store/obsstore'.
61
61
62 The file starts with a version header:
62 The file starts with a version header:
63
63
64 - 1 unsigned byte: version number, starting at zero.
64 - 1 unsigned byte: version number, starting at zero.
65
65
66 The header is followed by the markers. Marker format depend of the version. See
66 The header is followed by the markers. Marker format depend of the version. See
67 comment associated with each format for details.
67 comment associated with each format for details.
68
68
69 """
69 """
70 from __future__ import absolute_import
70 from __future__ import absolute_import
71
71
72 import errno
72 import errno
73 import struct
73 import struct
74
74
75 from .i18n import _
75 from .i18n import _
76 from . import (
76 from . import (
77 error,
77 error,
78 node,
78 node,
79 obsutil,
79 obsutil,
80 phases,
80 phases,
81 policy,
81 policy,
82 util,
82 util,
83 )
83 )
84
84
85 parsers = policy.importmod(r'parsers')
85 parsers = policy.importmod(r'parsers')
86
86
87 _pack = struct.pack
87 _pack = struct.pack
88 _unpack = struct.unpack
88 _unpack = struct.unpack
89 _calcsize = struct.calcsize
89 _calcsize = struct.calcsize
90 propertycache = util.propertycache
90 propertycache = util.propertycache
91
91
92 # the obsolete feature is not mature enough to be enabled by default.
92 # the obsolete feature is not mature enough to be enabled by default.
93 # you have to rely on third party extension extension to enable this.
93 # you have to rely on third party extension extension to enable this.
94 _enabled = False
94 _enabled = False
95
95
96 # Options for obsolescence
96 # Options for obsolescence
97 createmarkersopt = 'createmarkers'
97 createmarkersopt = 'createmarkers'
98 allowunstableopt = 'allowunstable'
98 allowunstableopt = 'allowunstable'
99 exchangeopt = 'exchange'
99 exchangeopt = 'exchange'
100
100
101 def _getoptionvalue(repo, option):
101 def _getoptionvalue(repo, option):
102 """Returns True if the given repository has the given obsolete option
102 """Returns True if the given repository has the given obsolete option
103 enabled.
103 enabled.
104 """
104 """
105 configkey = 'evolution.%s' % option
105 configkey = 'evolution.%s' % option
106 newconfig = repo.ui.configbool('experimental', configkey)
106 newconfig = repo.ui.configbool('experimental', configkey)
107
107
108 # Return the value only if defined
108 # Return the value only if defined
109 if newconfig is not None:
109 if newconfig is not None:
110 return newconfig
110 return newconfig
111
111
112 # Fallback on generic option
112 # Fallback on generic option
113 try:
113 try:
114 return repo.ui.configbool('experimental', 'evolution')
114 return repo.ui.configbool('experimental', 'evolution')
115 except (error.ConfigError, AttributeError):
115 except (error.ConfigError, AttributeError):
116 # Fallback on old-fashion config
116 # Fallback on old-fashion config
117 # inconsistent config: experimental.evolution
117 # inconsistent config: experimental.evolution
118 result = set(repo.ui.configlist('experimental', 'evolution'))
118 result = set(repo.ui.configlist('experimental', 'evolution'))
119
119
120 if 'all' in result:
120 if 'all' in result:
121 return True
121 return True
122
122
123 # For migration purposes, temporarily return true if the config hasn't
123 # For migration purposes, temporarily return true if the config hasn't
124 # been set but _enabled is true.
124 # been set but _enabled is true.
125 if len(result) == 0 and _enabled:
125 if len(result) == 0 and _enabled:
126 return True
126 return True
127
127
128 # Temporary hack for next check
128 # Temporary hack for next check
129 newconfig = repo.ui.config('experimental', 'evolution.createmarkers')
129 newconfig = repo.ui.config('experimental', 'evolution.createmarkers')
130 if newconfig:
130 if newconfig:
131 result.add('createmarkers')
131 result.add('createmarkers')
132
132
133 return option in result
133 return option in result
134
134
135 def isenabled(repo, option):
135 def isenabled(repo, option):
136 """Returns True if the given repository has the given obsolete option
136 """Returns True if the given repository has the given obsolete option
137 enabled.
137 enabled.
138 """
138 """
139 createmarkersvalue = _getoptionvalue(repo, createmarkersopt)
139 createmarkersvalue = _getoptionvalue(repo, createmarkersopt)
140 unstabluevalue = _getoptionvalue(repo, allowunstableopt)
140 unstabluevalue = _getoptionvalue(repo, allowunstableopt)
141 exchangevalue = _getoptionvalue(repo, exchangeopt)
141 exchangevalue = _getoptionvalue(repo, exchangeopt)
142
142
143 # createmarkers must be enabled if other options are enabled
143 # createmarkers must be enabled if other options are enabled
144 if ((unstabluevalue or exchangevalue) and not createmarkersvalue):
144 if ((unstabluevalue or exchangevalue) and not createmarkersvalue):
145 raise error.Abort(_("'createmarkers' obsolete option must be enabled "
145 raise error.Abort(_("'createmarkers' obsolete option must be enabled "
146 "if other obsolete options are enabled"))
146 "if other obsolete options are enabled"))
147
147
148 return _getoptionvalue(repo, option)
148 return _getoptionvalue(repo, option)
149
149
150 ### obsolescence marker flag
150 ### obsolescence marker flag
151
151
152 ## bumpedfix flag
152 ## bumpedfix flag
153 #
153 #
154 # When a changeset A' succeed to a changeset A which became public, we call A'
154 # When a changeset A' succeed to a changeset A which became public, we call A'
155 # "bumped" because it's a successors of a public changesets
155 # "bumped" because it's a successors of a public changesets
156 #
156 #
157 # o A' (bumped)
157 # o A' (bumped)
158 # |`:
158 # |`:
159 # | o A
159 # | o A
160 # |/
160 # |/
161 # o Z
161 # o Z
162 #
162 #
163 # The way to solve this situation is to create a new changeset Ad as a child
163 # The way to solve this situation is to create a new changeset Ad as a child
164 # of A. This changeset has the same content as A'. So the diff from A to A'
164 # of A. This changeset has the same content as A'. So the diff from A to A'
165 # is the same than the diff from A to Ad. Ad is marked as a successors of A'
165 # is the same than the diff from A to Ad. Ad is marked as a successors of A'
166 #
166 #
167 # o Ad
167 # o Ad
168 # |`:
168 # |`:
169 # | x A'
169 # | x A'
170 # |'|
170 # |'|
171 # o | A
171 # o | A
172 # |/
172 # |/
173 # o Z
173 # o Z
174 #
174 #
175 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
175 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
176 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
176 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
177 # This flag mean that the successors express the changes between the public and
177 # This flag mean that the successors express the changes between the public and
178 # bumped version and fix the situation, breaking the transitivity of
178 # bumped version and fix the situation, breaking the transitivity of
179 # "bumped" here.
179 # "bumped" here.
180 bumpedfix = 1
180 bumpedfix = 1
181 usingsha256 = 2
181 usingsha256 = 2
182
182
183 ## Parsing and writing of version "0"
183 ## Parsing and writing of version "0"
184 #
184 #
185 # The header is followed by the markers. Each marker is made of:
185 # The header is followed by the markers. Each marker is made of:
186 #
186 #
187 # - 1 uint8 : number of new changesets "N", can be zero.
187 # - 1 uint8 : number of new changesets "N", can be zero.
188 #
188 #
189 # - 1 uint32: metadata size "M" in bytes.
189 # - 1 uint32: metadata size "M" in bytes.
190 #
190 #
191 # - 1 byte: a bit field. It is reserved for flags used in common
191 # - 1 byte: a bit field. It is reserved for flags used in common
192 # obsolete marker operations, to avoid repeated decoding of metadata
192 # obsolete marker operations, to avoid repeated decoding of metadata
193 # entries.
193 # entries.
194 #
194 #
195 # - 20 bytes: obsoleted changeset identifier.
195 # - 20 bytes: obsoleted changeset identifier.
196 #
196 #
197 # - N*20 bytes: new changesets identifiers.
197 # - N*20 bytes: new changesets identifiers.
198 #
198 #
199 # - M bytes: metadata as a sequence of nul-terminated strings. Each
199 # - M bytes: metadata as a sequence of nul-terminated strings. Each
200 # string contains a key and a value, separated by a colon ':', without
200 # string contains a key and a value, separated by a colon ':', without
201 # additional encoding. Keys cannot contain '\0' or ':' and values
201 # additional encoding. Keys cannot contain '\0' or ':' and values
202 # cannot contain '\0'.
202 # cannot contain '\0'.
203 _fm0version = 0
203 _fm0version = 0
204 _fm0fixed = '>BIB20s'
204 _fm0fixed = '>BIB20s'
205 _fm0node = '20s'
205 _fm0node = '20s'
206 _fm0fsize = _calcsize(_fm0fixed)
206 _fm0fsize = _calcsize(_fm0fixed)
207 _fm0fnodesize = _calcsize(_fm0node)
207 _fm0fnodesize = _calcsize(_fm0node)
208
208
209 def _fm0readmarkers(data, off, stop):
209 def _fm0readmarkers(data, off, stop):
210 # Loop on markers
210 # Loop on markers
211 while off < stop:
211 while off < stop:
212 # read fixed part
212 # read fixed part
213 cur = data[off:off + _fm0fsize]
213 cur = data[off:off + _fm0fsize]
214 off += _fm0fsize
214 off += _fm0fsize
215 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
215 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
216 # read replacement
216 # read replacement
217 sucs = ()
217 sucs = ()
218 if numsuc:
218 if numsuc:
219 s = (_fm0fnodesize * numsuc)
219 s = (_fm0fnodesize * numsuc)
220 cur = data[off:off + s]
220 cur = data[off:off + s]
221 sucs = _unpack(_fm0node * numsuc, cur)
221 sucs = _unpack(_fm0node * numsuc, cur)
222 off += s
222 off += s
223 # read metadata
223 # read metadata
224 # (metadata will be decoded on demand)
224 # (metadata will be decoded on demand)
225 metadata = data[off:off + mdsize]
225 metadata = data[off:off + mdsize]
226 if len(metadata) != mdsize:
226 if len(metadata) != mdsize:
227 raise error.Abort(_('parsing obsolete marker: metadata is too '
227 raise error.Abort(_('parsing obsolete marker: metadata is too '
228 'short, %d bytes expected, got %d')
228 'short, %d bytes expected, got %d')
229 % (mdsize, len(metadata)))
229 % (mdsize, len(metadata)))
230 off += mdsize
230 off += mdsize
231 metadata = _fm0decodemeta(metadata)
231 metadata = _fm0decodemeta(metadata)
232 try:
232 try:
233 when, offset = metadata.pop('date', '0 0').split(' ')
233 when, offset = metadata.pop('date', '0 0').split(' ')
234 date = float(when), int(offset)
234 date = float(when), int(offset)
235 except ValueError:
235 except ValueError:
236 date = (0., 0)
236 date = (0., 0)
237 parents = None
237 parents = None
238 if 'p2' in metadata:
238 if 'p2' in metadata:
239 parents = (metadata.pop('p1', None), metadata.pop('p2', None))
239 parents = (metadata.pop('p1', None), metadata.pop('p2', None))
240 elif 'p1' in metadata:
240 elif 'p1' in metadata:
241 parents = (metadata.pop('p1', None),)
241 parents = (metadata.pop('p1', None),)
242 elif 'p0' in metadata:
242 elif 'p0' in metadata:
243 parents = ()
243 parents = ()
244 if parents is not None:
244 if parents is not None:
245 try:
245 try:
246 parents = tuple(node.bin(p) for p in parents)
246 parents = tuple(node.bin(p) for p in parents)
247 # if parent content is not a nodeid, drop the data
247 # if parent content is not a nodeid, drop the data
248 for p in parents:
248 for p in parents:
249 if len(p) != 20:
249 if len(p) != 20:
250 parents = None
250 parents = None
251 break
251 break
252 except TypeError:
252 except TypeError:
253 # if content cannot be translated to nodeid drop the data.
253 # if content cannot be translated to nodeid drop the data.
254 parents = None
254 parents = None
255
255
256 metadata = tuple(sorted(metadata.iteritems()))
256 metadata = tuple(sorted(metadata.iteritems()))
257
257
258 yield (pre, sucs, flags, metadata, date, parents)
258 yield (pre, sucs, flags, metadata, date, parents)
259
259
260 def _fm0encodeonemarker(marker):
260 def _fm0encodeonemarker(marker):
261 pre, sucs, flags, metadata, date, parents = marker
261 pre, sucs, flags, metadata, date, parents = marker
262 if flags & usingsha256:
262 if flags & usingsha256:
263 raise error.Abort(_('cannot handle sha256 with old obsstore format'))
263 raise error.Abort(_('cannot handle sha256 with old obsstore format'))
264 metadata = dict(metadata)
264 metadata = dict(metadata)
265 time, tz = date
265 time, tz = date
266 metadata['date'] = '%r %i' % (time, tz)
266 metadata['date'] = '%r %i' % (time, tz)
267 if parents is not None:
267 if parents is not None:
268 if not parents:
268 if not parents:
269 # mark that we explicitly recorded no parents
269 # mark that we explicitly recorded no parents
270 metadata['p0'] = ''
270 metadata['p0'] = ''
271 for i, p in enumerate(parents, 1):
271 for i, p in enumerate(parents, 1):
272 metadata['p%i' % i] = node.hex(p)
272 metadata['p%i' % i] = node.hex(p)
273 metadata = _fm0encodemeta(metadata)
273 metadata = _fm0encodemeta(metadata)
274 numsuc = len(sucs)
274 numsuc = len(sucs)
275 format = _fm0fixed + (_fm0node * numsuc)
275 format = _fm0fixed + (_fm0node * numsuc)
276 data = [numsuc, len(metadata), flags, pre]
276 data = [numsuc, len(metadata), flags, pre]
277 data.extend(sucs)
277 data.extend(sucs)
278 return _pack(format, *data) + metadata
278 return _pack(format, *data) + metadata
279
279
280 def _fm0encodemeta(meta):
280 def _fm0encodemeta(meta):
281 """Return encoded metadata string to string mapping.
281 """Return encoded metadata string to string mapping.
282
282
283 Assume no ':' in key and no '\0' in both key and value."""
283 Assume no ':' in key and no '\0' in both key and value."""
284 for key, value in meta.iteritems():
284 for key, value in meta.iteritems():
285 if ':' in key or '\0' in key:
285 if ':' in key or '\0' in key:
286 raise ValueError("':' and '\0' are forbidden in metadata key'")
286 raise ValueError("':' and '\0' are forbidden in metadata key'")
287 if '\0' in value:
287 if '\0' in value:
288 raise ValueError("':' is forbidden in metadata value'")
288 raise ValueError("':' is forbidden in metadata value'")
289 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
289 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
290
290
291 def _fm0decodemeta(data):
291 def _fm0decodemeta(data):
292 """Return string to string dictionary from encoded version."""
292 """Return string to string dictionary from encoded version."""
293 d = {}
293 d = {}
294 for l in data.split('\0'):
294 for l in data.split('\0'):
295 if l:
295 if l:
296 key, value = l.split(':')
296 key, value = l.split(':')
297 d[key] = value
297 d[key] = value
298 return d
298 return d
299
299
300 ## Parsing and writing of version "1"
300 ## Parsing and writing of version "1"
301 #
301 #
302 # The header is followed by the markers. Each marker is made of:
302 # The header is followed by the markers. Each marker is made of:
303 #
303 #
304 # - uint32: total size of the marker (including this field)
304 # - uint32: total size of the marker (including this field)
305 #
305 #
306 # - float64: date in seconds since epoch
306 # - float64: date in seconds since epoch
307 #
307 #
308 # - int16: timezone offset in minutes
308 # - int16: timezone offset in minutes
309 #
309 #
310 # - uint16: a bit field. It is reserved for flags used in common
310 # - uint16: a bit field. It is reserved for flags used in common
311 # obsolete marker operations, to avoid repeated decoding of metadata
311 # obsolete marker operations, to avoid repeated decoding of metadata
312 # entries.
312 # entries.
313 #
313 #
314 # - uint8: number of successors "N", can be zero.
314 # - uint8: number of successors "N", can be zero.
315 #
315 #
316 # - uint8: number of parents "P", can be zero.
316 # - uint8: number of parents "P", can be zero.
317 #
317 #
318 # 0: parents data stored but no parent,
318 # 0: parents data stored but no parent,
319 # 1: one parent stored,
319 # 1: one parent stored,
320 # 2: two parents stored,
320 # 2: two parents stored,
321 # 3: no parent data stored
321 # 3: no parent data stored
322 #
322 #
323 # - uint8: number of metadata entries M
323 # - uint8: number of metadata entries M
324 #
324 #
325 # - 20 or 32 bytes: predecessor changeset identifier.
325 # - 20 or 32 bytes: predecessor changeset identifier.
326 #
326 #
327 # - N*(20 or 32) bytes: successors changesets identifiers.
327 # - N*(20 or 32) bytes: successors changesets identifiers.
328 #
328 #
329 # - P*(20 or 32) bytes: parents of the predecessors changesets.
329 # - P*(20 or 32) bytes: parents of the predecessors changesets.
330 #
330 #
331 # - M*(uint8, uint8): size of all metadata entries (key and value)
331 # - M*(uint8, uint8): size of all metadata entries (key and value)
332 #
332 #
333 # - remaining bytes: the metadata, each (key, value) pair after the other.
333 # - remaining bytes: the metadata, each (key, value) pair after the other.
334 _fm1version = 1
334 _fm1version = 1
335 _fm1fixed = '>IdhHBBB20s'
335 _fm1fixed = '>IdhHBBB20s'
336 _fm1nodesha1 = '20s'
336 _fm1nodesha1 = '20s'
337 _fm1nodesha256 = '32s'
337 _fm1nodesha256 = '32s'
338 _fm1nodesha1size = _calcsize(_fm1nodesha1)
338 _fm1nodesha1size = _calcsize(_fm1nodesha1)
339 _fm1nodesha256size = _calcsize(_fm1nodesha256)
339 _fm1nodesha256size = _calcsize(_fm1nodesha256)
340 _fm1fsize = _calcsize(_fm1fixed)
340 _fm1fsize = _calcsize(_fm1fixed)
341 _fm1parentnone = 3
341 _fm1parentnone = 3
342 _fm1parentshift = 14
342 _fm1parentshift = 14
343 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
343 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
344 _fm1metapair = 'BB'
344 _fm1metapair = 'BB'
345 _fm1metapairsize = _calcsize(_fm1metapair)
345 _fm1metapairsize = _calcsize(_fm1metapair)
346
346
347 def _fm1purereadmarkers(data, off, stop):
347 def _fm1purereadmarkers(data, off, stop):
348 # make some global constants local for performance
348 # make some global constants local for performance
349 noneflag = _fm1parentnone
349 noneflag = _fm1parentnone
350 sha2flag = usingsha256
350 sha2flag = usingsha256
351 sha1size = _fm1nodesha1size
351 sha1size = _fm1nodesha1size
352 sha2size = _fm1nodesha256size
352 sha2size = _fm1nodesha256size
353 sha1fmt = _fm1nodesha1
353 sha1fmt = _fm1nodesha1
354 sha2fmt = _fm1nodesha256
354 sha2fmt = _fm1nodesha256
355 metasize = _fm1metapairsize
355 metasize = _fm1metapairsize
356 metafmt = _fm1metapair
356 metafmt = _fm1metapair
357 fsize = _fm1fsize
357 fsize = _fm1fsize
358 unpack = _unpack
358 unpack = _unpack
359
359
360 # Loop on markers
360 # Loop on markers
361 ufixed = struct.Struct(_fm1fixed).unpack
361 ufixed = struct.Struct(_fm1fixed).unpack
362
362
363 while off < stop:
363 while off < stop:
364 # read fixed part
364 # read fixed part
365 o1 = off + fsize
365 o1 = off + fsize
366 t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])
366 t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])
367
367
368 if flags & sha2flag:
368 if flags & sha2flag:
369 # FIXME: prec was read as a SHA1, needs to be amended
369 # FIXME: prec was read as a SHA1, needs to be amended
370
370
371 # read 0 or more successors
371 # read 0 or more successors
372 if numsuc == 1:
372 if numsuc == 1:
373 o2 = o1 + sha2size
373 o2 = o1 + sha2size
374 sucs = (data[o1:o2],)
374 sucs = (data[o1:o2],)
375 else:
375 else:
376 o2 = o1 + sha2size * numsuc
376 o2 = o1 + sha2size * numsuc
377 sucs = unpack(sha2fmt * numsuc, data[o1:o2])
377 sucs = unpack(sha2fmt * numsuc, data[o1:o2])
378
378
379 # read parents
379 # read parents
380 if numpar == noneflag:
380 if numpar == noneflag:
381 o3 = o2
381 o3 = o2
382 parents = None
382 parents = None
383 elif numpar == 1:
383 elif numpar == 1:
384 o3 = o2 + sha2size
384 o3 = o2 + sha2size
385 parents = (data[o2:o3],)
385 parents = (data[o2:o3],)
386 else:
386 else:
387 o3 = o2 + sha2size * numpar
387 o3 = o2 + sha2size * numpar
388 parents = unpack(sha2fmt * numpar, data[o2:o3])
388 parents = unpack(sha2fmt * numpar, data[o2:o3])
389 else:
389 else:
390 # read 0 or more successors
390 # read 0 or more successors
391 if numsuc == 1:
391 if numsuc == 1:
392 o2 = o1 + sha1size
392 o2 = o1 + sha1size
393 sucs = (data[o1:o2],)
393 sucs = (data[o1:o2],)
394 else:
394 else:
395 o2 = o1 + sha1size * numsuc
395 o2 = o1 + sha1size * numsuc
396 sucs = unpack(sha1fmt * numsuc, data[o1:o2])
396 sucs = unpack(sha1fmt * numsuc, data[o1:o2])
397
397
398 # read parents
398 # read parents
399 if numpar == noneflag:
399 if numpar == noneflag:
400 o3 = o2
400 o3 = o2
401 parents = None
401 parents = None
402 elif numpar == 1:
402 elif numpar == 1:
403 o3 = o2 + sha1size
403 o3 = o2 + sha1size
404 parents = (data[o2:o3],)
404 parents = (data[o2:o3],)
405 else:
405 else:
406 o3 = o2 + sha1size * numpar
406 o3 = o2 + sha1size * numpar
407 parents = unpack(sha1fmt * numpar, data[o2:o3])
407 parents = unpack(sha1fmt * numpar, data[o2:o3])
408
408
409 # read metadata
409 # read metadata
410 off = o3 + metasize * nummeta
410 off = o3 + metasize * nummeta
411 metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
411 metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
412 metadata = []
412 metadata = []
413 for idx in xrange(0, len(metapairsize), 2):
413 for idx in xrange(0, len(metapairsize), 2):
414 o1 = off + metapairsize[idx]
414 o1 = off + metapairsize[idx]
415 o2 = o1 + metapairsize[idx + 1]
415 o2 = o1 + metapairsize[idx + 1]
416 metadata.append((data[off:o1], data[o1:o2]))
416 metadata.append((data[off:o1], data[o1:o2]))
417 off = o2
417 off = o2
418
418
419 yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
419 yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
420
420
421 def _fm1encodeonemarker(marker):
421 def _fm1encodeonemarker(marker):
422 pre, sucs, flags, metadata, date, parents = marker
422 pre, sucs, flags, metadata, date, parents = marker
423 # determine node size
423 # determine node size
424 _fm1node = _fm1nodesha1
424 _fm1node = _fm1nodesha1
425 if flags & usingsha256:
425 if flags & usingsha256:
426 _fm1node = _fm1nodesha256
426 _fm1node = _fm1nodesha256
427 numsuc = len(sucs)
427 numsuc = len(sucs)
428 numextranodes = numsuc
428 numextranodes = numsuc
429 if parents is None:
429 if parents is None:
430 numpar = _fm1parentnone
430 numpar = _fm1parentnone
431 else:
431 else:
432 numpar = len(parents)
432 numpar = len(parents)
433 numextranodes += numpar
433 numextranodes += numpar
434 formatnodes = _fm1node * numextranodes
434 formatnodes = _fm1node * numextranodes
435 formatmeta = _fm1metapair * len(metadata)
435 formatmeta = _fm1metapair * len(metadata)
436 format = _fm1fixed + formatnodes + formatmeta
436 format = _fm1fixed + formatnodes + formatmeta
437 # tz is stored in minutes so we divide by 60
437 # tz is stored in minutes so we divide by 60
438 tz = date[1]//60
438 tz = date[1]//60
439 data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
439 data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
440 data.extend(sucs)
440 data.extend(sucs)
441 if parents is not None:
441 if parents is not None:
442 data.extend(parents)
442 data.extend(parents)
443 totalsize = _calcsize(format)
443 totalsize = _calcsize(format)
444 for key, value in metadata:
444 for key, value in metadata:
445 lk = len(key)
445 lk = len(key)
446 lv = len(value)
446 lv = len(value)
447 if lk > 255:
447 if lk > 255:
448 msg = ('obsstore metadata key cannot be longer than 255 bytes'
448 msg = ('obsstore metadata key cannot be longer than 255 bytes'
449 ' (key "%s" is %u bytes)') % (key, lk)
449 ' (key "%s" is %u bytes)') % (key, lk)
450 raise error.ProgrammingError(msg)
450 raise error.ProgrammingError(msg)
451 if lv > 255:
451 if lv > 255:
452 msg = ('obsstore metadata value cannot be longer than 255 bytes'
452 msg = ('obsstore metadata value cannot be longer than 255 bytes'
453 ' (value "%s" for key "%s" is %u bytes)') % (value, key, lv)
453 ' (value "%s" for key "%s" is %u bytes)') % (value, key, lv)
454 raise error.ProgrammingError(msg)
454 raise error.ProgrammingError(msg)
455 data.append(lk)
455 data.append(lk)
456 data.append(lv)
456 data.append(lv)
457 totalsize += lk + lv
457 totalsize += lk + lv
458 data[0] = totalsize
458 data[0] = totalsize
459 data = [_pack(format, *data)]
459 data = [_pack(format, *data)]
460 for key, value in metadata:
460 for key, value in metadata:
461 data.append(key)
461 data.append(key)
462 data.append(value)
462 data.append(value)
463 return ''.join(data)
463 return ''.join(data)
464
464
465 def _fm1readmarkers(data, off, stop):
465 def _fm1readmarkers(data, off, stop):
466 native = getattr(parsers, 'fm1readmarkers', None)
466 native = getattr(parsers, 'fm1readmarkers', None)
467 if not native:
467 if not native:
468 return _fm1purereadmarkers(data, off, stop)
468 return _fm1purereadmarkers(data, off, stop)
469 return native(data, off, stop)
469 return native(data, off, stop)
470
470
471 # mapping to read/write various marker formats
471 # mapping to read/write various marker formats
472 # <version> -> (decoder, encoder)
472 # <version> -> (decoder, encoder)
473 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
473 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
474 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
474 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
475
475
476 def _readmarkerversion(data):
476 def _readmarkerversion(data):
477 return _unpack('>B', data[0:1])[0]
477 return _unpack('>B', data[0:1])[0]
478
478
479 @util.nogc
479 @util.nogc
480 def _readmarkers(data, off=None, stop=None):
480 def _readmarkers(data, off=None, stop=None):
481 """Read and enumerate markers from raw data"""
481 """Read and enumerate markers from raw data"""
482 diskversion = _readmarkerversion(data)
482 diskversion = _readmarkerversion(data)
483 if not off:
483 if not off:
484 off = 1 # skip 1 byte version number
484 off = 1 # skip 1 byte version number
485 if stop is None:
485 if stop is None:
486 stop = len(data)
486 stop = len(data)
487 if diskversion not in formats:
487 if diskversion not in formats:
488 msg = _('parsing obsolete marker: unknown version %r') % diskversion
488 msg = _('parsing obsolete marker: unknown version %r') % diskversion
489 raise error.UnknownVersion(msg, version=diskversion)
489 raise error.UnknownVersion(msg, version=diskversion)
490 return diskversion, formats[diskversion][0](data, off, stop)
490 return diskversion, formats[diskversion][0](data, off, stop)
491
491
492 def encodeheader(version=_fm0version):
492 def encodeheader(version=_fm0version):
493 return _pack('>B', version)
493 return _pack('>B', version)
494
494
495 def encodemarkers(markers, addheader=False, version=_fm0version):
495 def encodemarkers(markers, addheader=False, version=_fm0version):
496 # Kept separate from flushmarkers(), it will be reused for
496 # Kept separate from flushmarkers(), it will be reused for
497 # markers exchange.
497 # markers exchange.
498 encodeone = formats[version][1]
498 encodeone = formats[version][1]
499 if addheader:
499 if addheader:
500 yield encodeheader(version)
500 yield encodeheader(version)
501 for marker in markers:
501 for marker in markers:
502 yield encodeone(marker)
502 yield encodeone(marker)
503
503
@util.nogc
def _addsuccessors(successors, markers):
    """Index *markers* into the successors mapping (keyed by predecessor)."""
    for m in markers:
        successors.setdefault(m[0], set()).add(m)
508
508
def _addprecursors(*args, **kwargs):
    """Deprecated alias kept for compatibility; forwards to _addpredecessors."""
    msg = ("'obsolete._addprecursors' is deprecated, "
           "use 'obsolete._addpredecessors'")
    util.nouideprecwarn(msg, '4.4')

    return _addpredecessors(*args, **kwargs)
515
515
@util.nogc
def _addpredecessors(predecessors, markers):
    """Index *markers* into the predecessors mapping (keyed by successor)."""
    for m in markers:
        for suc in m[1]:
            predecessors.setdefault(suc, set()).add(m)
521
521
@util.nogc
def _addchildren(children, markers):
    """Index *markers* into the children mapping (keyed by recorded parent)."""
    for m in markers:
        parents = m[5]
        # parents may be None when the marker recorded no parent data
        if parents is None:
            continue
        for p in parents:
            children.setdefault(p, set()).add(m)
529
529
def _checkinvalidmarkers(markers):
    """search for marker with invalid data and raise error if needed

    Exist as a separated function to allow the evolve extension for a more
    subtle handling.
    """
    for mark in markers:
        # a nullid successor is never legal in a marker
        if node.nullid in mark[1]:
            raise error.Abort(_('bad obsolescence marker detected: '
                                'invalid successors nullid'))
540
540
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with two mappings:
    - predecessors[x] -> set(markers on predecessors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on predecessors edges of children(x)
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec:    nodeid, predecessors changesets
    # succs:   tuple of nodeid, successor changesets (0-N length)
    # flag:    integer, flag field carrying modifier for the markers (see doc)
    # meta:    binary blob, encoded metadata dictionary
    # date:    (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of predecessors
    #          None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related cache
        self.caches = {}
        self.svfs = svfs
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        if not self._cached('_all'):
            try:
                # a store with only the version byte (size 1) is empty
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating marker you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker have been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = util.makedate()
            else:
                date = util.makedate()
        # predecessor and successors must be full 20-byte nodeids
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicate.
        Return the number of new marker."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            # skip markers already on disk or duplicated within this batch
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                data = b''.join(encodemarkers(new, offset == 0, self._version))
                f.write(data)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            addedmarkers = transaction.changes.get('obsmarkers')
            if addedmarkers is not None:
                addedmarkers.update(new)
            self._addmarkers(new, data)
            # new marker *may* have changed several set. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        # raw on-disk bytes of the obsstore file (empty if missing)
        return self.svfs.tryread('obsstore')

    @propertycache
    def _version(self):
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @property
    def precursors(self):
        msg = ("'obsstore.precursors' is deprecated, "
               "use 'obsstore.predecessors'")
        util.nouideprecwarn(msg, '4.4')

        return self.predecessors

    @propertycache
    def predecessors(self):
        predecessors = {}
        _addpredecessors(predecessors, self._all)
        return predecessors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        # True when the named propertycache has already been computed
        return attr in self.__dict__

    def _addmarkers(self, markers, rawdata):
        markers = list(markers)  # to allow repeated iteration
        self._data = self._data + rawdata
        self._all.extend(markers)
        # keep every already-computed index in sync with the new markers
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('predecessors'):
            _addpredecessors(self.predecessors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes mean:

        - marker that use this changeset as successor
        - prune marker of direct children on this changeset
        - recursive application of the two rules on predecessors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.predecessors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers
770
770
def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    defaultformat = ui.configint('format', 'obsstore-version')
    # rely on obsstore class default when possible.
    kwargs = {}
    if defaultformat is not None:
        kwargs['defaultformat'] = defaultformat
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **kwargs)
    if store and readonly:
        ui.warn(_('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
    return store
786
786
def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    # search for highest version known on both side
    for v in sorted(versions, reverse=True):
        if v in formats:
            return v
    return None
798
798
# arbitrary picked to fit into 8K limit from HTTP server
# you have to take in account:
# - the version header
# - the base85 encoding
_maxpayload = 5300
804
804
def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            # current chunk is full, start a new one
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(data)
    return keys
825
825
def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(sorted(repo.obsstore))
831
831
def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return False
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return False
    data = util.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            repo.invalidatevolatilesets()
            tr.close()
            return True
        finally:
            tr.release()
    finally:
        lock.release()
853
853
# keep compatibility for the 4.3 cycle
def allprecursors(obsstore, nodes, ignoreflags=0):
    """Deprecated forwarder; the implementation lives in obsutil."""
    movemsg = 'obsolete.allprecursors moved to obsutil.allprecursors'
    util.nouideprecwarn(movemsg, '4.3')
    return obsutil.allprecursors(obsstore, nodes, ignoreflags)
859
859
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Deprecated forwarder; the implementation lives in obsutil."""
    movemsg = 'obsolete.allsuccessors moved to obsutil.allsuccessors'
    util.nouideprecwarn(movemsg, '4.3')
    return obsutil.allsuccessors(obsstore, nodes, ignoreflags)
864
864
def marker(repo, data):
    """Deprecated forwarder; the implementation lives in obsutil."""
    movemsg = 'obsolete.marker moved to obsutil.marker'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.marker(repo, data)
869
869
def getmarkers(repo, nodes=None, exclusive=False):
    """Deprecated forwarder; the implementation lives in obsutil."""
    movemsg = 'obsolete.getmarkers moved to obsutil.getmarkers'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.getmarkers(repo, nodes=nodes, exclusive=exclusive)
874
874
def exclusivemarkers(repo, nodes):
    """Deprecated forwarder; the implementation lives in obsutil."""
    movemsg = 'obsolete.exclusivemarkers moved to obsutil.exclusivemarkers'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.exclusivemarkers(repo, nodes)
879
879
def foreground(repo, nodes):
    """Deprecated forwarder; the implementation lives in obsutil."""
    movemsg = 'obsolete.foreground moved to obsutil.foreground'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.foreground(repo, nodes)
884
884
def successorssets(repo, initialnode, cache=None):
    """Deprecated forwarder; the implementation lives in obsutil."""
    movemsg = 'obsolete.successorssets moved to obsutil.successorssets'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.successorssets(repo, initialnode, cache=cache)
889
889
# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        if name in cachefuncs:
            msg = "duplicated registration for volatileset '%s' (existing: %r)"
            raise error.ProgrammingError(msg % (name, cachefuncs[name]))
        cachefuncs[name] = func
        return func
    return decorator
901
901
def getrevs(repo, name):
    """Return the set of revision that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    caches = repo.obsstore.caches
    if name not in caches:
        # compute lazily and memoize on the obsstore
        caches[name] = cachefuncs[name](repo)
    return caches[name]
912
912
# To be simple we need to invalidate obsolescence cache when:
#
# - new changeset is added:
# - public phase is changed
# - obsolescence marker are added
# - strip is used a repo
def clearobscaches(repo):
    """Remove all obsolescence related cache from a repo

    This remove all cache in obsstore is the obsstore already exist on the
    repo.

    (We could be smarter here given the exact event that trigger the cache
    clearing)"""
    # only clear cache is there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()
930
930
def _mutablerevs(repo):
    """the set of mutable revision in the repository"""
    return repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
934
934
@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    getnode = repo.changelog.node
    isobs = repo.obsstore.successors.__contains__
    # a revision is obsolete when mutable and used as a marker predecessor
    return {r for r in _mutablerevs(repo) if isobs(getnode(r))}
943
943
@cachefor('unstable')
def _computeunstableset(repo):
    """Deprecated alias of the 'orphan' volatile set."""
    msg = ("'unstable' volatile set is deprecated, "
           "use 'orphan'")
    repo.ui.deprecwarn(msg, '4.4')

    return _computeorphanset(repo)
951
951
@cachefor('orphan')
def _computeorphanset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    pfunc = repo.changelog.parentrevs
    obsolete = getrevs(repo, 'obsolete')
    candidates = _mutablerevs(repo) - obsolete
    orphan = set()
    # A rev is unstable if one of its parent is obsolete or unstable
    # this works since we traverse following growing rev order
    for r in sorted(candidates):
        if any(p in obsolete or p in orphan for p in pfunc(r)):
            orphan.add(r)
    return orphan
968
968
@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'orphan'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
974
974
@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
979
979
@cachefor('bumped')
def _computebumpedset(repo):
    """Deprecated alias of the 'phasedivergent' volatile set."""
    msg = ("'bumped' volatile set is deprecated, "
           "use 'phasedivergent'")
    repo.ui.deprecwarn(msg, '4.4')

    return _computephasedivergentset(repo)
987
987
@cachefor('phasedivergent')
def _computephasedivergentset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    tonode = cl.node
    # iterate over plain revisions rather than building a changectx per
    # revision: only the node is needed, and context creation is costly
    for rev in repo.revs('(not public()) and (not obsolete())'):
        # We only evaluate mutable, non-obsolete revision
        node = tonode(rev)
        # (future) A cache of predecessors may worth if split is very common
        for pnode in obsutil.allpredecessors(repo.obsstore, [node],
                                             ignoreflags=bumpedfix):
            prev = torev(pnode) # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public predecessor
                bumped.add(rev)
                break # Next draft!
    return bumped
1010
1010
@cachefor('divergent')
def _computedivergentset(repo):
    # deprecated alias kept for backward compatibility; forwards to the
    # 'contentdivergent' volatile set after emitting a deprecation warning
    msg = ("'divergent' volatile set is deprecated, use 'contentdivergent'")
    repo.ui.deprecwarn(msg, '4.4')
    return _computecontentdivergentset(repo)
1018
1018
@cachefor('contentdivergent')
def _computecontentdivergentset(repo):
    """the set of rev that compete to be the final successors of some revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    # cache shared by all successorssets() calls in the loop below
    sscache = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        # walk every (transitive) predecessor of this changeset
        pending = set(obsstore.predecessors.get(ctx.node(), ()))
        visited = set()
        while pending:
            prec = pending.pop()[0]
            if prec in visited:
                continue # emergency cycle hanging prevention
            visited.add(prec)
            if prec not in sscache:
                obsutil.successorssets(repo, prec, cache=sscache)
            livesets = [ss for ss in sscache[prec] if ss]
            if len(livesets) > 1:
                # a predecessor has more than one set of live successors:
                # this changeset competes with another one
                divergent.add(ctx.rev())
                break
            pending.update(obsstore.predecessors.get(prec, ()))
    return divergent
1043
1043
1044
1044
def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                  operation=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuple. `old` and `news` are changectx. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function,

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        # the devel knob lets the test suite force a deterministic author
        develuser = repo.ui.config('devel', 'user.obsmarker')
        if develuser:
            metadata['user'] = develuser
        else:
            metadata['user'] = repo.ui.username()

    # Operation metadata handling
    useoperation = repo.ui.configbool('experimental',
                                      'evolution.track-operation')
    if useoperation and operation:
        metadata['operation'] = operation

    # Effect flag metadata handling
    saveeffectflag = repo.ui.configbool('experimental',
                                        'evolution.effect-flags')

    tr = repo.transaction('add-obsolescence-marker')
    try:
        markerargs = []
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            # per-relation metadata (rel[2], if present) overrides the
            # global metadata for this marker only
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise error.Abort(_("cannot obsolete public changeset: %s")
                                  % prec,
                                  hint="see 'hg help phases' for details")
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                # pruning marker (no successor): record the parents so the
                # changeset can still be located in the graph
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise error.Abort(_("changeset %s cannot obsolete itself")
                                  % prec)

            # Effect flag can be different by relation
            if saveeffectflag:
                # The effect flag is saved in a versioned field name for future
                # evolution
                effectflag = obsutil.geteffectflag(rel)
                localmetadata[obsutil.EFFECTFLAGFIELD] = "%d" % effectflag

            # Creating the marker causes the hidden cache to become invalid,
            # which causes recomputation when we ask for prec.parents() above.
            # Resulting in n^2 behavior. So let's prepare all of the args
            # first, then create the markers.
            markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata,
                                 ui=repo.ui)
        # new markers may unhide/hide changesets: drop the filtered-rev caches
        repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
General Comments 0
You need to be logged in to leave comments. Login now