##// END OF EJS Templates
obsolete: use native string when peeking in __dict__...
Augie Fackler -
r35855:d8f891ec default
parent child Browse files
Show More
@@ -1,1116 +1,1116 b''
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "predecessor" and possible
replacements are called "successors". Markers that used changeset X as
a predecessor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "predecessor markers of Y" because they hold
information about the predecessors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A in to A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

  (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. Marker format depend of the version. See
comment associated with each format for details.

"""
from __future__ import absolute_import

import errno
import struct

from .i18n import _
from . import (
    error,
    node,
    obsutil,
    phases,
    policy,
    util,
)
84
84
parsers = policy.importmod(r'parsers')

# Local aliases for frequently used struct helpers.
_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# Options for obsolescence
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'
100
100
def _getoptionvalue(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.

    Lookup order:
    1. the specific 'experimental.evolution.<option>' boolean, if set;
    2. the generic 'experimental.evolution' boolean;
    3. legacy list-valued 'experimental.evolution' config (e.g. 'all' or a
       list of option names), with a migration fallback on ``_enabled``.
    """
    configkey = 'evolution.%s' % option
    newconfig = repo.ui.configbool('experimental', configkey)

    # Return the value only if defined
    if newconfig is not None:
        return newconfig

    # Fallback on generic option
    try:
        return repo.ui.configbool('experimental', 'evolution')
    except (error.ConfigError, AttributeError):
        # Fallback on old-fashion config
        # inconsistent config: experimental.evolution
        result = set(repo.ui.configlist('experimental', 'evolution'))

        if 'all' in result:
            return True

        # For migration purposes, temporarily return true if the config hasn't
        # been set but _enabled is true.
        if len(result) == 0 and _enabled:
            return True

        # Temporary hack for next check
        newconfig = repo.ui.config('experimental', 'evolution.createmarkers')
        if newconfig:
            result.add('createmarkers')

        return option in result
134
134
def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.

    Raises error.Abort if 'allowunstable' or 'exchange' is enabled while
    'createmarkers' is not, since the latter is a prerequisite for both.
    """
    createmarkersvalue = _getoptionvalue(repo, createmarkersopt)
    unstablevalue = _getoptionvalue(repo, allowunstableopt)
    exchangevalue = _getoptionvalue(repo, exchangeopt)

    # createmarkers must be enabled if other options are enabled
    if ((unstablevalue or exchangevalue) and not createmarkersvalue):
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return _getoptionvalue(repo, option)
149
149
### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeed to a changeset A which became public, we call A'
# "bumped" because it's a successors of a public changesets
#
#     o A' (bumped)
#     |`:
#     | o A
#     |/
#     o Z
#
# The way to solve this situation is to create a new changeset Ad as children
# of A. This changeset have the same content than A'. So the diff from A to A'
# is the same than the diff from A to Ad. Ad is marked as a successors of A'
#
#     o Ad
#     |`:
#     | x A'
#     |'|
#     o | A
#     |/
#     o Z
#
# But by transitivity Ad is also a successors of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
# This flag mean that the successors express the changes between the public and
# bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
usingsha256 = 2
182
182
## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)
208
208
def _fm0readmarkers(data, off, stop):
    """Yield version-0 markers parsed from ``data[off:stop]``.

    Each yielded marker is a tuple:
    (predecessor, successors, flags, metadata, date, parents).
    Raises error.Abort when the metadata payload is truncated.
    """
    # Loop on markers
    while off < stop:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        # the date is stored inside the metadata in this format version;
        # fall back to the epoch on malformed values
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        # parents are also encoded as metadata keys p0/p1/p2:
        # 'p0' alone records "no parents" explicitly, absence of all three
        # means "no parent data stored"
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)
259
259
def _fm0encodeonemarker(marker):
    """Return the version-0 binary encoding of a single marker tuple.

    ``marker`` is (predecessor, successors, flags, metadata, date, parents).
    Raises error.Abort for sha256 markers, which version 0 cannot store.
    """
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    # date and parents are smuggled through the metadata dict in this format
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents, 1):
            metadata['p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata
279
279
280 def _fm0encodemeta(meta):
280 def _fm0encodemeta(meta):
281 """Return encoded metadata string to string mapping.
281 """Return encoded metadata string to string mapping.
282
282
283 Assume no ':' in key and no '\0' in both key and value."""
283 Assume no ':' in key and no '\0' in both key and value."""
284 for key, value in meta.iteritems():
284 for key, value in meta.iteritems():
285 if ':' in key or '\0' in key:
285 if ':' in key or '\0' in key:
286 raise ValueError("':' and '\0' are forbidden in metadata key'")
286 raise ValueError("':' and '\0' are forbidden in metadata key'")
287 if '\0' in value:
287 if '\0' in value:
288 raise ValueError("':' is forbidden in metadata value'")
288 raise ValueError("':' is forbidden in metadata value'")
289 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
289 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
290
290
291 def _fm0decodemeta(data):
291 def _fm0decodemeta(data):
292 """Return string to string dictionary from encoded version."""
292 """Return string to string dictionary from encoded version."""
293 d = {}
293 d = {}
294 for l in data.split('\0'):
294 for l in data.split('\0'):
295 if l:
295 if l:
296 key, value = l.split(':')
296 key, value = l.split(':')
297 d[key] = value
297 d[key] = value
298 return d
298 return d
299
299
## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: predecessor changeset identifier.
#
# - N*(20 or 32) bytes: successors changesets identifiers.
#
# - P*(20 or 32) bytes: parents of the predecessors changesets.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize(_fm1metapair)
346
346
def _fm1purereadmarkers(data, off, stop):
    """Yield version-1 markers parsed from ``data[off:stop]``.

    Pure-Python fallback used when the C parser is unavailable; yields the
    same (prec, sucs, flags, metadata, date, parents) tuples.
    """
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    ufixed = struct.Struct(_fm1fixed).unpack

    while off < stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata: first the (keysize, valuesize) pairs, then the
        # concatenated key/value payloads
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        # tz is stored in minutes on disk, in seconds in the marker tuple
        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
420
420
def _fm1encodeonemarker(marker):
    """Return the version-1 binary encoding of a single marker tuple.

    ``marker`` is (predecessor, successors, flags, metadata, date, parents).
    Raises error.ProgrammingError when a metadata key or value exceeds the
    255-byte limit imposed by the uint8 size fields.
    """
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    # data[0] is a placeholder for the total size, filled in below
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        if lk > 255:
            msg = ('obsstore metadata key cannot be longer than 255 bytes'
                   ' (key "%s" is %u bytes)') % (key, lk)
            raise error.ProgrammingError(msg)
        if lv > 255:
            msg = ('obsstore metadata value cannot be longer than 255 bytes'
                   ' (value "%s" for key "%s" is %u bytes)') % (value, key, lv)
            raise error.ProgrammingError(msg)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)
464
464
def _fm1readmarkers(data, off, stop):
    """Parse version-1 markers, preferring the C implementation.

    Falls back to the pure-Python parser when the 'parsers' C module does
    not provide fm1readmarkers.
    """
    native = getattr(parsers, 'fm1readmarkers', None)
    if not native:
        return _fm1purereadmarkers(data, off, stop)
    return native(data, off, stop)
470
470
# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
475
475
def _readmarkerversion(data):
    """Return the on-disk format version from the 1-byte obsstore header."""
    return _unpack('>B', data[0:1])[0]
478
478
@util.nogc
def _readmarkers(data, off=None, stop=None):
    """Read and enumerate markers from raw data

    Returns a (version, iterator-of-markers) pair. Raises
    error.UnknownVersion when the header declares an unsupported format.
    """
    diskversion = _readmarkerversion(data)
    if not off:
        off = 1  # skip 1 byte version number
    if stop is None:
        stop = len(data)
    if diskversion not in formats:
        msg = _('parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    return diskversion, formats[diskversion][0](data, off, stop)
491
491
def encodeheader(version=_fm0version):
    """Return the 1-byte obsstore header for the given format version."""
    return _pack('>B', version)
494
494
def encodemarkers(markers, addheader=False, version=_fm0version):
    """Yield the binary encoding of ``markers``, optionally with a header."""
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for marker in markers:
        yield encodeone(marker)
503
503
@util.nogc
def _addsuccessors(successors, markers):
    """Index ``markers`` into ``successors`` keyed by predecessor node."""
    for mark in markers:
        successors.setdefault(mark[0], set()).add(mark)
508
508
def _addprecursors(*args, **kwargs):
    """Deprecated alias kept for the 4.4 cycle; use _addpredecessors."""
    msg = ("'obsolete._addprecursors' is deprecated, "
           "use 'obsolete._addpredecessors'")
    util.nouideprecwarn(msg, '4.4')

    return _addpredecessors(*args, **kwargs)
515
515
@util.nogc
def _addpredecessors(predecessors, markers):
    """Index *markers* into the *predecessors* mapping.

    Each marker is filed under every one of its successor nodes
    (``mark[1]``), i.e. ``predecessors[node]`` is the set of markers that
    produced ``node``.
    """
    for mark in markers:
        for suc in mark[1]:
            predecessors.setdefault(suc, set()).add(mark)
521
521
@util.nogc
def _addchildren(children, markers):
    """Index *markers* into the *children* mapping.

    Each marker is filed under every recorded parent of its predecessor
    (``mark[5]``); a ``None`` parents field means no parent data was
    recorded and the marker is skipped.
    """
    for mark in markers:
        parents = mark[5]
        if parents is not None:
            for p in parents:
                children.setdefault(p, set()).add(mark)
529
529
def _checkinvalidmarkers(markers):
    """search for markers with invalid data and raise error if needed

    Exists as a separate function to allow the evolve extension a more
    subtle handling.

    Raises ``error.Abort`` if any marker lists the null node among its
    successors, which would be nonsensical.
    """
    for mark in markers:
        if node.nullid in mark[1]:
            raise error.Abort(_('bad obsolescence marker detected: '
                                'invalid successors nullid'))
540
540
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with two mappings:
    - predecessors[x] -> set(markers on predecessors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on predecessors edges of children(x)
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec: nodeid, predecessors changesets
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of predecessors
    #          None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related cache
        self.caches = {}
        self.svfs = svfs
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        # peek in __dict__ with a native (r'') string key so this works
        # identically on Python 2 and Python 3
        if not self._cached(r'_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating marker you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker have been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = util.makedate()
            else:
                date = util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicate.
        Return the number of new marker."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                data = b''.join(encodemarkers(new, offset == 0, self._version))
                f.write(data)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            addedmarkers = transaction.changes.get('obsmarkers')
            if addedmarkers is not None:
                addedmarkers.update(new)
            self._addmarkers(new, data)
            # new marker *may* have changed several set. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        # raw bytes of the obsstore file ('' when it does not exist)
        return self.svfs.tryread('obsstore')

    @propertycache
    def _version(self):
        # version byte of the existing store, or the configured default
        # when the store is empty/missing
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        # decoded list of every marker in the store
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        # predecessor-node -> set of markers obsoleting it
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @property
    def precursors(self):
        # deprecated alias kept for the 4.4 cycle
        msg = ("'obsstore.precursors' is deprecated, "
               "use 'obsstore.predecessors'")
        util.nouideprecwarn(msg, '4.4')

        return self.predecessors

    @propertycache
    def predecessors(self):
        # successor-node -> set of markers that produced it
        predecessors = {}
        _addpredecessors(predecessors, self._all)
        return predecessors

    @propertycache
    def children(self):
        # parent-node -> set of markers on its children
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        # *attr* must be a native (r'') string: __dict__ keys are native
        # strings on both Python 2 and Python 3
        return attr in self.__dict__

    def _addmarkers(self, markers, rawdata):
        """Append *markers* (already encoded as *rawdata*) to the in-memory
        state, updating only the index mappings that were already computed."""
        markers = list(markers) # to allow repeated iteration
        self._data = self._data + rawdata
        self._all.extend(markers)
        if self._cached(r'successors'):
            _addsuccessors(self.successors, markers)
        if self._cached(r'predecessors'):
            _addpredecessors(self.predecessors, markers)
        if self._cached(r'children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes mean:

        - marker that use this changeset as successor
        - prune marker of direct children on this changeset
        - recursive application of the two rules on predecessors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.predecessors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers
770
770
def makestore(ui, repo):
    """Create an obsstore instance from a repo.

    Honors the ``format.obsstore-version`` developer config for new stores
    and opens the store read-only unless marker creation is enabled.  Warns
    when markers exist but the obsolete feature is disabled.
    """
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    defaultformat = ui.configint('format', 'obsstore-version')
    # rely on obsstore class default when possible.
    kwargs = {}
    if defaultformat is not None:
        # native-string key: this dict is splatted as **kwargs
        kwargs[r'defaultformat'] = defaultformat
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **kwargs)
    if store and readonly:
        ui.warn(_('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
    return store
786
786
def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.

    Note: sorts *versions* in place (descending).
    """
    versions.sort(reverse=True)
    # search for highest version known on both side
    for v in versions:
        if v in formats:
            return v
    return None
798
798
799 # arbitrary picked to fit into 8K limit from HTTP server
799 # arbitrary picked to fit into 8K limit from HTTP server
800 # you have to take in account:
800 # you have to take in account:
801 # - the version header
801 # - the version header
802 # - the base85 encoding
802 # - the base85 encoding
803 _maxpayload = 5300
803 _maxpayload = 5300
804
804
def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes

    Returns a mapping of 'dump%i' keys to base85 payloads, each payload
    starting with an fm0 version header.
    """
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(data)
    return keys
825
825
def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(sorted(repo.obsstore))
831
831
def pushmarker(repo, key, old, new):
    """Push markers over pushkey

    Returns False (with a warning) for unexpected keys or old values,
    True once the decoded markers have been merged into the obsstore.
    """
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return False
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return False
    data = util.b85decode(new)
    with repo.lock(), repo.transaction('pushkey: obsolete markers') as tr:
        repo.obsstore.mergemarkers(tr, data)
        repo.invalidatevolatilesets()
        return True
845
845
# keep compatibility for the 4.3 cycle
def allprecursors(obsstore, nodes, ignoreflags=0):
    """Deprecated: moved to obsutil.allprecursors."""
    movemsg = 'obsolete.allprecursors moved to obsutil.allprecursors'
    util.nouideprecwarn(movemsg, '4.3')
    return obsutil.allprecursors(obsstore, nodes, ignoreflags)
851
851
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Deprecated: moved to obsutil.allsuccessors."""
    movemsg = 'obsolete.allsuccessors moved to obsutil.allsuccessors'
    util.nouideprecwarn(movemsg, '4.3')
    return obsutil.allsuccessors(obsstore, nodes, ignoreflags)
856
856
def marker(repo, data):
    """Deprecated: moved to obsutil.marker."""
    movemsg = 'obsolete.marker moved to obsutil.marker'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.marker(repo, data)
861
861
def getmarkers(repo, nodes=None, exclusive=False):
    """Deprecated: moved to obsutil.getmarkers."""
    movemsg = 'obsolete.getmarkers moved to obsutil.getmarkers'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.getmarkers(repo, nodes=nodes, exclusive=exclusive)
866
866
def exclusivemarkers(repo, nodes):
    """Deprecated: moved to obsutil.exclusivemarkers."""
    movemsg = 'obsolete.exclusivemarkers moved to obsutil.exclusivemarkers'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.exclusivemarkers(repo, nodes)
871
871
def foreground(repo, nodes):
    """Deprecated: moved to obsutil.foreground."""
    movemsg = 'obsolete.foreground moved to obsutil.foreground'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.foreground(repo, nodes)
876
876
def successorssets(repo, initialnode, cache=None):
    """Deprecated: moved to obsutil.successorssets."""
    movemsg = 'obsolete.successorssets moved to obsutil.successorssets'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.successorssets(repo, initialnode, cache=cache)
881
881
# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set

    The decorated function is stored in ``cachefuncs[name]`` and returned
    unchanged.  Registering the same name twice is a programming error.
    """
    def decorator(func):
        if name in cachefuncs:
            msg = "duplicated registration for volatileset '%s' (existing: %r)"
            raise error.ProgrammingError(msg % (name, cachefuncs[name]))
        cachefuncs[name] = func
        return func
    return decorator
893
893
def getrevs(repo, name):
    """Return the set of revision that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        # no markers at all: every volatile set is trivially empty
        return frozenset()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]
904
904
# To be simple we need to invalidate obsolescence cache when:
#
# - new changeset is added:
# - public phase is changed
# - obsolescence marker are added
# - strip is used a repo
def clearobscaches(repo):
    """Remove all obsolescence related cache from a repo

    This removes all caches in obsstore if the obsstore already exists on
    the repo.

    (We could be smarter here given the exact event that trigger the cache
    clearing)"""
    # only clear cache if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()
922
922
def _mutablerevs(repo):
    """the set of mutable revision in the repository"""
    return repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
926
926
@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    getnode = repo.changelog.node
    notpublic = _mutablerevs(repo)
    # a revision is obsolete when a marker lists it as a predecessor
    isobs = repo.obsstore.successors.__contains__
    obs = set(r for r in notpublic if isobs(getnode(r)))
    return obs
935
935
@cachefor('unstable')
def _computeunstableset(repo):
    """Deprecated alias of the 'orphan' volatile set."""
    msg = ("'unstable' volatile set is deprecated, "
           "use 'orphan'")
    repo.ui.deprecwarn(msg, '4.4')

    return _computeorphanset(repo)
943
943
@cachefor('orphan')
def _computeorphanset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    pfunc = repo.changelog.parentrevs
    mutable = _mutablerevs(repo)
    obsolete = getrevs(repo, 'obsolete')
    others = mutable - obsolete
    unstable = set()
    for r in sorted(others):
        # A rev is unstable if one of its parent is obsolete or unstable
        # this works since we traverse following growing rev order
        for p in pfunc(r):
            if p in obsolete or p in unstable:
                unstable.add(r)
                break
    return unstable
960
960
@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'orphan'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
966
966
@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
971
971
@cachefor('bumped')
def _computebumpedset(repo):
    """Deprecated alias of the 'phasedivergent' volatile set."""
    msg = ("'bumped' volatile set is deprecated, "
           "use 'phasedivergent'")
    repo.ui.deprecwarn(msg, '4.4')

    return _computephasedivergentset(repo)
979
979
@cachefor('phasedivergent')
def _computephasedivergentset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    tonode = cl.node
    for rev in repo.revs('(not public()) and (not obsolete())'):
        # We only evaluate mutable, non-obsolete revision
        node = tonode(rev)
        # (future) A cache of predecessors may worth if split is very common
        for pnode in obsutil.allpredecessors(repo.obsstore, [node],
                                             ignoreflags=bumpedfix):
            prev = torev(pnode) # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public predecessor
                bumped.add(rev)
                break # Next draft!
    return bumped
1002
1002
@cachefor('divergent')
def _computedivergentset(repo):
    """deprecated alias for the 'contentdivergent' volatile set

    Emits a deprecation warning, then forwards to the real computation.
    """
    repo.ui.deprecwarn("'divergent' volatile set is deprecated, "
                       "use 'contentdivergent'", '4.4')
    return _computecontentdivergentset(repo)
1010
1010
@cachefor('contentdivergent')
def _computecontentdivergentset(repo):
    """the set of rev that compete to be the final successors of some revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    # shared successors-sets cache, filled lazily by obsutil.successorssets
    successorscache = {}
    revnode = repo.changelog.node
    for rev in repo.revs('(not public()) - obsolete()'):
        node = revnode(rev)
        # walk the predecessor markers transitively, guarding against cycles
        pending = set(obsstore.predecessors.get(node, ()))
        visited = set()
        while pending:
            prec = pending.pop()[0]
            if prec in visited:
                continue  # emergency cycle hanging prevention
            visited.add(prec)
            if prec not in successorscache:
                obsutil.successorssets(repo, prec, cache=successorscache)
            nonempty = [s for s in successorscache[prec] if s]
            if len(nonempty) > 1:
                # a predecessor has several live successors sets: divergence
                divergent.add(rev)
                break
            pending.update(obsstore.predecessors.get(prec, ()))
    return divergent
1037
1037
1038
1038
def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                  operation=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuple. `old` and `news` are changectx. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function,

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare the metadata shared by every marker (note: the caller's dict
    # is reused and may gain a 'user'/'operation' key, as before)
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        develuser = repo.ui.config('devel', 'user.obsmarker')
        metadata['user'] = develuser or repo.ui.username()

    # record the operation name only when the experimental knob is on
    useoperation = repo.ui.configbool('experimental',
                                      'evolution.track-operation')
    if useoperation and operation:
        metadata['operation'] = operation

    # effect-flag recording is also gated by an experimental knob
    saveeffectflag = repo.ui.configbool('experimental',
                                        'evolution.effect-flags')

    with repo.transaction('add-obsolescence-marker') as tr:
        markerargs = []
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if len(rel) > 2:
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise error.Abort(_("cannot obsolete public changeset: %s")
                                  % prec,
                                  hint="see 'hg help phases' for details")
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                # a prune marker records the pruned changeset's parents
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise error.Abort(_("changeset %s cannot obsolete itself")
                                  % prec)

            # Effect flag can be different by relation
            if saveeffectflag:
                # The effect flag is saved in a versioned field name for
                # future evolution
                effectflag = obsutil.geteffectflag(rel)
                localmetadata[obsutil.EFFECTFLAGFIELD] = "%d" % effectflag

            # Creating a marker invalidates the hidden cache, which would
            # force prec.parents() above to be recomputed for every later
            # relation (n^2 behavior). So gather all arguments first, then
            # create the markers in a second pass.
            markerargs.append((nprec, nsucs, npare, localmetadata))

        for nprec, nsucs, npare, localmetadata in markerargs:
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata,
                                 ui=repo.ui)
    repo.filteredrevcache.clear()
General Comments 0
You need to be logged in to leave comments. Login now