obsolete: simplify relevantmarker...
Joerg Sonnenberger
r52538:ff523675 default
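
The functional change in this commit is confined to obsstore.relevantmarkers: the old code initialized seenmarkers twice and started seennodes empty; the new code drops the redundant initialization and seeds seennodes with the initial pendingnodes set, so the start nodes can never be queued again by the walk. The snippet below is a toy, self-contained model of that walk (hypothetical simplified markers of the form (predecessor, successors) held in a plain dict, not the real obsstore API), sketching why the seeding works:

# Toy model of the relevantmarkers walk (hypothetical, simplified markers
# of the form (predecessor, successors); not the real obsstore API).
def walk(predecessorsmarkers, pendingnodes):
    seenmarkers = set()
    # The change in this commit: seed seennodes with the initial pending set
    # instead of starting empty, so a start node reached again through a
    # marker edge is never queued a second time.
    seennodes = set(pendingnodes)
    while pendingnodes:
        direct = set()
        for current in pendingnodes:
            direct.update(predecessorsmarkers.get(current, ()))
        direct -= seenmarkers
        pendingnodes = {m[0] for m in direct}
        seenmarkers |= direct
        pendingnodes -= seennodes
        seennodes |= pendingnodes
    return seenmarkers

# A was rewritten into B, then B into C; asking about C walks back to A.
markers = {b'B': {(b'A', (b'B',))}, b'C': {(b'B', (b'C',))}}
assert walk(markers, {b'C'}) == {(b'A', (b'B',)), (b'B', (b'C',))}

The real method also follows prune markers via the children and successors mappings; that part is unchanged by this commit and omitted from the sketch. The duplicated seenmarkers = set() line the commit removes was simply redundant.
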
@@ -1,1170 +1,1169 @@
 # obsolete.py - obsolete markers handling
 #
 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
 #                Logilab SA        <contact@logilab.fr>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 """Obsolete marker handling

 An obsolete marker maps an old changeset to a list of new
 changesets. If the list of new changesets is empty, the old changeset
 is said to be "killed". Otherwise, the old changeset is being
 "replaced" by the new changesets.

 Obsolete markers can be used to record and distribute changeset graph
 transformations performed by history rewrite operations, and help
 building new tools to reconcile conflicting rewrite actions. To
 facilitate conflict resolution, markers include various annotations
 besides old and new changeset identifiers, such as creation date or
 author name.

 The old obsoleted changeset is called a "predecessor" and possible
 replacements are called "successors". Markers that used changeset X as
 a predecessor are called "successor markers of X" because they hold
 information about the successors of X. Markers that use changeset Y as
 a successor are called "predecessor markers of Y" because they hold
 information about the predecessors of Y.

 Examples:

 - When changeset A is replaced by changeset A', one marker is stored:

     (A, (A',))

 - When changesets A and B are folded into a new changeset C, two markers are
   stored:

     (A, (C,)) and (B, (C,))

 - When changeset A is simply "pruned" from the graph, a marker is created:

     (A, ())

 - When changeset A is split into B and C, a single marker is used:

     (A, (B, C))

   We use a single marker to distinguish the "split" case from the "divergence"
   case. If two independent operations rewrite the same changeset A into A' and
   A'', we have an error case: divergent rewriting. We can detect it because
   two markers will be created independently:

   (A, (B,)) and (A, (C,))

 Format
 ------

 Markers are stored in an append-only file stored in
 '.hg/store/obsstore'.

 The file starts with a version header:

 - 1 unsigned byte: version number, starting at zero.

 The header is followed by the markers. Marker format depends on the version.
 See the comment associated with each format for details.

 """

 import binascii
 import struct
 import weakref

 from .i18n import _
 from .node import (
     bin,
     hex,
 )
 from . import (
     encoding,
     error,
     obsutil,
     phases,
     policy,
     pycompat,
     util,
 )
 from .utils import (
     dateutil,
     hashutil,
 )

 parsers = policy.importmod('parsers')

 _pack = struct.pack
 _unpack = struct.unpack
 _calcsize = struct.calcsize
 propertycache = util.propertycache

 # Options for obsolescence
 createmarkersopt = b'createmarkers'
 allowunstableopt = b'allowunstable'
 allowdivergenceopt = b'allowdivergence'
 exchangeopt = b'exchange'


 def _getoptionvalue(repo, option):
     """Returns True if the given repository has the given obsolete option
     enabled.
     """
     configkey = b'evolution.%s' % option
     newconfig = repo.ui.configbool(b'experimental', configkey)

     # Return the value only if defined
     if newconfig is not None:
         return newconfig

     # Fallback on generic option
     try:
         return repo.ui.configbool(b'experimental', b'evolution')
     except (error.ConfigError, AttributeError):
         # Fallback on old-fashioned config
         # inconsistent config: experimental.evolution
         result = set(repo.ui.configlist(b'experimental', b'evolution'))

         if b'all' in result:
             return True

         # Temporary hack for next check
         newconfig = repo.ui.config(b'experimental', b'evolution.createmarkers')
         if newconfig:
             result.add(b'createmarkers')

         return option in result


 def getoptions(repo):
     """Return a dict showing the state of obsolescence features."""

     createmarkersvalue = _getoptionvalue(repo, createmarkersopt)
     if createmarkersvalue:
         unstablevalue = _getoptionvalue(repo, allowunstableopt)
         divergencevalue = _getoptionvalue(repo, allowdivergenceopt)
         exchangevalue = _getoptionvalue(repo, exchangeopt)
     else:
         # if we cannot create obsolescence markers, we shouldn't exchange them
         # or perform operations that lead to instability or divergence
         unstablevalue = False
         divergencevalue = False
         exchangevalue = False

     return {
         createmarkersopt: createmarkersvalue,
         allowunstableopt: unstablevalue,
         allowdivergenceopt: divergencevalue,
         exchangeopt: exchangevalue,
     }


 def isenabled(repo, option):
     """Returns True if the given repository has the given obsolete option
     enabled.
     """
     return getoptions(repo)[option]


 # Creating aliases for marker flags because evolve extension looks for
 # bumpedfix in obsolete.py
 bumpedfix = obsutil.bumpedfix
 usingsha256 = obsutil.usingsha256

 ## Parsing and writing of version "0"
 #
 # The header is followed by the markers. Each marker is made of:
 #
 # - 1 uint8 : number of new changesets "N", can be zero.
 #
 # - 1 uint32: metadata size "M" in bytes.
 #
 # - 1 byte: a bit field. It is reserved for flags used in common
 #   obsolete marker operations, to avoid repeated decoding of metadata
 #   entries.
 #
 # - 20 bytes: obsoleted changeset identifier.
 #
 # - N*20 bytes: new changesets identifiers.
 #
 # - M bytes: metadata as a sequence of nul-terminated strings. Each
 #   string contains a key and a value, separated by a colon ':', without
 #   additional encoding. Keys cannot contain '\0' or ':' and values
 #   cannot contain '\0'.
 _fm0version = 0
 _fm0fixed = b'>BIB20s'
 _fm0node = b'20s'
 _fm0fsize = _calcsize(_fm0fixed)
 _fm0fnodesize = _calcsize(_fm0node)


 def _fm0readmarkers(data, off, stop):
     # Loop on markers
     while off < stop:
         # read fixed part
         cur = data[off : off + _fm0fsize]
         off += _fm0fsize
         numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
         # read replacement
         sucs = ()
         if numsuc:
             s = _fm0fnodesize * numsuc
             cur = data[off : off + s]
             sucs = _unpack(_fm0node * numsuc, cur)
             off += s
         # read metadata
         # (metadata will be decoded on demand)
         metadata = data[off : off + mdsize]
         if len(metadata) != mdsize:
             raise error.Abort(
                 _(
                     b'parsing obsolete marker: metadata is too '
                     b'short, %d bytes expected, got %d'
                 )
                 % (mdsize, len(metadata))
             )
         off += mdsize
         metadata = _fm0decodemeta(metadata)
         try:
             when, offset = metadata.pop(b'date', b'0 0').split(b' ')
             date = float(when), int(offset)
         except ValueError:
             date = (0.0, 0)
         parents = None
         if b'p2' in metadata:
             parents = (metadata.pop(b'p1', None), metadata.pop(b'p2', None))
         elif b'p1' in metadata:
             parents = (metadata.pop(b'p1', None),)
         elif b'p0' in metadata:
             parents = ()
         if parents is not None:
             try:
                 parents = tuple(bin(p) for p in parents)
                 # if parent content is not a nodeid, drop the data
                 for p in parents:
                     if len(p) != 20:
                         parents = None
                         break
             except binascii.Error:
                 # if content cannot be translated to nodeid drop the data.
                 parents = None

         metadata = tuple(sorted(metadata.items()))

         yield (pre, sucs, flags, metadata, date, parents)


 def _fm0encodeonemarker(marker):
     pre, sucs, flags, metadata, date, parents = marker
     if flags & usingsha256:
         raise error.Abort(_(b'cannot handle sha256 with old obsstore format'))
     metadata = dict(metadata)
     time, tz = date
     metadata[b'date'] = b'%r %i' % (time, tz)
     if parents is not None:
         if not parents:
             # mark that we explicitly recorded no parents
             metadata[b'p0'] = b''
         for i, p in enumerate(parents, 1):
             metadata[b'p%i' % i] = hex(p)
     metadata = _fm0encodemeta(metadata)
     numsuc = len(sucs)
     format = _fm0fixed + (_fm0node * numsuc)
     data = [numsuc, len(metadata), flags, pre]
     data.extend(sucs)
     return _pack(format, *data) + metadata


 def _fm0encodemeta(meta):
     """Return encoded metadata string to string mapping.

     Assume no ':' in key and no '\0' in both key and value."""
     for key, value in meta.items():
         if b':' in key or b'\0' in key:
             raise ValueError(b"':' and '\0' are forbidden in metadata key'")
         if b'\0' in value:
             raise ValueError(b"':' is forbidden in metadata value'")
     return b'\0'.join([b'%s:%s' % (k, meta[k]) for k in sorted(meta)])


 def _fm0decodemeta(data):
     """Return string to string dictionary from encoded version."""
     d = {}
     for l in data.split(b'\0'):
         if l:
             key, value = l.split(b':', 1)
             d[key] = value
     return d


 ## Parsing and writing of version "1"
 #
 # The header is followed by the markers. Each marker is made of:
 #
 # - uint32: total size of the marker (including this field)
 #
 # - float64: date in seconds since epoch
 #
 # - int16: timezone offset in minutes
 #
 # - uint16: a bit field. It is reserved for flags used in common
 #   obsolete marker operations, to avoid repeated decoding of metadata
 #   entries.
 #
 # - uint8: number of successors "N", can be zero.
 #
 # - uint8: number of parents "P", can be zero.
 #
 #   0: parents data stored but no parent,
 #   1: one parent stored,
 #   2: two parents stored,
 #   3: no parent data stored
 #
 # - uint8: number of metadata entries M
 #
 # - 20 or 32 bytes: predecessor changeset identifier.
 #
 # - N*(20 or 32) bytes: successors changesets identifiers.
 #
 # - P*(20 or 32) bytes: parents of the predecessors changesets.
 #
 # - M*(uint8, uint8): size of all metadata entries (key and value)
 #
 # - remaining bytes: the metadata, each (key, value) pair after the other.
 _fm1version = 1
 _fm1fixed = b'>IdhHBBB'
 _fm1nodesha1 = b'20s'
 _fm1nodesha256 = b'32s'
 _fm1nodesha1size = _calcsize(_fm1nodesha1)
 _fm1nodesha256size = _calcsize(_fm1nodesha256)
 _fm1fsize = _calcsize(_fm1fixed)
 _fm1parentnone = 3
 _fm1metapair = b'BB'
 _fm1metapairsize = _calcsize(_fm1metapair)


 def _fm1purereadmarkers(data, off, stop):
     # make some global constants local for performance
     noneflag = _fm1parentnone
     sha2flag = usingsha256
     sha1size = _fm1nodesha1size
     sha2size = _fm1nodesha256size
     sha1fmt = _fm1nodesha1
     sha2fmt = _fm1nodesha256
     metasize = _fm1metapairsize
     metafmt = _fm1metapair
     fsize = _fm1fsize
     unpack = _unpack

     # Loop on markers
     ufixed = struct.Struct(_fm1fixed).unpack

     while off < stop:
         # read fixed part
         o1 = off + fsize
         t, secs, tz, flags, numsuc, numpar, nummeta = ufixed(data[off:o1])

         if flags & sha2flag:
             nodefmt = sha2fmt
             nodesize = sha2size
         else:
             nodefmt = sha1fmt
             nodesize = sha1size

         (prec,) = unpack(nodefmt, data[o1 : o1 + nodesize])
         o1 += nodesize

         # read 0 or more successors
         if numsuc == 1:
             o2 = o1 + nodesize
             sucs = (data[o1:o2],)
         else:
             o2 = o1 + nodesize * numsuc
             sucs = unpack(nodefmt * numsuc, data[o1:o2])

         # read parents
         if numpar == noneflag:
             o3 = o2
             parents = None
         elif numpar == 1:
             o3 = o2 + nodesize
             parents = (data[o2:o3],)
         else:
             o3 = o2 + nodesize * numpar
             parents = unpack(nodefmt * numpar, data[o2:o3])

         # read metadata
         off = o3 + metasize * nummeta
         metapairsize = unpack(b'>' + (metafmt * nummeta), data[o3:off])
         metadata = []
         for idx in range(0, len(metapairsize), 2):
             o1 = off + metapairsize[idx]
             o2 = o1 + metapairsize[idx + 1]
             metadata.append((data[off:o1], data[o1:o2]))
             off = o2

         yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)


 def _fm1encodeonemarker(marker):
     pre, sucs, flags, metadata, date, parents = marker
     # determine node size
     _fm1node = _fm1nodesha1
     if flags & usingsha256:
         _fm1node = _fm1nodesha256
     numsuc = len(sucs)
     numextranodes = 1 + numsuc
     if parents is None:
         numpar = _fm1parentnone
     else:
         numpar = len(parents)
         numextranodes += numpar
     formatnodes = _fm1node * numextranodes
     formatmeta = _fm1metapair * len(metadata)
     format = _fm1fixed + formatnodes + formatmeta
     # tz is stored in minutes so we divide by 60
     tz = date[1] // 60
     data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
     data.extend(sucs)
     if parents is not None:
         data.extend(parents)
     totalsize = _calcsize(format)
     for key, value in metadata:
         lk = len(key)
         lv = len(value)
         if lk > 255:
             msg = (
                 b'obsstore metadata key cannot be longer than 255 bytes'
                 b' (key "%s" is %u bytes)'
             ) % (key, lk)
             raise error.ProgrammingError(msg)
         if lv > 255:
             msg = (
                 b'obsstore metadata value cannot be longer than 255 bytes'
                 b' (value "%s" for key "%s" is %u bytes)'
             ) % (value, key, lv)
             raise error.ProgrammingError(msg)
         data.append(lk)
         data.append(lv)
         totalsize += lk + lv
     data[0] = totalsize
     data = [_pack(format, *data)]
     for key, value in metadata:
         data.append(key)
         data.append(value)
     return b''.join(data)


 def _fm1readmarkers(data, off, stop):
     native = getattr(parsers, 'fm1readmarkers', None)
     if not native:
         return _fm1purereadmarkers(data, off, stop)
     return native(data, off, stop)


 # mapping to read/write various marker formats
 # <version> -> (decoder, encoder)
 formats = {
     _fm0version: (_fm0readmarkers, _fm0encodeonemarker),
     _fm1version: (_fm1readmarkers, _fm1encodeonemarker),
 }


 def _readmarkerversion(data):
     return _unpack(b'>B', data[0:1])[0]


 @util.nogc
 def _readmarkers(data, off=None, stop=None):
     """Read and enumerate markers from raw data"""
     diskversion = _readmarkerversion(data)
     if not off:
         off = 1  # skip 1 byte version number
     if stop is None:
         stop = len(data)
     if diskversion not in formats:
         msg = _(b'parsing obsolete marker: unknown version %r') % diskversion
         raise error.UnknownVersion(msg, version=diskversion)
     return diskversion, formats[diskversion][0](data, off, stop)


 def encodeheader(version=_fm0version):
     return _pack(b'>B', version)


 def encodemarkers(markers, addheader=False, version=_fm0version):
     # Kept separate from flushmarkers(), it will be reused for
     # markers exchange.
     encodeone = formats[version][1]
     if addheader:
         yield encodeheader(version)
     for marker in markers:
         yield encodeone(marker)


 @util.nogc
 def _addsuccessors(successors, markers):
     for mark in markers:
         successors.setdefault(mark[0], set()).add(mark)


 @util.nogc
 def _addpredecessors(predecessors, markers):
     for mark in markers:
         for suc in mark[1]:
             predecessors.setdefault(suc, set()).add(mark)


 @util.nogc
 def _addchildren(children, markers):
     for mark in markers:
         parents = mark[5]
         if parents is not None:
             for p in parents:
                 children.setdefault(p, set()).add(mark)


 def _checkinvalidmarkers(repo, markers):
     """search for markers with invalid data and raise error if needed

     Exists as a separate function to allow the evolve extension to do more
     subtle handling.
     """
     for mark in markers:
         if repo.nullid in mark[1]:
             raise error.Abort(
                 _(
                     b'bad obsolescence marker detected: '
                     b'invalid successors nullid'
                 )
             )


 class obsstore:
     """Store obsolete markers

     Markers can be accessed with three mappings:
     - predecessors[x] -> set(markers on predecessors edges of x)
     - successors[x] -> set(markers on successors edges of x)
     - children[x] -> set(markers on predecessors edges of children(x))
     """

     fields = (b'prec', b'succs', b'flag', b'meta', b'date', b'parents')
     # prec:    nodeid, predecessors changesets
     # succs:   tuple of nodeid, successor changesets (0-N length)
     # flag:    integer, flag field carrying modifier for the markers (see doc)
     # meta:    binary blob in UTF-8, encoded metadata dictionary
     # date:    (float, int) tuple, date of marker creation
     # parents: (tuple of nodeid) or None, parents of predecessors
     #          None is used when no data has been recorded

     def __init__(self, repo, svfs, defaultformat=_fm1version, readonly=False):
         # caches for various obsolescence related sets
         self.caches = {}
         self.svfs = svfs
         self._repo = weakref.ref(repo)
         self._defaultformat = defaultformat
         self._readonly = readonly

     @property
     def repo(self):
         r = self._repo()
         if r is None:
             msg = "using the obsstore of a deallocated repo"
             raise error.ProgrammingError(msg)
         return r

     def __iter__(self):
         return iter(self._all)

     def __len__(self):
         return len(self._all)

     def __nonzero__(self):
         from . import statichttprepo

         if isinstance(self.repo, statichttprepo.statichttprepository):
             # If repo is accessed via static HTTP, then we can't use os.stat()
             # to just peek at the file size.
             return len(self._data) > 1
         if not self._cached('_all'):
             try:
                 return self.svfs.stat(b'obsstore').st_size > 1
             except FileNotFoundError:
                 # just build an empty _all list if no obsstore exists, which
                 # avoids further stat() syscalls
                 pass
         return bool(self._all)

     __bool__ = __nonzero__

     @property
     def readonly(self):
         """True if marker creation is disabled

         Remove me in the future when obsolete marker is always on."""
         return self._readonly

     def create(
         self,
         transaction,
         prec,
         succs=(),
         flag=0,
         parents=None,
         date=None,
         metadata=None,
         ui=None,
     ):
         """obsolete: add a new obsolete marker

         * ensure it is hashable
         * check mandatory metadata
         * encode metadata

         If you are a human writing code creating markers you want to use the
         `createmarkers` function in this module instead.

         return True if a new marker has been added, False if the markers
         already existed (no op).
         """
         flag = int(flag)
         if metadata is None:
             metadata = {}
         if date is None:
             if b'date' in metadata:
                 # as a courtesy for out-of-tree extensions
                 date = dateutil.parsedate(metadata.pop(b'date'))
             elif ui is not None:
                 date = ui.configdate(b'devel', b'default-date')
                 if date is None:
                     date = dateutil.makedate()
             else:
                 date = dateutil.makedate()
         if flag & usingsha256:
             if len(prec) != 32:
                 raise ValueError(prec)
             for succ in succs:
                 if len(succ) != 32:
                     raise ValueError(succ)
         else:
             if len(prec) != 20:
                 raise ValueError(prec)
             for succ in succs:
                 if len(succ) != 20:
                     raise ValueError(succ)
         if prec in succs:
             raise ValueError('in-marker cycle with %s' % prec.hex())

         metadata = tuple(sorted(metadata.items()))
         for k, v in metadata:
             try:
                 # might be better to reject non-ASCII keys
                 k.decode('utf-8')
                 v.decode('utf-8')
             except UnicodeDecodeError:
                 raise error.ProgrammingError(
                     b'obsstore metadata must be valid UTF-8 sequence '
                     b'(key = %r, value = %r)'
                     % (pycompat.bytestr(k), pycompat.bytestr(v))
                 )

         marker = (bytes(prec), tuple(succs), flag, metadata, date, parents)
         return bool(self.add(transaction, [marker]))

     def add(self, transaction, markers):
         """Add new markers to the store

         Takes care of filtering duplicates.
         Returns the number of new markers."""
         if self._readonly:
             raise error.Abort(
                 _(b'creating obsolete markers is not enabled on this repo')
             )
         known = set()
         getsuccessors = self.successors.get
         new = []
         for m in markers:
             if m not in getsuccessors(m[0], ()) and m not in known:
                 known.add(m)
                 new.append(m)
         if new:
             f = self.svfs(b'obsstore', b'ab')
             try:
                 offset = f.tell()
                 transaction.add(b'obsstore', offset)
                 # offset == 0: new file - add the version header
                 data = b''.join(encodemarkers(new, offset == 0, self._version))
                 f.write(data)
             finally:
                 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                 # call 'filecacheentry.refresh()' here
                 f.close()
             addedmarkers = transaction.changes.get(b'obsmarkers')
             if addedmarkers is not None:
                 addedmarkers.update(new)
             self._addmarkers(new, data)
             # new markers *may* have changed several sets. Invalidate the cache.
             self.caches.clear()
         # records the number of new markers for the transaction hooks
         previous = int(transaction.hookargs.get(b'new_obsmarkers', b'0'))
         transaction.hookargs[b'new_obsmarkers'] = b'%d' % (previous + len(new))
         return len(new)

     def mergemarkers(self, transaction, data):
         """merge a binary stream of markers inside the obsstore

         Returns the number of new markers added."""
         version, markers = _readmarkers(data)
         return self.add(transaction, markers)

     @propertycache
     def _data(self):
         return self.svfs.tryread(b'obsstore')

     @propertycache
     def _version(self):
         if len(self._data) >= 1:
             return _readmarkerversion(self._data)
         else:
             return self._defaultformat

     @propertycache
     def _all(self):
         data = self._data
         if not data:
             return []
         self._version, markers = _readmarkers(data)
         markers = list(markers)
         _checkinvalidmarkers(self.repo, markers)
         return markers

     @propertycache
     def successors(self):
         successors = {}
         _addsuccessors(successors, self._all)
         return successors

     @propertycache
     def predecessors(self):
         predecessors = {}
         _addpredecessors(predecessors, self._all)
         return predecessors

     @propertycache
     def children(self):
         children = {}
         _addchildren(children, self._all)
         return children

     def _cached(self, attr):
         return attr in self.__dict__

     def _addmarkers(self, markers, rawdata):
         markers = list(markers)  # to allow repeated iteration
         self._data = self._data + rawdata
         self._all.extend(markers)
         if self._cached('successors'):
             _addsuccessors(self.successors, markers)
         if self._cached('predecessors'):
             _addpredecessors(self.predecessors, markers)
         if self._cached('children'):
             _addchildren(self.children, markers)
         _checkinvalidmarkers(self.repo, markers)

     def relevantmarkers(self, nodes=None, revs=None):
         """return a set of all obsolescence markers relevant to a set of
         nodes or revisions.

         "relevant" to a set of nodes or revisions means:

         - markers that use this changeset as a successor
         - prune markers of direct children on this changeset
         - recursive application of the two rules on predecessors of these
           markers

         It is a set so you cannot rely on order."""
         if nodes is None:
             nodes = set()
         if revs is None:
             revs = set()

         get_rev = self.repo.unfiltered().changelog.index.get_rev
         pendingnodes = set()
         for marker in self._all:
             for node in (marker[0],) + marker[1] + (marker[5] or ()):
                 if node in nodes:
                     pendingnodes.add(node)
                 elif revs:
                     rev = get_rev(node)
                     if rev is not None and rev in revs:
                         pendingnodes.add(node)
         seenmarkers = set()
-        seenmarkers = set()
-        seennodes = set()
+        seennodes = set(pendingnodes)
804 precursorsmarkers = self.predecessors
803 precursorsmarkers = self.predecessors
805 succsmarkers = self.successors
804 succsmarkers = self.successors
806 children = self.children
805 children = self.children
807 while pendingnodes:
806 while pendingnodes:
808 direct = set()
807 direct = set()
809 for current in pendingnodes:
808 for current in pendingnodes:
810 direct.update(precursorsmarkers.get(current, ()))
809 direct.update(precursorsmarkers.get(current, ()))
811 pruned = [m for m in children.get(current, ()) if not m[1]]
810 pruned = [m for m in children.get(current, ()) if not m[1]]
812 direct.update(pruned)
811 direct.update(pruned)
813 pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
812 pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
814 direct.update(pruned)
813 direct.update(pruned)
815 direct -= seenmarkers
814 direct -= seenmarkers
816 pendingnodes = {m[0] for m in direct}
815 pendingnodes = {m[0] for m in direct}
817 seenmarkers |= direct
816 seenmarkers |= direct
818 pendingnodes -= seennodes
817 pendingnodes -= seennodes
819 seennodes |= pendingnodes
818 seennodes |= pendingnodes
820 return seenmarkers
819 return seenmarkers
821
820
822
821
823 def makestore(ui, repo):
822 def makestore(ui, repo):
824 """Create an obsstore instance from a repo."""
823 """Create an obsstore instance from a repo."""
825 # read default format for new obsstore.
824 # read default format for new obsstore.
826 # developer config: format.obsstore-version
825 # developer config: format.obsstore-version
827 defaultformat = ui.configint(b'format', b'obsstore-version')
826 defaultformat = ui.configint(b'format', b'obsstore-version')
828 # rely on obsstore class default when possible.
827 # rely on obsstore class default when possible.
829 kwargs = {}
828 kwargs = {}
830 if defaultformat is not None:
829 if defaultformat is not None:
831 kwargs['defaultformat'] = defaultformat
830 kwargs['defaultformat'] = defaultformat
832 readonly = not isenabled(repo, createmarkersopt)
831 readonly = not isenabled(repo, createmarkersopt)
833 store = obsstore(repo, repo.svfs, readonly=readonly, **kwargs)
832 store = obsstore(repo, repo.svfs, readonly=readonly, **kwargs)
834 if store and readonly:
833 if store and readonly:
835 ui.warn(
834 ui.warn(
836 _(b'"obsolete" feature not enabled but %i markers found!\n')
835 _(b'"obsolete" feature not enabled but %i markers found!\n')
837 % len(list(store))
836 % len(list(store))
838 )
837 )
839 return store
838 return store
840
839
841
840
842 def commonversion(versions):
841 def commonversion(versions):
843 """Return the newest version listed in both versions and our local formats.
842 """Return the newest version listed in both versions and our local formats.
844
843
845 Returns None if no common version exists.
844 Returns None if no common version exists.
846 """
845 """
847 versions.sort(reverse=True)
846 versions.sort(reverse=True)
848 # search for highest version known on both side
847 # search for highest version known on both side
849 for v in versions:
848 for v in versions:
850 if v in formats:
849 if v in formats:
851 return v
850 return v
852 return None
851 return None
853
852
854
853
855 # arbitrary picked to fit into 8K limit from HTTP server
854 # arbitrary picked to fit into 8K limit from HTTP server
856 # you have to take in account:
855 # you have to take in account:
857 # - the version header
856 # - the version header
858 # - the base85 encoding
857 # - the base85 encoding
859 _maxpayload = 5300
858 _maxpayload = 5300
860
859
861
860
862 def _pushkeyescape(markers):
861 def _pushkeyescape(markers):
863 """encode markers into a dict suitable for pushkey exchange
862 """encode markers into a dict suitable for pushkey exchange
864
863
865 - binary data is base85 encoded
864 - binary data is base85 encoded
866 - split in chunks smaller than 5300 bytes"""
865 - split in chunks smaller than 5300 bytes"""
867 keys = {}
866 keys = {}
868 parts = []
867 parts = []
869 currentlen = _maxpayload * 2 # ensure we create a new part
868 currentlen = _maxpayload * 2 # ensure we create a new part
870 for marker in markers:
869 for marker in markers:
871 nextdata = _fm0encodeonemarker(marker)
870 nextdata = _fm0encodeonemarker(marker)
872 if len(nextdata) + currentlen > _maxpayload:
871 if len(nextdata) + currentlen > _maxpayload:
873 currentpart = []
872 currentpart = []
874 currentlen = 0
873 currentlen = 0
875 parts.append(currentpart)
874 parts.append(currentpart)
876 currentpart.append(nextdata)
875 currentpart.append(nextdata)
877 currentlen += len(nextdata)
876 currentlen += len(nextdata)
878 for idx, part in enumerate(reversed(parts)):
877 for idx, part in enumerate(reversed(parts)):
879 data = b''.join([_pack(b'>B', _fm0version)] + part)
878 data = b''.join([_pack(b'>B', _fm0version)] + part)
880 keys[b'dump%i' % idx] = util.b85encode(data)
879 keys[b'dump%i' % idx] = util.b85encode(data)
881 return keys
880 return keys
882
881
883
882
884 def listmarkers(repo):
883 def listmarkers(repo):
885 """List markers over pushkey"""
884 """List markers over pushkey"""
886 if not repo.obsstore:
885 if not repo.obsstore:
887 return {}
886 return {}
888 return _pushkeyescape(sorted(repo.obsstore))
887 return _pushkeyescape(sorted(repo.obsstore))
889
888
890
889
891 def pushmarker(repo, key, old, new):
890 def pushmarker(repo, key, old, new):
892 """Push markers over pushkey"""
891 """Push markers over pushkey"""
893 if not key.startswith(b'dump'):
892 if not key.startswith(b'dump'):
894 repo.ui.warn(_(b'unknown key: %r') % key)
893 repo.ui.warn(_(b'unknown key: %r') % key)
895 return False
894 return False
896 if old:
895 if old:
897 repo.ui.warn(_(b'unexpected old value for %r') % key)
896 repo.ui.warn(_(b'unexpected old value for %r') % key)
898 return False
897 return False
899 data = util.b85decode(new)
898 data = util.b85decode(new)
900 with repo.lock(), repo.transaction(b'pushkey: obsolete markers') as tr:
899 with repo.lock(), repo.transaction(b'pushkey: obsolete markers') as tr:
901 repo.obsstore.mergemarkers(tr, data)
900 repo.obsstore.mergemarkers(tr, data)
902 repo.invalidatevolatilesets()
901 repo.invalidatevolatilesets()
903 return True
902 return True
904
903
905
904
906 # mapping of 'set-name' -> <function to compute this set>
905 # mapping of 'set-name' -> <function to compute this set>
907 cachefuncs = {}
906 cachefuncs = {}
908
907
909
908
910 def cachefor(name):
909 def cachefor(name):
911 """Decorator to register a function as computing the cache for a set"""
910 """Decorator to register a function as computing the cache for a set"""
912
911
913 def decorator(func):
912 def decorator(func):
914 if name in cachefuncs:
913 if name in cachefuncs:
915 msg = b"duplicated registration for volatileset '%s' (existing: %r)"
914 msg = b"duplicated registration for volatileset '%s' (existing: %r)"
916 raise error.ProgrammingError(msg % (name, cachefuncs[name]))
915 raise error.ProgrammingError(msg % (name, cachefuncs[name]))
917 cachefuncs[name] = func
916 cachefuncs[name] = func
918 return func
917 return func
919
918
920 return decorator
919 return decorator
921
920
922
921
923 def getrevs(repo, name):
922 def getrevs(repo, name):
924 """Return the set of revision that belong to the <name> set
923 """Return the set of revision that belong to the <name> set
925
924
926 Such access may compute the set and cache it for future use"""
925 Such access may compute the set and cache it for future use"""
927 repo = repo.unfiltered()
926 repo = repo.unfiltered()
928 with util.timedcm('getrevs %s', name):
927 with util.timedcm('getrevs %s', name):
929 if not repo.obsstore:
928 if not repo.obsstore:
930 return frozenset()
929 return frozenset()
931 if name not in repo.obsstore.caches:
930 if name not in repo.obsstore.caches:
932 repo.obsstore.caches[name] = cachefuncs[name](repo)
931 repo.obsstore.caches[name] = cachefuncs[name](repo)
933 return repo.obsstore.caches[name]
932 return repo.obsstore.caches[name]
934
933
935
934
936 # To be simple we need to invalidate obsolescence cache when:
935 # To be simple we need to invalidate obsolescence cache when:
937 #
936 #
938 # - new changeset is added:
937 # - new changeset is added:
939 # - public phase is changed
938 # - public phase is changed
940 # - obsolescence marker are added
939 # - obsolescence marker are added
941 # - strip is used a repo
940 # - strip is used a repo
942 def clearobscaches(repo):
941 def clearobscaches(repo):
943 """Remove all obsolescence related cache from a repo
942 """Remove all obsolescence related cache from a repo
944
943
945 This remove all cache in obsstore is the obsstore already exist on the
944 This remove all cache in obsstore is the obsstore already exist on the
946 repo.
945 repo.
947
946
948 (We could be smarter here given the exact event that trigger the cache
947 (We could be smarter here given the exact event that trigger the cache
949 clearing)"""
948 clearing)"""
950 # only clear cache is there is obsstore data in this repo
949 # only clear cache is there is obsstore data in this repo
951 if b'obsstore' in repo._filecache:
950 if b'obsstore' in repo._filecache:
952 repo.obsstore.caches.clear()
951 repo.obsstore.caches.clear()


def _mutablerevs(repo):
    """the set of mutable revisions in the repository"""
    return repo._phasecache.getrevset(repo, phases.relevant_mutable_phases)


@cachefor(b'obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    getnode = repo.changelog.node
    notpublic = _mutablerevs(repo)
    isobs = repo.obsstore.successors.__contains__
    return frozenset(r for r in notpublic if isobs(getnode(r)))


@cachefor(b'orphan')
def _computeorphanset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    pfunc = repo.changelog.parentrevs
    mutable = _mutablerevs(repo)
    obsolete = getrevs(repo, b'obsolete')
    others = mutable - obsolete
    unstable = set()
    for r in sorted(others):
        # A rev is unstable if one of its parents is obsolete or unstable;
        # this works because we traverse in increasing rev order
        for p in pfunc(r):
            if p in obsolete or p in unstable:
                unstable.add(r)
                break
    return frozenset(unstable)
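

# Minimal self-contained sketch of the single-pass propagation above, with
# plain integers for revs and a toy parent map (all data hypothetical):
#
#     parents = {1: (0,), 2: (1,), 3: (2,)}  # rev -> parent revs
#     obsolete = {1}
#     unstable = set()
#     for r in sorted(parents):  # increasing rev order, parents seen first
#         if any(p in obsolete or p in unstable for p in parents[r]):
#             unstable.add(r)
#     # unstable == {2, 3}: 2 has an obsolete parent, 3 an unstable one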


@cachefor(b'suspended')
def _computesuspendedset(repo):
    """the set of obsolete revisions with non obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, b'orphan'))
    return frozenset(r for r in getrevs(repo, b'obsolete') if r in suspended)


@cachefor(b'extinct')
def _computeextinctset(repo):
    """the set of obsolete revisions without non obsolete descendants"""
    return getrevs(repo, b'obsolete') - getrevs(repo, b'suspended')
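

# How the sets above relate (sketch): every obsolete revision is either
# suspended (it still has live orphan descendants) or extinct, so:
#
#     obsolete == suspended | extinct
#     suspended & extinct == frozenset()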
998
997
999
998
1000 @cachefor(b'phasedivergent')
999 @cachefor(b'phasedivergent')
1001 def _computephasedivergentset(repo):
1000 def _computephasedivergentset(repo):
1002 """the set of revs trying to obsolete public revisions"""
1001 """the set of revs trying to obsolete public revisions"""
1003 bumped = set()
1002 bumped = set()
1004 # util function (avoid attribute lookup in the loop)
1003 # util function (avoid attribute lookup in the loop)
1005 phase = repo._phasecache.phase # would be faster to grab the full list
1004 phase = repo._phasecache.phase # would be faster to grab the full list
1006 public = phases.public
1005 public = phases.public
1007 cl = repo.changelog
1006 cl = repo.changelog
1008 torev = cl.index.get_rev
1007 torev = cl.index.get_rev
1009 tonode = cl.node
1008 tonode = cl.node
1010 obsstore = repo.obsstore
1009 obsstore = repo.obsstore
1011 candidates = sorted(_mutablerevs(repo) - getrevs(repo, b"obsolete"))
1010 candidates = sorted(_mutablerevs(repo) - getrevs(repo, b"obsolete"))
1012 for rev in candidates:
1011 for rev in candidates:
1013 # We only evaluate mutable, non-obsolete revision
1012 # We only evaluate mutable, non-obsolete revision
1014 node = tonode(rev)
1013 node = tonode(rev)
1015 # (future) A cache of predecessors may worth if split is very common
1014 # (future) A cache of predecessors may worth if split is very common
1016 for pnode in obsutil.allpredecessors(
1015 for pnode in obsutil.allpredecessors(
1017 obsstore, [node], ignoreflags=bumpedfix
1016 obsstore, [node], ignoreflags=bumpedfix
1018 ):
1017 ):
1019 prev = torev(pnode) # unfiltered! but so is phasecache
1018 prev = torev(pnode) # unfiltered! but so is phasecache
1020 if (prev is not None) and (phase(repo, prev) <= public):
1019 if (prev is not None) and (phase(repo, prev) <= public):
1021 # we have a public predecessor
1020 # we have a public predecessor
1022 bumped.add(rev)
1021 bumped.add(rev)
1023 break # Next draft!
1022 break # Next draft!
1024 return frozenset(bumped)
1023 return frozenset(bumped)
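

# Toy illustration of the criterion above (hypothetical revs): a mutable rev
# whose chain of predecessors reaches a public rev is phase-divergent.
#
#     phase_of = {2: phases.public, 3: phases.draft}  # rev -> phase
#     predecessors_of = {3: [2]}  # draft rev 3 rewrote public rev 2
#     any(phase_of[p] <= phases.public for p in predecessors_of[3])  # True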


@cachefor(b'contentdivergent')
def _computecontentdivergentset(repo):
    """the set of revs that compete to be the final successors of some revision."""
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    tonode = repo.changelog.node
    candidates = sorted(_mutablerevs(repo) - getrevs(repo, b"obsolete"))
    for rev in candidates:
        node = tonode(rev)
        mark = obsstore.predecessors.get(node, ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue  # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                obsutil.successorssets(repo, prec, cache=newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(rev)
                break
            toprocess.update(obsstore.predecessors.get(prec, ()))
    return frozenset(divergent)
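

# Toy illustration of the detection above (hypothetical nodes): when a
# predecessor P has more than one live successors set, every rev reached
# from P this way is content-divergent.
#
#     successorssets_of = {b'P': [[b'X'], [b'Y']]}  # two competing rewrites
#     newer = [s for s in successorssets_of[b'P'] if s]
#     len(newer) > 1  # True -> X and Y are content-divergent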


def makefoldid(relation, user):
    folddigest = hashutil.sha1(user)
    for p in relation[0] + relation[1]:
        folddigest.update(b'%d' % p.rev())
        folddigest.update(p.node())
    # Since fold only has to compete against fold for the same successors, it
    # seems fine to use a small ID. Smaller IDs save space.
    return hex(folddigest.digest())[:8]
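

# Sketch of the fold-id computed above for a fold of revs 1 and 2 into rev 3
# (hypothetical user and node values; hashutil.sha1 is assumed to behave like
# hashlib.sha1 here):
#
#     import hashlib
#     d = hashlib.sha1(b'alice <alice@example.org>')
#     for rev, node in ((1, b'\x11' * 20), (2, b'\x22' * 20), (3, b'\x33' * 20)):
#         d.update(b'%d' % rev)
#         d.update(node)
#     foldid = d.hexdigest()[:8]  # 8 hex digits, enough to disambiguate folds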


def createmarkers(
    repo, relations, flag=0, date=None, metadata=None, operation=None
):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of ((<old>,...), (<new>, ...)[,{metadata}])
    tuples. `old` and `new` are changectx objects. metadata is an optional
    dictionary containing metadata for this marker only. It is merged with
    the global metadata specified through the `metadata` argument of this
    function. Any string values in metadata must be UTF-8 bytes.

    Trying to obsolete a public changeset will raise an exception.

    The current user and date are used unless specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if b'user' not in metadata:
        luser = (
            repo.ui.config(b'devel', b'user.obsmarker') or repo.ui.username()
        )
        metadata[b'user'] = encoding.fromlocal(luser)

    # Operation metadata handling
    useoperation = repo.ui.configbool(
        b'experimental', b'evolution.track-operation'
    )
    if useoperation and operation:
        metadata[b'operation'] = operation

    # Effect flag metadata handling
    saveeffectflag = repo.ui.configbool(
        b'experimental', b'evolution.effect-flags'
    )

    with repo.transaction(b'add-obsolescence-marker') as tr:
        markerargs = []
        for rel in relations:
            predecessors = rel[0]
            if not isinstance(predecessors, tuple):
                # preserve compat with old API until all callers are migrated
                predecessors = (predecessors,)
            if len(predecessors) > 1 and len(rel[1]) != 1:
                msg = b'Fold markers can only have 1 successor, not %d'
                raise error.ProgrammingError(msg % len(rel[1]))
            foldid = None
            foldsize = len(predecessors)
            if 1 < foldsize:
                foldid = makefoldid(rel, metadata[b'user'])
            for foldidx, prec in enumerate(predecessors, 1):
                sucs = rel[1]
                localmetadata = metadata.copy()
                if len(rel) > 2:
                    localmetadata.update(rel[2])
                if foldid is not None:
                    localmetadata[b'fold-id'] = foldid
                    localmetadata[b'fold-idx'] = b'%d' % foldidx
                    localmetadata[b'fold-size'] = b'%d' % foldsize

                if not prec.mutable():
                    raise error.Abort(
                        _(b"cannot obsolete public changeset: %s") % prec,
                        hint=b"see 'hg help phases' for details",
                    )
                nprec = prec.node()
                nsucs = tuple(s.node() for s in sucs)
                npare = None
                if not nsucs:
                    npare = tuple(p.node() for p in prec.parents())
                if nprec in nsucs:
                    raise error.Abort(
                        _(b"changeset %s cannot obsolete itself") % prec
                    )

                # The effect flag can differ by relation
                if saveeffectflag:
                    # The effect flag is saved in a versioned field name for
                    # future evolution
                    effectflag = obsutil.geteffectflag(prec, sucs)
                    localmetadata[obsutil.EFFECTFLAGFIELD] = b"%d" % effectflag

                # Creating the marker causes the hidden cache to become
                # invalid, which causes recomputation when we ask for
                # prec.parents() above, resulting in O(n^2) behavior. So let's
                # prepare all of the args first, then create the markers.
                markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(
                tr,
                nprec,
                nsucs,
                flag,
                parents=npare,
                date=date,
                metadata=localmetadata,
                ui=repo.ui,
            )
    repo.filteredrevcache.clear()
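

# Illustrative call (assumed context: old_ctx and new_ctx are changectx
# objects, with new_ctx rewriting old_ctx; the operation name is arbitrary):
#
#     createmarkers(
#         repo,
#         [((old_ctx,), (new_ctx,))],
#         operation=b'amend',
#         metadata={b'note': b'example rewrite'},
#     )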