obsstore: refactor v1 logic to fix 32 byte hash support...
Joerg Sonnenberger
r46035:145cfe84 default
@@ -1,1146 +1,1142 @@
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "predecessor" and possible
replacements are called "successors". Markers that used changeset X as
a predecessor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "predecessor markers of Y" because they hold
information about the predecessors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

  (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. The marker format depends on the
version. See the comment associated with each format for details.

"""
from __future__ import absolute_import

import errno
import struct

from .i18n import _
from .pycompat import getattr
from . import (
    encoding,
    error,
    node,
    obsutil,
    phases,
    policy,
    pycompat,
    util,
)
from .utils import (
    dateutil,
    hashutil,
)

parsers = policy.importmod('parsers')

_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# Options for obsolescence
createmarkersopt = b'createmarkers'
allowunstableopt = b'allowunstable'
exchangeopt = b'exchange'


def _getoptionvalue(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    configkey = b'evolution.%s' % option
    newconfig = repo.ui.configbool(b'experimental', configkey)

    # Return the value only if defined
    if newconfig is not None:
        return newconfig

    # Fallback on generic option
    try:
        return repo.ui.configbool(b'experimental', b'evolution')
    except (error.ConfigError, AttributeError):
        # Fallback on old-fashioned config
        # inconsistent config: experimental.evolution
        result = set(repo.ui.configlist(b'experimental', b'evolution'))

        if b'all' in result:
            return True

        # Temporary hack for next check
        newconfig = repo.ui.config(b'experimental', b'evolution.createmarkers')
        if newconfig:
            result.add(b'createmarkers')

        return option in result


def getoptions(repo):
    """Returns dicts showing state of obsolescence features."""

    createmarkersvalue = _getoptionvalue(repo, createmarkersopt)
    unstablevalue = _getoptionvalue(repo, allowunstableopt)
    exchangevalue = _getoptionvalue(repo, exchangeopt)

    # createmarkers must be enabled if other options are enabled
    if (unstablevalue or exchangevalue) and not createmarkersvalue:
        raise error.Abort(
            _(
                b"'createmarkers' obsolete option must be enabled "
                b"if other obsolete options are enabled"
            )
        )

    return {
        createmarkersopt: createmarkersvalue,
        allowunstableopt: unstablevalue,
        exchangeopt: exchangevalue,
    }


def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    return getoptions(repo)[option]


# Creating aliases for marker flags because evolve extension looks for
# bumpedfix in obsolete.py
bumpedfix = obsutil.bumpedfix
usingsha256 = obsutil.usingsha256

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changeset identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = b'>BIB20s'
_fm0node = b'20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)
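
A quick sanity check of the v0 layout described above, as a minimal sketch using only the standard library (the sizes follow directly from the format strings):

    import struct

    # numsuc (B) + metadata size (I) + flags (B) + 20-byte node = 26 bytes
    assert struct.calcsize('>BIB20s') == 26
    # each successor entry is a further 20-byte node
    assert struct.calcsize('>20s') == 20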


def _fm0readmarkers(data, off, stop):
    # Loop on markers
    while off < stop:
        # read fixed part
        cur = data[off : off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = _fm0fnodesize * numsuc
            cur = data[off : off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off : off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(
                _(
                    b'parsing obsolete marker: metadata is too '
                    b'short, %d bytes expected, got %d'
                )
                % (mdsize, len(metadata))
            )
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop(b'date', b'0 0').split(b' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0.0, 0)
        parents = None
        if b'p2' in metadata:
            parents = (metadata.pop(b'p1', None), metadata.pop(b'p2', None))
        elif b'p1' in metadata:
            parents = (metadata.pop(b'p1', None),)
        elif b'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to a nodeid, drop the data.
                parents = None

        metadata = tuple(sorted(pycompat.iteritems(metadata)))

        yield (pre, sucs, flags, metadata, date, parents)
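
A minimal round-trip sketch through the v0 codec, assuming a standard mercurial installation (these are private helpers of this module, used here purely for illustration):

    from mercurial import obsolete

    prec = b'\x11' * 20
    marker = (prec, (), 0, ((b'user', b'alice'),), (0.0, 0), None)
    data = obsolete._fm0encodeonemarker(marker)
    # the date travels as a metadata entry and is popped back out on read
    assert next(obsolete._fm0readmarkers(data, 0, len(data))) == marker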


def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_(b'cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata[b'date'] = b'%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata[b'p0'] = b''
        for i, p in enumerate(parents, 1):
            metadata[b'p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata


def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in pycompat.iteritems(meta):
        if b':' in key or b'\0' in key:
            raise ValueError(b"':' and '\0' are forbidden in metadata keys")
        if b'\0' in value:
            raise ValueError(b"'\0' is forbidden in metadata values")
    return b'\0'.join([b'%s:%s' % (k, meta[k]) for k in sorted(meta)])


def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split(b'\0'):
        if l:
            key, value = l.split(b':', 1)
            d[key] = value
    return d
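
The metadata codec is a simple sorted, colon-separated, NUL-joined encoding; a small illustrative sketch (same caveat about private helpers):

    from mercurial import obsolete

    meta = {b'note': b'amended', b'user': b'alice'}
    blob = obsolete._fm0encodemeta(meta)
    assert blob == b'note:amended\x00user:alice'
    assert obsolete._fm0decodemeta(blob) == meta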


## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: predecessor changeset identifier.
#
# - N*(20 or 32) bytes: successor changeset identifiers.
#
# - P*(20 or 32) bytes: parents of the predecessor changesets.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
-_fm1fixed = b'>IdhHBBB20s'
+_fm1fixed = b'>IdhHBBB'
_fm1nodesha1 = b'20s'
_fm1nodesha256 = b'32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = _fm1parentnone << _fm1parentshift
_fm1metapair = b'BB'
_fm1metapairsize = _calcsize(_fm1metapair)
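
The core of this change is visible in the struct sizes: the old fixed part baked a 20-byte node into the format, so a 32-byte SHA-256 node advertised by the usingsha256 flag could not be represented; the node is now read separately at whichever size the flag selects. A sketch:

    import struct

    assert struct.calcsize('>IdhHBBB20s') == 39  # old fixed part, SHA-1 only
    assert struct.calcsize('>IdhHBBB') == 19     # new fixed part, node excluded
    assert struct.calcsize('>20s') == 20         # SHA-1 node
    assert struct.calcsize('>32s') == 32         # SHA-256 node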


def _fm1purereadmarkers(data, off, stop):
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    ufixed = struct.Struct(_fm1fixed).unpack

    while off < stop:
        # read fixed part
        o1 = off + fsize
-        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])
+        t, secs, tz, flags, numsuc, numpar, nummeta = ufixed(data[off:o1])

        if flags & sha2flag:
-            # FIXME: prec was read as a SHA1, needs to be amended
-
-            # read 0 or more successors
-            if numsuc == 1:
-                o2 = o1 + sha2size
-                sucs = (data[o1:o2],)
-            else:
-                o2 = o1 + sha2size * numsuc
-                sucs = unpack(sha2fmt * numsuc, data[o1:o2])
-
-            # read parents
-            if numpar == noneflag:
-                o3 = o2
-                parents = None
-            elif numpar == 1:
-                o3 = o2 + sha2size
-                parents = (data[o2:o3],)
-            else:
-                o3 = o2 + sha2size * numpar
-                parents = unpack(sha2fmt * numpar, data[o2:o3])
-        else:
-            # read 0 or more successors
-            if numsuc == 1:
-                o2 = o1 + sha1size
-                sucs = (data[o1:o2],)
-            else:
-                o2 = o1 + sha1size * numsuc
-                sucs = unpack(sha1fmt * numsuc, data[o1:o2])
-
-            # read parents
-            if numpar == noneflag:
-                o3 = o2
-                parents = None
-            elif numpar == 1:
-                o3 = o2 + sha1size
-                parents = (data[o2:o3],)
-            else:
-                o3 = o2 + sha1size * numpar
-                parents = unpack(sha1fmt * numpar, data[o2:o3])
+            nodefmt = sha2fmt
+            nodesize = sha2size
+        else:
+            nodefmt = sha1fmt
+            nodesize = sha1size
+
+        (prec,) = unpack(nodefmt, data[o1 : o1 + nodesize])
+        o1 += nodesize
+
+        # read 0 or more successors
+        if numsuc == 1:
+            o2 = o1 + nodesize
+            sucs = (data[o1:o2],)
+        else:
+            o2 = o1 + nodesize * numsuc
+            sucs = unpack(nodefmt * numsuc, data[o1:o2])
+
+        # read parents
+        if numpar == noneflag:
+            o3 = o2
+            parents = None
+        elif numpar == 1:
+            o3 = o2 + nodesize
+            parents = (data[o2:o3],)
+        else:
+            o3 = o2 + nodesize * numpar
+            parents = unpack(nodefmt * numpar, data[o2:o3])

        # read metadata
        off = o3 + metasize * nummeta
        metapairsize = unpack(b'>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in pycompat.xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
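
A minimal round-trip sketch through the rewritten pure parser (private helpers again; note the timezone offset must be a multiple of 60 to survive the minute-granularity storage exactly):

    from mercurial import obsolete

    marker = (
        b'\x11' * 20,            # predecessor (SHA-1 sized, flags bit unset)
        (b'\x22' * 20,),         # one successor
        0,                       # flags
        ((b'user', b'alice'),),  # metadata pairs
        (0.0, 0),                # date: (seconds, tz offset in seconds)
        None,                    # no parent data recorded
    )
    data = obsolete._fm1encodeonemarker(marker)
    assert list(obsolete._fm1purereadmarkers(data, 0, len(data))) == [marker]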


def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
-    numextranodes = numsuc
+    numextranodes = 1 + numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1] // 60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        if lk > 255:
            msg = (
                b'obsstore metadata key cannot be longer than 255 bytes'
                b' (key "%s" is %u bytes)'
            ) % (key, lk)
            raise error.ProgrammingError(msg)
        if lv > 255:
            msg = (
                b'obsstore metadata value cannot be longer than 255 bytes'
                b' (value "%s" for key "%s" is %u bytes)'
            ) % (value, key, lv)
            raise error.ProgrammingError(msg)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return b''.join(data)


def _fm1readmarkers(data, off, stop):
    native = getattr(parsers, 'fm1readmarkers', None)
    if not native:
        return _fm1purereadmarkers(data, off, stop)
    return native(data, off, stop)


# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {
    _fm0version: (_fm0readmarkers, _fm0encodeonemarker),
    _fm1version: (_fm1readmarkers, _fm1encodeonemarker),
}


def _readmarkerversion(data):
    return _unpack(b'>B', data[0:1])[0]


@util.nogc
def _readmarkers(data, off=None, stop=None):
    """Read and enumerate markers from raw data"""
    diskversion = _readmarkerversion(data)
    if not off:
        off = 1  # skip 1 byte version number
    if stop is None:
        stop = len(data)
    if diskversion not in formats:
        msg = _(b'parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    return diskversion, formats[diskversion][0](data, off, stop)


def encodeheader(version=_fm0version):
    return _pack(b'>B', version)


def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for marker in markers:
        yield encodeone(marker)
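
Putting the pieces together, a sketch of a whole-stream round trip through the version-dispatching helpers (the read side may be served by the native C parser when available, which is expected to yield the same tuples):

    from mercurial import obsolete

    marker = (b'\x11' * 20, (b'\x22' * 20,), 0, (), (0.0, 0), None)
    stream = b''.join(
        obsolete.encodemarkers(
            [marker], addheader=True, version=obsolete._fm1version
        )
    )
    version, markers = obsolete._readmarkers(stream)
    assert version == obsolete._fm1version
    assert list(markers) == [marker]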


@util.nogc
def _addsuccessors(successors, markers):
    for mark in markers:
        successors.setdefault(mark[0], set()).add(mark)


@util.nogc
def _addpredecessors(predecessors, markers):
    for mark in markers:
        for suc in mark[1]:
            predecessors.setdefault(suc, set()).add(mark)


@util.nogc
def _addchildren(children, markers):
    for mark in markers:
        parents = mark[5]
        if parents is not None:
            for p in parents:
                children.setdefault(p, set()).add(mark)


def _checkinvalidmarkers(markers):
    """search for markers with invalid data and raise an error if needed

    Exists as a separate function to allow the evolve extension a more
    subtle handling.
    """
    for mark in markers:
        if node.nullid in mark[1]:
            raise error.Abort(
                _(
                    b'bad obsolescence marker detected: '
                    b'invalid successors nullid'
                )
            )
552
540
553 class obsstore(object):
541 class obsstore(object):
554 """Store obsolete markers
542 """Store obsolete markers
555
543
556 Markers can be accessed with two mappings:
544 Markers can be accessed with two mappings:
557 - predecessors[x] -> set(markers on predecessors edges of x)
545 - predecessors[x] -> set(markers on predecessors edges of x)
558 - successors[x] -> set(markers on successors edges of x)
546 - successors[x] -> set(markers on successors edges of x)
559 - children[x] -> set(markers on predecessors edges of children(x)
547 - children[x] -> set(markers on predecessors edges of children(x)
560 """
548 """
561
549
562 fields = (b'prec', b'succs', b'flag', b'meta', b'date', b'parents')
550 fields = (b'prec', b'succs', b'flag', b'meta', b'date', b'parents')
563 # prec: nodeid, predecessors changesets
551 # prec: nodeid, predecessors changesets
564 # succs: tuple of nodeid, successor changesets (0-N length)
552 # succs: tuple of nodeid, successor changesets (0-N length)
565 # flag: integer, flag field carrying modifier for the markers (see doc)
553 # flag: integer, flag field carrying modifier for the markers (see doc)
566 # meta: binary blob in UTF-8, encoded metadata dictionary
554 # meta: binary blob in UTF-8, encoded metadata dictionary
567 # date: (float, int) tuple, date of marker creation
555 # date: (float, int) tuple, date of marker creation
568 # parents: (tuple of nodeid) or None, parents of predecessors
556 # parents: (tuple of nodeid) or None, parents of predecessors
569 # None is used when no data has been recorded
557 # None is used when no data has been recorded
570
558
571 def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
559 def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
572 # caches for various obsolescence related cache
560 # caches for various obsolescence related cache
573 self.caches = {}
561 self.caches = {}
574 self.svfs = svfs
562 self.svfs = svfs
575 self._defaultformat = defaultformat
563 self._defaultformat = defaultformat
576 self._readonly = readonly
564 self._readonly = readonly
577
565
578 def __iter__(self):
566 def __iter__(self):
579 return iter(self._all)
567 return iter(self._all)
580
568
581 def __len__(self):
569 def __len__(self):
582 return len(self._all)
570 return len(self._all)
583
571
584 def __nonzero__(self):
572 def __nonzero__(self):
585 if not self._cached('_all'):
573 if not self._cached('_all'):
586 try:
574 try:
587 return self.svfs.stat(b'obsstore').st_size > 1
575 return self.svfs.stat(b'obsstore').st_size > 1
588 except OSError as inst:
576 except OSError as inst:
589 if inst.errno != errno.ENOENT:
577 if inst.errno != errno.ENOENT:
590 raise
578 raise
591 # just build an empty _all list if no obsstore exists, which
579 # just build an empty _all list if no obsstore exists, which
592 # avoids further stat() syscalls
580 # avoids further stat() syscalls
593 return bool(self._all)
581 return bool(self._all)
594
582
595 __bool__ = __nonzero__
583 __bool__ = __nonzero__
596
584
597 @property
585 @property
598 def readonly(self):
586 def readonly(self):
599 """True if marker creation is disabled
587 """True if marker creation is disabled
600
588
601 Remove me in the future when obsolete marker is always on."""
589 Remove me in the future when obsolete marker is always on."""
602 return self._readonly
590 return self._readonly

    def create(
        self,
        transaction,
        prec,
        succs=(),
        flag=0,
        parents=None,
        date=None,
        metadata=None,
        ui=None,
    ):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating markers you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker has been added, False if the markers
        already existed (no op).
        """
+        flag = int(flag)
        if metadata is None:
            metadata = {}
        if date is None:
            if b'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = dateutil.parsedate(metadata.pop(b'date'))
            elif ui is not None:
                date = ui.configdate(b'devel', b'default-date')
                if date is None:
                    date = dateutil.makedate()
            else:
                date = dateutil.makedate()
-        if len(prec) != 20:
-            raise ValueError(prec)
-        for succ in succs:
-            if len(succ) != 20:
-                raise ValueError(succ)
+        if flag & usingsha256:
+            if len(prec) != 32:
+                raise ValueError(prec)
+            for succ in succs:
+                if len(succ) != 32:
+                    raise ValueError(succ)
+        else:
+            if len(prec) != 20:
+                raise ValueError(prec)
+            for succ in succs:
+                if len(succ) != 20:
+                    raise ValueError(succ)
        if prec in succs:
            raise ValueError(
                'in-marker cycle with %s' % pycompat.sysstr(node.hex(prec))
            )

        metadata = tuple(sorted(pycompat.iteritems(metadata)))
        for k, v in metadata:
            try:
                # might be better to reject non-ASCII keys
                k.decode('utf-8')
                v.decode('utf-8')
            except UnicodeDecodeError:
                raise error.ProgrammingError(
                    b'obsstore metadata must be valid UTF-8 sequence '
                    b'(key = %r, value = %r)'
                    % (pycompat.bytestr(k), pycompat.bytestr(v))
                )

-        marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
+        marker = (bytes(prec), tuple(succs), flag, metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicates.
        Return the number of new markers."""
        if self._readonly:
            raise error.Abort(
                _(b'creating obsolete markers is not enabled on this repo')
            )
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs(b'obsstore', b'ab')
            try:
                offset = f.tell()
                transaction.add(b'obsstore', offset)
                # offset == 0: new file - add the version header
                data = b''.join(encodemarkers(new, offset == 0, self._version))
                f.write(data)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            addedmarkers = transaction.changes.get(b'obsmarkers')
            if addedmarkers is not None:
                addedmarkers.update(new)
            self._addmarkers(new, data)
            # new markers *may* have changed several sets. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get(b'new_obsmarkers', b'0'))
        transaction.hookargs[b'new_obsmarkers'] = b'%d' % (previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        return self.svfs.tryread(b'obsstore')

    @propertycache
    def _version(self):
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def predecessors(self):
        predecessors = {}
        _addpredecessors(predecessors, self._all)
        return predecessors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        return attr in self.__dict__

    def _addmarkers(self, markers, rawdata):
        markers = list(markers)  # to allow repeated iteration
        self._data = self._data + rawdata
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('predecessors'):
            _addpredecessors(self.predecessors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use this changeset as a successor
        - prune markers of direct children of this changeset
        - recursive application of the two rules on predecessors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.predecessors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = {m[0] for m in direct}
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers


def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    defaultformat = ui.configint(b'format', b'obsstore-version')
    # rely on obsstore class default when possible.
    kwargs = {}
    if defaultformat is not None:
        kwargs['defaultformat'] = defaultformat
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **kwargs)
    if store and readonly:
        ui.warn(
            _(b'obsolete feature not enabled but %i markers found!\n')
            % len(list(store))
        )
    return store


def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for the highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None
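
For instance, if a peer advertised a hypothetical newer format alongside the known ones, negotiation would settle on the newest format known locally (version 2 below is made up):

    from mercurial import obsolete

    assert obsolete.commonversion([2, 1, 0]) == 1
    assert obsolete.commonversion([42]) is None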


# arbitrarily picked to fit into 8K limit from HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300


def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if len(nextdata) + currentlen > _maxpayload:
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = b''.join([_pack(b'>B', _fm0version)] + part)
        keys[b'dump%i' % idx] = util.b85encode(data)
    return keys
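
A sketch of the chunking behaviour: a handful of small markers fits in a single part, keyed 'dump0', and each part is a self-contained v0 stream starting with the version byte:

    from mercurial import obsolete, util

    markers = [
        (b'\x11' * 20, (), 0, (), (0.0, 0), None),
        (b'\x22' * 20, (), 0, (), (0.0, 0), None),
    ]
    keys = obsolete._pushkeyescape(markers)
    assert sorted(keys) == [b'dump0']
    assert util.b85decode(keys[b'dump0'])[:1] == b'\x00'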


def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(sorted(repo.obsstore))


def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith(b'dump'):
        repo.ui.warn(_(b'unknown key: %r') % key)
        return False
    if old:
        repo.ui.warn(_(b'unexpected old value for %r') % key)
        return False
    data = util.b85decode(new)
    with repo.lock(), repo.transaction(b'pushkey: obsolete markers') as tr:
        repo.obsstore.mergemarkers(tr, data)
        repo.invalidatevolatilesets()
        return True


# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}


def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""

    def decorator(func):
        if name in cachefuncs:
            msg = b"duplicated registration for volatileset '%s' (existing: %r)"
            raise error.ProgrammingError(msg % (name, cachefuncs[name]))
        cachefuncs[name] = func
        return func

    return decorator


def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    with util.timedcm('getrevs %s', name):
        if not repo.obsstore:
            return frozenset()
        if name not in repo.obsstore.caches:
            repo.obsstore.caches[name] = cachefuncs[name](repo)
        return repo.obsstore.caches[name]


# To be simple we need to invalidate obsolescence caches when:
#
# - a new changeset is added
# - public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists
    on the repo.

    (We could be smarter here given the exact event that triggered the cache
    clearing)"""
    # only clear caches if there is obsstore data in this repo
    if b'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()
929
925
930
926
931 def _mutablerevs(repo):
927 def _mutablerevs(repo):
932 """the set of mutable revision in the repository"""
928 """the set of mutable revision in the repository"""
933 return repo._phasecache.getrevset(repo, phases.mutablephases)
929 return repo._phasecache.getrevset(repo, phases.mutablephases)
934
930
935
931
936 @cachefor(b'obsolete')
932 @cachefor(b'obsolete')
937 def _computeobsoleteset(repo):
933 def _computeobsoleteset(repo):
938 """the set of obsolete revisions"""
934 """the set of obsolete revisions"""
939 getnode = repo.changelog.node
935 getnode = repo.changelog.node
940 notpublic = _mutablerevs(repo)
936 notpublic = _mutablerevs(repo)
941 isobs = repo.obsstore.successors.__contains__
937 isobs = repo.obsstore.successors.__contains__
942 obs = {r for r in notpublic if isobs(getnode(r))}
938 obs = {r for r in notpublic if isobs(getnode(r))}
943 return obs
939 return obs
944
940
945
941
@cachefor(b'orphan')
def _computeorphanset(repo):
    """the set of non-obsolete revisions with obsolete parents"""
    pfunc = repo.changelog.parentrevs
    mutable = _mutablerevs(repo)
    obsolete = getrevs(repo, b'obsolete')
    others = mutable - obsolete
    unstable = set()
    for r in sorted(others):
        # A rev is unstable if one of its parents is obsolete or unstable.
        # This works because we traverse in increasing rev order: parents
        # are always classified before their children. (See the toy pass
        # sketched below.)
        for p in pfunc(r):
            if p in obsolete or p in unstable:
                unstable.add(r)
                break
    return unstable


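# Toy illustration (assumed data, no repo needed) of why a single pass in
# increasing revision order is enough: parents always have smaller revision
# numbers than their children, so by the time a rev is visited, the orphan
# status of both of its parents is already settled.
def _example_orphanpass():
    """Hypothetical demo of the single-pass orphan propagation."""
    # rev -> (p1, p2); -1 plays the role of nullrev
    parentrevs = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (2, -1)}
    obsolete = {1}  # pretend rev 1 was rewritten
    unstable = set()
    for r in sorted(parentrevs):
        if r in obsolete:
            continue
        if any(p in obsolete or p in unstable for p in parentrevs[r]):
            unstable.add(r)
    return unstable  # {2, 3}: both descend from the obsolete rev 1

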
@cachefor(b'suspended')
def _computesuspendedset(repo):
    """the set of obsolete revisions with non-obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, b'orphan'))
    return {r for r in getrevs(repo, b'obsolete') if r in suspended}


@cachefor(b'extinct')
def _computeextinctset(repo):
    """the set of obsolete revisions without non-obsolete descendants"""
    return getrevs(repo, b'obsolete') - getrevs(repo, b'suspended')


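# Toy illustration (assumed sets): 'suspended' and 'extinct' partition the
# obsolete set, mirroring the two functions above. An obsolete rev stays
# suspended while an orphan still descends from it and becomes extinct once
# nothing live depends on it.
def _example_partition():
    """Hypothetical demo: obsolete = suspended (disjoint union) extinct."""
    obsolete = {2, 3, 5}
    suspended = {2, 3}  # pretend orphans still descend from revs 2 and 3
    extinct = obsolete - suspended
    assert extinct == {5} and not (suspended & extinct)
    return extinct

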
@cachefor(b'phasedivergent')
def _computephasedivergentset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase  # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.index.get_rev
    tonode = cl.node
    obsstore = repo.obsstore
    for rev in repo.revs(b'(not public()) and (not obsolete())'):
        # We only evaluate mutable, non-obsolete revisions
        node = tonode(rev)
        # (future) A cache of predecessors may be worth it if split is very
        # common
        for pnode in obsutil.allpredecessors(
            obsstore, [node], ignoreflags=bumpedfix
        ):
            prev = torev(pnode)  # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public predecessor
                bumped.add(rev)
                break  # Next draft!
    return bumped


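# Toy illustration (assumed data) of the phase-divergence test above: walk
# a draft rev's predecessor chain and flag the rev as soon as any
# predecessor turns out to be public.
def _example_phasedivergent():
    """Hypothetical demo: B rewrites A, but A has since become public."""
    public_nodes = {b'A'}          # A was pushed and became immutable
    predecessors = {b'B': [b'A']}  # but B claims to rewrite A
    bumped = set()
    for node_ in (b'B',):
        stack = list(predecessors.get(node_, []))
        while stack:
            pred = stack.pop()
            if pred in public_nodes:
                bumped.add(node_)
                break
            stack.extend(predecessors.get(pred, []))
    return bumped  # {b'B'}: B is phase-divergent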


@cachefor(b'contentdivergent')
def _computecontentdivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision."""
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    tonode = repo.changelog.node
    for rev in repo.revs(b'(not public()) - obsolete()'):
        node = tonode(rev)
        mark = obsstore.predecessors.get(node, ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue  # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                obsutil.successorssets(repo, prec, cache=newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(rev)
                break
            toprocess.update(obsstore.predecessors.get(prec, ()))
    return divergent


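# Toy illustration (assumed markers): content divergence arises when the
# same predecessor has more than one non-empty successors set, i.e. two
# surviving rewrites compete. With markers A->B and A->C:
def _example_contentdivergent():
    """Hypothetical demo of the len(newer) > 1 test above."""
    successorssets = {b'A': [[b'B'], [b'C']]}  # two competing rewrites of A
    newer = [s for s in successorssets[b'A'] if s]
    return len(newer) > 1  # True: both B and C are content-divergent

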
def makefoldid(relation, user):
    """Build the short identifier shared by all markers of a single fold"""
    folddigest = hashutil.sha1(user)
    for p in relation[0] + relation[1]:
        folddigest.update(b'%d' % p.rev())
        folddigest.update(p.node())
    # Since a fold only has to compete against folds for the same successors,
    # it seems fine to use a small ID. Smaller IDs save space.
    return node.hex(folddigest.digest())[:8]


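# Standalone sketch of the digest scheme above (hashlib only, no repo):
# hash the user, then each changeset's rev number and node, and keep the
# first 8 hex digits. The user string and rev/node pairs here are made up.
def _example_foldid():
    """Hypothetical demo: recompute a fold id from raw rev/node pairs."""
    import hashlib

    digest = hashlib.sha1(b'alice <alice@example.com>')
    for rev, nodeid in [(5, b'\x01' * 20), (6, b'\x02' * 20)]:
        digest.update(b'%d' % rev)
        digest.update(nodeid)
    return digest.hexdigest()[:8].encode('ascii')

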
def createmarkers(
    repo, relations, flag=0, date=None, metadata=None, operation=None
):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of ((<old>,...), (<new>, ...)[,{metadata}])
    tuples. `old` and `new` are changectx objects. metadata is an optional
    dictionary containing metadata for this marker only. It is merged with
    the global metadata specified through the `metadata` argument of this
    function. Any string values in metadata must be UTF-8 bytes.

    Trying to obsolete a public changeset will raise an exception.

    The current user and date are used unless specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo. (See the usage sketches after this
    function.)
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if b'user' not in metadata:
        luser = (
            repo.ui.config(b'devel', b'user.obsmarker') or repo.ui.username()
        )
        metadata[b'user'] = encoding.fromlocal(luser)

    # Operation metadata handling
    useoperation = repo.ui.configbool(
        b'experimental', b'evolution.track-operation'
    )
    if useoperation and operation:
        metadata[b'operation'] = operation

    # Effect flag metadata handling
    saveeffectflag = repo.ui.configbool(
        b'experimental', b'evolution.effect-flags'
    )

    with repo.transaction(b'add-obsolescence-marker') as tr:
        markerargs = []
        for rel in relations:
            predecessors = rel[0]
            if not isinstance(predecessors, tuple):
                # preserve compat with the old API until all callers are
                # migrated
                predecessors = (predecessors,)
            if len(predecessors) > 1 and len(rel[1]) != 1:
                msg = b'Fold markers can only have 1 successor, not %d'
                raise error.ProgrammingError(msg % len(rel[1]))
            foldid = None
            foldsize = len(predecessors)
            if 1 < foldsize:
                foldid = makefoldid(rel, metadata[b'user'])
            for foldidx, prec in enumerate(predecessors, 1):
                sucs = rel[1]
                localmetadata = metadata.copy()
                if len(rel) > 2:
                    localmetadata.update(rel[2])
                if foldid is not None:
                    localmetadata[b'fold-id'] = foldid
                    localmetadata[b'fold-idx'] = b'%d' % foldidx
                    localmetadata[b'fold-size'] = b'%d' % foldsize

                if not prec.mutable():
                    raise error.Abort(
                        _(b"cannot obsolete public changeset: %s") % prec,
                        hint=b"see 'hg help phases' for details",
                    )
                nprec = prec.node()
                nsucs = tuple(s.node() for s in sucs)
                npare = None
                if not nsucs:
                    npare = tuple(p.node() for p in prec.parents())
                if nprec in nsucs:
                    raise error.Abort(
                        _(b"changeset %s cannot obsolete itself") % prec
                    )

                # The effect flag can differ from relation to relation
                if saveeffectflag:
                    # The effect flag is saved in a versioned field name for
                    # future evolution
                    effectflag = obsutil.geteffectflag(prec, sucs)
                    localmetadata[obsutil.EFFECTFLAGFIELD] = b"%d" % effectflag

                # Creating the marker causes the hidden cache to become
                # invalid, which causes recomputation when we ask for
                # prec.parents() above, resulting in n^2 behavior. So let's
                # prepare all of the args first, then create the markers.
                markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(
                tr,
                nprec,
                nsucs,
                flag,
                parents=npare,
                date=date,
                metadata=localmetadata,
                ui=repo.ui,
            )
        repo.filteredrevcache.clear()
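

# Illustrative sketches (not part of the original module) of the caller
# shape documented in the docstring above. Each relation pairs a tuple of
# predecessor changectxs with a tuple of successor changectxs; an empty
# successor tuple records a prune.
def _example_amend_marker(repo, old, new):
    """Hypothetical helper: record that `old` was amended into `new`."""
    createmarkers(repo, [((old,), (new,))], operation=b'amend')


def _example_prune_marker(repo, ctx):
    """Hypothetical helper: record that `ctx` was pruned (no successor)."""
    createmarkers(repo, [((ctx,), ())], operation=b'prune')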