index: use `index.get_rev` in `obsolete._computephasedivergentset`...
Author: marmoute
Changeset: r43958:1542773f (branch: default)
@@ -1,1144 +1,1144 @@
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "predecessor" and possible
replacements are called "successors". Markers that used changeset X as
a predecessor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "predecessor markers of Y" because they hold
information about the predecessors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

  (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. Marker format depends on the version.
See the comment associated with each format for details.

"""
from __future__ import absolute_import

import errno
import hashlib
import struct

from .i18n import _
from .pycompat import getattr
from . import (
    encoding,
    error,
    node,
    obsutil,
    phases,
    policy,
    pycompat,
    util,
)
from .utils import dateutil

parsers = policy.importmod('parsers')

_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# Options for obsolescence
createmarkersopt = b'createmarkers'
allowunstableopt = b'allowunstable'
exchangeopt = b'exchange'


def _getoptionvalue(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    configkey = b'evolution.%s' % option
    newconfig = repo.ui.configbool(b'experimental', configkey)

    # Return the value only if defined
    if newconfig is not None:
        return newconfig

    # Fallback on generic option
    try:
        return repo.ui.configbool(b'experimental', b'evolution')
    except (error.ConfigError, AttributeError):
        # Fallback on old-fashioned config
        # inconsistent config: experimental.evolution
        result = set(repo.ui.configlist(b'experimental', b'evolution'))

        if b'all' in result:
            return True

        # Temporary hack for next check
        newconfig = repo.ui.config(b'experimental', b'evolution.createmarkers')
        if newconfig:
            result.add(b'createmarkers')

        return option in result


def getoptions(repo):
    """Returns dicts showing state of obsolescence features."""

    createmarkersvalue = _getoptionvalue(repo, createmarkersopt)
    unstablevalue = _getoptionvalue(repo, allowunstableopt)
    exchangevalue = _getoptionvalue(repo, exchangeopt)

    # createmarkers must be enabled if other options are enabled
    if (unstablevalue or exchangevalue) and not createmarkersvalue:
        raise error.Abort(
            _(
                b"'createmarkers' obsolete option must be enabled "
                b"if other obsolete options are enabled"
            )
        )

    return {
        createmarkersopt: createmarkersvalue,
        allowunstableopt: unstablevalue,
        exchangeopt: exchangevalue,
    }


def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    return getoptions(repo)[option]


# Creating aliases for marker flags because evolve extension looks for
# bumpedfix in obsolete.py
bumpedfix = obsutil.bumpedfix
usingsha256 = obsutil.usingsha256

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = b'>BIB20s'
_fm0node = b'20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)


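# Editorial sketch, not part of the upstream file: round-tripping the
# fixed-width part of a version 0 marker through the format constants above.
def _fm0fixedpartexample():
    numsuc, mdsize, flags, pre = 1, 0, 0, b'\x11' * 20
    raw = _pack(_fm0fixed, numsuc, mdsize, flags, pre)
    assert _unpack(_fm0fixed, raw) == (numsuc, mdsize, flags, pre)

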
def _fm0readmarkers(data, off, stop):
    # Loop on markers
    while off < stop:
        # read fixed part
        cur = data[off : off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = _fm0fnodesize * numsuc
            cur = data[off : off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off : off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(
                _(
                    b'parsing obsolete marker: metadata is too '
                    b'short, %d bytes expected, got %d'
                )
                % (mdsize, len(metadata))
            )
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop(b'date', b'0 0').split(b' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0.0, 0)
        parents = None
        if b'p2' in metadata:
            parents = (metadata.pop(b'p1', None), metadata.pop(b'p2', None))
        elif b'p1' in metadata:
            parents = (metadata.pop(b'p1', None),)
        elif b'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to a nodeid, drop the data.
                parents = None

        metadata = tuple(sorted(pycompat.iteritems(metadata)))

        yield (pre, sucs, flags, metadata, date, parents)


def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_(b'cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata[b'date'] = b'%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata[b'p0'] = b''
        for i, p in enumerate(parents, 1):
            metadata[b'p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata


def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in pycompat.iteritems(meta):
        if b':' in key or b'\0' in key:
            raise ValueError(b"':' and '\0' are forbidden in metadata keys")
        if b'\0' in value:
            raise ValueError(b"'\0' is forbidden in metadata values")
    return b'\0'.join([b'%s:%s' % (k, meta[k]) for k in sorted(meta)])


def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split(b'\0'):
        if l:
            key, value = l.split(b':', 1)
            d[key] = value
    return d


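# Editorial sketch, not part of the upstream file: the two helpers above are
# inverses for well-formed input.
def _fm0metaroundtripexample():
    meta = {b'user': b'alice', b'operation': b'amend'}
    assert _fm0decodemeta(_fm0encodemeta(meta)) == meta

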
## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: predecessor changeset identifier.
#
# - N*(20 or 32) bytes: successors changesets identifiers.
#
# - P*(20 or 32) bytes: parents of the predecessors changesets.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = b'>IdhHBBB20s'
_fm1nodesha1 = b'20s'
_fm1nodesha256 = b'32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = _fm1parentnone << _fm1parentshift
_fm1metapair = b'BB'
_fm1metapairsize = _calcsize(_fm1metapair)


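# Editorial note, not part of the upstream file: the big-endian format string
# above has no struct padding, so the fixed part of a version 1 marker is
# 4 + 8 + 2 + 2 + 1 + 1 + 1 + 20 = 39 bytes; the leading uint32 total size
# lets a reader skip a marker without decoding its variable-length tail.
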
def _fm1purereadmarkers(data, off, stop):
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    ufixed = struct.Struct(_fm1fixed).unpack

    while off < stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata
        off = o3 + metasize * nummeta
        metapairsize = unpack(b'>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in pycompat.xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)


def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1] // 60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        if lk > 255:
            msg = (
                b'obsstore metadata key cannot be longer than 255 bytes'
                b' (key "%s" is %u bytes)'
            ) % (key, lk)
            raise error.ProgrammingError(msg)
        if lv > 255:
            msg = (
                b'obsstore metadata value cannot be longer than 255 bytes'
                b' (value "%s" for key "%s" is %u bytes)'
            ) % (value, key, lv)
            raise error.ProgrammingError(msg)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return b''.join(data)


def _fm1readmarkers(data, off, stop):
    native = getattr(parsers, 'fm1readmarkers', None)
    if not native:
        return _fm1purereadmarkers(data, off, stop)
    return native(data, off, stop)


# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {
    _fm0version: (_fm0readmarkers, _fm0encodeonemarker),
    _fm1version: (_fm1readmarkers, _fm1encodeonemarker),
}


def _readmarkerversion(data):
    return _unpack(b'>B', data[0:1])[0]


@util.nogc
def _readmarkers(data, off=None, stop=None):
    """Read and enumerate markers from raw data"""
    diskversion = _readmarkerversion(data)
    if not off:
        off = 1  # skip 1 byte version number
    if stop is None:
        stop = len(data)
    if diskversion not in formats:
        msg = _(b'parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    return diskversion, formats[diskversion][0](data, off, stop)


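# Editorial sketch, not part of the upstream file: a version 1 marker survives
# an encode/decode round trip through the pure-Python codec above.
def _fm1roundtripexample():
    marker = (
        b'\x11' * 20,  # predecessor
        (b'\x22' * 20,),  # successors
        0,  # flags
        ((b'user', b'alice'),),  # metadata
        (0.0, 0),  # date
        None,  # parents (not recorded)
    )
    data = _fm1encodeonemarker(marker)
    assert list(_fm1purereadmarkers(data, 0, len(data))) == [marker]

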
def encodeheader(version=_fm0version):
    return _pack(b'>B', version)


def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for marker in markers:
        yield encodeone(marker)


@util.nogc
def _addsuccessors(successors, markers):
    for mark in markers:
        successors.setdefault(mark[0], set()).add(mark)


@util.nogc
def _addpredecessors(predecessors, markers):
    for mark in markers:
        for suc in mark[1]:
            predecessors.setdefault(suc, set()).add(mark)


@util.nogc
def _addchildren(children, markers):
    for mark in markers:
        parents = mark[5]
        if parents is not None:
            for p in parents:
                children.setdefault(p, set()).add(mark)


def _checkinvalidmarkers(markers):
    """search for markers with invalid data and raise an error if needed

    Exists as a separate function to allow the evolve extension a more
    subtle handling.
    """
    for mark in markers:
        if node.nullid in mark[1]:
            raise error.Abort(
                _(
                    b'bad obsolescence marker detected: '
                    b'invalid successors nullid'
                )
            )


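# Editorial note, not part of the upstream file: the mark[0]/mark[1]/mark[5]
# subscripts used by the _add* helpers above index into the marker tuple
# documented by obsstore.fields below:
# (prec, succs, flag, meta, date, parents).
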
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - predecessors[x] -> set(markers on predecessors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on predecessors edges of children(x))
    """

    fields = (b'prec', b'succs', b'flag', b'meta', b'date', b'parents')
    # prec:    nodeid, predecessor changeset
    # succs:   tuple of nodeid, successor changesets (0-N length)
    # flag:    integer, flag field carrying modifier for the markers (see doc)
    # meta:    binary blob in UTF-8, encoded metadata dictionary
    # date:    (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of predecessors
    #          None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related sets
        self.caches = {}
        self.svfs = svfs
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        if not self._cached('_all'):
            try:
                return self.svfs.stat(b'obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(
        self,
        transaction,
        prec,
        succs=(),
        flag=0,
        parents=None,
        date=None,
        metadata=None,
        ui=None,
    ):
        """obsolete: add a new obsolete marker

        * ensure it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating markers you want to use the
        `createmarkers` function in this module instead.

        Return True if a new marker has been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if b'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = dateutil.parsedate(metadata.pop(b'date'))
            elif ui is not None:
                date = ui.configdate(b'devel', b'default-date')
                if date is None:
                    date = dateutil.makedate()
            else:
                date = dateutil.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(
                'in-marker cycle with %s' % pycompat.sysstr(node.hex(prec))
            )

        metadata = tuple(sorted(pycompat.iteritems(metadata)))
        for k, v in metadata:
            try:
                # might be better to reject non-ASCII keys
                k.decode('utf-8')
                v.decode('utf-8')
            except UnicodeDecodeError:
                raise error.ProgrammingError(
                    b'obsstore metadata must be valid UTF-8 sequence '
                    b'(key = %r, value = %r)'
                    % (pycompat.bytestr(k), pycompat.bytestr(v))
                )

        marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicates.
        Return the number of new markers."""
        if self._readonly:
            raise error.Abort(
                _(b'creating obsolete markers is not enabled on this repo')
            )
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs(b'obsstore', b'ab')
            try:
                offset = f.tell()
                transaction.add(b'obsstore', offset)
                # offset == 0: new file - add the version header
                data = b''.join(encodemarkers(new, offset == 0, self._version))
                f.write(data)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            addedmarkers = transaction.changes.get(b'obsmarkers')
            if addedmarkers is not None:
                addedmarkers.update(new)
            self._addmarkers(new, data)
            # new markers *may* have changed several sets. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get(b'new_obsmarkers', b'0'))
        transaction.hookargs[b'new_obsmarkers'] = b'%d' % (previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        return self.svfs.tryread(b'obsstore')

    @propertycache
    def _version(self):
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def predecessors(self):
        predecessors = {}
        _addpredecessors(predecessors, self._all)
        return predecessors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        return attr in self.__dict__

    def _addmarkers(self, markers, rawdata):
        markers = list(markers)  # to allow repeated iteration
        self._data = self._data + rawdata
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('predecessors'):
            _addpredecessors(self.predecessors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use one of these changesets as a successor
        - prune markers of direct children of these changesets
        - recursive application of the two rules on predecessors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.predecessors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = {m[0] for m in direct}
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers


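# Editorial sketch, not part of the upstream file: a typical caller collects
# the markers relevant to a set of nodes before exchanging them, e.g.:
#
#     markers = repo.obsstore.relevantmarkers(nodes)
#     data = b''.join(encodemarkers(markers, addheader=True))
#
# The resulting set is unordered, as the docstring above notes.
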
def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    defaultformat = ui.configint(b'format', b'obsstore-version')
    # rely on obsstore class default when possible.
    kwargs = {}
    if defaultformat is not None:
        kwargs['defaultformat'] = defaultformat
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **kwargs)
    if store and readonly:
        ui.warn(
            _(b'obsolete feature not enabled but %i markers found!\n')
            % len(list(store))
        )
    return store


def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for the highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None


# arbitrarily picked to fit into the 8K limit from the HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300


def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if len(nextdata) + currentlen > _maxpayload:
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = b''.join([_pack(b'>B', _fm0version)] + part)
        keys[b'dump%i' % idx] = util.b85encode(data)
    return keys


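# Editorial note, not part of the upstream file: base85 expands binary data by
# a factor of 5/4, so a 5300-byte payload encodes to roughly 6625 bytes, which
# leaves headroom for the version header and pushkey overhead under the 8K
# HTTP limit mentioned above.
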
def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(sorted(repo.obsstore))


def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith(b'dump'):
        repo.ui.warn(_(b'unknown key: %r') % key)
        return False
    if old:
        repo.ui.warn(_(b'unexpected old value for %r') % key)
        return False
    data = util.b85decode(new)
    with repo.lock(), repo.transaction(b'pushkey: obsolete markers') as tr:
        repo.obsstore.mergemarkers(tr, data)
        repo.invalidatevolatilesets()
        return True


# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}


def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""

    def decorator(func):
        if name in cachefuncs:
            msg = b"duplicated registration for volatileset '%s' (existing: %r)"
            raise error.ProgrammingError(msg % (name, cachefuncs[name]))
        cachefuncs[name] = func
        return func

    return decorator


def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    with util.timedcm('getrevs %s', name):
        if not repo.obsstore:
            return frozenset()
        if name not in repo.obsstore.caches:
            repo.obsstore.caches[name] = cachefuncs[name](repo)
        return repo.obsstore.caches[name]


# To be simple we need to invalidate obsolescence caches when:
#
# - a new changeset is added
# - public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence related caches from a repo

    This removes all obsstore caches if the obsstore already exists on the
    repo.

    (We could be smarter here given the exact event that triggered the cache
    clearing)"""
    # only clear the cache if there is obsstore data in this repo
    if b'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()


def _mutablerevs(repo):
    """the set of mutable revisions in the repository"""
    return repo._phasecache.getrevset(repo, phases.mutablephases)


934 @cachefor(b'obsolete')
934 @cachefor(b'obsolete')
935 def _computeobsoleteset(repo):
935 def _computeobsoleteset(repo):
936 """the set of obsolete revisions"""
936 """the set of obsolete revisions"""
937 getnode = repo.changelog.node
937 getnode = repo.changelog.node
938 notpublic = _mutablerevs(repo)
938 notpublic = _mutablerevs(repo)
939 isobs = repo.obsstore.successors.__contains__
939 isobs = repo.obsstore.successors.__contains__
940 obs = set(r for r in notpublic if isobs(getnode(r)))
940 obs = set(r for r in notpublic if isobs(getnode(r)))
941 return obs
941 return obs
942
942
943
943
@cachefor(b'orphan')
def _computeorphanset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    pfunc = repo.changelog.parentrevs
    mutable = _mutablerevs(repo)
    obsolete = getrevs(repo, b'obsolete')
    others = mutable - obsolete
    unstable = set()
    for r in sorted(others):
        # A rev is unstable if one of its parents is obsolete or unstable;
        # this works since we traverse in increasing rev order
        for p in pfunc(r):
            if p in obsolete or p in unstable:
                unstable.add(r)
                break
    return unstable


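# A worked example (hypothetical history): with obsolete == {2} and the
# linear graph 2 -> 3 -> 4, the increasing-rev traversal first marks 3
# unstable (its parent 2 is obsolete), then marks 4 unstable (its parent 3
# is already in `unstable`), so instability propagates in a single pass.

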
@cachefor(b'suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, b'orphan'))
    return set(r for r in getrevs(repo, b'obsolete') if r in suspended)


@cachefor(b'extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    return getrevs(repo, b'obsolete') - getrevs(repo, b'suspended')


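# In set terms: `suspended` is computed from the obsolete revisions, so it
# is a subset of `obsolete`; every obsolete revision is therefore either
# suspended (a non-obsolete descendant still exists) or extinct, and the
# two sets never overlap.

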
@cachefor(b'phasedivergent')
def _computephasedivergentset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase  # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.index.get_rev
    tonode = cl.node
    obsstore = repo.obsstore
    for rev in repo.revs(b'(not public()) and (not obsolete())'):
        # We only evaluate mutable, non-obsolete revisions
        node = tonode(rev)
        # (future) A cache of predecessors may be worth it if split is
        # very common
        for pnode in obsutil.allpredecessors(
            obsstore, [node], ignoreflags=bumpedfix
        ):
            prev = torev(pnode)  # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public predecessor
                bumped.add(rev)
                break  # Next draft!
    return bumped


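# A hedged note on the node-to-rev lookup above: `cl.index.get_rev(node)`
# returns the revision number for a known node and None for an unknown
# one, matching the contract of the older `cl.nodemap.get(node)` spelling,
# so the `prev is not None` guard behaves exactly as before.

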
@cachefor(b'contentdivergent')
def _computecontentdivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision."""
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    tonode = repo.changelog.node
    for rev in repo.revs(b'(not public()) - obsolete()'):
        node = tonode(rev)
        mark = obsstore.predecessors.get(node, ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue  # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                obsutil.successorssets(repo, prec, cache=newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(rev)
                break
            toprocess.update(obsstore.predecessors.get(prec, ()))
    return divergent


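# A worked example (hypothetical history): if changeset X is rewritten
# independently as X1 on one machine and as X2 on another, then
# successorssets(X) yields two non-empty sets, so both X1 and X2 end up in
# the content-divergent set computed above.

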
def makefoldid(relation, user):
    folddigest = hashlib.sha1(user)
    for p in relation[0] + relation[1]:
        folddigest.update(b'%d' % p.rev())
        folddigest.update(p.node())
    # Since fold only has to compete against fold for the same successors,
    # it seems fine to use a small ID. Smaller IDs save space.
    return node.hex(folddigest.digest())[:8]


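# A minimal usage sketch (hypothetical changectx objects): for a fold of
# two predecessors into one successor, the 8-hex-digit ID is derived from
# every involved rev number and node plus the user name.
#
#     foldid = makefoldid(((ctx_a, ctx_b), (ctx_c,)), b'alice@example.com')

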
def createmarkers(
    repo, relations, flag=0, date=None, metadata=None, operation=None
):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of ((<old>, ...), (<new>, ...)[, {metadata}])
    tuples. `old` and `new` are changectxs. metadata is an optional
    dictionary containing metadata for this marker only. It is merged
    with the global metadata specified through the `metadata` argument
    of this function. Any string values in metadata must be UTF-8 bytes.

    Trying to obsolete a public changeset will raise an exception.

    The current user and date are used unless specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if b'user' not in metadata:
        luser = (
            repo.ui.config(b'devel', b'user.obsmarker') or repo.ui.username()
        )
        metadata[b'user'] = encoding.fromlocal(luser)

    # Operation metadata handling
    useoperation = repo.ui.configbool(
        b'experimental', b'evolution.track-operation'
    )
    if useoperation and operation:
        metadata[b'operation'] = operation

    # Effect flag metadata handling
    saveeffectflag = repo.ui.configbool(
        b'experimental', b'evolution.effect-flags'
    )

    with repo.transaction(b'add-obsolescence-marker') as tr:
        markerargs = []
        for rel in relations:
            predecessors = rel[0]
            if not isinstance(predecessors, tuple):
                # preserve compat with old API until all callers are migrated
                predecessors = (predecessors,)
            if len(predecessors) > 1 and len(rel[1]) != 1:
                msg = b'Fold markers can only have 1 successor, not %d'
                raise error.ProgrammingError(msg % len(rel[1]))
            foldid = None
            foldsize = len(predecessors)
            if 1 < foldsize:
                foldid = makefoldid(rel, metadata[b'user'])
            for foldidx, prec in enumerate(predecessors, 1):
                sucs = rel[1]
                localmetadata = metadata.copy()
                if len(rel) > 2:
                    localmetadata.update(rel[2])
                if foldid is not None:
                    localmetadata[b'fold-id'] = foldid
                    localmetadata[b'fold-idx'] = b'%d' % foldidx
                    localmetadata[b'fold-size'] = b'%d' % foldsize

                if not prec.mutable():
                    raise error.Abort(
                        _(b"cannot obsolete public changeset: %s") % prec,
                        hint=b"see 'hg help phases' for details",
                    )
                nprec = prec.node()
                nsucs = tuple(s.node() for s in sucs)
                npare = None
                if not nsucs:
                    npare = tuple(p.node() for p in prec.parents())
                if nprec in nsucs:
                    raise error.Abort(
                        _(b"changeset %s cannot obsolete itself") % prec
                    )

                # Effect flag can be different by relation
                if saveeffectflag:
                    # The effect flag is saved in a versioned field name
                    # for future evolution
                    effectflag = obsutil.geteffectflag(prec, sucs)
                    localmetadata[obsutil.EFFECTFLAGFIELD] = b"%d" % effectflag

                # Creating the marker causes the hidden cache to become
                # invalid, which causes recomputation when we ask for
                # prec.parents() above, resulting in n^2 behavior. So let's
                # prepare all of the args first, then create the markers.
                markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(
                tr,
                nprec,
                nsucs,
                flag,
                parents=npare,
                date=date,
                metadata=localmetadata,
                ui=repo.ui,
            )
            repo.filteredrevcache.clear()
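

# A minimal usage sketch (hypothetical changectx names): record that
# `old_ctx` was rewritten into `new_ctx`, tagging the marker with the
# operation that produced it.
#
#     createmarkers(
#         repo,
#         [((old_ctx,), (new_ctx,))],
#         operation=b'amend',
#     )
#
# An empty successor tuple, e.g. [((old_ctx,), ())], records a prune
# marker instead; the pruned changeset's parents are then stored in the
# marker, as the `npare` handling above shows.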