obsstore: break the repo → obsstore → repo loop...
marmoute
r50319:360c156e default
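
This revision replaces the obsstore's strong reference to its repository with a weak one: `repo → obsstore → repo` previously formed a reference cycle that kept both objects alive until Python's cycle collector ran. The new code stores `weakref.ref(repo)` and resolves it through a `repo` property, so existing callers of `self.repo` are unaffected while the cycle disappears; the property raises `error.ProgrammingError` if the repository has already been deallocated. A minimal standalone sketch of the pattern (the `Repo`/`Store` names are illustrative, not Mercurial's):

    import weakref

    class Store:
        def __init__(self, repo):
            # weak back-reference: does not keep the repo alive
            self._repo = weakref.ref(repo)

        @property
        def repo(self):
            repo = self._repo()  # dereference; None once the repo is gone
            if repo is None:
                raise RuntimeError("using the store of a deallocated repo")
            return repo

    class Repo:
        def __init__(self):
            self.store = Store(self)  # repo -> store -> (weak) repo: no cycle

    repo = Repo()
    assert repo.store.repo is repo  # transparent access while the repo lives
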
diff --git a/mercurial/obsolete.py b/mercurial/obsolete.py
--- a/mercurial/obsolete.py
+++ b/mercurial/obsolete.py
@@ -1,1145 +1,1154 @@
 # obsolete.py - obsolete markers handling
 #
 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
 #                Logilab SA        <contact@logilab.fr>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 """Obsolete marker handling

 An obsolete marker maps an old changeset to a list of new
 changesets. If the list of new changesets is empty, the old changeset
 is said to be "killed". Otherwise, the old changeset is being
 "replaced" by the new changesets.

 Obsolete markers can be used to record and distribute changeset graph
 transformations performed by history rewrite operations, and help
 building new tools to reconcile conflicting rewrite actions. To
 facilitate conflict resolution, markers include various annotations
 besides old and new changeset identifiers, such as creation date or
 author name.

 The old obsoleted changeset is called a "predecessor" and possible
 replacements are called "successors". Markers that use changeset X as
 a predecessor are called "successor markers of X" because they hold
 information about the successors of X. Markers that use changeset Y as
 a successor are called "predecessor markers of Y" because they hold
 information about the predecessors of Y.

 Examples:

 - When changeset A is replaced by changeset A', one marker is stored:

     (A, (A',))

 - When changesets A and B are folded into a new changeset C, two markers are
   stored:

     (A, (C,)) and (B, (C,))

 - When changeset A is simply "pruned" from the graph, a marker is created:

     (A, ())

 - When changeset A is split into B and C, a single marker is used:

     (A, (B, C))

   We use a single marker to distinguish the "split" case from the "divergence"
   case. If two independent operations rewrite the same changeset A into A' and
   A'', we have an error case: divergent rewriting. We can detect it because
   two markers will be created independently:

   (A, (B,)) and (A, (C,))

 Format
 ------

 Markers are stored in an append-only file stored in
 '.hg/store/obsstore'.

 The file starts with a version header:

 - 1 unsigned byte: version number, starting at zero.

 The header is followed by the markers. Marker format depends on the version.
 See the comment associated with each format for details.

 """

 import binascii
 import struct
+import weakref

 from .i18n import _
 from .pycompat import getattr
 from .node import (
     bin,
     hex,
 )
 from . import (
     encoding,
     error,
     obsutil,
     phases,
     policy,
     pycompat,
     util,
 )
 from .utils import (
     dateutil,
     hashutil,
 )

 parsers = policy.importmod('parsers')

 _pack = struct.pack
 _unpack = struct.unpack
 _calcsize = struct.calcsize
 propertycache = util.propertycache

 # Options for obsolescence
 createmarkersopt = b'createmarkers'
 allowunstableopt = b'allowunstable'
 allowdivergenceopt = b'allowdivergence'
 exchangeopt = b'exchange'


 def _getoptionvalue(repo, option):
     """Returns True if the given repository has the given obsolete option
     enabled.
     """
     configkey = b'evolution.%s' % option
     newconfig = repo.ui.configbool(b'experimental', configkey)

     # Return the value only if defined
     if newconfig is not None:
         return newconfig

     # Fallback on generic option
     try:
         return repo.ui.configbool(b'experimental', b'evolution')
     except (error.ConfigError, AttributeError):
         # Fallback on old-fashion config
         # inconsistent config: experimental.evolution
         result = set(repo.ui.configlist(b'experimental', b'evolution'))

         if b'all' in result:
             return True

         # Temporary hack for next check
         newconfig = repo.ui.config(b'experimental', b'evolution.createmarkers')
         if newconfig:
             result.add(b'createmarkers')

         return option in result


 def getoptions(repo):
     """Returns dicts showing state of obsolescence features."""

     createmarkersvalue = _getoptionvalue(repo, createmarkersopt)
     if createmarkersvalue:
         unstablevalue = _getoptionvalue(repo, allowunstableopt)
         divergencevalue = _getoptionvalue(repo, allowdivergenceopt)
         exchangevalue = _getoptionvalue(repo, exchangeopt)
     else:
         # if we cannot create obsolescence markers, we shouldn't exchange them
         # or perform operations that lead to instability or divergence
         unstablevalue = False
         divergencevalue = False
         exchangevalue = False

     return {
         createmarkersopt: createmarkersvalue,
         allowunstableopt: unstablevalue,
         allowdivergenceopt: divergencevalue,
         exchangeopt: exchangevalue,
     }


 def isenabled(repo, option):
     """Returns True if the given repository has the given obsolete option
     enabled.
     """
     return getoptions(repo)[option]


 # Creating aliases for marker flags because evolve extension looks for
 # bumpedfix in obsolete.py
 bumpedfix = obsutil.bumpedfix
 usingsha256 = obsutil.usingsha256

 ## Parsing and writing of version "0"
 #
 # The header is followed by the markers. Each marker is made of:
 #
 # - 1 uint8 : number of new changesets "N", can be zero.
 #
 # - 1 uint32: metadata size "M" in bytes.
 #
 # - 1 byte: a bit field. It is reserved for flags used in common
 #   obsolete marker operations, to avoid repeated decoding of metadata
 #   entries.
 #
 # - 20 bytes: obsoleted changeset identifier.
 #
 # - N*20 bytes: new changesets identifiers.
 #
 # - M bytes: metadata as a sequence of nul-terminated strings. Each
 #   string contains a key and a value, separated by a colon ':', without
 #   additional encoding. Keys cannot contain '\0' or ':' and values
 #   cannot contain '\0'.
 _fm0version = 0
 _fm0fixed = b'>BIB20s'
 _fm0node = b'20s'
 _fm0fsize = _calcsize(_fm0fixed)
 _fm0fnodesize = _calcsize(_fm0node)


 def _fm0readmarkers(data, off, stop):
     # Loop on markers
     while off < stop:
         # read fixed part
         cur = data[off : off + _fm0fsize]
         off += _fm0fsize
         numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
         # read replacement
         sucs = ()
         if numsuc:
             s = _fm0fnodesize * numsuc
             cur = data[off : off + s]
             sucs = _unpack(_fm0node * numsuc, cur)
             off += s
         # read metadata
         # (metadata will be decoded on demand)
         metadata = data[off : off + mdsize]
         if len(metadata) != mdsize:
             raise error.Abort(
                 _(
                     b'parsing obsolete marker: metadata is too '
                     b'short, %d bytes expected, got %d'
                 )
                 % (mdsize, len(metadata))
             )
         off += mdsize
         metadata = _fm0decodemeta(metadata)
         try:
             when, offset = metadata.pop(b'date', b'0 0').split(b' ')
             date = float(when), int(offset)
         except ValueError:
             date = (0.0, 0)
         parents = None
         if b'p2' in metadata:
             parents = (metadata.pop(b'p1', None), metadata.pop(b'p2', None))
         elif b'p1' in metadata:
             parents = (metadata.pop(b'p1', None),)
         elif b'p0' in metadata:
             parents = ()
         if parents is not None:
             try:
                 parents = tuple(bin(p) for p in parents)
                 # if parent content is not a nodeid, drop the data
                 for p in parents:
                     if len(p) != 20:
                         parents = None
                         break
             except binascii.Error:
                 # if content cannot be translated to nodeid drop the data.
                 parents = None

         metadata = tuple(sorted(metadata.items()))

         yield (pre, sucs, flags, metadata, date, parents)


 def _fm0encodeonemarker(marker):
     pre, sucs, flags, metadata, date, parents = marker
     if flags & usingsha256:
         raise error.Abort(_(b'cannot handle sha256 with old obsstore format'))
     metadata = dict(metadata)
     time, tz = date
     metadata[b'date'] = b'%r %i' % (time, tz)
     if parents is not None:
         if not parents:
             # mark that we explicitly recorded no parents
             metadata[b'p0'] = b''
         for i, p in enumerate(parents, 1):
             metadata[b'p%i' % i] = hex(p)
     metadata = _fm0encodemeta(metadata)
     numsuc = len(sucs)
     format = _fm0fixed + (_fm0node * numsuc)
     data = [numsuc, len(metadata), flags, pre]
     data.extend(sucs)
     return _pack(format, *data) + metadata


 def _fm0encodemeta(meta):
     """Return encoded metadata string to string mapping.

     Assume no ':' in key and no '\0' in both key and value."""
     for key, value in meta.items():
         if b':' in key or b'\0' in key:
             raise ValueError(b"':' and '\0' are forbidden in metadata key'")
         if b'\0' in value:
             raise ValueError(b"':' is forbidden in metadata value'")
     return b'\0'.join([b'%s:%s' % (k, meta[k]) for k in sorted(meta)])


 def _fm0decodemeta(data):
     """Return string to string dictionary from encoded version."""
     d = {}
     for l in data.split(b'\0'):
         if l:
             key, value = l.split(b':', 1)
             d[key] = value
     return d


 ## Parsing and writing of version "1"
 #
 # The header is followed by the markers. Each marker is made of:
 #
 # - uint32: total size of the marker (including this field)
 #
 # - float64: date in seconds since epoch
 #
 # - int16: timezone offset in minutes
 #
 # - uint16: a bit field. It is reserved for flags used in common
 #   obsolete marker operations, to avoid repeated decoding of metadata
 #   entries.
 #
 # - uint8: number of successors "N", can be zero.
 #
 # - uint8: number of parents "P", can be zero.
 #
 #     0: parents data stored but no parent,
 #     1: one parent stored,
 #     2: two parents stored,
 #     3: no parent data stored
 #
 # - uint8: number of metadata entries M
 #
 # - 20 or 32 bytes: predecessor changeset identifier.
 #
 # - N*(20 or 32) bytes: successors changesets identifiers.
 #
 # - P*(20 or 32) bytes: parents of the predecessors changesets.
 #
 # - M*(uint8, uint8): size of all metadata entries (key and value)
 #
 # - remaining bytes: the metadata, each (key, value) pair after the other.
 _fm1version = 1
 _fm1fixed = b'>IdhHBBB'
 _fm1nodesha1 = b'20s'
 _fm1nodesha256 = b'32s'
 _fm1nodesha1size = _calcsize(_fm1nodesha1)
 _fm1nodesha256size = _calcsize(_fm1nodesha256)
 _fm1fsize = _calcsize(_fm1fixed)
 _fm1parentnone = 3
 _fm1metapair = b'BB'
 _fm1metapairsize = _calcsize(_fm1metapair)


 def _fm1purereadmarkers(data, off, stop):
     # make some global constants local for performance
     noneflag = _fm1parentnone
     sha2flag = usingsha256
     sha1size = _fm1nodesha1size
     sha2size = _fm1nodesha256size
     sha1fmt = _fm1nodesha1
     sha2fmt = _fm1nodesha256
     metasize = _fm1metapairsize
     metafmt = _fm1metapair
     fsize = _fm1fsize
     unpack = _unpack

     # Loop on markers
     ufixed = struct.Struct(_fm1fixed).unpack

     while off < stop:
         # read fixed part
         o1 = off + fsize
         t, secs, tz, flags, numsuc, numpar, nummeta = ufixed(data[off:o1])

         if flags & sha2flag:
             nodefmt = sha2fmt
             nodesize = sha2size
         else:
             nodefmt = sha1fmt
             nodesize = sha1size

         (prec,) = unpack(nodefmt, data[o1 : o1 + nodesize])
         o1 += nodesize

         # read 0 or more successors
         if numsuc == 1:
             o2 = o1 + nodesize
             sucs = (data[o1:o2],)
         else:
             o2 = o1 + nodesize * numsuc
             sucs = unpack(nodefmt * numsuc, data[o1:o2])

         # read parents
         if numpar == noneflag:
             o3 = o2
             parents = None
         elif numpar == 1:
             o3 = o2 + nodesize
             parents = (data[o2:o3],)
         else:
             o3 = o2 + nodesize * numpar
             parents = unpack(nodefmt * numpar, data[o2:o3])

         # read metadata
         off = o3 + metasize * nummeta
         metapairsize = unpack(b'>' + (metafmt * nummeta), data[o3:off])
         metadata = []
         for idx in range(0, len(metapairsize), 2):
             o1 = off + metapairsize[idx]
             o2 = o1 + metapairsize[idx + 1]
             metadata.append((data[off:o1], data[o1:o2]))
             off = o2

         yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)


 def _fm1encodeonemarker(marker):
     pre, sucs, flags, metadata, date, parents = marker
     # determine node size
     _fm1node = _fm1nodesha1
     if flags & usingsha256:
         _fm1node = _fm1nodesha256
     numsuc = len(sucs)
     numextranodes = 1 + numsuc
     if parents is None:
         numpar = _fm1parentnone
     else:
         numpar = len(parents)
         numextranodes += numpar
     formatnodes = _fm1node * numextranodes
     formatmeta = _fm1metapair * len(metadata)
     format = _fm1fixed + formatnodes + formatmeta
     # tz is stored in minutes so we divide by 60
     tz = date[1] // 60
     data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
     data.extend(sucs)
     if parents is not None:
         data.extend(parents)
     totalsize = _calcsize(format)
     for key, value in metadata:
         lk = len(key)
         lv = len(value)
         if lk > 255:
             msg = (
                 b'obsstore metadata key cannot be longer than 255 bytes'
                 b' (key "%s" is %u bytes)'
             ) % (key, lk)
             raise error.ProgrammingError(msg)
         if lv > 255:
             msg = (
                 b'obsstore metadata value cannot be longer than 255 bytes'
                 b' (value "%s" for key "%s" is %u bytes)'
             ) % (value, key, lv)
             raise error.ProgrammingError(msg)
         data.append(lk)
         data.append(lv)
         totalsize += lk + lv
     data[0] = totalsize
     data = [_pack(format, *data)]
     for key, value in metadata:
         data.append(key)
         data.append(value)
     return b''.join(data)


 def _fm1readmarkers(data, off, stop):
     native = getattr(parsers, 'fm1readmarkers', None)
     if not native:
         return _fm1purereadmarkers(data, off, stop)
     return native(data, off, stop)


 # mapping to read/write various marker formats
 # <version> -> (decoder, encoder)
 formats = {
     _fm0version: (_fm0readmarkers, _fm0encodeonemarker),
     _fm1version: (_fm1readmarkers, _fm1encodeonemarker),
 }


 def _readmarkerversion(data):
     return _unpack(b'>B', data[0:1])[0]


 @util.nogc
 def _readmarkers(data, off=None, stop=None):
     """Read and enumerate markers from raw data"""
     diskversion = _readmarkerversion(data)
     if not off:
         off = 1  # skip 1 byte version number
     if stop is None:
         stop = len(data)
     if diskversion not in formats:
         msg = _(b'parsing obsolete marker: unknown version %r') % diskversion
         raise error.UnknownVersion(msg, version=diskversion)
     return diskversion, formats[diskversion][0](data, off, stop)


 def encodeheader(version=_fm0version):
     return _pack(b'>B', version)


 def encodemarkers(markers, addheader=False, version=_fm0version):
     # Kept separate from flushmarkers(), it will be reused for
     # markers exchange.
     encodeone = formats[version][1]
     if addheader:
         yield encodeheader(version)
     for marker in markers:
         yield encodeone(marker)


 @util.nogc
 def _addsuccessors(successors, markers):
     for mark in markers:
         successors.setdefault(mark[0], set()).add(mark)


 @util.nogc
 def _addpredecessors(predecessors, markers):
     for mark in markers:
         for suc in mark[1]:
             predecessors.setdefault(suc, set()).add(mark)


 @util.nogc
 def _addchildren(children, markers):
     for mark in markers:
         parents = mark[5]
         if parents is not None:
             for p in parents:
                 children.setdefault(p, set()).add(mark)


 def _checkinvalidmarkers(repo, markers):
     """search for markers with invalid data and raise error if needed

     Exists as a separate function to allow the evolve extension a more
     subtle handling.
     """
     for mark in markers:
         if repo.nullid in mark[1]:
             raise error.Abort(
                 _(
                     b'bad obsolescence marker detected: '
                     b'invalid successors nullid'
                 )
             )


 class obsstore:
     """Store obsolete markers

     Markers can be accessed with three mappings:
     - predecessors[x] -> set(markers on predecessors edges of x)
     - successors[x] -> set(markers on successors edges of x)
     - children[x] -> set(markers on predecessors edges of children(x))
     """

     fields = (b'prec', b'succs', b'flag', b'meta', b'date', b'parents')
     # prec:    nodeid, predecessors changesets
     # succs:   tuple of nodeid, successor changesets (0-N length)
     # flag:    integer, flag field carrying modifier for the markers (see doc)
     # meta:    binary blob in UTF-8, encoded metadata dictionary
     # date:    (float, int) tuple, date of marker creation
     # parents: (tuple of nodeid) or None, parents of predecessors
     #          None is used when no data has been recorded

     def __init__(self, repo, svfs, defaultformat=_fm1version, readonly=False):
         # caches for various obsolescence related cache
         self.caches = {}
         self.svfs = svfs
-        self.repo = repo
+        self._repo = weakref.ref(repo)
         self._defaultformat = defaultformat
         self._readonly = readonly

+    @property
+    def repo(self):
+        r = self._repo()
+        if r is None:
+            msg = "using the obsstore of a deallocated repo"
+            raise error.ProgrammingError(msg)
+        return r
+
     def __iter__(self):
569 return iter(self._all)
578 return iter(self._all)
570
579
571 def __len__(self):
580 def __len__(self):
572 return len(self._all)
581 return len(self._all)
573
582
574 def __nonzero__(self):
583 def __nonzero__(self):
575 from . import statichttprepo
584 from . import statichttprepo
576
585
577 if isinstance(self.repo, statichttprepo.statichttprepository):
586 if isinstance(self.repo, statichttprepo.statichttprepository):
578 # If repo is accessed via static HTTP, then we can't use os.stat()
587 # If repo is accessed via static HTTP, then we can't use os.stat()
579 # to just peek at the file size.
588 # to just peek at the file size.
580 return len(self._data) > 1
589 return len(self._data) > 1
581 if not self._cached('_all'):
590 if not self._cached('_all'):
582 try:
591 try:
583 return self.svfs.stat(b'obsstore').st_size > 1
592 return self.svfs.stat(b'obsstore').st_size > 1
584 except FileNotFoundError:
593 except FileNotFoundError:
585 # just build an empty _all list if no obsstore exists, which
594 # just build an empty _all list if no obsstore exists, which
586 # avoids further stat() syscalls
595 # avoids further stat() syscalls
587 pass
596 pass
588 return bool(self._all)
597 return bool(self._all)
589
598
590 __bool__ = __nonzero__
599 __bool__ = __nonzero__
591
600
592 @property
601 @property
593 def readonly(self):
602 def readonly(self):
594 """True if marker creation is disabled
603 """True if marker creation is disabled
595
604
596 Remove me in the future when obsolete marker is always on."""
605 Remove me in the future when obsolete marker is always on."""
597 return self._readonly
606 return self._readonly
598
607
599 def create(
608 def create(
600 self,
609 self,
601 transaction,
610 transaction,
602 prec,
611 prec,
603 succs=(),
612 succs=(),
604 flag=0,
613 flag=0,
605 parents=None,
614 parents=None,
606 date=None,
615 date=None,
607 metadata=None,
616 metadata=None,
608 ui=None,
617 ui=None,
609 ):
618 ):
610 """obsolete: add a new obsolete marker
619 """obsolete: add a new obsolete marker
611
620
612 * ensuring it is hashable
621 * ensuring it is hashable
613 * check mandatory metadata
622 * check mandatory metadata
614 * encode metadata
623 * encode metadata
615
624
616 If you are a human writing code creating marker you want to use the
625 If you are a human writing code creating marker you want to use the
617 `createmarkers` function in this module instead.
626 `createmarkers` function in this module instead.
618
627
619 return True if a new marker have been added, False if the markers
628 return True if a new marker have been added, False if the markers
620 already existed (no op).
629 already existed (no op).
621 """
630 """
622 flag = int(flag)
631 flag = int(flag)
623 if metadata is None:
632 if metadata is None:
624 metadata = {}
633 metadata = {}
625 if date is None:
634 if date is None:
626 if b'date' in metadata:
635 if b'date' in metadata:
627 # as a courtesy for out-of-tree extensions
636 # as a courtesy for out-of-tree extensions
628 date = dateutil.parsedate(metadata.pop(b'date'))
637 date = dateutil.parsedate(metadata.pop(b'date'))
629 elif ui is not None:
638 elif ui is not None:
630 date = ui.configdate(b'devel', b'default-date')
639 date = ui.configdate(b'devel', b'default-date')
631 if date is None:
640 if date is None:
632 date = dateutil.makedate()
641 date = dateutil.makedate()
633 else:
642 else:
634 date = dateutil.makedate()
643 date = dateutil.makedate()
635 if flag & usingsha256:
644 if flag & usingsha256:
636 if len(prec) != 32:
645 if len(prec) != 32:
637 raise ValueError(prec)
646 raise ValueError(prec)
638 for succ in succs:
647 for succ in succs:
639 if len(succ) != 32:
648 if len(succ) != 32:
640 raise ValueError(succ)
649 raise ValueError(succ)
641 else:
650 else:
642 if len(prec) != 20:
651 if len(prec) != 20:
643 raise ValueError(prec)
652 raise ValueError(prec)
644 for succ in succs:
653 for succ in succs:
645 if len(succ) != 20:
654 if len(succ) != 20:
646 raise ValueError(succ)
655 raise ValueError(succ)
647 if prec in succs:
656 if prec in succs:
648 raise ValueError('in-marker cycle with %s' % prec.hex())
657 raise ValueError('in-marker cycle with %s' % prec.hex())
649
658
650 metadata = tuple(sorted(metadata.items()))
659 metadata = tuple(sorted(metadata.items()))
651 for k, v in metadata:
660 for k, v in metadata:
652 try:
661 try:
653 # might be better to reject non-ASCII keys
662 # might be better to reject non-ASCII keys
654 k.decode('utf-8')
663 k.decode('utf-8')
655 v.decode('utf-8')
664 v.decode('utf-8')
656 except UnicodeDecodeError:
665 except UnicodeDecodeError:
657 raise error.ProgrammingError(
666 raise error.ProgrammingError(
658 b'obsstore metadata must be valid UTF-8 sequence '
667 b'obsstore metadata must be valid UTF-8 sequence '
659 b'(key = %r, value = %r)'
668 b'(key = %r, value = %r)'
660 % (pycompat.bytestr(k), pycompat.bytestr(v))
669 % (pycompat.bytestr(k), pycompat.bytestr(v))
661 )
670 )
662
671
663 marker = (bytes(prec), tuple(succs), flag, metadata, date, parents)
672 marker = (bytes(prec), tuple(succs), flag, metadata, date, parents)
664 return bool(self.add(transaction, [marker]))
673 return bool(self.add(transaction, [marker]))
665
674
666 def add(self, transaction, markers):
675 def add(self, transaction, markers):
667 """Add new markers to the store
676 """Add new markers to the store
668
677
669 Take care of filtering duplicate.
678 Take care of filtering duplicate.
670 Return the number of new marker."""
679 Return the number of new marker."""
671 if self._readonly:
680 if self._readonly:
672 raise error.Abort(
681 raise error.Abort(
673 _(b'creating obsolete markers is not enabled on this repo')
682 _(b'creating obsolete markers is not enabled on this repo')
674 )
683 )
675 known = set()
684 known = set()
676 getsuccessors = self.successors.get
685 getsuccessors = self.successors.get
677 new = []
686 new = []
678 for m in markers:
687 for m in markers:
679 if m not in getsuccessors(m[0], ()) and m not in known:
688 if m not in getsuccessors(m[0], ()) and m not in known:
680 known.add(m)
689 known.add(m)
681 new.append(m)
690 new.append(m)
682 if new:
691 if new:
683 f = self.svfs(b'obsstore', b'ab')
692 f = self.svfs(b'obsstore', b'ab')
684 try:
693 try:
685 offset = f.tell()
694 offset = f.tell()
686 transaction.add(b'obsstore', offset)
695 transaction.add(b'obsstore', offset)
687 # offset == 0: new file - add the version header
696 # offset == 0: new file - add the version header
688 data = b''.join(encodemarkers(new, offset == 0, self._version))
697 data = b''.join(encodemarkers(new, offset == 0, self._version))
689 f.write(data)
698 f.write(data)
690 finally:
699 finally:
691 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
700 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
692 # call 'filecacheentry.refresh()' here
701 # call 'filecacheentry.refresh()' here
693 f.close()
702 f.close()
694 addedmarkers = transaction.changes.get(b'obsmarkers')
703 addedmarkers = transaction.changes.get(b'obsmarkers')
695 if addedmarkers is not None:
704 if addedmarkers is not None:
696 addedmarkers.update(new)
705 addedmarkers.update(new)
697 self._addmarkers(new, data)
706 self._addmarkers(new, data)
698 # new marker *may* have changed several set. invalidate the cache.
707 # new marker *may* have changed several set. invalidate the cache.
699 self.caches.clear()
708 self.caches.clear()
700 # records the number of new markers for the transaction hooks
709 # records the number of new markers for the transaction hooks
701 previous = int(transaction.hookargs.get(b'new_obsmarkers', b'0'))
710 previous = int(transaction.hookargs.get(b'new_obsmarkers', b'0'))
702 transaction.hookargs[b'new_obsmarkers'] = b'%d' % (previous + len(new))
711 transaction.hookargs[b'new_obsmarkers'] = b'%d' % (previous + len(new))
703 return len(new)
712 return len(new)
704
713
705 def mergemarkers(self, transaction, data):
714 def mergemarkers(self, transaction, data):
706 """merge a binary stream of markers inside the obsstore
715 """merge a binary stream of markers inside the obsstore
707
716
708 Returns the number of new markers added."""
717 Returns the number of new markers added."""
709 version, markers = _readmarkers(data)
718 version, markers = _readmarkers(data)
710 return self.add(transaction, markers)
719 return self.add(transaction, markers)
711
720
712 @propertycache
721 @propertycache
713 def _data(self):
722 def _data(self):
714 return self.svfs.tryread(b'obsstore')
723 return self.svfs.tryread(b'obsstore')
715
724
716 @propertycache
725 @propertycache
717 def _version(self):
726 def _version(self):
718 if len(self._data) >= 1:
727 if len(self._data) >= 1:
719 return _readmarkerversion(self._data)
728 return _readmarkerversion(self._data)
720 else:
729 else:
721 return self._defaultformat
730 return self._defaultformat
722
731
723 @propertycache
732 @propertycache
724 def _all(self):
733 def _all(self):
725 data = self._data
734 data = self._data
726 if not data:
735 if not data:
727 return []
736 return []
728 self._version, markers = _readmarkers(data)
737 self._version, markers = _readmarkers(data)
729 markers = list(markers)
738 markers = list(markers)
730 _checkinvalidmarkers(self.repo, markers)
739 _checkinvalidmarkers(self.repo, markers)
731 return markers
740 return markers
732
741
733 @propertycache
742 @propertycache
734 def successors(self):
743 def successors(self):
735 successors = {}
744 successors = {}
736 _addsuccessors(successors, self._all)
745 _addsuccessors(successors, self._all)
737 return successors
746 return successors
738
747
739 @propertycache
748 @propertycache
740 def predecessors(self):
749 def predecessors(self):
741 predecessors = {}
750 predecessors = {}
742 _addpredecessors(predecessors, self._all)
751 _addpredecessors(predecessors, self._all)
743 return predecessors
752 return predecessors
744
753
745 @propertycache
754 @propertycache
746 def children(self):
755 def children(self):
747 children = {}
756 children = {}
748 _addchildren(children, self._all)
757 _addchildren(children, self._all)
749 return children
758 return children
750
759
751 def _cached(self, attr):
760 def _cached(self, attr):
752 return attr in self.__dict__
761 return attr in self.__dict__
753
762
754 def _addmarkers(self, markers, rawdata):
763 def _addmarkers(self, markers, rawdata):
755 markers = list(markers) # to allow repeated iteration
764 markers = list(markers) # to allow repeated iteration
756 self._data = self._data + rawdata
765 self._data = self._data + rawdata
757 self._all.extend(markers)
766 self._all.extend(markers)
758 if self._cached('successors'):
767 if self._cached('successors'):
759 _addsuccessors(self.successors, markers)
768 _addsuccessors(self.successors, markers)
760 if self._cached('predecessors'):
769 if self._cached('predecessors'):
761 _addpredecessors(self.predecessors, markers)
770 _addpredecessors(self.predecessors, markers)
762 if self._cached('children'):
771 if self._cached('children'):
763 _addchildren(self.children, markers)
772 _addchildren(self.children, markers)
764 _checkinvalidmarkers(self.repo, markers)
773 _checkinvalidmarkers(self.repo, markers)
765
774
766 def relevantmarkers(self, nodes):
775 def relevantmarkers(self, nodes):
767 """return a set of all obsolescence markers relevant to a set of nodes.
776 """return a set of all obsolescence markers relevant to a set of nodes.
768
777
769 "relevant" to a set of nodes mean:
778 "relevant" to a set of nodes mean:
770
779
771 - marker that use this changeset as successor
780 - marker that use this changeset as successor
772 - prune marker of direct children on this changeset
781 - prune marker of direct children on this changeset
773 - recursive application of the two rules on predecessors of these
782 - recursive application of the two rules on predecessors of these
774 markers
783 markers
775
784
776 It is a set so you cannot rely on order."""
785 It is a set so you cannot rely on order."""
777
786
778 pendingnodes = set(nodes)
787 pendingnodes = set(nodes)
779 seenmarkers = set()
788 seenmarkers = set()
780 seennodes = set(pendingnodes)
789 seennodes = set(pendingnodes)
781 precursorsmarkers = self.predecessors
790 precursorsmarkers = self.predecessors
782 succsmarkers = self.successors
791 succsmarkers = self.successors
783 children = self.children
792 children = self.children
784 while pendingnodes:
793 while pendingnodes:
785 direct = set()
794 direct = set()
786 for current in pendingnodes:
795 for current in pendingnodes:
787 direct.update(precursorsmarkers.get(current, ()))
796 direct.update(precursorsmarkers.get(current, ()))
788 pruned = [m for m in children.get(current, ()) if not m[1]]
797 pruned = [m for m in children.get(current, ()) if not m[1]]
789 direct.update(pruned)
798 direct.update(pruned)
790 pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
799 pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
791 direct.update(pruned)
800 direct.update(pruned)
792 direct -= seenmarkers
801 direct -= seenmarkers
793 pendingnodes = {m[0] for m in direct}
802 pendingnodes = {m[0] for m in direct}
794 seenmarkers |= direct
803 seenmarkers |= direct
795 pendingnodes -= seennodes
804 pendingnodes -= seennodes
796 seennodes |= pendingnodes
805 seennodes |= pendingnodes
797 return seenmarkers
806 return seenmarkers
798
807
799
808
800 def makestore(ui, repo):
809 def makestore(ui, repo):
801 """Create an obsstore instance from a repo."""
810 """Create an obsstore instance from a repo."""
802 # read default format for new obsstore.
811 # read default format for new obsstore.
803 # developer config: format.obsstore-version
812 # developer config: format.obsstore-version
804 defaultformat = ui.configint(b'format', b'obsstore-version')
813 defaultformat = ui.configint(b'format', b'obsstore-version')
805 # rely on obsstore class default when possible.
814 # rely on obsstore class default when possible.
806 kwargs = {}
815 kwargs = {}
807 if defaultformat is not None:
816 if defaultformat is not None:
808 kwargs['defaultformat'] = defaultformat
817 kwargs['defaultformat'] = defaultformat
809 readonly = not isenabled(repo, createmarkersopt)
818 readonly = not isenabled(repo, createmarkersopt)
810 store = obsstore(repo, repo.svfs, readonly=readonly, **kwargs)
819 store = obsstore(repo, repo.svfs, readonly=readonly, **kwargs)
811 if store and readonly:
820 if store and readonly:
812 ui.warn(
821 ui.warn(
813 _(b'obsolete feature not enabled but %i markers found!\n')
822 _(b'obsolete feature not enabled but %i markers found!\n')
814 % len(list(store))
823 % len(list(store))
815 )
824 )
816 return store
825 return store
817
826
818
827
819 def commonversion(versions):
828 def commonversion(versions):
820 """Return the newest version listed in both versions and our local formats.
829 """Return the newest version listed in both versions and our local formats.
821
830
822 Returns None if no common version exists.
831 Returns None if no common version exists.
823 """
832 """
824 versions.sort(reverse=True)
833 versions.sort(reverse=True)
825 # search for highest version known on both side
834 # search for highest version known on both side
826 for v in versions:
835 for v in versions:
827 if v in formats:
836 if v in formats:
828 return v
837 return v
829 return None
838 return None
830
839
831
840
832 # arbitrary picked to fit into 8K limit from HTTP server
841 # arbitrary picked to fit into 8K limit from HTTP server
833 # you have to take in account:
842 # you have to take in account:
834 # - the version header
843 # - the version header
835 # - the base85 encoding
844 # - the base85 encoding
836 _maxpayload = 5300
845 _maxpayload = 5300
837
846
838
847
839 def _pushkeyescape(markers):
848 def _pushkeyescape(markers):
840 """encode markers into a dict suitable for pushkey exchange
849 """encode markers into a dict suitable for pushkey exchange
841
850
842 - binary data is base85 encoded
851 - binary data is base85 encoded
843 - split in chunks smaller than 5300 bytes"""
852 - split in chunks smaller than 5300 bytes"""
844 keys = {}
853 keys = {}
845 parts = []
854 parts = []
846 currentlen = _maxpayload * 2 # ensure we create a new part
855 currentlen = _maxpayload * 2 # ensure we create a new part
847 for marker in markers:
856 for marker in markers:
848 nextdata = _fm0encodeonemarker(marker)
857 nextdata = _fm0encodeonemarker(marker)
849 if len(nextdata) + currentlen > _maxpayload:
858 if len(nextdata) + currentlen > _maxpayload:
850 currentpart = []
859 currentpart = []
851 currentlen = 0
860 currentlen = 0
852 parts.append(currentpart)
861 parts.append(currentpart)
853 currentpart.append(nextdata)
862 currentpart.append(nextdata)
854 currentlen += len(nextdata)
863 currentlen += len(nextdata)
855 for idx, part in enumerate(reversed(parts)):
864 for idx, part in enumerate(reversed(parts)):
856 data = b''.join([_pack(b'>B', _fm0version)] + part)
865 data = b''.join([_pack(b'>B', _fm0version)] + part)
857 keys[b'dump%i' % idx] = util.b85encode(data)
866 keys[b'dump%i' % idx] = util.b85encode(data)
858 return keys
867 return keys
859
868
860
869
861 def listmarkers(repo):
870 def listmarkers(repo):
862 """List markers over pushkey"""
871 """List markers over pushkey"""
863 if not repo.obsstore:
872 if not repo.obsstore:
864 return {}
873 return {}
865 return _pushkeyescape(sorted(repo.obsstore))
874 return _pushkeyescape(sorted(repo.obsstore))
866
875
867
876
868 def pushmarker(repo, key, old, new):
877 def pushmarker(repo, key, old, new):
869 """Push markers over pushkey"""
878 """Push markers over pushkey"""
870 if not key.startswith(b'dump'):
879 if not key.startswith(b'dump'):
871 repo.ui.warn(_(b'unknown key: %r') % key)
880 repo.ui.warn(_(b'unknown key: %r') % key)
872 return False
881 return False
873 if old:
882 if old:
874 repo.ui.warn(_(b'unexpected old value for %r') % key)
883 repo.ui.warn(_(b'unexpected old value for %r') % key)
875 return False
884 return False
876 data = util.b85decode(new)
885 data = util.b85decode(new)
877 with repo.lock(), repo.transaction(b'pushkey: obsolete markers') as tr:
886 with repo.lock(), repo.transaction(b'pushkey: obsolete markers') as tr:
878 repo.obsstore.mergemarkers(tr, data)
887 repo.obsstore.mergemarkers(tr, data)
879 repo.invalidatevolatilesets()
888 repo.invalidatevolatilesets()
880 return True
889 return True
881
890
882
891
883 # mapping of 'set-name' -> <function to compute this set>
892 # mapping of 'set-name' -> <function to compute this set>
884 cachefuncs = {}
893 cachefuncs = {}
885
894
886
895
887 def cachefor(name):
896 def cachefor(name):
888 """Decorator to register a function as computing the cache for a set"""
897 """Decorator to register a function as computing the cache for a set"""
889
898
890 def decorator(func):
899 def decorator(func):
891 if name in cachefuncs:
900 if name in cachefuncs:
892 msg = b"duplicated registration for volatileset '%s' (existing: %r)"
901 msg = b"duplicated registration for volatileset '%s' (existing: %r)"
893 raise error.ProgrammingError(msg % (name, cachefuncs[name]))
902 raise error.ProgrammingError(msg % (name, cachefuncs[name]))
894 cachefuncs[name] = func
903 cachefuncs[name] = func
895 return func
904 return func
896
905
897 return decorator
906 return decorator
898
907
899
908
900 def getrevs(repo, name):
909 def getrevs(repo, name):
901 """Return the set of revision that belong to the <name> set
910 """Return the set of revision that belong to the <name> set
902
911
903 Such access may compute the set and cache it for future use"""
912 Such access may compute the set and cache it for future use"""
904 repo = repo.unfiltered()
913 repo = repo.unfiltered()
905 with util.timedcm('getrevs %s', name):
914 with util.timedcm('getrevs %s', name):
906 if not repo.obsstore:
915 if not repo.obsstore:
907 return frozenset()
916 return frozenset()
908 if name not in repo.obsstore.caches:
917 if name not in repo.obsstore.caches:
909 repo.obsstore.caches[name] = cachefuncs[name](repo)
918 repo.obsstore.caches[name] = cachefuncs[name](repo)
910 return repo.obsstore.caches[name]
919 return repo.obsstore.caches[name]
911
920
912
921
913 # To be simple we need to invalidate obsolescence cache when:
922 # To be simple we need to invalidate obsolescence cache when:
914 #
923 #
915 # - new changeset is added:
924 # - new changeset is added:
916 # - public phase is changed
925 # - public phase is changed
917 # - obsolescence marker are added
926 # - obsolescence marker are added
918 # - strip is used a repo
927 # - strip is used a repo
919 def clearobscaches(repo):
928 def clearobscaches(repo):
920 """Remove all obsolescence related cache from a repo
929 """Remove all obsolescence related cache from a repo
921
930
922 This remove all cache in obsstore is the obsstore already exist on the
931 This remove all cache in obsstore is the obsstore already exist on the
923 repo.
932 repo.
924
933
925 (We could be smarter here given the exact event that trigger the cache
934 (We could be smarter here given the exact event that trigger the cache
926 clearing)"""
935 clearing)"""
927 # only clear cache is there is obsstore data in this repo
936 # only clear cache is there is obsstore data in this repo
928 if b'obsstore' in repo._filecache:
937 if b'obsstore' in repo._filecache:
929 repo.obsstore.caches.clear()
938 repo.obsstore.caches.clear()
930
939
931
940
932 def _mutablerevs(repo):
941 def _mutablerevs(repo):
933 """the set of mutable revision in the repository"""
942 """the set of mutable revision in the repository"""
934 return repo._phasecache.getrevset(repo, phases.mutablephases)
943 return repo._phasecache.getrevset(repo, phases.mutablephases)
935
944
936
945
937 @cachefor(b'obsolete')
946 @cachefor(b'obsolete')
938 def _computeobsoleteset(repo):
947 def _computeobsoleteset(repo):
939 """the set of obsolete revisions"""
948 """the set of obsolete revisions"""
940 getnode = repo.changelog.node
949 getnode = repo.changelog.node
941 notpublic = _mutablerevs(repo)
950 notpublic = _mutablerevs(repo)
942 isobs = repo.obsstore.successors.__contains__
951 isobs = repo.obsstore.successors.__contains__
943 return frozenset(r for r in notpublic if isobs(getnode(r)))
952 return frozenset(r for r in notpublic if isobs(getnode(r)))
944
953
945
954
@cachefor(b'orphan')
def _computeorphanset(repo):
    """the set of non obsolete revisions with obsolete ancestors"""
    pfunc = repo.changelog.parentrevs
    mutable = _mutablerevs(repo)
    obsolete = getrevs(repo, b'obsolete')
    others = mutable - obsolete
    unstable = set()
    for r in sorted(others):
        # A rev is unstable if one of its parents is obsolete or unstable;
        # this works since we traverse in increasing rev order
        for p in pfunc(r):
            if p in obsolete or p in unstable:
                unstable.add(r)
                break
    return frozenset(unstable)


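# Self-contained toy sketch (plain Python, no repo; not in the upstream
# module) of the propagation rule above: a rev is orphan if any parent is
# obsolete or already orphan, which one pass in increasing rev order
# resolves because parents always have smaller revs than their children.
def _example_orphanrule(parents, obsolete):
    # parents: dict rev -> tuple of parent revs; obsolete: set of revs
    unstable = set()
    for r in sorted(parents):
        if any(p in obsolete or p in unstable for p in parents[r]):
            unstable.add(r)
    return unstable


# e.g. _example_orphanrule({0: (), 1: (0,), 2: (1,)}, {0}) == {1, 2}

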
@cachefor(b'suspended')
def _computesuspendedset(repo):
    """the set of obsolete revisions with non obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, b'orphan'))
    return frozenset(r for r in getrevs(repo, b'obsolete') if r in suspended)


@cachefor(b'extinct')
def _computeextinctset(repo):
    """the set of obsolete revisions without non obsolete descendants"""
    return getrevs(repo, b'obsolete') - getrevs(repo, b'suspended')


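# Identity sketch (assumes `repo`; not in the upstream module): by
# construction the two sets above partition the obsolete set, with the
# extinct changesets being the ones that can safely be hidden.
def _example_partition(repo):
    obs = getrevs(repo, b'obsolete')
    sus = getrevs(repo, b'suspended')
    ext = getrevs(repo, b'extinct')
    assert obs == (sus | ext) and not (sus & ext)

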
@cachefor(b'phasedivergent')
def _computephasedivergentset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase  # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.index.get_rev
    tonode = cl.node
    obsstore = repo.obsstore
    for rev in repo.revs(b'(not public()) and (not obsolete())'):
        # We only evaluate mutable, non-obsolete revisions
        node = tonode(rev)
        # (future) A cache of predecessors may be worth it if splits are
        # very common
        for pnode in obsutil.allpredecessors(
            obsstore, [node], ignoreflags=bumpedfix
        ):
            prev = torev(pnode)  # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public predecessor
                bumped.add(rev)
                break  # Next draft!
    return frozenset(bumped)


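# Toy sketch (plain Python, no repo; not in the upstream module) of the
# rule above: a draft whose transitive predecessors include a public
# changeset is phase-divergent ("bumped").
def _example_isbumped(preds, public, node):
    # preds: dict node -> iterable of direct predecessor nodes
    seen, stack = set(), [node]
    while stack:
        n = stack.pop()
        if n in seen:
            continue
        seen.add(n)
        if n != node and n in public:
            return True
        stack.extend(preds.get(n, ()))
    return False

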
@cachefor(b'contentdivergent')
def _computecontentdivergentset(repo):
    """the set of revs that compete to be the final successors of some revision."""
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    tonode = repo.changelog.node
    for rev in repo.revs(b'(not public()) - obsolete()'):
        node = tonode(rev)
        mark = obsstore.predecessors.get(node, ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue  # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                obsutil.successorssets(repo, prec, cache=newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(rev)
                break
            toprocess.update(obsstore.predecessors.get(prec, ()))
    return frozenset(divergent)


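# Toy sketch (plain Python, no repo; not in the upstream module) of the
# core test above: a predecessor rewritten independently into two surviving
# lines of successors makes the competing revisions content-divergent.
def _example_isdivergent(successorssets):
    # successorssets: the successors sets of one predecessor, e.g.
    # [[b'A'], [b'B']] when it was rewritten to A and, separately, to B
    nonempty = [s for s in successorssets if s]
    return len(nonempty) > 1

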
def makefoldid(relation, user):

    folddigest = hashutil.sha1(user)
    for p in relation[0] + relation[1]:
        folddigest.update(b'%d' % p.rev())
        folddigest.update(p.node())
    # Since a fold only has to compete against folds for the same successors,
    # it seems fine to use a small ID. Smaller IDs save space.
    return hex(folddigest.digest())[:8]


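# Standalone sketch (stdlib only; not in the upstream module) of the same
# scheme: the fold id is the first 8 hex digits of a sha1 over the user
# plus each changeset's rev and node, so retrying the same fold as the
# same user yields the same id.
import hashlib


def _example_foldid(user, revs_and_nodes):
    d = hashlib.sha1(user)
    for rev, node in revs_and_nodes:
        d.update(b'%d' % rev)
        d.update(node)
    return d.hexdigest()[:8]

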
def createmarkers(
    repo, relations, flag=0, date=None, metadata=None, operation=None
):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of ((<old>,...), (<new>, ...)[,{metadata}])
    tuples. `old` and `new` are changectx objects. metadata is an optional
    dictionary containing metadata for this marker only. It is merged with
    the global metadata specified through the `metadata` argument of this
    function. Any string values in metadata must be UTF-8 bytes.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if b'user' not in metadata:
        luser = (
            repo.ui.config(b'devel', b'user.obsmarker') or repo.ui.username()
        )
        metadata[b'user'] = encoding.fromlocal(luser)

    # Operation metadata handling
    useoperation = repo.ui.configbool(
        b'experimental', b'evolution.track-operation'
    )
    if useoperation and operation:
        metadata[b'operation'] = operation

    # Effect flag metadata handling
    saveeffectflag = repo.ui.configbool(
        b'experimental', b'evolution.effect-flags'
    )

    with repo.transaction(b'add-obsolescence-marker') as tr:
        markerargs = []
        for rel in relations:
            predecessors = rel[0]
            if not isinstance(predecessors, tuple):
                # preserve compat with old API until all callers are migrated
                predecessors = (predecessors,)
            if len(predecessors) > 1 and len(rel[1]) != 1:
                msg = b'Fold markers can only have 1 successor, not %d'
                raise error.ProgrammingError(msg % len(rel[1]))
            foldid = None
            foldsize = len(predecessors)
            if 1 < foldsize:
                foldid = makefoldid(rel, metadata[b'user'])
            for foldidx, prec in enumerate(predecessors, 1):
                sucs = rel[1]
                localmetadata = metadata.copy()
                if len(rel) > 2:
                    localmetadata.update(rel[2])
                if foldid is not None:
                    localmetadata[b'fold-id'] = foldid
                    localmetadata[b'fold-idx'] = b'%d' % foldidx
                    localmetadata[b'fold-size'] = b'%d' % foldsize

                if not prec.mutable():
                    raise error.Abort(
                        _(b"cannot obsolete public changeset: %s") % prec,
                        hint=b"see 'hg help phases' for details",
                    )
                nprec = prec.node()
                nsucs = tuple(s.node() for s in sucs)
                npare = None
                if not nsucs:
                    npare = tuple(p.node() for p in prec.parents())
                if nprec in nsucs:
                    raise error.Abort(
                        _(b"changeset %s cannot obsolete itself") % prec
                    )

                # The effect flag can differ between relations
                if saveeffectflag:
                    # The effect flag is saved in a versioned field name for
                    # future evolution
                    effectflag = obsutil.geteffectflag(prec, sucs)
                    localmetadata[obsutil.EFFECTFLAGFIELD] = b"%d" % effectflag

                # Creating the marker causes the hidden cache to become
                # invalid, which causes recomputation when we ask for
                # prec.parents() above, resulting in n^2 behavior. So let's
                # prepare all of the args first, then create the markers.
                markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(
                tr,
                nprec,
                nsucs,
                flag,
                parents=npare,
                date=date,
                metadata=localmetadata,
                ui=repo.ui,
            )
        repo.filteredrevcache.clear()


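# Usage sketch (assumes `repo` plus changectxs `old` and `new`; the
# operation name and metadata key are hypothetical, not part of the
# upstream module): record that `old` was rewritten into `new`, or prune
# a changeset by giving it no successors, per the docstring above.
def _example_record_rewrite(repo, old, new):
    createmarkers(
        repo,
        [((old,), (new,))],
        operation=b'example-rewrite',
        metadata={b'note': b'sketch only'},
    )


def _example_prune(repo, old):
    createmarkers(repo, [((old,), ())])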