obsolete: make sure windows tests pass when stat() is given a URL...
av6
r49920:4507bc00 default
@@ -1,1150 +1,1150 @@
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "predecessor" and possible
replacements are called "successors". Markers that use changeset X as
a predecessor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "predecessor markers of Y" because they hold
information about the predecessors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

  (A, (A',)) and (A, (A'',))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. The marker format depends on the
version. See the comment associated with each format for details.

"""
from __future__ import absolute_import

import errno
import struct

from .i18n import _
from .node import (
    bin,
    hex,
)
from .pycompat import getattr
from . import (
    encoding,
    error,
    obsutil,
    phases,
    policy,
    pycompat,
    util,
)
from .utils import (
    dateutil,
    hashutil,
)

parsers = policy.importmod('parsers')

_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# Options for obsolescence
createmarkersopt = b'createmarkers'
allowunstableopt = b'allowunstable'
allowdivergenceopt = b'allowdivergence'
exchangeopt = b'exchange'


def _getoptionvalue(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    configkey = b'evolution.%s' % option
    newconfig = repo.ui.configbool(b'experimental', configkey)

    # Return the value only if defined
    if newconfig is not None:
        return newconfig

    # Fallback on generic option
    try:
        return repo.ui.configbool(b'experimental', b'evolution')
    except (error.ConfigError, AttributeError):
        # Fallback on old-fashioned config
        # inconsistent config: experimental.evolution
        result = set(repo.ui.configlist(b'experimental', b'evolution'))

        if b'all' in result:
            return True

        # Temporary hack for next check
        newconfig = repo.ui.config(b'experimental', b'evolution.createmarkers')
        if newconfig:
            result.add(b'createmarkers')

        return option in result


def getoptions(repo):
    """Returns a dict showing the state of obsolescence features."""

    createmarkersvalue = _getoptionvalue(repo, createmarkersopt)
    if createmarkersvalue:
        unstablevalue = _getoptionvalue(repo, allowunstableopt)
        divergencevalue = _getoptionvalue(repo, allowdivergenceopt)
        exchangevalue = _getoptionvalue(repo, exchangeopt)
    else:
        # if we cannot create obsolescence markers, we shouldn't exchange them
        # or perform operations that lead to instability or divergence
        unstablevalue = False
        divergencevalue = False
        exchangevalue = False

    return {
        createmarkersopt: createmarkersvalue,
        allowunstableopt: unstablevalue,
        allowdivergenceopt: divergencevalue,
        exchangeopt: exchangevalue,
    }


def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    return getoptions(repo)[option]
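
# A minimal illustration (assumed hgrc snippet, not part of this file) of how
# the options above are usually enabled; _getoptionvalue() reads the
# per-feature booleans first and falls back to the generic 'evolution' one:
#
#   [experimental]
#   evolution = true
#   evolution.createmarkers = true
#   evolution.exchange = true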


# Creating aliases for marker flags because evolve extension looks for
# bumpedfix in obsolete.py
bumpedfix = obsutil.bumpedfix
usingsha256 = obsutil.usingsha256

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = b'>BIB20s'
_fm0node = b'20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)
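
# Illustrative sketch: with _fm0fixed = b'>BIB20s', the fixed part of a v0
# marker is 1 + 4 + 1 + 20 = 26 bytes, so _fm0fsize == 26 and the fields
# unpack as:
#
#   numsuc, mdsize, flags, pre = _unpack(_fm0fixed, data[off : off + _fm0fsize])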


def _fm0readmarkers(data, off, stop):
    # Loop on markers
    while off < stop:
        # read fixed part
        cur = data[off : off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = _fm0fnodesize * numsuc
            cur = data[off : off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off : off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(
                _(
                    b'parsing obsolete marker: metadata is too '
                    b'short, %d bytes expected, got %d'
                )
                % (mdsize, len(metadata))
            )
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop(b'date', b'0 0').split(b' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0.0, 0)
        parents = None
        if b'p2' in metadata:
            parents = (metadata.pop(b'p1', None), metadata.pop(b'p2', None))
        elif b'p1' in metadata:
            parents = (metadata.pop(b'p1', None),)
        elif b'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(pycompat.iteritems(metadata)))

        yield (pre, sucs, flags, metadata, date, parents)


def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_(b'cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata[b'date'] = b'%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata[b'p0'] = b''
        for i, p in enumerate(parents, 1):
            metadata[b'p%i' % i] = hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata


def _fm0encodemeta(meta):
    """Return an encoded version of a string-to-string metadata mapping.

    Assume no ':' in keys and no '\0' in either keys or values."""
    for key, value in pycompat.iteritems(meta):
        if b':' in key or b'\0' in key:
            raise ValueError(b"':' and '\0' are forbidden in metadata keys")
        if b'\0' in value:
            raise ValueError(b"'\0' is forbidden in metadata values")
    return b'\0'.join([b'%s:%s' % (k, meta[k]) for k in sorted(meta)])


def _fm0decodemeta(data):
    """Return a string-to-string dictionary from the encoded version."""
    d = {}
    for l in data.split(b'\0'):
        if l:
            key, value = l.split(b':', 1)
            d[key] = value
    return d
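
# A small round-trip sketch (illustrative values): _fm0encodemeta() joins
# sorted 'key:value' pairs with NUL bytes and _fm0decodemeta() reverses it:
#
#   meta = {b'user': b'alice', b'note': b'amended'}
#   data = _fm0encodemeta(meta)  # b'note:amended\x00user:alice'
#   assert _fm0decodemeta(data) == meta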


## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#   0: parents data stored but no parent,
#   1: one parent stored,
#   2: two parents stored,
#   3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: predecessor changeset identifier.
#
# - N*(20 or 32) bytes: successor changeset identifiers.
#
# - P*(20 or 32) bytes: parents of the predecessor changeset.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = b'>IdhHBBB'
_fm1nodesha1 = b'20s'
_fm1nodesha256 = b'32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = _fm1parentnone << _fm1parentshift
_fm1metapair = b'BB'
_fm1metapairsize = _calcsize(_fm1metapair)
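
# Illustrative sizes implied by the formats above: the v1 fixed part
# (b'>IdhHBBB') is 4 + 8 + 2 + 2 + 1 + 1 + 1 = 19 bytes, each metadata size
# pair (b'BB') adds 2 bytes, and node fields are 20 bytes (sha1) or 32 bytes
# (sha256) depending on the usingsha256 flag.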


def _fm1purereadmarkers(data, off, stop):
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    ufixed = struct.Struct(_fm1fixed).unpack

    while off < stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta = ufixed(data[off:o1])

        if flags & sha2flag:
            nodefmt = sha2fmt
            nodesize = sha2size
        else:
            nodefmt = sha1fmt
            nodesize = sha1size

        (prec,) = unpack(nodefmt, data[o1 : o1 + nodesize])
        o1 += nodesize

        # read 0 or more successors
        if numsuc == 1:
            o2 = o1 + nodesize
            sucs = (data[o1:o2],)
        else:
            o2 = o1 + nodesize * numsuc
            sucs = unpack(nodefmt * numsuc, data[o1:o2])

        # read parents
        if numpar == noneflag:
            o3 = o2
            parents = None
        elif numpar == 1:
            o3 = o2 + nodesize
            parents = (data[o2:o3],)
        else:
            o3 = o2 + nodesize * numpar
            parents = unpack(nodefmt * numpar, data[o2:o3])

        # read metadata
        off = o3 + metasize * nummeta
        metapairsize = unpack(b'>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in pycompat.xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)


def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = 1 + numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1] // 60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        if lk > 255:
            msg = (
                b'obsstore metadata key cannot be longer than 255 bytes'
                b' (key "%s" is %u bytes)'
            ) % (key, lk)
            raise error.ProgrammingError(msg)
        if lv > 255:
            msg = (
                b'obsstore metadata value cannot be longer than 255 bytes'
                b' (value "%s" for key "%s" is %u bytes)'
            ) % (value, key, lv)
            raise error.ProgrammingError(msg)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return b''.join(data)


def _fm1readmarkers(data, off, stop):
    native = getattr(parsers, 'fm1readmarkers', None)
    if not native:
        return _fm1purereadmarkers(data, off, stop)
    return native(data, off, stop)


# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {
    _fm0version: (_fm0readmarkers, _fm0encodeonemarker),
    _fm1version: (_fm1readmarkers, _fm1encodeonemarker),
}


def _readmarkerversion(data):
    return _unpack(b'>B', data[0:1])[0]


@util.nogc
def _readmarkers(data, off=None, stop=None):
    """Read and enumerate markers from raw data"""
    diskversion = _readmarkerversion(data)
    if not off:
        off = 1  # skip 1 byte version number
    if stop is None:
        stop = len(data)
    if diskversion not in formats:
        msg = _(b'parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    return diskversion, formats[diskversion][0](data, off, stop)
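
# Typical use, sketched: feed raw obsstore bytes (version header included)
# and iterate over the decoded marker tuples:
#
#   version, markers = _readmarkers(repo.svfs.tryread(b'obsstore'))
#   for prec, sucs, flags, meta, date, parents in markers:
#       ...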


def encodeheader(version=_fm0version):
    return _pack(b'>B', version)


def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for marker in markers:
        yield encodeone(marker)
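
# Sketch of a serialize/parse round trip with the helpers above (exact
# equality assumes well-formed markers, e.g. a timezone offset that is a
# multiple of 60):
#
#   data = b''.join(encodemarkers(markers, addheader=True, version=_fm1version))
#   version, decoded = _readmarkers(data)
#   assert version == _fm1version and list(decoded) == list(markers)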


@util.nogc
def _addsuccessors(successors, markers):
    for mark in markers:
        successors.setdefault(mark[0], set()).add(mark)


@util.nogc
def _addpredecessors(predecessors, markers):
    for mark in markers:
        for suc in mark[1]:
            predecessors.setdefault(suc, set()).add(mark)


@util.nogc
def _addchildren(children, markers):
    for mark in markers:
        parents = mark[5]
        if parents is not None:
            for p in parents:
                children.setdefault(p, set()).add(mark)


def _checkinvalidmarkers(repo, markers):
    """search for markers with invalid data and raise an error if needed

    Exists as a separate function to allow the evolve extension to provide
    more subtle handling.
    """
    for mark in markers:
        if repo.nullid in mark[1]:
            raise error.Abort(
                _(
                    b'bad obsolescence marker detected: '
                    b'invalid successors nullid'
                )
            )


class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - predecessors[x] -> set(markers on predecessors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on predecessors edges of children(x))
    """

    fields = (b'prec', b'succs', b'flag', b'meta', b'date', b'parents')
    # prec: nodeid, predecessor changeset
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob in UTF-8, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of predecessors
    #          None is used when no data has been recorded
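
    # An illustrative marker value matching the fields above (node ids are
    # shortened here for readability; real ones are 20 or 32 bytes):
    #
    #   (b'\x01' * 20,            # prec
    #    (b'\x02' * 20,),         # succs
    #    0,                       # flag
    #    ((b'user', b'alice'),),  # meta
    #    (1234567890.0, 0),       # date
    #    None)                    # parents (no data recorded)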

    def __init__(self, repo, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence-related data
        self.caches = {}
        self.svfs = svfs
        self.repo = repo
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        if not self._cached('_all'):
            try:
                return self.svfs.stat(b'obsstore').st_size > 1
            except OSError as inst:
-               if inst.errno != errno.ENOENT:
+               if inst.errno not in (errno.ENOENT, errno.EINVAL):
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete markers are always on."""
        return self._readonly

    def create(
        self,
        transaction,
        prec,
        succs=(),
        flag=0,
        parents=None,
        date=None,
        metadata=None,
        ui=None,
    ):
        """obsolete: add a new obsolete marker

        * ensure it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code that creates markers, you want to use
        the `createmarkers` function in this module instead.

        Returns True if a new marker has been added, False if the marker
        already existed (no-op).
        """
        flag = int(flag)
        if metadata is None:
            metadata = {}
        if date is None:
            if b'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = dateutil.parsedate(metadata.pop(b'date'))
            elif ui is not None:
                date = ui.configdate(b'devel', b'default-date')
                if date is None:
                    date = dateutil.makedate()
            else:
                date = dateutil.makedate()
        if flag & usingsha256:
            if len(prec) != 32:
                raise ValueError(prec)
            for succ in succs:
                if len(succ) != 32:
                    raise ValueError(succ)
        else:
            if len(prec) != 20:
                raise ValueError(prec)
            for succ in succs:
                if len(succ) != 20:
                    raise ValueError(succ)
        if prec in succs:
            raise ValueError(
                'in-marker cycle with %s' % pycompat.sysstr(hex(prec))
            )

        metadata = tuple(sorted(pycompat.iteritems(metadata)))
        for k, v in metadata:
            try:
                # might be better to reject non-ASCII keys
                k.decode('utf-8')
                v.decode('utf-8')
            except UnicodeDecodeError:
                raise error.ProgrammingError(
                    b'obsstore metadata must be valid UTF-8 sequence '
                    b'(key = %r, value = %r)'
                    % (pycompat.bytestr(k), pycompat.bytestr(v))
                )

        marker = (bytes(prec), tuple(succs), flag, metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Takes care of filtering out duplicates.
        Returns the number of new markers."""
        if self._readonly:
            raise error.Abort(
                _(b'creating obsolete markers is not enabled on this repo')
            )
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs(b'obsstore', b'ab')
            try:
                offset = f.tell()
                transaction.add(b'obsstore', offset)
                # offset == 0: new file - add the version header
                data = b''.join(encodemarkers(new, offset == 0, self._version))
                f.write(data)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            addedmarkers = transaction.changes.get(b'obsmarkers')
            if addedmarkers is not None:
                addedmarkers.update(new)
            self._addmarkers(new, data)
            # new markers *may* have changed several sets. invalidate the
            # caches.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get(b'new_obsmarkers', b'0'))
        transaction.hookargs[b'new_obsmarkers'] = b'%d' % (previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        return self.svfs.tryread(b'obsstore')

    @propertycache
    def _version(self):
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(self.repo, markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def predecessors(self):
        predecessors = {}
        _addpredecessors(predecessors, self._all)
        return predecessors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        return attr in self.__dict__

    def _addmarkers(self, markers, rawdata):
        markers = list(markers)  # to allow repeated iteration
        self._data = self._data + rawdata
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('predecessors'):
            _addpredecessors(self.predecessors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(self.repo, markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use one of these changesets as a successor
        - prune markers of direct children of these changesets
        - recursive application of the two rules on predecessors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.predecessors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = {m[0] for m in direct}
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers


def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    defaultformat = ui.configint(b'format', b'obsstore-version')
    # rely on obsstore class default when possible.
    kwargs = {}
    if defaultformat is not None:
        kwargs['defaultformat'] = defaultformat
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo, repo.svfs, readonly=readonly, **kwargs)
    if store and readonly:
        ui.warn(
            _(b'obsolete feature not enabled but %i markers found!\n')
            % len(list(store))
        )
    return store


def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for the highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None
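
# For example, with the local formats above (0 and 1), a peer advertising
# versions [2, 1, 0] would negotiate version 1:
#
#   commonversion([2, 1, 0]) == 1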


# arbitrarily picked to fit into the 8K limit from HTTP servers
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300


def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if len(nextdata) + currentlen > _maxpayload:
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = b''.join([_pack(b'>B', _fm0version)] + part)
        keys[b'dump%i' % idx] = util.b85encode(data)
    return keys
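
# The resulting dict has one base85-encoded chunk per key, along the lines
# of (content abbreviated):
#
#   {b'dump0': b'<base85 data>', b'dump1': b'<base85 data>'}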


def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(sorted(repo.obsstore))


def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith(b'dump'):
        repo.ui.warn(_(b'unknown key: %r') % key)
        return False
    if old:
        repo.ui.warn(_(b'unexpected old value for %r') % key)
        return False
    data = util.b85decode(new)
    with repo.lock(), repo.transaction(b'pushkey: obsolete markers') as tr:
        repo.obsstore.mergemarkers(tr, data)
        repo.invalidatevolatilesets()
        return True


# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}


def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""

    def decorator(func):
        if name in cachefuncs:
            msg = b"duplicated registration for volatileset '%s' (existing: %r)"
            raise error.ProgrammingError(msg % (name, cachefuncs[name]))
        cachefuncs[name] = func
        return func

    return decorator


def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    with util.timedcm('getrevs %s', name):
        if not repo.obsstore:
            return frozenset()
        if name not in repo.obsstore.caches:
            repo.obsstore.caches[name] = cachefuncs[name](repo)
        return repo.obsstore.caches[name]
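
# Typical callers ask for one of the sets registered below, e.g.:
#
#   obsolete = getrevs(repo, b'obsolete')  # revs with successor markers
#   orphan = getrevs(repo, b'orphan')      # mutable revs with obsolete parents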


# To be simple we need to invalidate obsolescence caches when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence-related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists
    on the repo.

    (We could be smarter here given the exact event that triggered the cache
    clearing)"""
    # only clear caches if there is obsstore data in this repo
    if b'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()


def _mutablerevs(repo):
    """the set of mutable revisions in the repository"""
    return repo._phasecache.getrevset(repo, phases.mutablephases)


@cachefor(b'obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    getnode = repo.changelog.node
    notpublic = _mutablerevs(repo)
    isobs = repo.obsstore.successors.__contains__
    obs = {r for r in notpublic if isobs(getnode(r))}
    return obs


@cachefor(b'orphan')
def _computeorphanset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    pfunc = repo.changelog.parentrevs
    mutable = _mutablerevs(repo)
    obsolete = getrevs(repo, b'obsolete')
    others = mutable - obsolete
    unstable = set()
    for r in sorted(others):
        # A rev is unstable if one of its parents is obsolete or unstable
        # (this works since we traverse in growing rev order)
        for p in pfunc(r):
            if p in obsolete or p in unstable:
                unstable.add(r)
                break
    return unstable
967
967
968
968
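# Example (illustrative): if mutable changeset B has parent A and a marker
# (A, (A',)) obsoletes A, then B lands in the b'orphan' set computed above;
# so does every mutable descendant of B, via the unstable-parent propagation.
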
@cachefor(b'suspended')
def _computesuspendedset(repo):
    """the set of obsolete revisions with non-obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, b'orphan'))
    return {r for r in getrevs(repo, b'obsolete') if r in suspended}


@cachefor(b'extinct')
def _computeextinctset(repo):
    """the set of obsolete revisions without non-obsolete descendants"""
    return getrevs(repo, b'obsolete') - getrevs(repo, b'suspended')


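# Example (illustrative): continuing the case above, obsolete A has the
# non-obsolete descendant B, so A is b'suspended' (it cannot be hidden yet).
# An obsolete changeset with no such descendant falls in b'extinct' instead:
# by construction, b'obsolete' is the disjoint union of the two sets.
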
@cachefor(b'phasedivergent')
def _computephasedivergentset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # utility aliases (avoid attribute lookups in the loop)
    phase = repo._phasecache.phase  # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.index.get_rev
    tonode = cl.node
    obsstore = repo.obsstore
    for rev in repo.revs(b'(not public()) and (not obsolete())'):
        # We only evaluate mutable, non-obsolete revisions
        node = tonode(rev)
        # (future) A cache of predecessors may be worth it if split is very
        # common
        for pnode in obsutil.allpredecessors(
            obsstore, [node], ignoreflags=bumpedfix
        ):
            prev = torev(pnode)  # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public predecessor
                bumped.add(rev)
                break  # Next draft!
    return bumped


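# Example (illustrative): if draft changeset X was rewritten into X' and X was
# later pushed and became public, the marker (X, (X',)) now points from a
# public changeset; X' is then reported in the b'phasedivergent' set above.
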
@cachefor(b'contentdivergent')
def _computecontentdivergentset(repo):
    """the set of revs that compete to be the final successors of some revision."""
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    tonode = repo.changelog.node
    for rev in repo.revs(b'(not public()) - obsolete()'):
        node = tonode(rev)
        mark = obsstore.predecessors.get(node, ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue  # emergency protection against marker cycles
            seen.add(prec)
            if prec not in newermap:
                obsutil.successorssets(repo, prec, cache=newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(rev)
                break
            toprocess.update(obsstore.predecessors.get(prec, ()))
    return divergent


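# Example (illustrative): two independent rewrites of the same changeset, e.g.
# markers (A, (B,)) and (A, (C,)) created on different clones and later
# exchanged, give A two non-empty successors sets; B and C then both end up
# in the b'contentdivergent' set above.
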
def makefoldid(relation, user):
    """Build a short identifier shared by all markers of one fold operation"""
    folddigest = hashutil.sha1(user)
    for p in relation[0] + relation[1]:
        folddigest.update(b'%d' % p.rev())
        folddigest.update(p.node())
    # Since a fold only has to compete against other folds for the same
    # successors, it seems fine to use a small ID. Smaller IDs save space.
    return hex(folddigest.digest())[:8]


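# Example (illustrative, hypothetical contexts): folding A and B into C yields
# markers (A, (C,)) and (B, (C,)) sharing one fold-id, with fold-idx 1 and 2
# and fold-size 2, so readers can reassemble the fold.
#
#     foldid = makefoldid(((ctxa, ctxb), (ctxc,)), b'alice')
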
def createmarkers(
    repo, relations, flag=0, date=None, metadata=None, operation=None
):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of ((<old>,...), (<new>, ...)[,{metadata}])
    tuples. `old` and `new` are changectx objects. metadata is an optional
    dictionary containing metadata for this marker only. It is merged with
    the global metadata specified through the `metadata` argument of this
    function. Any string values in metadata must be UTF-8 bytes.

    Trying to obsolete a public changeset will raise an exception.

    The current user and date are used unless specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if b'user' not in metadata:
        luser = (
            repo.ui.config(b'devel', b'user.obsmarker') or repo.ui.username()
        )
        metadata[b'user'] = encoding.fromlocal(luser)

    # Operation metadata handling
    useoperation = repo.ui.configbool(
        b'experimental', b'evolution.track-operation'
    )
    if useoperation and operation:
        metadata[b'operation'] = operation

    # Effect flag metadata handling
    saveeffectflag = repo.ui.configbool(
        b'experimental', b'evolution.effect-flags'
    )

    with repo.transaction(b'add-obsolescence-marker') as tr:
        markerargs = []
        for rel in relations:
            predecessors = rel[0]
            if not isinstance(predecessors, tuple):
                # preserve compat with old API until all callers are migrated
                predecessors = (predecessors,)
            if len(predecessors) > 1 and len(rel[1]) != 1:
                msg = b'Fold markers can only have 1 successor, not %d'
                raise error.ProgrammingError(msg % len(rel[1]))
            foldid = None
            foldsize = len(predecessors)
            if 1 < foldsize:
                foldid = makefoldid(rel, metadata[b'user'])
            for foldidx, prec in enumerate(predecessors, 1):
                sucs = rel[1]
                localmetadata = metadata.copy()
                if len(rel) > 2:
                    localmetadata.update(rel[2])
                if foldid is not None:
                    localmetadata[b'fold-id'] = foldid
                    localmetadata[b'fold-idx'] = b'%d' % foldidx
                    localmetadata[b'fold-size'] = b'%d' % foldsize

                if not prec.mutable():
                    raise error.Abort(
                        _(b"cannot obsolete public changeset: %s") % prec,
                        hint=b"see 'hg help phases' for details",
                    )
                nprec = prec.node()
                nsucs = tuple(s.node() for s in sucs)
                npare = None
                if not nsucs:
                    npare = tuple(p.node() for p in prec.parents())
                if nprec in nsucs:
                    raise error.Abort(
                        _(b"changeset %s cannot obsolete itself") % prec
                    )

                # Effect flag can differ by relation
                if saveeffectflag:
                    # The effect flag is saved in a versioned field name for
                    # future evolution
                    effectflag = obsutil.geteffectflag(prec, sucs)
                    localmetadata[obsutil.EFFECTFLAGFIELD] = b"%d" % effectflag

                # Creating the marker causes the hidden cache to become
                # invalid, which causes recomputation when we ask for
                # prec.parents() above, resulting in n^2 behavior. So let's
                # prepare all of the args first, then create the markers.
                markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(
                tr,
                nprec,
                nsucs,
                flag,
                parents=npare,
                date=date,
                metadata=localmetadata,
                ui=repo.ui,
            )
        repo.filteredrevcache.clear()
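

# Example (illustrative, hypothetical contexts): recording that `oldctx` was
# rewritten into `newctx`. createmarkers() opens its own transaction but takes
# no lock, so callers are expected to hold the repo lock already.
#
#     createmarkers(
#         repo,
#         [((oldctx,), (newctx,))],
#         operation=b'amend',
#         metadata={b'note': b'fix typo'},
#     )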