obsolete: don't use os.stat in repo.obsstore.__nonzero__ if it's static HTTP...
av6
r49640:ef50a62e default
@@ -1,1145 +1,1151 @@
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "predecessor" and possible
replacements are called "successors". Markers that use changeset X as
a predecessor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "predecessor markers of Y" because they hold
information about the predecessors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

We use a single marker to distinguish the "split" case from the "divergence"
case. If two independent operations rewrite the same changeset A into B and
C, we have an error case: divergent rewriting. We can detect it because
two markers will be created independently:

    (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. Marker formats depend on the version.
See the comment associated with each format for details.

"""
from __future__ import absolute_import

import errno
import struct

from .i18n import _
from .pycompat import getattr
from .node import (
    bin,
    hex,
)
from . import (
    encoding,
    error,
    obsutil,
    phases,
    policy,
    pycompat,
    util,
)
from .utils import (
    dateutil,
    hashutil,
)

parsers = policy.importmod('parsers')

_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# Options for obsolescence
createmarkersopt = b'createmarkers'
allowunstableopt = b'allowunstable'
allowdivergenceopt = b'allowdivergence'
exchangeopt = b'exchange'


def _getoptionvalue(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    configkey = b'evolution.%s' % option
    newconfig = repo.ui.configbool(b'experimental', configkey)

    # Return the value only if defined
    if newconfig is not None:
        return newconfig

    # Fallback on generic option
    try:
        return repo.ui.configbool(b'experimental', b'evolution')
    except (error.ConfigError, AttributeError):
        # Fallback on old-fashioned config
125 # inconsistent config: experimental.evolution
125 # inconsistent config: experimental.evolution
126 result = set(repo.ui.configlist(b'experimental', b'evolution'))
126 result = set(repo.ui.configlist(b'experimental', b'evolution'))
127
127
128 if b'all' in result:
128 if b'all' in result:
129 return True
129 return True
130
130
131 # Temporary hack for next check
131 # Temporary hack for next check
132 newconfig = repo.ui.config(b'experimental', b'evolution.createmarkers')
132 newconfig = repo.ui.config(b'experimental', b'evolution.createmarkers')
133 if newconfig:
133 if newconfig:
134 result.add(b'createmarkers')
134 result.add(b'createmarkers')
135
135
136 return option in result
136 return option in result
137
137
138
138
139 def getoptions(repo):
139 def getoptions(repo):
140 """Returns dicts showing state of obsolescence features."""
140 """Returns dicts showing state of obsolescence features."""
141
141
142 createmarkersvalue = _getoptionvalue(repo, createmarkersopt)
142 createmarkersvalue = _getoptionvalue(repo, createmarkersopt)
143 if createmarkersvalue:
143 if createmarkersvalue:
144 unstablevalue = _getoptionvalue(repo, allowunstableopt)
144 unstablevalue = _getoptionvalue(repo, allowunstableopt)
145 divergencevalue = _getoptionvalue(repo, allowdivergenceopt)
145 divergencevalue = _getoptionvalue(repo, allowdivergenceopt)
146 exchangevalue = _getoptionvalue(repo, exchangeopt)
146 exchangevalue = _getoptionvalue(repo, exchangeopt)
147 else:
147 else:
148 # if we cannot create obsolescence markers, we shouldn't exchange them
148 # if we cannot create obsolescence markers, we shouldn't exchange them
149 # or perform operations that lead to instability or divergence
149 # or perform operations that lead to instability or divergence
150 unstablevalue = False
150 unstablevalue = False
151 divergencevalue = False
151 divergencevalue = False
152 exchangevalue = False
152 exchangevalue = False
153
153
154 return {
154 return {
155 createmarkersopt: createmarkersvalue,
155 createmarkersopt: createmarkersvalue,
156 allowunstableopt: unstablevalue,
156 allowunstableopt: unstablevalue,
157 allowdivergenceopt: divergencevalue,
157 allowdivergenceopt: divergencevalue,
158 exchangeopt: exchangevalue,
158 exchangeopt: exchangevalue,
159 }
159 }
160
160
161
161
162 def isenabled(repo, option):
162 def isenabled(repo, option):
163 """Returns True if the given repository has the given obsolete option
163 """Returns True if the given repository has the given obsolete option
164 enabled.
164 enabled.
165 """
165 """
166 return getoptions(repo)[option]
166 return getoptions(repo)[option]
167
167
168
168
169 # Creating aliases for marker flags because evolve extension looks for
169 # Creating aliases for marker flags because evolve extension looks for
170 # bumpedfix in obsolete.py
170 # bumpedfix in obsolete.py
171 bumpedfix = obsutil.bumpedfix
171 bumpedfix = obsutil.bumpedfix
172 usingsha256 = obsutil.usingsha256
172 usingsha256 = obsutil.usingsha256
173
173
174 ## Parsing and writing of version "0"
174 ## Parsing and writing of version "0"
175 #
175 #
176 # The header is followed by the markers. Each marker is made of:
176 # The header is followed by the markers. Each marker is made of:
177 #
177 #
178 # - 1 uint8 : number of new changesets "N", can be zero.
178 # - 1 uint8 : number of new changesets "N", can be zero.
179 #
179 #
180 # - 1 uint32: metadata size "M" in bytes.
180 # - 1 uint32: metadata size "M" in bytes.
181 #
181 #
182 # - 1 byte: a bit field. It is reserved for flags used in common
182 # - 1 byte: a bit field. It is reserved for flags used in common
183 # obsolete marker operations, to avoid repeated decoding of metadata
183 # obsolete marker operations, to avoid repeated decoding of metadata
184 # entries.
184 # entries.
185 #
185 #
186 # - 20 bytes: obsoleted changeset identifier.
186 # - 20 bytes: obsoleted changeset identifier.
187 #
187 #
188 # - N*20 bytes: new changesets identifiers.
188 # - N*20 bytes: new changesets identifiers.
189 #
189 #
190 # - M bytes: metadata as a sequence of nul-terminated strings. Each
190 # - M bytes: metadata as a sequence of nul-terminated strings. Each
191 # string contains a key and a value, separated by a colon ':', without
191 # string contains a key and a value, separated by a colon ':', without
192 # additional encoding. Keys cannot contain '\0' or ':' and values
192 # additional encoding. Keys cannot contain '\0' or ':' and values
193 # cannot contain '\0'.
193 # cannot contain '\0'.
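#
# For illustration, a minimal version-0 marker obsoleting a hypothetical
# 20-byte nodeid `prec` with a single successor `succ` and no metadata
# would be packed as:
#
#     _pack(b'>BIB20s' + b'20s', 1, 0, 0, prec, succ)
#
# i.e. numsuc=1, mdsize=0, flags=0, the predecessor, then one successor.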
_fm0version = 0
_fm0fixed = b'>BIB20s'
_fm0node = b'20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)


def _fm0readmarkers(data, off, stop):
    # Loop on markers
    while off < stop:
        # read fixed part
        cur = data[off : off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = _fm0fnodesize * numsuc
            cur = data[off : off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off : off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(
                _(
                    b'parsing obsolete marker: metadata is too '
                    b'short, %d bytes expected, got %d'
                )
                % (mdsize, len(metadata))
            )
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop(b'date', b'0 0').split(b' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0.0, 0)
        parents = None
        if b'p2' in metadata:
            parents = (metadata.pop(b'p1', None), metadata.pop(b'p2', None))
        elif b'p1' in metadata:
            parents = (metadata.pop(b'p1', None),)
        elif b'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(pycompat.iteritems(metadata)))

        yield (pre, sucs, flags, metadata, date, parents)


def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_(b'cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata[b'date'] = b'%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata[b'p0'] = b''
        for i, p in enumerate(parents, 1):
            metadata[b'p%i' % i] = hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata


def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in pycompat.iteritems(meta):
        if b':' in key or b'\0' in key:
            raise ValueError(b"':' and '\0' are forbidden in metadata key")
        if b'\0' in value:
            raise ValueError(b"'\0' is forbidden in metadata value")
    return b'\0'.join([b'%s:%s' % (k, meta[k]) for k in sorted(meta)])


def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split(b'\0'):
        if l:
            key, value = l.split(b':', 1)
            d[key] = value
    return d
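

# For illustration, a hypothetical mapping {b'user': b'alice', b'date': b'0 0'}
# round-trips through the helpers above: _fm0encodemeta() yields
# b'date:0 0\x00user:alice' (keys sorted, nul-separated) and
# _fm0decodemeta() restores the dict.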


## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: predecessor changeset identifier.
#
# - N*(20 or 32) bytes: successors changesets identifiers.
#
# - P*(20 or 32) bytes: parents of the predecessors changesets.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = b'>IdhHBBB'
_fm1nodesha1 = b'20s'
_fm1nodesha256 = b'32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = _fm1parentnone << _fm1parentshift
_fm1metapair = b'BB'
_fm1metapairsize = _calcsize(_fm1metapair)
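
# For illustration, _calcsize(b'>IdhHBBB') is 4 + 8 + 2 + 2 + 1 + 1 + 1 = 19
# bytes, so a minimal sha1 marker (one predecessor, no successors, no parent
# data recorded, no metadata) takes 19 + 20 = 39 bytes on disk.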


def _fm1purereadmarkers(data, off, stop):
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    ufixed = struct.Struct(_fm1fixed).unpack

    while off < stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta = ufixed(data[off:o1])

        if flags & sha2flag:
            nodefmt = sha2fmt
            nodesize = sha2size
        else:
            nodefmt = sha1fmt
            nodesize = sha1size

        (prec,) = unpack(nodefmt, data[o1 : o1 + nodesize])
        o1 += nodesize

        # read 0 or more successors
        if numsuc == 1:
            o2 = o1 + nodesize
            sucs = (data[o1:o2],)
        else:
            o2 = o1 + nodesize * numsuc
            sucs = unpack(nodefmt * numsuc, data[o1:o2])

        # read parents
        if numpar == noneflag:
            o3 = o2
            parents = None
        elif numpar == 1:
            o3 = o2 + nodesize
            parents = (data[o2:o3],)
        else:
            o3 = o2 + nodesize * numpar
            parents = unpack(nodefmt * numpar, data[o2:o3])

        # read metadata
        off = o3 + metasize * nummeta
        metapairsize = unpack(b'>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in pycompat.xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)


def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = 1 + numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1] // 60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        if lk > 255:
            msg = (
                b'obsstore metadata key cannot be longer than 255 bytes'
                b' (key "%s" is %u bytes)'
            ) % (key, lk)
            raise error.ProgrammingError(msg)
        if lv > 255:
            msg = (
                b'obsstore metadata value cannot be longer than 255 bytes'
                b' (value "%s" for key "%s" is %u bytes)'
            ) % (value, key, lv)
            raise error.ProgrammingError(msg)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return b''.join(data)


def _fm1readmarkers(data, off, stop):
    native = getattr(parsers, 'fm1readmarkers', None)
    if not native:
        return _fm1purereadmarkers(data, off, stop)
    return native(data, off, stop)


# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {
    _fm0version: (_fm0readmarkers, _fm0encodeonemarker),
    _fm1version: (_fm1readmarkers, _fm1encodeonemarker),
}


def _readmarkerversion(data):
    return _unpack(b'>B', data[0:1])[0]


@util.nogc
def _readmarkers(data, off=None, stop=None):
    """Read and enumerate markers from raw data"""
    diskversion = _readmarkerversion(data)
    if not off:
        off = 1  # skip 1 byte version number
    if stop is None:
        stop = len(data)
    if diskversion not in formats:
        msg = _(b'parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    return diskversion, formats[diskversion][0](data, off, stop)
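
# Typical use, as seen in mergemarkers() below: `version, markers =
# _readmarkers(data)` skips the 1-byte header and yields
# (prec, succs, flags, metadata, date, parents) tuples from the decoder
# registered for that version in `formats`.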


def encodeheader(version=_fm0version):
    return _pack(b'>B', version)


def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for marker in markers:
        yield encodeone(marker)


@util.nogc
def _addsuccessors(successors, markers):
    for mark in markers:
        successors.setdefault(mark[0], set()).add(mark)


@util.nogc
def _addpredecessors(predecessors, markers):
    for mark in markers:
        for suc in mark[1]:
            predecessors.setdefault(suc, set()).add(mark)


@util.nogc
def _addchildren(children, markers):
    for mark in markers:
        parents = mark[5]
        if parents is not None:
            for p in parents:
                children.setdefault(p, set()).add(mark)


def _checkinvalidmarkers(repo, markers):
530 """search for marker with invalid data and raise error if needed
530 """search for marker with invalid data and raise error if needed
531
531
532 Exist as a separated function to allow the evolve extension for a more
532 Exist as a separated function to allow the evolve extension for a more
533 subtle handling.
533 subtle handling.
534 """
534 """
535 for mark in markers:
535 for mark in markers:
536 if repo.nullid in mark[1]:
536 if repo.nullid in mark[1]:
537 raise error.Abort(
537 raise error.Abort(
538 _(
538 _(
539 b'bad obsolescence marker detected: '
539 b'bad obsolescence marker detected: '
540 b'invalid successors nullid'
540 b'invalid successors nullid'
541 )
541 )
542 )
542 )
543
543
544
544
545 class obsstore(object):
545 class obsstore(object):
546 """Store obsolete markers
546 """Store obsolete markers
547
547
    Markers can be accessed with several mappings:
    - predecessors[x] -> set(markers on predecessors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on predecessors edges of children(x))
    """

    fields = (b'prec', b'succs', b'flag', b'meta', b'date', b'parents')
    # prec:    nodeid, predecessors changesets
    # succs:   tuple of nodeid, successor changesets (0-N length)
    # flag:    integer, flag field carrying modifier for the markers (see doc)
    # meta:    binary blob in UTF-8, encoded metadata dictionary
    # date:    (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of predecessors
    #          None is used when no data has been recorded

    def __init__(self, repo, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related sets
        self.caches = {}
        self.svfs = svfs
        self.repo = repo
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
+        from . import statichttprepo
+
+        if isinstance(self.repo, statichttprepo.statichttprepository):
+            # If repo is accessed via static HTTP, then we can't use os.stat()
+            # to just peek at the file size.
+            return len(self._data) > 1
        if not self._cached('_all'):
            try:
                return self.svfs.stat(b'obsstore').st_size > 1
            except OSError as inst:
-                if inst.errno not in (errno.ENOENT, errno.EINVAL):
+                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
        return bool(self._all)

    __bool__ = __nonzero__
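
    # A note on the change above: self._data is svfs.tryread(b'obsstore')
    # (see the _data propertycache below), so len(self._data) > 1 asks the
    # same question as st_size > 1: is there anything after the 1-byte
    # version header? The removed EINVAL special case presumably existed
    # because stat() was not usable over static HTTP.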

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete markers are always on."""
        return self._readonly

    def create(
        self,
        transaction,
        prec,
        succs=(),
        flag=0,
        parents=None,
        date=None,
        metadata=None,
        ui=None,
    ):
608 """obsolete: add a new obsolete marker
614 """obsolete: add a new obsolete marker
609
615
610 * ensuring it is hashable
616 * ensuring it is hashable
611 * check mandatory metadata
617 * check mandatory metadata
612 * encode metadata
618 * encode metadata
613
619
614 If you are a human writing code creating marker you want to use the
620 If you are a human writing code creating marker you want to use the
615 `createmarkers` function in this module instead.
621 `createmarkers` function in this module instead.
616
622
617 return True if a new marker have been added, False if the markers
623 return True if a new marker have been added, False if the markers
618 already existed (no op).
624 already existed (no op).
619 """
625 """
620 flag = int(flag)
626 flag = int(flag)
621 if metadata is None:
627 if metadata is None:
622 metadata = {}
628 metadata = {}
623 if date is None:
629 if date is None:
624 if b'date' in metadata:
630 if b'date' in metadata:
625 # as a courtesy for out-of-tree extensions
631 # as a courtesy for out-of-tree extensions
626 date = dateutil.parsedate(metadata.pop(b'date'))
632 date = dateutil.parsedate(metadata.pop(b'date'))
627 elif ui is not None:
633 elif ui is not None:
628 date = ui.configdate(b'devel', b'default-date')
634 date = ui.configdate(b'devel', b'default-date')
629 if date is None:
635 if date is None:
630 date = dateutil.makedate()
636 date = dateutil.makedate()
631 else:
637 else:
632 date = dateutil.makedate()
638 date = dateutil.makedate()
633 if flag & usingsha256:
639 if flag & usingsha256:
634 if len(prec) != 32:
640 if len(prec) != 32:
635 raise ValueError(prec)
641 raise ValueError(prec)
636 for succ in succs:
642 for succ in succs:
637 if len(succ) != 32:
643 if len(succ) != 32:
638 raise ValueError(succ)
644 raise ValueError(succ)
639 else:
645 else:
640 if len(prec) != 20:
646 if len(prec) != 20:
641 raise ValueError(prec)
647 raise ValueError(prec)
642 for succ in succs:
648 for succ in succs:
643 if len(succ) != 20:
649 if len(succ) != 20:
644 raise ValueError(succ)
650 raise ValueError(succ)
645 if prec in succs:
651 if prec in succs:
646 raise ValueError(
652 raise ValueError(
647 'in-marker cycle with %s' % pycompat.sysstr(hex(prec))
653 'in-marker cycle with %s' % pycompat.sysstr(hex(prec))
648 )
654 )
649
655
650 metadata = tuple(sorted(pycompat.iteritems(metadata)))
656 metadata = tuple(sorted(pycompat.iteritems(metadata)))
651 for k, v in metadata:
657 for k, v in metadata:
652 try:
658 try:
653 # might be better to reject non-ASCII keys
659 # might be better to reject non-ASCII keys
654 k.decode('utf-8')
660 k.decode('utf-8')
655 v.decode('utf-8')
661 v.decode('utf-8')
656 except UnicodeDecodeError:
662 except UnicodeDecodeError:
657 raise error.ProgrammingError(
663 raise error.ProgrammingError(
658 b'obsstore metadata must be valid UTF-8 sequence '
664 b'obsstore metadata must be valid UTF-8 sequence '
659 b'(key = %r, value = %r)'
665 b'(key = %r, value = %r)'
660 % (pycompat.bytestr(k), pycompat.bytestr(v))
666 % (pycompat.bytestr(k), pycompat.bytestr(v))
661 )
667 )
662
668
663 marker = (bytes(prec), tuple(succs), flag, metadata, date, parents)
669 marker = (bytes(prec), tuple(succs), flag, metadata, date, parents)
664 return bool(self.add(transaction, [marker]))
670 return bool(self.add(transaction, [marker]))
665
671
666 def add(self, transaction, markers):
672 def add(self, transaction, markers):
667 """Add new markers to the store
673 """Add new markers to the store
668
674
        Take care of filtering duplicates.
        Return the number of new markers."""
        if self._readonly:
            raise error.Abort(
                _(b'creating obsolete markers is not enabled on this repo')
            )
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs(b'obsstore', b'ab')
            try:
                offset = f.tell()
                transaction.add(b'obsstore', offset)
                # offset == 0: new file - add the version header
                data = b''.join(encodemarkers(new, offset == 0, self._version))
                f.write(data)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            addedmarkers = transaction.changes.get(b'obsmarkers')
            if addedmarkers is not None:
                addedmarkers.update(new)
            self._addmarkers(new, data)
            # new markers *may* have changed several sets. Invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get(b'new_obsmarkers', b'0'))
        transaction.hookargs[b'new_obsmarkers'] = b'%d' % (previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        return self.svfs.tryread(b'obsstore')

    @propertycache
    def _version(self):
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(self.repo, markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def predecessors(self):
        predecessors = {}
        _addpredecessors(predecessors, self._all)
        return predecessors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        return attr in self.__dict__

    def _addmarkers(self, markers, rawdata):
        markers = list(markers)  # to allow repeated iteration
        self._data = self._data + rawdata
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('predecessors'):
            _addpredecessors(self.predecessors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(self.repo, markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

769 "relevant" to a set of nodes mean:
775 "relevant" to a set of nodes mean:
770
776
771 - marker that use this changeset as successor
777 - marker that use this changeset as successor
772 - prune marker of direct children on this changeset
778 - prune marker of direct children on this changeset
773 - recursive application of the two rules on predecessors of these
779 - recursive application of the two rules on predecessors of these
774 markers
780 markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.predecessors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = {m[0] for m in direct}
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers


def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    defaultformat = ui.configint(b'format', b'obsstore-version')
    # rely on obsstore class default when possible.
    kwargs = {}
    if defaultformat is not None:
        kwargs['defaultformat'] = defaultformat
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo, repo.svfs, readonly=readonly, **kwargs)
    if store and readonly:
        ui.warn(
            _(b'obsolete feature not enabled but %i markers found!\n')
            % len(list(store))
        )
    return store


def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None
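
# For illustration: with the fm0/fm1 formats registered above,
# commonversion([2, 1, 0]) returns 1 and commonversion([5]) returns None.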


# arbitrarily picked to fit into the 8K limit from HTTP servers
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300


def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if len(nextdata) + currentlen > _maxpayload:
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = b''.join([_pack(b'>B', _fm0version)] + part)
        keys[b'dump%i' % idx] = util.b85encode(data)
    return keys
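
# For illustration: a store whose encoded markers fit in one chunk yields a
# single b'dump0' key; larger stores spill into b'dump1', b'dump2', etc., so
# each base85-encoded value stays under the payload limit noted above.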


def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(sorted(repo.obsstore))


def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith(b'dump'):
        repo.ui.warn(_(b'unknown key: %r') % key)
        return False
    if old:
        repo.ui.warn(_(b'unexpected old value for %r') % key)
        return False
    data = util.b85decode(new)
    with repo.lock(), repo.transaction(b'pushkey: obsolete markers') as tr:
        repo.obsstore.mergemarkers(tr, data)
        repo.invalidatevolatilesets()
        return True


# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}


def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""

    def decorator(func):
        if name in cachefuncs:
            msg = b"duplicated registration for volatileset '%s' (existing: %r)"
            raise error.ProgrammingError(msg % (name, cachefuncs[name]))
        cachefuncs[name] = func
        return func

    return decorator


def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    with util.timedcm('getrevs %s', name):
        if not repo.obsstore:
            return frozenset()
        if name not in repo.obsstore.caches:
            repo.obsstore.caches[name] = cachefuncs[name](repo)
        return repo.obsstore.caches[name]

# To be simple we need to invalidate obsolescence caches when:
#
# - a new changeset is added
# - public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
920 """Remove all obsolescence related cache from a repo
926 """Remove all obsolescence related cache from a repo
921
927
922 This remove all cache in obsstore is the obsstore already exist on the
928 This remove all cache in obsstore is the obsstore already exist on the
923 repo.
929 repo.
924
930
925 (We could be smarter here given the exact event that trigger the cache
931 (We could be smarter here given the exact event that trigger the cache
926 clearing)"""
932 clearing)"""
927 # only clear cache is there is obsstore data in this repo
933 # only clear cache is there is obsstore data in this repo
    if b'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()


def _mutablerevs(repo):
933 """the set of mutable revision in the repository"""
939 """the set of mutable revision in the repository"""
    return repo._phasecache.getrevset(repo, phases.mutablephases)


@cachefor(b'obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    getnode = repo.changelog.node
    notpublic = _mutablerevs(repo)
    isobs = repo.obsstore.successors.__contains__
    return frozenset(r for r in notpublic if isobs(getnode(r)))


@cachefor(b'orphan')
def _computeorphanset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    pfunc = repo.changelog.parentrevs
    mutable = _mutablerevs(repo)
    obsolete = getrevs(repo, b'obsolete')
    others = mutable - obsolete
    unstable = set()
    for r in sorted(others):
955 # A rev is unstable if one of its parent is obsolete or unstable
961 # A rev is unstable if one of its parent is obsolete or unstable
956 # this works since we traverse following growing rev order
962 # this works since we traverse following growing rev order
957 for p in pfunc(r):
963 for p in pfunc(r):
958 if p in obsolete or p in unstable:
964 if p in obsolete or p in unstable:
959 unstable.add(r)
965 unstable.add(r)
960 break
966 break
961 return frozenset(unstable)
967 return frozenset(unstable)
962
968
963
969
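# Editor's note: a self-contained sketch of the same propagation on a toy
# DAG (hypothetical data, not part of upstream). Parent revs are always
# smaller than child revs, so a single increasing-order pass suffices:
#
#     parents = {0: (), 1: (0,), 2: (1,), 3: (2,)}
#     obsolete = {1}
#     unstable = set()
#     for r in sorted(parents):
#         if any(p in obsolete or p in unstable for p in parents[r]):
#             unstable.add(r)
#     assert unstable == {2, 3}
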
@cachefor(b'suspended')
def _computesuspendedset(repo):
    """the set of obsolete revisions with non-obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, b'orphan'))
    return frozenset(r for r in getrevs(repo, b'obsolete') if r in suspended)

@cachefor(b'extinct')
def _computeextinctset(repo):
    """the set of obsolete revisions without non-obsolete descendants"""
    return getrevs(repo, b'obsolete') - getrevs(repo, b'suspended')

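# Editor's note (sketch): the two sets above partition the obsolete set:
# every obsolete rev is either suspended (it still has live descendants) or
# extinct. In other words, assuming a `repo`:
#
#     obs = getrevs(repo, b'obsolete')
#     assert obs == getrevs(repo, b'suspended') | getrevs(repo, b'extinct')
#     assert not (getrevs(repo, b'suspended') & getrevs(repo, b'extinct'))
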
@cachefor(b'phasedivergent')
def _computephasedivergentset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # utility bindings (avoid attribute lookups in the loop)
    phase = repo._phasecache.phase  # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.index.get_rev
    tonode = cl.node
    obsstore = repo.obsstore
    for rev in repo.revs(b'(not public()) and (not obsolete())'):
        # We only evaluate mutable, non-obsolete revisions
        node = tonode(rev)
        # (future) A cache of predecessors may be worthwhile if splits are
        # very common
        for pnode in obsutil.allpredecessors(
            obsstore, [node], ignoreflags=bumpedfix
        ):
            prev = torev(pnode)  # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public predecessor
                bumped.add(rev)
                break  # Next draft!
    return frozenset(bumped)

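# Editor's note (sketch): phase divergence typically appears when a draft
# changeset is rewritten locally while its original is made public
# elsewhere. The cached set backs the `phasedivergent()` revset:
#
#     hg log -r 'phasedivergent()'
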
@cachefor(b'contentdivergent')
def _computecontentdivergentset(repo):
    """the set of revs that compete to be the final successors of some revision."""
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    tonode = repo.changelog.node
    for rev in repo.revs(b'(not public()) - obsolete()'):
        node = tonode(rev)
        mark = obsstore.predecessors.get(node, ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue  # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                obsutil.successorssets(repo, prec, cache=newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(rev)
                break
            toprocess.update(obsstore.predecessors.get(prec, ()))
    return frozenset(divergent)

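# Editor's note (sketch): content divergence arises when independent
# rewrites give one predecessor several live successors. With markers
# (A, (B,)) and (A, (C,)), successorssets for A yields two non-empty sets,
# so both B and C end up in the `contentdivergent` set.
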
def makefoldid(relation, user):
    """Build a short identifier shared by all markers of one fold operation"""
    folddigest = hashutil.sha1(user)
    for p in relation[0] + relation[1]:
        folddigest.update(b'%d' % p.rev())
        folddigest.update(p.node())
    # Since fold only has to compete against fold for the same successors, it
    # seems fine to use a small ID. Smaller IDs save space.
    return hex(folddigest.digest())[:8]

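# Editor's note: a self-contained sketch of the short-digest idea above
# (hypothetical values; real callers pass changectx objects):
#
#     import hashlib
#     d = hashlib.sha1(b'alice <alice@example.com>')
#     d.update(b'42')            # a predecessor rev number
#     d.update(b'\x12' * 20)     # ... and its 20-byte node
#     foldid = d.hexdigest()[:8]  # 8 hex chars suffice to disambiguate folds
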
def createmarkers(
    repo, relations, flag=0, date=None, metadata=None, operation=None
):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of ((<old>,...), (<new>, ...)[,{metadata}])
    tuples. `old` and `new` are changectxs. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function.
    Any string values in metadata must be UTF-8 bytes.

    Trying to obsolete a public changeset will raise an exception.

    The current user and date are used unless specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if b'user' not in metadata:
        luser = (
            repo.ui.config(b'devel', b'user.obsmarker') or repo.ui.username()
        )
        metadata[b'user'] = encoding.fromlocal(luser)

    # Operation metadata handling
    useoperation = repo.ui.configbool(
        b'experimental', b'evolution.track-operation'
    )
    if useoperation and operation:
        metadata[b'operation'] = operation

    # Effect flag metadata handling
    saveeffectflag = repo.ui.configbool(
        b'experimental', b'evolution.effect-flags'
    )

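    # Editor's note (sketch): both knobs above are booleans read from the
    # configuration, e.g. in an hgrc (values hypothetical):
    #
    #     [experimental]
    #     evolution.track-operation = yes
    #     evolution.effect-flags = yes
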
    with repo.transaction(b'add-obsolescence-marker') as tr:
        markerargs = []
        for rel in relations:
            predecessors = rel[0]
            if not isinstance(predecessors, tuple):
                # preserve compat with old API until all callers are migrated
                predecessors = (predecessors,)
            if len(predecessors) > 1 and len(rel[1]) != 1:
                msg = b'Fold markers can only have 1 successor, not %d'
                raise error.ProgrammingError(msg % len(rel[1]))
            foldid = None
            foldsize = len(predecessors)
            if foldsize > 1:
                foldid = makefoldid(rel, metadata[b'user'])
            for foldidx, prec in enumerate(predecessors, 1):
                sucs = rel[1]
                localmetadata = metadata.copy()
                if len(rel) > 2:
                    localmetadata.update(rel[2])
                if foldid is not None:
                    localmetadata[b'fold-id'] = foldid
                    localmetadata[b'fold-idx'] = b'%d' % foldidx
                    localmetadata[b'fold-size'] = b'%d' % foldsize

                if not prec.mutable():
                    raise error.Abort(
                        _(b"cannot obsolete public changeset: %s") % prec,
                        hint=b"see 'hg help phases' for details",
                    )
                nprec = prec.node()
                nsucs = tuple(s.node() for s in sucs)
                npare = None
                if not nsucs:
                    npare = tuple(p.node() for p in prec.parents())
                if nprec in nsucs:
                    raise error.Abort(
                        _(b"changeset %s cannot obsolete itself") % prec
                    )

                # The effect flag can differ from one relation to the next
                if saveeffectflag:
                    # The effect flag is saved in a versioned field name for
                    # future evolution
                    effectflag = obsutil.geteffectflag(prec, sucs)
                    localmetadata[obsutil.EFFECTFLAGFIELD] = b"%d" % effectflag

                # Creating the marker causes the hidden cache to become
                # invalid, which causes recomputation when we ask for
                # prec.parents() above, resulting in O(n^2) behavior. So let's
                # prepare all of the args first, then create the markers.
                markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(
                tr,
                nprec,
                nsucs,
                flag,
                parents=npare,
                date=date,
                metadata=localmetadata,
                ui=repo.ui,
            )
            repo.filteredrevcache.clear()