unstable: use the `_mutablerevs` function when computing phase divergent...
marmoute
r52015:5f9af842 default
@@ -1,1153 +1,1154 b''
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "predecessor" and possible
replacements are called "successors". Markers that used changeset X as
a predecessor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "predecessor markers of Y" because they hold
information about the predecessors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

    (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. The marker format depends on the
version. See the comment associated with each format for details.

"""

import binascii
import struct
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
)
from . import (
    encoding,
    error,
    obsutil,
    phases,
    policy,
    pycompat,
    util,
)
from .utils import (
    dateutil,
    hashutil,
)

parsers = policy.importmod('parsers')

_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# Options for obsolescence
createmarkersopt = b'createmarkers'
allowunstableopt = b'allowunstable'
allowdivergenceopt = b'allowdivergence'
exchangeopt = b'exchange'


def _getoptionvalue(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    configkey = b'evolution.%s' % option
    newconfig = repo.ui.configbool(b'experimental', configkey)

    # Return the value only if defined
    if newconfig is not None:
        return newconfig

    # Fallback on generic option
    try:
        return repo.ui.configbool(b'experimental', b'evolution')
    except (error.ConfigError, AttributeError):
        # Fallback on old-fashioned config
        # inconsistent config: experimental.evolution
        result = set(repo.ui.configlist(b'experimental', b'evolution'))

        if b'all' in result:
            return True

        # Temporary hack for next check
        newconfig = repo.ui.config(b'experimental', b'evolution.createmarkers')
        if newconfig:
            result.add(b'createmarkers')

        return option in result


def getoptions(repo):
    """Returns a dict showing the state of obsolescence features."""

    createmarkersvalue = _getoptionvalue(repo, createmarkersopt)
    if createmarkersvalue:
        unstablevalue = _getoptionvalue(repo, allowunstableopt)
        divergencevalue = _getoptionvalue(repo, allowdivergenceopt)
        exchangevalue = _getoptionvalue(repo, exchangeopt)
    else:
        # if we cannot create obsolescence markers, we shouldn't exchange them
        # or perform operations that lead to instability or divergence
        unstablevalue = False
        divergencevalue = False
        exchangevalue = False

    return {
        createmarkersopt: createmarkersvalue,
        allowunstableopt: unstablevalue,
        allowdivergenceopt: divergencevalue,
        exchangeopt: exchangevalue,
    }


def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    return getoptions(repo)[option]

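# Illustrative example (not part of the original module): with a repository
# configured as
#
#   [experimental]
#   evolution.createmarkers = yes
#
# isenabled(repo, createmarkersopt) returns True; conversely, when
# createmarkers is disabled, getoptions(repo) forces allowunstableopt,
# allowdivergenceopt and exchangeopt to False.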

# Creating aliases for marker flags because evolve extension looks for
# bumpedfix in obsolete.py
bumpedfix = obsutil.bumpedfix
usingsha256 = obsutil.usingsha256

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = b'>BIB20s'
_fm0node = b'20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)

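# Consistency check (illustrative, not part of the original module): the v0
# fixed part '>BIB20s' packs to 1 + 4 + 1 + 20 == 26 bytes, so _fm0fsize is
# 26 and each node consumes _fm0fnodesize == 20 bytes.
assert _fm0fsize == 26 and _fm0fnodesize == 20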

def _fm0readmarkers(data, off, stop):
    # Loop on markers
    while off < stop:
        # read fixed part
        cur = data[off : off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = _fm0fnodesize * numsuc
            cur = data[off : off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off : off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(
                _(
                    b'parsing obsolete marker: metadata is too '
                    b'short, %d bytes expected, got %d'
                )
                % (mdsize, len(metadata))
            )
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop(b'date', b'0 0').split(b' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0.0, 0)
        parents = None
        if b'p2' in metadata:
            parents = (metadata.pop(b'p1', None), metadata.pop(b'p2', None))
        elif b'p1' in metadata:
            parents = (metadata.pop(b'p1', None),)
        elif b'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except binascii.Error:
                # if content cannot be translated to a nodeid, drop the data.
                parents = None

        metadata = tuple(sorted(metadata.items()))

        yield (pre, sucs, flags, metadata, date, parents)


def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_(b'cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata[b'date'] = b'%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata[b'p0'] = b''
        for i, p in enumerate(parents, 1):
            metadata[b'p%i' % i] = hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata


def _fm0encodemeta(meta):
    """Return an encoded metadata blob from a string-to-string mapping.

    Assume no ':' in keys and no '\0' in either keys or values."""
    for key, value in meta.items():
        if b':' in key or b'\0' in key:
            raise ValueError(b"':' and '\\0' are forbidden in metadata keys")
        if b'\0' in value:
            raise ValueError(b"'\\0' is forbidden in metadata values")
    return b'\0'.join([b'%s:%s' % (k, meta[k]) for k in sorted(meta)])


def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split(b'\0'):
        if l:
            key, value = l.split(b':', 1)
            d[key] = value
    return d

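# A minimal sketch (not part of the original module) showing how the version
# 0 helpers above fit together: encode one prune marker, then parse it back.
# The node id below is a hypothetical placeholder.
def _fm0roundtripexample():
    pre = b'\x11' * 20  # hypothetical 20-byte predecessor node id
    marker = (pre, (), 0, (), (0.0, 0), None)  # prune marker: no successors
    data = _fm0encodeonemarker(marker)
    decoded = next(_fm0readmarkers(data, 0, len(data)))
    assert decoded[0] == pre and decoded[1] == ()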

## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#   0: parents data stored but no parent,
#   1: one parent stored,
#   2: two parents stored,
#   3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: predecessor changeset identifier.
#
# - N*(20 or 32) bytes: successor changeset identifiers.
#
# - P*(20 or 32) bytes: parents of the predecessor changeset.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = b'>IdhHBBB'
_fm1nodesha1 = b'20s'
_fm1nodesha256 = b'32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1metapair = b'BB'
_fm1metapairsize = _calcsize(_fm1metapair)

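# Consistency check (illustrative, not part of the original module): the v1
# fixed part '>IdhHBBB' packs to 4 + 8 + 2 + 2 + 1 + 1 + 1 == 19 bytes.
assert _fm1fsize == 19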

def _fm1purereadmarkers(data, off, stop):
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    ufixed = struct.Struct(_fm1fixed).unpack

    while off < stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta = ufixed(data[off:o1])

        if flags & sha2flag:
            nodefmt = sha2fmt
            nodesize = sha2size
        else:
            nodefmt = sha1fmt
            nodesize = sha1size

        (prec,) = unpack(nodefmt, data[o1 : o1 + nodesize])
        o1 += nodesize

        # read 0 or more successors
        if numsuc == 1:
            o2 = o1 + nodesize
            sucs = (data[o1:o2],)
        else:
            o2 = o1 + nodesize * numsuc
            sucs = unpack(nodefmt * numsuc, data[o1:o2])

        # read parents
        if numpar == noneflag:
            o3 = o2
            parents = None
        elif numpar == 1:
            o3 = o2 + nodesize
            parents = (data[o2:o3],)
        else:
            o3 = o2 + nodesize * numpar
            parents = unpack(nodefmt * numpar, data[o2:o3])

        # read metadata
        off = o3 + metasize * nummeta
        metapairsize = unpack(b'>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in range(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)


def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = 1 + numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1] // 60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        if lk > 255:
            msg = (
                b'obsstore metadata key cannot be longer than 255 bytes'
                b' (key "%s" is %u bytes)'
            ) % (key, lk)
            raise error.ProgrammingError(msg)
        if lv > 255:
            msg = (
                b'obsstore metadata value cannot be longer than 255 bytes'
                b' (value "%s" for key "%s" is %u bytes)'
            ) % (value, key, lv)
            raise error.ProgrammingError(msg)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return b''.join(data)


def _fm1readmarkers(data, off, stop):
    native = getattr(parsers, 'fm1readmarkers', None)
    if not native:
        return _fm1purereadmarkers(data, off, stop)
    return native(data, off, stop)

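# A minimal sketch (not part of the original module): round-trip one marker
# through the version 1 encoder and the pure Python parser. Node ids and
# metadata are hypothetical placeholders.
def _fm1roundtripexample():
    pre, suc = b'\x11' * 20, b'\x22' * 20  # hypothetical sha1 node ids
    marker = (pre, (suc,), 0, ((b'user', b'alice'),), (0.0, 0), None)
    data = _fm1encodeonemarker(marker)
    decoded = next(_fm1purereadmarkers(data, 0, len(data)))
    assert decoded == marker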

# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {
    _fm0version: (_fm0readmarkers, _fm0encodeonemarker),
    _fm1version: (_fm1readmarkers, _fm1encodeonemarker),
}


def _readmarkerversion(data):
    return _unpack(b'>B', data[0:1])[0]


@util.nogc
def _readmarkers(data, off=None, stop=None):
    """Read and enumerate markers from raw data"""
    diskversion = _readmarkerversion(data)
    if not off:
        off = 1  # skip 1 byte version number
    if stop is None:
        stop = len(data)
    if diskversion not in formats:
        msg = _(b'parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    return diskversion, formats[diskversion][0](data, off, stop)


def encodeheader(version=_fm0version):
    return _pack(b'>B', version)


def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for marker in markers:
        yield encodeone(marker)

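# Sketch (not part of the original module): build a complete obsstore blob,
# version header included, and feed it back through _readmarkers.
def _streamroundtripexample():
    pre = b'\x11' * 20  # hypothetical node id
    marker = (pre, (), 0, (), (0.0, 0), None)
    blob = b''.join(encodemarkers([marker], addheader=True, version=_fm1version))
    version, markers = _readmarkers(blob)
    assert version == _fm1version
    assert next(iter(markers))[0] == pre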

@util.nogc
def _addsuccessors(successors, markers):
    for mark in markers:
        successors.setdefault(mark[0], set()).add(mark)


@util.nogc
def _addpredecessors(predecessors, markers):
    for mark in markers:
        for suc in mark[1]:
            predecessors.setdefault(suc, set()).add(mark)


@util.nogc
def _addchildren(children, markers):
    for mark in markers:
        parents = mark[5]
        if parents is not None:
            for p in parents:
                children.setdefault(p, set()).add(mark)


def _checkinvalidmarkers(repo, markers):
    """search for markers with invalid data and raise an error if needed

    Exists as a separate function to allow the evolve extension a more
    subtle handling.
    """
    for mark in markers:
        if repo.nullid in mark[1]:
            raise error.Abort(
                _(
                    b'bad obsolescence marker detected: '
                    b'invalid successors nullid'
                )
            )


class obsstore:
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - predecessors[x] -> set(markers on predecessors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on predecessors edges of children(x))
    """

    fields = (b'prec', b'succs', b'flag', b'meta', b'date', b'parents')
    # prec: nodeid, predecessor changeset
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob in UTF-8, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of predecessors
    #          None is used when no data has been recorded

    def __init__(self, repo, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related data
        self.caches = {}
        self.svfs = svfs
        self._repo = weakref.ref(repo)
        self._defaultformat = defaultformat
        self._readonly = readonly

    @property
    def repo(self):
        r = self._repo()
        if r is None:
            msg = "using the obsstore of a deallocated repo"
            raise error.ProgrammingError(msg)
        return r

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        from . import statichttprepo

        if isinstance(self.repo, statichttprepo.statichttprepository):
            # If repo is accessed via static HTTP, then we can't use os.stat()
            # to just peek at the file size.
            return len(self._data) > 1
        if not self._cached('_all'):
            try:
                return self.svfs.stat(b'obsstore').st_size > 1
            except FileNotFoundError:
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
                pass
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete markers are always on."""
        return self._readonly

    def create(
        self,
        transaction,
        prec,
        succs=(),
        flag=0,
        parents=None,
        date=None,
        metadata=None,
        ui=None,
    ):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating markers, you want to use the
        `createmarkers` function in this module instead.

        Return True if a new marker has been added, False if the marker
        already existed (no-op).
        """
        flag = int(flag)
        if metadata is None:
            metadata = {}
        if date is None:
            if b'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = dateutil.parsedate(metadata.pop(b'date'))
            elif ui is not None:
                date = ui.configdate(b'devel', b'default-date')
                if date is None:
                    date = dateutil.makedate()
            else:
                date = dateutil.makedate()
        if flag & usingsha256:
            if len(prec) != 32:
                raise ValueError(prec)
            for succ in succs:
                if len(succ) != 32:
                    raise ValueError(succ)
        else:
            if len(prec) != 20:
                raise ValueError(prec)
            for succ in succs:
                if len(succ) != 20:
                    raise ValueError(succ)
        if prec in succs:
            raise ValueError('in-marker cycle with %s' % prec.hex())

        metadata = tuple(sorted(metadata.items()))
        for k, v in metadata:
            try:
                # might be better to reject non-ASCII keys
                k.decode('utf-8')
                v.decode('utf-8')
            except UnicodeDecodeError:
                raise error.ProgrammingError(
                    b'obsstore metadata must be valid UTF-8 sequence '
                    b'(key = %r, value = %r)'
                    % (pycompat.bytestr(k), pycompat.bytestr(v))
                )

        marker = (bytes(prec), tuple(succs), flag, metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Takes care of filtering duplicates.
        Returns the number of new markers."""
        if self._readonly:
            raise error.Abort(
                _(b'creating obsolete markers is not enabled on this repo')
            )
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs(b'obsstore', b'ab')
            try:
                offset = f.tell()
                transaction.add(b'obsstore', offset)
                # offset == 0: new file - add the version header
                data = b''.join(encodemarkers(new, offset == 0, self._version))
                f.write(data)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            addedmarkers = transaction.changes.get(b'obsmarkers')
            if addedmarkers is not None:
                addedmarkers.update(new)
            self._addmarkers(new, data)
            # new markers *may* have changed several sets. invalidate the caches.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get(b'new_obsmarkers', b'0'))
        transaction.hookargs[b'new_obsmarkers'] = b'%d' % (previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        return self.svfs.tryread(b'obsstore')

    @propertycache
    def _version(self):
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(self.repo, markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def predecessors(self):
        predecessors = {}
        _addpredecessors(predecessors, self._all)
        return predecessors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        return attr in self.__dict__

    def _addmarkers(self, markers, rawdata):
        markers = list(markers)  # to allow repeated iteration
        self._data = self._data + rawdata
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('predecessors'):
            _addpredecessors(self.predecessors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(self.repo, markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use one of these nodes as a successor
        - prune markers of direct children of these nodes
        - recursive application of the two rules on predecessors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.predecessors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = {m[0] for m in direct}
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers

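# Illustrative walk-through (not part of the original module): if B rewrites
# A (marker (A, (B,))) and C rewrites B (marker (B, (C,))), then
# relevantmarkers({C}) returns both markers: the predecessor marker of C
# leads to node B, whose own predecessor marker then leads to A.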

def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    defaultformat = ui.configint(b'format', b'obsstore-version')
    # rely on obsstore class default when possible.
    kwargs = {}
    if defaultformat is not None:
        kwargs['defaultformat'] = defaultformat
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo, repo.svfs, readonly=readonly, **kwargs)
    if store and readonly:
        ui.warn(
            _(b'obsolete feature not enabled but %i markers found!\n')
            % len(list(store))
        )
    return store


def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for the highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None

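# For instance (illustrative values): with local formats {0, 1},
# commonversion([0, 1]) returns 1 and commonversion([5]) returns None.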

# arbitrarily picked to fit into the 8K limit from HTTP servers
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300


def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if len(nextdata) + currentlen > _maxpayload:
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = b''.join([_pack(b'>B', _fm0version)] + part)
        keys[b'dump%i' % idx] = util.b85encode(data)
    return keys

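# Sketch (not part of the original module): markers are flattened to version
# 0, base85-encoded and spread over 'dump0', 'dump1', ... pushkey entries.
def _pushkeyexample():
    pre = b'\x11' * 20  # hypothetical node id
    keys = _pushkeyescape([(pre, (), 0, (), (0.0, 0), None)])
    assert list(keys) == [b'dump0']
    assert util.b85decode(keys[b'dump0'])[:1] == encodeheader(_fm0version)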
868
868
869 def listmarkers(repo):
869 def listmarkers(repo):
870 """List markers over pushkey"""
870 """List markers over pushkey"""
871 if not repo.obsstore:
871 if not repo.obsstore:
872 return {}
872 return {}
873 return _pushkeyescape(sorted(repo.obsstore))
873 return _pushkeyescape(sorted(repo.obsstore))
874
874
875
875
876 def pushmarker(repo, key, old, new):
876 def pushmarker(repo, key, old, new):
877 """Push markers over pushkey"""
877 """Push markers over pushkey"""
878 if not key.startswith(b'dump'):
878 if not key.startswith(b'dump'):
879 repo.ui.warn(_(b'unknown key: %r') % key)
879 repo.ui.warn(_(b'unknown key: %r') % key)
880 return False
880 return False
881 if old:
881 if old:
882 repo.ui.warn(_(b'unexpected old value for %r') % key)
882 repo.ui.warn(_(b'unexpected old value for %r') % key)
883 return False
883 return False
884 data = util.b85decode(new)
884 data = util.b85decode(new)
885 with repo.lock(), repo.transaction(b'pushkey: obsolete markers') as tr:
885 with repo.lock(), repo.transaction(b'pushkey: obsolete markers') as tr:
886 repo.obsstore.mergemarkers(tr, data)
886 repo.obsstore.mergemarkers(tr, data)
887 repo.invalidatevolatilesets()
887 repo.invalidatevolatilesets()
888 return True
888 return True
889
889
890
890
891 # mapping of 'set-name' -> <function to compute this set>
891 # mapping of 'set-name' -> <function to compute this set>
892 cachefuncs = {}
892 cachefuncs = {}
893
893
894
894
895 def cachefor(name):
895 def cachefor(name):
896 """Decorator to register a function as computing the cache for a set"""
896 """Decorator to register a function as computing the cache for a set"""
897
897
898 def decorator(func):
898 def decorator(func):
899 if name in cachefuncs:
899 if name in cachefuncs:
900 msg = b"duplicated registration for volatileset '%s' (existing: %r)"
900 msg = b"duplicated registration for volatileset '%s' (existing: %r)"
901 raise error.ProgrammingError(msg % (name, cachefuncs[name]))
901 raise error.ProgrammingError(msg % (name, cachefuncs[name]))
902 cachefuncs[name] = func
902 cachefuncs[name] = func
903 return func
903 return func
904
904
905 return decorator
905 return decorator
906
906
907
907
908 def getrevs(repo, name):
908 def getrevs(repo, name):
909 """Return the set of revision that belong to the <name> set
909 """Return the set of revision that belong to the <name> set
910
910
911 Such access may compute the set and cache it for future use"""
911 Such access may compute the set and cache it for future use"""
912 repo = repo.unfiltered()
912 repo = repo.unfiltered()
913 with util.timedcm('getrevs %s', name):
913 with util.timedcm('getrevs %s', name):
914 if not repo.obsstore:
914 if not repo.obsstore:
915 return frozenset()
915 return frozenset()
916 if name not in repo.obsstore.caches:
916 if name not in repo.obsstore.caches:
917 repo.obsstore.caches[name] = cachefuncs[name](repo)
917 repo.obsstore.caches[name] = cachefuncs[name](repo)
918 return repo.obsstore.caches[name]
918 return repo.obsstore.caches[name]
919
919
920
920
921 # To be simple we need to invalidate obsolescence cache when:
921 # To be simple we need to invalidate obsolescence cache when:
922 #
922 #
923 # - new changeset is added:
923 # - new changeset is added:
924 # - public phase is changed
924 # - public phase is changed
925 # - obsolescence marker are added
925 # - obsolescence marker are added
926 # - strip is used a repo
926 # - strip is used a repo
927 def clearobscaches(repo):
927 def clearobscaches(repo):
928 """Remove all obsolescence related cache from a repo
928 """Remove all obsolescence related cache from a repo
929
929
930 This remove all cache in obsstore is the obsstore already exist on the
930 This remove all cache in obsstore is the obsstore already exist on the
931 repo.
931 repo.
932
932
933 (We could be smarter here given the exact event that trigger the cache
933 (We could be smarter here given the exact event that trigger the cache
934 clearing)"""
934 clearing)"""
935 # only clear cache is there is obsstore data in this repo
935 # only clear cache is there is obsstore data in this repo
936 if b'obsstore' in repo._filecache:
936 if b'obsstore' in repo._filecache:
937 repo.obsstore.caches.clear()
937 repo.obsstore.caches.clear()
938
938
939
939
940 def _mutablerevs(repo):
940 def _mutablerevs(repo):
941 """the set of mutable revision in the repository"""
941 """the set of mutable revision in the repository"""
942 return repo._phasecache.getrevset(repo, phases.mutablephases)
942 return repo._phasecache.getrevset(repo, phases.mutablephases)
943
943
944
944
@cachefor(b'obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    getnode = repo.changelog.node
    notpublic = _mutablerevs(repo)
    isobs = repo.obsstore.successors.__contains__
    return frozenset(r for r in notpublic if isobs(getnode(r)))

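# A hedged restatement of the membership test above: a revision is obsolete
# iff it is mutable and its node is recorded as a predecessor in at least one
# marker (the ``successors`` index is keyed by predecessor node):
#
#     node = repo.changelog.node(rev)
#     is_obsolete = rev in _mutablerevs(repo) and node in repo.obsstore.successors
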
@cachefor(b'orphan')
def _computeorphanset(repo):
    """the set of non-obsolete revisions with obsolete parents"""
    pfunc = repo.changelog.parentrevs
    mutable = _mutablerevs(repo)
    obsolete = getrevs(repo, b'obsolete')
    others = mutable - obsolete
    unstable = set()
    for r in sorted(others):
        # A rev is unstable if one of its parents is obsolete or unstable.
        # This works because we traverse revisions in increasing rev order.
        for p in pfunc(r):
            if p in obsolete or p in unstable:
                unstable.add(r)
                break
    return frozenset(unstable)

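# Worked example on a hypothetical linear graph: if A is obsolete and has a
# mutable child B and grandchild C (both non-obsolete), the single
# increasing-rev pass first flags B (its parent A is in ``obsolete``), then C
# (its parent B is already in ``unstable``), so the orphan set is {B, C}.
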
@cachefor(b'suspended')
def _computesuspendedset(repo):
    """the set of obsolete revisions with non-obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, b'orphan'))
    return frozenset(r for r in getrevs(repo, b'obsolete') if r in suspended)

@cachefor(b'extinct')
def _computeextinctset(repo):
    """the set of obsolete revisions without non-obsolete descendants"""
    return getrevs(repo, b'obsolete') - getrevs(repo, b'suspended')

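# Derived relationship (it follows directly from the two definitions above):
# suspended and extinct partition the obsolete set, so for any repo:
#
#     obs = getrevs(repo, b'obsolete')
#     assert obs == getrevs(repo, b'suspended') | getrevs(repo, b'extinct')
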
@cachefor(b'phasedivergent')
def _computephasedivergentset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util functions (avoid attribute lookups in the loop)
    phase = repo._phasecache.phase  # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.index.get_rev
    tonode = cl.node
    obsstore = repo.obsstore
    candidates = sorted(_mutablerevs(repo) - getrevs(repo, b"obsolete"))
    for rev in candidates:
        # We only evaluate mutable, non-obsolete revisions
        node = tonode(rev)
        # (future) A cache of predecessors may be worth it if splits are
        # very common
        for pnode in obsutil.allpredecessors(
            obsstore, [node], ignoreflags=bumpedfix
        ):
            prev = torev(pnode)  # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public predecessor
                bumped.add(rev)
                break  # Next draft!
    return frozenset(bumped)

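# Note on the candidate computation above: the sorted set difference of the
# two cached sets replaces the previous revset query (the removed side of
# this change) with plain set arithmetic on data that is already cached:
#
#     # before: for rev in repo.revs(b'(not public()) and (not obsolete())'):
#     candidates = sorted(_mutablerevs(repo) - getrevs(repo, b"obsolete"))
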
@cachefor(b'contentdivergent')
def _computecontentdivergentset(repo):
    """the set of revs that compete to be the final successors of some revision."""
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    tonode = repo.changelog.node
    for rev in repo.revs(b'(not public()) - obsolete()'):
        node = tonode(rev)
        mark = obsstore.predecessors.get(node, ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue  # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                obsutil.successorssets(repo, prec, cache=newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(rev)
                break
            toprocess.update(obsstore.predecessors.get(prec, ()))
    return frozenset(divergent)

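# Worked example (hypothetical markers): if changeset X was rewritten
# independently as X1 and X2, the store holds both (X, (X1,)) and (X, (X2,)),
# so successorssets() for X yields two non-empty sets and both X1 and X2 end
# up in the content-divergent set.
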
def makefoldid(relation, user):

    folddigest = hashutil.sha1(user)
    for p in relation[0] + relation[1]:
        folddigest.update(b'%d' % p.rev())
        folddigest.update(p.node())
    # Since a fold only has to compete against folds for the same successors,
    # it seems fine to use a small ID. Smaller IDs save space.
    return hex(folddigest.digest())[:8]

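# Hedged usage sketch (the ctx names are hypothetical): for a fold of A and B
# into C, both markers of the fold would share the same 8-hex-digit id:
#
#     foldid = makefoldid(((ctxa, ctxb), (ctxc,)), b'alice@example.com')
#     assert len(foldid) == 8
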
def createmarkers(
    repo, relations, flag=0, date=None, metadata=None, operation=None
):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of ((<old>,...), (<new>, ...)[,{metadata}])
    tuples. `old` and `new` are changectxs. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function.
    Any string values in metadata must be UTF-8 bytes.

    Trying to obsolete a public changeset will raise an exception.

    The current user and date are used unless specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if b'user' not in metadata:
        luser = (
            repo.ui.config(b'devel', b'user.obsmarker') or repo.ui.username()
        )
        metadata[b'user'] = encoding.fromlocal(luser)

    # Operation metadata handling
    useoperation = repo.ui.configbool(
        b'experimental', b'evolution.track-operation'
    )
    if useoperation and operation:
        metadata[b'operation'] = operation

    # Effect flag metadata handling
    saveeffectflag = repo.ui.configbool(
        b'experimental', b'evolution.effect-flags'
    )

    with repo.transaction(b'add-obsolescence-marker') as tr:
        markerargs = []
        for rel in relations:
            predecessors = rel[0]
            if not isinstance(predecessors, tuple):
                # preserve compat with old API until all callers are migrated
                predecessors = (predecessors,)
            if len(predecessors) > 1 and len(rel[1]) != 1:
                msg = b'Fold markers can only have 1 successor, not %d'
                raise error.ProgrammingError(msg % len(rel[1]))
            foldid = None
            foldsize = len(predecessors)
            if 1 < foldsize:
                foldid = makefoldid(rel, metadata[b'user'])
            for foldidx, prec in enumerate(predecessors, 1):
                sucs = rel[1]
                localmetadata = metadata.copy()
                if len(rel) > 2:
                    localmetadata.update(rel[2])
                if foldid is not None:
                    localmetadata[b'fold-id'] = foldid
                    localmetadata[b'fold-idx'] = b'%d' % foldidx
                    localmetadata[b'fold-size'] = b'%d' % foldsize

                if not prec.mutable():
                    raise error.Abort(
                        _(b"cannot obsolete public changeset: %s") % prec,
                        hint=b"see 'hg help phases' for details",
                    )
                nprec = prec.node()
                nsucs = tuple(s.node() for s in sucs)
                npare = None
                if not nsucs:
                    npare = tuple(p.node() for p in prec.parents())
                if nprec in nsucs:
                    raise error.Abort(
                        _(b"changeset %s cannot obsolete itself") % prec
                    )

                # The effect flag can be different for each relation
                if saveeffectflag:
                    # The effect flag is saved in a versioned field name for
                    # future evolution
                    effectflag = obsutil.geteffectflag(prec, sucs)
                    localmetadata[obsutil.EFFECTFLAGFIELD] = b"%d" % effectflag

                # Creating the marker causes the hidden cache to become
                # invalid, which causes recomputation when we ask for
                # prec.parents() above, resulting in n^2 behavior. So let's
                # prepare all of the args first, then create the markers.
                markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(
                tr,
                nprec,
                nsucs,
                flag,
                parents=npare,
                date=date,
                metadata=localmetadata,
                ui=repo.ui,
            )
    repo.filteredrevcache.clear()
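
# Hedged usage sketch (``repo``, ``oldctx`` and ``newctx`` are hypothetical):
# recording that one changeset was amended into another takes a single
# relation. createmarkers() opens its own transaction but takes no lock,
# which remains the caller's concern:
#
#     createmarkers(
#         repo,
#         [((oldctx,), (newctx,))],
#         operation=b'amend',
#         metadata={b'note': b'illustrative marker'},
#     )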