##// END OF EJS Templates
obsolete: remove two unused constants...
Augie Fackler -
r50071:3cd3aaba default
parent child Browse files
Show More
@@ -1,1150 +1,1148 b''
1 # obsolete.py - obsolete markers handling
1 # obsolete.py - obsolete markers handling
2 #
2 #
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 # Logilab SA <contact@logilab.fr>
4 # Logilab SA <contact@logilab.fr>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 """Obsolete marker handling
9 """Obsolete marker handling
10
10
11 An obsolete marker maps an old changeset to a list of new
11 An obsolete marker maps an old changeset to a list of new
12 changesets. If the list of new changesets is empty, the old changeset
12 changesets. If the list of new changesets is empty, the old changeset
13 is said to be "killed". Otherwise, the old changeset is being
13 is said to be "killed". Otherwise, the old changeset is being
14 "replaced" by the new changesets.
14 "replaced" by the new changesets.
15
15
16 Obsolete markers can be used to record and distribute changeset graph
16 Obsolete markers can be used to record and distribute changeset graph
17 transformations performed by history rewrite operations, and help
17 transformations performed by history rewrite operations, and help
18 building new tools to reconcile conflicting rewrite actions. To
18 building new tools to reconcile conflicting rewrite actions. To
19 facilitate conflict resolution, markers include various annotations
19 facilitate conflict resolution, markers include various annotations
20 besides old and new changeset identifiers, such as creation date or
20 besides old and new changeset identifiers, such as creation date or
21 author name.
21 author name.
22
22
23 The old obsoleted changeset is called a "predecessor" and possible
23 The old obsoleted changeset is called a "predecessor" and possible
24 replacements are called "successors". Markers that used changeset X as
24 replacements are called "successors". Markers that used changeset X as
25 a predecessor are called "successor markers of X" because they hold
25 a predecessor are called "successor markers of X" because they hold
26 information about the successors of X. Markers that use changeset Y as
26 information about the successors of X. Markers that use changeset Y as
27 a successor are called "predecessor markers of Y" because they hold
27 a successor are called "predecessor markers of Y" because they hold
28 information about the predecessors of Y.
28 information about the predecessors of Y.
29
29
30 Examples:
30 Examples:
31
31
32 - When changeset A is replaced by changeset A', one marker is stored:
32 - When changeset A is replaced by changeset A', one marker is stored:
33
33
34 (A, (A',))
34 (A, (A',))
35
35
36 - When changesets A and B are folded into a new changeset C, two markers are
36 - When changesets A and B are folded into a new changeset C, two markers are
37 stored:
37 stored:
38
38
39 (A, (C,)) and (B, (C,))
39 (A, (C,)) and (B, (C,))
40
40
41 - When changeset A is simply "pruned" from the graph, a marker is created:
41 - When changeset A is simply "pruned" from the graph, a marker is created:
42
42
43 (A, ())
43 (A, ())
44
44
45 - When changeset A is split into B and C, a single marker is used:
45 - When changeset A is split into B and C, a single marker is used:
46
46
47 (A, (B, C))
47 (A, (B, C))
48
48
49 We use a single marker to distinguish the "split" case from the "divergence"
49 We use a single marker to distinguish the "split" case from the "divergence"
50 case. If two independent operations rewrite the same changeset A in to A' and
50 case. If two independent operations rewrite the same changeset A in to A' and
51 A'', we have an error case: divergent rewriting. We can detect it because
51 A'', we have an error case: divergent rewriting. We can detect it because
52 two markers will be created independently:
52 two markers will be created independently:
53
53
54 (A, (B,)) and (A, (C,))
54 (A, (B,)) and (A, (C,))
55
55
56 Format
56 Format
57 ------
57 ------
58
58
59 Markers are stored in an append-only file stored in
59 Markers are stored in an append-only file stored in
60 '.hg/store/obsstore'.
60 '.hg/store/obsstore'.
61
61
62 The file starts with a version header:
62 The file starts with a version header:
63
63
64 - 1 unsigned byte: version number, starting at zero.
64 - 1 unsigned byte: version number, starting at zero.
65
65
66 The header is followed by the markers. Marker format depends on the version. See
66 The header is followed by the markers. Marker format depends on the version. See
67 comment associated with each format for details.
67 comment associated with each format for details.
68
68
69 """
69 """
70
70
71 import errno
71 import errno
72 import struct
72 import struct
73
73
74 from .i18n import _
74 from .i18n import _
75 from .pycompat import getattr
75 from .pycompat import getattr
76 from .node import (
76 from .node import (
77 bin,
77 bin,
78 hex,
78 hex,
79 )
79 )
80 from . import (
80 from . import (
81 encoding,
81 encoding,
82 error,
82 error,
83 obsutil,
83 obsutil,
84 phases,
84 phases,
85 policy,
85 policy,
86 pycompat,
86 pycompat,
87 util,
87 util,
88 )
88 )
89 from .utils import (
89 from .utils import (
90 dateutil,
90 dateutil,
91 hashutil,
91 hashutil,
92 )
92 )
93
93
94 parsers = policy.importmod('parsers')
94 parsers = policy.importmod('parsers')
95
95
96 _pack = struct.pack
96 _pack = struct.pack
97 _unpack = struct.unpack
97 _unpack = struct.unpack
98 _calcsize = struct.calcsize
98 _calcsize = struct.calcsize
99 propertycache = util.propertycache
99 propertycache = util.propertycache
100
100
101 # Options for obsolescence
101 # Options for obsolescence
102 createmarkersopt = b'createmarkers'
102 createmarkersopt = b'createmarkers'
103 allowunstableopt = b'allowunstable'
103 allowunstableopt = b'allowunstable'
104 allowdivergenceopt = b'allowdivergence'
104 allowdivergenceopt = b'allowdivergence'
105 exchangeopt = b'exchange'
105 exchangeopt = b'exchange'
106
106
107
107
def _getoptionvalue(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    # A dedicated boolean config for this option takes precedence when set.
    specific = repo.ui.configbool(b'experimental', b'evolution.%s' % option)
    if specific is not None:
        return specific

    # Otherwise fall back on the generic boolean form of the option.
    try:
        return repo.ui.configbool(b'experimental', b'evolution')
    except (error.ConfigError, AttributeError):
        # Old-fashion config: experimental.evolution is a list of
        # enabled features (inconsistent with the boolean form above).
        enabled = set(repo.ui.configlist(b'experimental', b'evolution'))

        if b'all' in enabled:
            return True

        # Temporary hack for next check: honor the dedicated
        # createmarkers config in list mode too.
        if repo.ui.config(b'experimental', b'evolution.createmarkers'):
            enabled.add(b'createmarkers')

        return option in enabled
136
136
137
137
def getoptions(repo):
    """Returns dicts showing state of obsolescence features."""

    createmarkers = _getoptionvalue(repo, createmarkersopt)
    if not createmarkers:
        # if we cannot create obsolescence markers, we shouldn't exchange them
        # or perform operations that lead to instability or divergence
        allowunstable = False
        allowdivergence = False
        exchange = False
    else:
        allowunstable = _getoptionvalue(repo, allowunstableopt)
        allowdivergence = _getoptionvalue(repo, allowdivergenceopt)
        exchange = _getoptionvalue(repo, exchangeopt)

    return {
        createmarkersopt: createmarkers,
        allowunstableopt: allowunstable,
        allowdivergenceopt: allowdivergence,
        exchangeopt: exchange,
    }
159
159
160
160
def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    options = getoptions(repo)
    return options[option]
166
166
167
167
168 # Creating aliases for marker flags because evolve extension looks for
168 # Creating aliases for marker flags because evolve extension looks for
169 # bumpedfix in obsolete.py
169 # bumpedfix in obsolete.py
170 bumpedfix = obsutil.bumpedfix
170 bumpedfix = obsutil.bumpedfix
171 usingsha256 = obsutil.usingsha256
171 usingsha256 = obsutil.usingsha256
172
172
173 ## Parsing and writing of version "0"
173 ## Parsing and writing of version "0"
174 #
174 #
175 # The header is followed by the markers. Each marker is made of:
175 # The header is followed by the markers. Each marker is made of:
176 #
176 #
177 # - 1 uint8 : number of new changesets "N", can be zero.
177 # - 1 uint8 : number of new changesets "N", can be zero.
178 #
178 #
179 # - 1 uint32: metadata size "M" in bytes.
179 # - 1 uint32: metadata size "M" in bytes.
180 #
180 #
181 # - 1 byte: a bit field. It is reserved for flags used in common
181 # - 1 byte: a bit field. It is reserved for flags used in common
182 # obsolete marker operations, to avoid repeated decoding of metadata
182 # obsolete marker operations, to avoid repeated decoding of metadata
183 # entries.
183 # entries.
184 #
184 #
185 # - 20 bytes: obsoleted changeset identifier.
185 # - 20 bytes: obsoleted changeset identifier.
186 #
186 #
187 # - N*20 bytes: new changesets identifiers.
187 # - N*20 bytes: new changesets identifiers.
188 #
188 #
189 # - M bytes: metadata as a sequence of nul-terminated strings. Each
189 # - M bytes: metadata as a sequence of nul-terminated strings. Each
190 # string contains a key and a value, separated by a colon ':', without
190 # string contains a key and a value, separated by a colon ':', without
191 # additional encoding. Keys cannot contain '\0' or ':' and values
191 # additional encoding. Keys cannot contain '\0' or ':' and values
192 # cannot contain '\0'.
192 # cannot contain '\0'.
193 _fm0version = 0
193 _fm0version = 0
194 _fm0fixed = b'>BIB20s'
194 _fm0fixed = b'>BIB20s'
195 _fm0node = b'20s'
195 _fm0node = b'20s'
196 _fm0fsize = _calcsize(_fm0fixed)
196 _fm0fsize = _calcsize(_fm0fixed)
197 _fm0fnodesize = _calcsize(_fm0node)
197 _fm0fnodesize = _calcsize(_fm0node)
198
198
199
199
def _fm0readmarkers(data, off, stop):
    """Yield version-0 markers decoded from ``data[off:stop]``.

    Each yielded marker is a ``(prec, sucs, flags, metadata, date, parents)``
    tuple matching ``obsstore.fields``.
    """
    # Loop on markers
    while off < stop:
        # read fixed part
        cur = data[off : off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = _fm0fnodesize * numsuc
            cur = data[off : off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off : off + mdsize]
        if len(metadata) != mdsize:
            # truncated input: the advertised metadata size overruns the data
            raise error.Abort(
                _(
                    b'parsing obsolete marker: metadata is too '
                    b'short, %d bytes expected, got %d'
                )
                % (mdsize, len(metadata))
            )
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        # format 0 smuggles the date through the metadata dict; an
        # unparsable or missing date falls back to the epoch.
        try:
            when, offset = metadata.pop(b'date', b'0 0').split(b' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0.0, 0)
        # parents, when recorded, live in p1/p2 metadata entries; 'p0'
        # marks "explicitly recorded as having no parent".
        parents = None
        if b'p2' in metadata:
            parents = (metadata.pop(b'p1', None), metadata.pop(b'p2', None))
        elif b'p1' in metadata:
            parents = (metadata.pop(b'p1', None),)
        elif b'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.items()))

        yield (pre, sucs, flags, metadata, date, parents)
254
254
255
255
def _fm0encodeonemarker(marker):
    """Return the version-0 binary encoding of one marker tuple.

    ``marker`` is a ``(prec, sucs, flags, metadata, date, parents)`` tuple
    as produced by the readers. Raises ``error.Abort`` for sha256 markers,
    which the old format cannot represent.
    """
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_(b'cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    # date and parents are not first-class fields in format 0; smuggle
    # them through the metadata dictionary instead.
    metadata[b'date'] = b'%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata[b'p0'] = b''
        for i, p in enumerate(parents, 1):
            metadata[b'p%i' % i] = hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata
275
275
276
276
277 def _fm0encodemeta(meta):
277 def _fm0encodemeta(meta):
278 """Return encoded metadata string to string mapping.
278 """Return encoded metadata string to string mapping.
279
279
280 Assume no ':' in key and no '\0' in both key and value."""
280 Assume no ':' in key and no '\0' in both key and value."""
281 for key, value in meta.items():
281 for key, value in meta.items():
282 if b':' in key or b'\0' in key:
282 if b':' in key or b'\0' in key:
283 raise ValueError(b"':' and '\0' are forbidden in metadata key'")
283 raise ValueError(b"':' and '\0' are forbidden in metadata key'")
284 if b'\0' in value:
284 if b'\0' in value:
285 raise ValueError(b"':' is forbidden in metadata value'")
285 raise ValueError(b"':' is forbidden in metadata value'")
286 return b'\0'.join([b'%s:%s' % (k, meta[k]) for k in sorted(meta)])
286 return b'\0'.join([b'%s:%s' % (k, meta[k]) for k in sorted(meta)])
287
287
288
288
289 def _fm0decodemeta(data):
289 def _fm0decodemeta(data):
290 """Return string to string dictionary from encoded version."""
290 """Return string to string dictionary from encoded version."""
291 d = {}
291 d = {}
292 for l in data.split(b'\0'):
292 for l in data.split(b'\0'):
293 if l:
293 if l:
294 key, value = l.split(b':', 1)
294 key, value = l.split(b':', 1)
295 d[key] = value
295 d[key] = value
296 return d
296 return d
297
297
298
298
299 ## Parsing and writing of version "1"
299 ## Parsing and writing of version "1"
300 #
300 #
301 # The header is followed by the markers. Each marker is made of:
301 # The header is followed by the markers. Each marker is made of:
302 #
302 #
303 # - uint32: total size of the marker (including this field)
303 # - uint32: total size of the marker (including this field)
304 #
304 #
305 # - float64: date in seconds since epoch
305 # - float64: date in seconds since epoch
306 #
306 #
307 # - int16: timezone offset in minutes
307 # - int16: timezone offset in minutes
308 #
308 #
309 # - uint16: a bit field. It is reserved for flags used in common
309 # - uint16: a bit field. It is reserved for flags used in common
310 # obsolete marker operations, to avoid repeated decoding of metadata
310 # obsolete marker operations, to avoid repeated decoding of metadata
311 # entries.
311 # entries.
312 #
312 #
313 # - uint8: number of successors "N", can be zero.
313 # - uint8: number of successors "N", can be zero.
314 #
314 #
315 # - uint8: number of parents "P", can be zero.
315 # - uint8: number of parents "P", can be zero.
316 #
316 #
317 # 0: parents data stored but no parent,
317 # 0: parents data stored but no parent,
318 # 1: one parent stored,
318 # 1: one parent stored,
319 # 2: two parents stored,
319 # 2: two parents stored,
320 # 3: no parent data stored
320 # 3: no parent data stored
321 #
321 #
322 # - uint8: number of metadata entries M
322 # - uint8: number of metadata entries M
323 #
323 #
324 # - 20 or 32 bytes: predecessor changeset identifier.
324 # - 20 or 32 bytes: predecessor changeset identifier.
325 #
325 #
326 # - N*(20 or 32) bytes: successors changesets identifiers.
326 # - N*(20 or 32) bytes: successors changesets identifiers.
327 #
327 #
328 # - P*(20 or 32) bytes: parents of the predecessors changesets.
328 # - P*(20 or 32) bytes: parents of the predecessors changesets.
329 #
329 #
330 # - M*(uint8, uint8): size of all metadata entries (key and value)
330 # - M*(uint8, uint8): size of all metadata entries (key and value)
331 #
331 #
332 # - remaining bytes: the metadata, each (key, value) pair after the other.
332 # - remaining bytes: the metadata, each (key, value) pair after the other.
333 _fm1version = 1
333 _fm1version = 1
334 _fm1fixed = b'>IdhHBBB'
334 _fm1fixed = b'>IdhHBBB'
335 _fm1nodesha1 = b'20s'
335 _fm1nodesha1 = b'20s'
336 _fm1nodesha256 = b'32s'
336 _fm1nodesha256 = b'32s'
337 _fm1nodesha1size = _calcsize(_fm1nodesha1)
337 _fm1nodesha1size = _calcsize(_fm1nodesha1)
338 _fm1nodesha256size = _calcsize(_fm1nodesha256)
338 _fm1nodesha256size = _calcsize(_fm1nodesha256)
339 _fm1fsize = _calcsize(_fm1fixed)
339 _fm1fsize = _calcsize(_fm1fixed)
340 _fm1parentnone = 3
340 _fm1parentnone = 3
341 _fm1parentshift = 14
342 _fm1parentmask = _fm1parentnone << _fm1parentshift
343 _fm1metapair = b'BB'
341 _fm1metapair = b'BB'
344 _fm1metapairsize = _calcsize(_fm1metapair)
342 _fm1metapairsize = _calcsize(_fm1metapair)
345
343
346
344
def _fm1purereadmarkers(data, off, stop):
    """Pure-Python decoder for version-1 markers in ``data[off:stop]``.

    Yields ``(prec, sucs, flags, metadata, date, parents)`` tuples; a C
    implementation may be preferred by ``_fm1readmarkers``.
    """
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    ufixed = struct.Struct(_fm1fixed).unpack

    while off < stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta = ufixed(data[off:o1])

        # the sha256 flag selects the wider node width for every node
        # field in this marker
        if flags & sha2flag:
            nodefmt = sha2fmt
            nodesize = sha2size
        else:
            nodefmt = sha1fmt
            nodesize = sha1size

        (prec,) = unpack(nodefmt, data[o1 : o1 + nodesize])
        o1 += nodesize

        # read 0 or more successors
        if numsuc == 1:
            # fast path: slice directly instead of a struct call
            o2 = o1 + nodesize
            sucs = (data[o1:o2],)
        else:
            o2 = o1 + nodesize * numsuc
            sucs = unpack(nodefmt * numsuc, data[o1:o2])

        # read parents
        if numpar == noneflag:
            # no parent data stored for this marker
            o3 = o2
            parents = None
        elif numpar == 1:
            o3 = o2 + nodesize
            parents = (data[o2:o3],)
        else:
            o3 = o2 + nodesize * numpar
            parents = unpack(nodefmt * numpar, data[o2:o3])

        # read metadata: first the (key size, value size) pairs, then the
        # concatenated key/value payloads
        off = o3 + metasize * nummeta
        metapairsize = unpack(b'>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in pycompat.xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        # tz is stored in minutes on disk; the in-memory form is seconds
        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
408
406
409
407
def _fm1encodeonemarker(marker):
    """Return the version-1 binary encoding of one marker tuple.

    ``marker`` is a ``(prec, sucs, flags, metadata, date, parents)`` tuple.
    Raises ``error.ProgrammingError`` when a metadata key or value exceeds
    255 bytes (sizes are stored as uint8 on disk).
    """
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    # one node for the predecessor plus one per successor (and parent)
    numextranodes = 1 + numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1] // 60
    # data[0] is the total marker size, filled in once it is known
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        if lk > 255:
            msg = (
                b'obsstore metadata key cannot be longer than 255 bytes'
                b' (key "%s" is %u bytes)'
            ) % (key, lk)
            raise error.ProgrammingError(msg)
        if lv > 255:
            msg = (
                b'obsstore metadata value cannot be longer than 255 bytes'
                b' (value "%s" for key "%s" is %u bytes)'
            ) % (value, key, lv)
            raise error.ProgrammingError(msg)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    # metadata payloads follow the fixed/size section verbatim
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return b''.join(data)
457
455
458
456
def _fm1readmarkers(data, off, stop):
    """Decode version-1 markers from ``data[off:stop]``.

    Prefers the native parser from the ``parsers`` module and falls back
    on the pure-Python reader when it is unavailable.
    """
    native = getattr(parsers, 'fm1readmarkers', None)
    if native:
        return native(data, off, stop)
    return _fm1purereadmarkers(data, off, stop)
464
462
465
463
466 # mapping to read/write various marker formats
464 # mapping to read/write various marker formats
467 # <version> -> (decoder, encoder)
465 # <version> -> (decoder, encoder)
468 formats = {
466 formats = {
469 _fm0version: (_fm0readmarkers, _fm0encodeonemarker),
467 _fm0version: (_fm0readmarkers, _fm0encodeonemarker),
470 _fm1version: (_fm1readmarkers, _fm1encodeonemarker),
468 _fm1version: (_fm1readmarkers, _fm1encodeonemarker),
471 }
469 }
472
470
473
471
def _readmarkerversion(data):
    """Return the on-disk format version (first byte of ``data``)."""
    (version,) = struct.unpack(b'>B', data[0:1])
    return version
476
474
477
475
@util.nogc
def _readmarkers(data, off=None, stop=None):
    """Read and enumerate markers from raw data"""
    diskversion = _readmarkerversion(data)
    if diskversion not in formats:
        msg = _(b'parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    if not off:
        # skip the 1 byte version number at the start of the file
        off = 1
    if stop is None:
        stop = len(data)
    decoder = formats[diskversion][0]
    return diskversion, decoder(data, off, stop)
490
488
491
489
def encodeheader(version=_fm0version):
    """Return the one-byte version header for an obsstore file."""
    return struct.pack(b'>B', version)
494
492
495
493
def encodemarkers(markers, addheader=False, version=_fm0version):
    """Generate the binary representation of ``markers``.

    Kept separate from flushmarkers(); it will be reused for markers
    exchange. When ``addheader`` is true, the version header is emitted
    first.
    """
    encoder = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for marker in markers:
        yield encoder(marker)
504
502
505
503
@util.nogc
def _addsuccessors(successors, markers):
    """Index every marker in ``markers`` into the ``successors`` mapping.

    Markers are keyed by their predecessor node (field 0).
    """
    for marker in markers:
        prednode = marker[0]
        bucket = successors.setdefault(prednode, set())
        bucket.add(marker)
510
508
511
509
@util.nogc
def _addpredecessors(predecessors, markers):
    """Index every marker in ``markers`` into the ``predecessors`` mapping.

    A marker is registered once per successor node (field 1).
    """
    for marker in markers:
        for succnode in marker[1]:
            bucket = predecessors.setdefault(succnode, set())
            bucket.add(marker)
517
515
518
516
@util.nogc
def _addchildren(children, markers):
    """Index every marker in ``markers`` into the ``children`` mapping.

    A marker is registered once per recorded parent of its predecessor
    (field 5); markers with no recorded parent data are skipped.
    """
    for marker in markers:
        parentnodes = marker[5]
        if parentnodes is None:
            continue
        for parentnode in parentnodes:
            children.setdefault(parentnode, set()).add(marker)
526
524
527
525
def _checkinvalidmarkers(repo, markers):
    """search for marker with invalid data and raise error if needed

    Exist as a separated function to allow the evolve extension for a more
    subtle handling.
    """
    nullid = repo.nullid
    for mark in markers:
        if nullid not in mark[1]:
            continue
        raise error.Abort(
            _(
                b'bad obsolescence marker detected: '
                b'invalid successors nullid'
            )
        )
542
540
543
541
class obsstore:
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - predecessors[x] -> set(markers on predecessors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on predecessors edges of children(x))
    """

    fields = (b'prec', b'succs', b'flag', b'meta', b'date', b'parents')
    # prec: nodeid, predecessors changesets
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob in UTF-8, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of predecessors
    #          None is used when no data has been recorded

    def __init__(self, repo, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related cache
        self.caches = {}
        self.svfs = svfs
        self.repo = repo
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        # iterate over every known marker (decoding the store on first use)
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        # Emptiness check. When possible this avoids decoding the whole
        # obsstore: a stat() of the file is enough to tell.
        from . import statichttprepo

        if isinstance(self.repo, statichttprepo.statichttprepository):
            # If repo is accessed via static HTTP, then we can't use os.stat()
            # to just peek at the file size.
            return len(self._data) > 1
        if not self._cached('_all'):
            try:
                return self.svfs.stat(b'obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(
        self,
        transaction,
        prec,
        succs=(),
        flag=0,
        parents=None,
        date=None,
        metadata=None,
        ui=None,
    ):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating marker you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker have been added, False if the markers
        already existed (no op).
        """
        flag = int(flag)
        if metadata is None:
            metadata = {}
        if date is None:
            if b'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = dateutil.parsedate(metadata.pop(b'date'))
            elif ui is not None:
                date = ui.configdate(b'devel', b'default-date')
                if date is None:
                    date = dateutil.makedate()
            else:
                date = dateutil.makedate()
        # node length sanity check: 32 bytes with the sha256 flag, 20 otherwise
        if flag & usingsha256:
            if len(prec) != 32:
                raise ValueError(prec)
            for succ in succs:
                if len(succ) != 32:
                    raise ValueError(succ)
        else:
            if len(prec) != 20:
                raise ValueError(prec)
            for succ in succs:
                if len(succ) != 20:
                    raise ValueError(succ)
        if prec in succs:
            raise ValueError(
                'in-marker cycle with %s' % pycompat.sysstr(hex(prec))
            )

        # normalize metadata into a hashable, deterministically-ordered tuple
        metadata = tuple(sorted(metadata.items()))
        for k, v in metadata:
            try:
                # might be better to reject non-ASCII keys
                k.decode('utf-8')
                v.decode('utf-8')
            except UnicodeDecodeError:
                raise error.ProgrammingError(
                    b'obsstore metadata must be valid UTF-8 sequence '
                    b'(key = %r, value = %r)'
                    % (pycompat.bytestr(k), pycompat.bytestr(v))
                )

        marker = (bytes(prec), tuple(succs), flag, metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicate.
        Return the number of new marker."""
        if self._readonly:
            raise error.Abort(
                _(b'creating obsolete markers is not enabled on this repo')
            )
        # filter out markers already present in the store or duplicated in
        # the incoming batch
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs(b'obsstore', b'ab')
            try:
                offset = f.tell()
                transaction.add(b'obsstore', offset)
                # offset == 0: new file - add the version header
                data = b''.join(encodemarkers(new, offset == 0, self._version))
                f.write(data)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            addedmarkers = transaction.changes.get(b'obsmarkers')
            if addedmarkers is not None:
                addedmarkers.update(new)
            self._addmarkers(new, data)
            # new marker *may* have changed several set. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get(b'new_obsmarkers', b'0'))
        transaction.hookargs[b'new_obsmarkers'] = b'%d' % (previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        # raw bytes of the on-disk obsstore file (empty bytes if missing)
        return self.svfs.tryread(b'obsstore')

    @propertycache
    def _version(self):
        # format version of the on-disk data; fall back to the configured
        # default for an empty store
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        # list of every marker, decoded from the on-disk data and validated
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(self.repo, markers)
        return markers

    @propertycache
    def successors(self):
        # mapping: predecessor node -> set of markers with that predecessor
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def predecessors(self):
        # mapping: successor node -> set of markers listing that successor
        predecessors = {}
        _addpredecessors(predecessors, self._all)
        return predecessors

    @propertycache
    def children(self):
        # mapping: parent node -> set of markers recorded on its children
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        # True if the named propertycache has already been computed
        return attr in self.__dict__

    def _addmarkers(self, markers, rawdata):
        # Update the in-memory state (raw data, marker list, and whichever
        # index mappings were already computed) with freshly-added markers.
        markers = list(markers)  # to allow repeated iteration
        self._data = self._data + rawdata
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('predecessors'):
            _addpredecessors(self.predecessors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(self.repo, markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes mean:

        - marker that use this changeset as successor
        - prune marker of direct children on this changeset
        - recursive application of the two rules on predecessors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.predecessors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                # prune markers (no successors) recorded on children or on
                # the node itself
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            # follow the predecessors of the newly found markers
            pendingnodes = {m[0] for m in direct}
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers
803
801
804
802
def makestore(ui, repo):
    """Build the obsstore for *repo*, honoring the configured on-disk format."""
    # developer config: format.obsstore-version
    configured = ui.configint(b'format', b'obsstore-version')
    # fall back to the obsstore class default unless explicitly configured
    extra = {} if configured is None else {'defaultformat': configured}
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo, repo.svfs, readonly=readonly, **extra)
    if store and readonly:
        # markers already exist even though the feature is disabled
        ui.warn(
            _(b'obsolete feature not enabled but %i markers found!\n')
            % len(list(store))
        )
    return store
822
820
823
821
def commonversion(versions):
    """Return the newest marker-format version present both in ``versions``
    and in our locally supported formats.

    Returns None if no common version exists.
    """
    # scan from newest to oldest so the first hit is the best choice
    versions.sort(reverse=True)
    known = (candidate for candidate in versions if candidate in formats)
    return next(known, None)
835
833
836
834
# arbitrary picked to fit into 8K limit from HTTP server
# you have to take in account:
# - the version header
# - the base85 encoding
_maxpayload = 5300


def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        # start a new chunk whenever adding this marker would overflow the
        # payload limit (the oversized initial currentlen forces a first part)
        if len(nextdata) + currentlen > _maxpayload:
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        # each part is a standalone fm0 stream: version byte + marker data
        data = b''.join([_pack(b'>B', _fm0version)] + part)
        keys[b'dump%i' % idx] = util.b85encode(data)
    return keys
864
862
865
863
def listmarkers(repo):
    """Expose all local obsolescence markers through the pushkey protocol."""
    store = repo.obsstore
    if not store:
        return {}
    return _pushkeyescape(sorted(store))
871
869
872
870
def pushmarker(repo, key, old, new):
    """Push markers over pushkey

    ``key`` must look like b'dump*' (the shape produced by
    ``_pushkeyescape``), ``old`` must be empty, and ``new`` is the
    base85-encoded binary marker data. Returns True when the markers were
    merged into the local obsstore, False when the request was rejected.
    """
    if not key.startswith(b'dump'):
        repo.ui.warn(_(b'unknown key: %r') % key)
        return False
    if old:
        # these keys are write-once: a previous value is never expected
        repo.ui.warn(_(b'unexpected old value for %r') % key)
        return False
    data = util.b85decode(new)
    with repo.lock(), repo.transaction(b'pushkey: obsolete markers') as tr:
        repo.obsstore.mergemarkers(tr, data)
        repo.invalidatevolatilesets()
    return True
886
884
887
885
# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}


def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""

    def register(func):
        # refuse to silently replace a previously registered computer
        if name in cachefuncs:
            msg = b"duplicated registration for volatileset '%s' (existing: %r)"
            raise error.ProgrammingError(msg % (name, cachefuncs[name]))
        cachefuncs[name] = func
        return func

    return register
903
901
904
902
def getrevs(repo, name):
    """Return the frozen set of revisions that belong to the <name> set.

    The set is computed on first access (via the function registered with
    ``cachefor``) and cached on the obsstore for subsequent calls.
    """
    repo = repo.unfiltered()
    with util.timedcm('getrevs %s', name):
        store = repo.obsstore
        if not store:
            # no obsolescence data at all: every volatile set is empty
            return frozenset()
        caches = store.caches
        if name not in caches:
            caches[name] = cachefuncs[name](repo)
        return caches[name]
916
914
917
915
# To be simple we need to invalidate obsolescence cache when:
#
# - new changeset is added:
# - public phase is changed
# - obsolescence marker are added
# - strip is used a repo
def clearobscaches(repo):
    """Drop every obsolescence-related cache attached to *repo*.

    Caches are only cleared when obsstore data already exists for this repo;
    otherwise there is nothing to invalidate.

    (We could be smarter given the exact event that triggered the clearing.)
    """
    if b'obsstore' not in repo._filecache:
        return
    repo.obsstore.caches.clear()
935
933
936
934
def _mutablerevs(repo):
    """Return the set of mutable (non-public) revisions in the repository."""
    phasecache = repo._phasecache
    return phasecache.getrevset(repo, phases.mutablephases)
940
938
941
939
@cachefor(b'obsolete')
def _computeobsoleteset(repo):
    """Compute the set of obsolete revisions: mutable revs that appear as a
    predecessor in some marker."""
    node_of = repo.changelog.node
    successors = repo.obsstore.successors
    return frozenset(
        rev for rev in _mutablerevs(repo) if node_of(rev) in successors
    )
949
947
950
948
@cachefor(b'orphan')
def _computeorphanset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    pfunc = repo.changelog.parentrevs
    mutable = _mutablerevs(repo)
    obsolete = getrevs(repo, b'obsolete')
    # candidates: mutable revisions that are not themselves obsolete
    others = mutable - obsolete
    unstable = set()
    for r in sorted(others):
        # A rev is unstable if one of its parent is obsolete or unstable
        # this works since we traverse following growing rev order
        for p in pfunc(r):
            if p in obsolete or p in unstable:
                unstable.add(r)
                break
    return frozenset(unstable)
967
965
968
966
@cachefor(b'suspended')
def _computesuspendedset(repo):
    """Obsolete revisions that still have non-obsolete (orphan) descendants."""
    orphanancestors = repo.changelog.ancestors(getrevs(repo, b'orphan'))
    return frozenset(
        rev for rev in getrevs(repo, b'obsolete') if rev in orphanancestors
    )
974
972
975
973
976 @cachefor(b'extinct')
974 @cachefor(b'extinct')
977 def _computeextinctset(repo):
975 def _computeextinctset(repo):
978 """the set of obsolete parents without non obsolete descendants"""
976 """the set of obsolete parents without non obsolete descendants"""
979 return getrevs(repo, b'obsolete') - getrevs(repo, b'suspended')
977 return getrevs(repo, b'obsolete') - getrevs(repo, b'suspended')
980
978
981
979
@cachefor(b'phasedivergent')
def _computephasedivergentset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase  # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.index.get_rev
    tonode = cl.node
    obsstore = repo.obsstore
    for rev in repo.revs(b'(not public()) and (not obsolete())'):
        # We only evaluate mutable, non-obsolete revision
        node = tonode(rev)
        # (future) A cache of predecessors may worth if split is very common
        for pnode in obsutil.allpredecessors(
            obsstore, [node], ignoreflags=bumpedfix
        ):
            prev = torev(pnode)  # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public predecessor
                bumped.add(rev)
                break  # Next draft!
    return frozenset(bumped)
1006
1004
1007
1005
@cachefor(b'contentdivergent')
def _computecontentdivergentset(repo):
    """the set of rev that compete to be the final successors of some revision."""
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    tonode = repo.changelog.node
    for rev in repo.revs(b'(not public()) - obsolete()'):
        node = tonode(rev)
        mark = obsstore.predecessors.get(node, ())
        # walk the predecessor markers transitively, looking for any
        # predecessor that has more than one set of live successors
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue  # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                obsutil.successorssets(repo, prec, cache=newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(rev)
                break
            toprocess.update(obsstore.predecessors.get(prec, ()))
    return frozenset(divergent)
1033
1031
1034
1032
def makefoldid(relation, user):
    """Derive a short, stable identifier for a fold operation."""
    digest = hashutil.sha1(user)
    predecessors, successors = relation[0], relation[1]
    for ctx in predecessors + successors:
        digest.update(b'%d' % ctx.rev())
        digest.update(ctx.node())
    # Since fold only has to compete against fold for the same successors, it
    # seems fine to use a small ID. Smaller ID save space.
    return hex(digest.digest())[:8]
1044
1042
1045
1043
def createmarkers(
    repo, relations, flag=0, date=None, metadata=None, operation=None
):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of ((<old>,...), (<new>, ...)[,{metadata}])
    tuple. `old` and `news` are changectx. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function.
    Any string values in metadata must be UTF-8 bytes.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if b'user' not in metadata:
        # devel.user.obsmarker overrides the configured username (test aid)
        luser = (
            repo.ui.config(b'devel', b'user.obsmarker') or repo.ui.username()
        )
        metadata[b'user'] = encoding.fromlocal(luser)

    # Operation metadata handling
    useoperation = repo.ui.configbool(
        b'experimental', b'evolution.track-operation'
    )
    if useoperation and operation:
        metadata[b'operation'] = operation

    # Effect flag metadata handling
    saveeffectflag = repo.ui.configbool(
        b'experimental', b'evolution.effect-flags'
    )

    with repo.transaction(b'add-obsolescence-marker') as tr:
        markerargs = []
        for rel in relations:
            predecessors = rel[0]
            if not isinstance(predecessors, tuple):
                # preserve compat with old API until all caller are migrated
                predecessors = (predecessors,)
            # a "fold" (several predecessors) must map to exactly one successor
            if len(predecessors) > 1 and len(rel[1]) != 1:
                msg = b'Fold markers can only have 1 successors, not %d'
                raise error.ProgrammingError(msg % len(rel[1]))
            foldid = None
            foldsize = len(predecessors)
            if 1 < foldsize:
                foldid = makefoldid(rel, metadata[b'user'])
            # foldidx is 1-based: fold-idx metadata counts from 1
            for foldidx, prec in enumerate(predecessors, 1):
                sucs = rel[1]
                # per-relation metadata overrides the global metadata
                localmetadata = metadata.copy()
                if len(rel) > 2:
                    localmetadata.update(rel[2])
                if foldid is not None:
                    localmetadata[b'fold-id'] = foldid
                    localmetadata[b'fold-idx'] = b'%d' % foldidx
                    localmetadata[b'fold-size'] = b'%d' % foldsize

                if not prec.mutable():
                    raise error.Abort(
                        _(b"cannot obsolete public changeset: %s") % prec,
                        hint=b"see 'hg help phases' for details",
                    )
                nprec = prec.node()
                nsucs = tuple(s.node() for s in sucs)
                npare = None
                if not nsucs:
                    # pruned changeset: record its parents so the marker can
                    # still be exchanged meaningfully
                    npare = tuple(p.node() for p in prec.parents())
                if nprec in nsucs:
                    raise error.Abort(
                        _(b"changeset %s cannot obsolete itself") % prec
                    )

                # Effect flag can be different by relation
                if saveeffectflag:
                    # The effect flag is saved in a versioned field name for
                    # future evolution
                    effectflag = obsutil.geteffectflag(prec, sucs)
                    localmetadata[obsutil.EFFECTFLAGFIELD] = b"%d" % effectflag

                # Creating the marker causes the hidden cache to become
                # invalid, which causes recomputation when we ask for
                # prec.parents() above. Resulting in n^2 behavior. So let's
                # prepare all of the args first, then create the markers.
                markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(
                tr,
                nprec,
                nsucs,
                flag,
                parents=npare,
                date=date,
                metadata=localmetadata,
                ui=repo.ui,
            )
        # new markers may change changeset visibility; drop cached filters
        repo.filteredrevcache.clear()
General Comments 0
You need to be logged in to leave comments. Login now