tracing: add a couple of trace points on obsolete and repoview...
Augie Fackler
r43534:4353942b default
@@ -1,1143 +1,1144 @@
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
build new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "predecessor" and possible
replacements are called "successors". Markers that used changeset X as
a predecessor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "predecessor markers of Y" because they hold
information about the predecessors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

    (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. The marker format depends on the
version. See the comment associated with each format for details.

"""
from __future__ import absolute_import

import errno
import hashlib
import struct

from .i18n import _
from .pycompat import getattr
from . import (
    encoding,
    error,
    node,
    obsutil,
    phases,
    policy,
    pycompat,
    util,
)
from .utils import dateutil

parsers = policy.importmod(r'parsers')

_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache
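
# Illustrative sketch (not part of obsolete.py): grouping hypothetical
# (predecessor, successors) pairs to tell the docstring's cases apart. A
# single marker with several successors is a split, while several
# independent markers sharing one predecessor signal divergent rewriting.
def _classifymarkers(markers):  # hypothetical helper, for illustration only
    byprec = {}
    for prec, sucs in markers:
        byprec.setdefault(prec, []).append(sucs)
    for prec, sucslists in sorted(byprec.items()):
        if len(sucslists) > 1:
            yield prec, b'divergent'
        elif not sucslists[0]:
            yield prec, b'pruned'
        elif len(sucslists[0]) > 1:
            yield prec, b'split'
        else:
            yield prec, b'replaced'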


# Options for obsolescence
createmarkersopt = b'createmarkers'
allowunstableopt = b'allowunstable'
exchangeopt = b'exchange'


def _getoptionvalue(repo, option):
    """Returns the value of the given obsolescence option for the given
    repository.
    """
    configkey = b'evolution.%s' % option
    newconfig = repo.ui.configbool(b'experimental', configkey)

    # Return the value only if defined
    if newconfig is not None:
        return newconfig

    # Fallback on generic option
    try:
        return repo.ui.configbool(b'experimental', b'evolution')
    except (error.ConfigError, AttributeError):
        # Fallback on old-fashioned config
        # inconsistent config: experimental.evolution
        result = set(repo.ui.configlist(b'experimental', b'evolution'))

        if b'all' in result:
            return True

        # Temporary hack for next check
        newconfig = repo.ui.config(b'experimental', b'evolution.createmarkers')
        if newconfig:
            result.add(b'createmarkers')

        return option in result
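
# Example configuration (illustrative): the per-feature keys read above live
# in the [experimental] section, e.g. in an hgrc:
#
#     [experimental]
#     evolution.createmarkers = yes
#     evolution.exchange = yes
#     evolution.allowunstable = no
#
# The plain 'experimental.evolution' list (or 'all') is the older spelling
# that _getoptionvalue() falls back to.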


def getoptions(repo):
    """Returns a dict showing the state of obsolescence features."""

    createmarkersvalue = _getoptionvalue(repo, createmarkersopt)
    unstablevalue = _getoptionvalue(repo, allowunstableopt)
    exchangevalue = _getoptionvalue(repo, exchangeopt)

    # createmarkers must be enabled if other options are enabled
    if (unstablevalue or exchangevalue) and not createmarkersvalue:
        raise error.Abort(
            _(
                b"'createmarkers' obsolete option must be enabled "
                b"if other obsolete options are enabled"
            )
        )

    return {
        createmarkersopt: createmarkersvalue,
        allowunstableopt: unstablevalue,
        exchangeopt: exchangevalue,
    }


def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    return getoptions(repo)[option]


# Creating aliases for marker flags because the evolve extension looks for
# bumpedfix in obsolete.py
bumpedfix = obsutil.bumpedfix
usingsha256 = obsutil.usingsha256

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = b'>BIB20s'
_fm0node = b'20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)
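
# Size check (illustrative): the fixed part of a version-0 marker packs as
# '>BIB20s', i.e. 1 (uint8) + 4 (uint32) + 1 (byte) + 20 (nodeid) = 26 bytes,
# so _fm0fsize == 26 and _fm0fnodesize == 20. A marker with N successors and
# M bytes of metadata therefore occupies 26 + N * 20 + M bytes on disk.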


def _fm0readmarkers(data, off, stop):
    # Loop on markers
    while off < stop:
        # read fixed part
        cur = data[off : off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = _fm0fnodesize * numsuc
            cur = data[off : off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off : off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(
                _(
                    b'parsing obsolete marker: metadata is too '
                    b'short, %d bytes expected, got %d'
                )
                % (mdsize, len(metadata))
            )
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop(b'date', b'0 0').split(b' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0.0, 0)
        parents = None
        if b'p2' in metadata:
            parents = (metadata.pop(b'p1', None), metadata.pop(b'p2', None))
        elif b'p1' in metadata:
            parents = (metadata.pop(b'p1', None),)
        elif b'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to a nodeid, drop the data.
                parents = None

        metadata = tuple(sorted(pycompat.iteritems(metadata)))

        yield (pre, sucs, flags, metadata, date, parents)


def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_(b'cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata[b'date'] = b'%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata[b'p0'] = b''
        for i, p in enumerate(parents, 1):
            metadata[b'p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata


def _fm0encodemeta(meta):
    """Return an encoded version of a string-to-string metadata mapping.

    Assumes no ':' in keys and no '\0' in either keys or values."""
    for key, value in pycompat.iteritems(meta):
        if b':' in key or b'\0' in key:
            raise ValueError(b"':' and '\\0' are forbidden in metadata keys")
        if b'\0' in value:
            raise ValueError(b"'\\0' is forbidden in metadata values")
    return b'\0'.join([b'%s:%s' % (k, meta[k]) for k in sorted(meta)])


def _fm0decodemeta(data):
    """Return a string-to-string dictionary from the encoded version."""
    d = {}
    for l in data.split(b'\0'):
        if l:
            key, value = l.split(b':', 1)
            d[key] = value
    return d
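
# Round-trip sketch (illustrative, not part of this module): the two helpers
# above are inverses for well-formed input, since keys may not contain ':'
# or '\0' and values may not contain '\0'.
def _demofm0meta():  # hypothetical name, for illustration only
    meta = {b'user': b'alice', b'operation': b'amend'}
    encoded = _fm0encodemeta(meta)
    assert _fm0decodemeta(encoded) == meta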


## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: predecessor changeset identifier.
#
# - N*(20 or 32) bytes: successor changeset identifiers.
#
# - P*(20 or 32) bytes: parents of the predecessor changeset.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = b'>IdhHBBB20s'
_fm1nodesha1 = b'20s'
_fm1nodesha256 = b'32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = _fm1parentnone << _fm1parentshift
_fm1metapair = b'BB'
_fm1metapairsize = _calcsize(_fm1metapair)
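
# Size check (illustrative): the fixed part '>IdhHBBB20s' packs to
# 4 + 8 + 2 + 2 + 1 + 1 + 1 + 20 = 39 bytes, so _fm1fsize == 39. The parent
# mask is 3 << 14 == 0xC000, i.e. the two highest bits of a 16-bit field,
# with the all-ones value (_fm1parentnone == 3) meaning "no parent data
# stored", matching the comment above.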


def _fm1purereadmarkers(data, off, stop):
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    ufixed = struct.Struct(_fm1fixed).unpack

    while off < stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata
        off = o3 + metasize * nummeta
        metapairsize = unpack(b'>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in pycompat.xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)


def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1] // 60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        if lk > 255:
            msg = (
                b'obsstore metadata key cannot be longer than 255 bytes'
                b' (key "%s" is %u bytes)'
            ) % (key, lk)
            raise error.ProgrammingError(msg)
        if lv > 255:
            msg = (
                b'obsstore metadata value cannot be longer than 255 bytes'
                b' (value "%s" for key "%s" is %u bytes)'
            ) % (value, key, lv)
            raise error.ProgrammingError(msg)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return b''.join(data)
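
# Round-trip sketch (illustrative, not part of this module): encode a single
# format-1 marker and read it back with the pure-Python parser defined above.
def _demofm1roundtrip():  # hypothetical name, for illustration only
    marker = (
        b'\x11' * 20,  # predecessor
        (b'\x22' * 20,),  # one successor
        0,  # flags
        ((b'user', b'alice'),),  # metadata
        (0.0, 0),  # date
        None,  # no parent data recorded
    )
    data = _fm1encodeonemarker(marker)
    decoded = list(_fm1purereadmarkers(data, 0, len(data)))
    assert decoded == [marker]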


def _fm1readmarkers(data, off, stop):
    native = getattr(parsers, 'fm1readmarkers', None)
    if not native:
        return _fm1purereadmarkers(data, off, stop)
    return native(data, off, stop)


# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {
    _fm0version: (_fm0readmarkers, _fm0encodeonemarker),
    _fm1version: (_fm1readmarkers, _fm1encodeonemarker),
}


def _readmarkerversion(data):
    return _unpack(b'>B', data[0:1])[0]


@util.nogc
def _readmarkers(data, off=None, stop=None):
    """Read and enumerate markers from raw data"""
    diskversion = _readmarkerversion(data)
    if not off:
        off = 1  # skip 1 byte version number
    if stop is None:
        stop = len(data)
    if diskversion not in formats:
        msg = _(b'parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    return diskversion, formats[diskversion][0](data, off, stop)


def encodeheader(version=_fm0version):
    return _pack(b'>B', version)


def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for marker in markers:
        yield encodeone(marker)
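
# Usage sketch (illustrative): serializing a full stream, e.g. for exchange,
# is just the header followed by each encoded marker:
#
#     data = b''.join(encodemarkers(markers, addheader=True,
#                                   version=_fm1version))
#     version, decoded = _readmarkers(data)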


@util.nogc
def _addsuccessors(successors, markers):
    for mark in markers:
        successors.setdefault(mark[0], set()).add(mark)


@util.nogc
def _addpredecessors(predecessors, markers):
    for mark in markers:
        for suc in mark[1]:
            predecessors.setdefault(suc, set()).add(mark)


@util.nogc
def _addchildren(children, markers):
    for mark in markers:
        parents = mark[5]
        if parents is not None:
            for p in parents:
                children.setdefault(p, set()).add(mark)


def _checkinvalidmarkers(markers):
    """Search for markers with invalid data and raise an error if needed.

    Exists as a separate function to allow the evolve extension a more
    subtle handling.
    """
    for mark in markers:
        if node.nullid in mark[1]:
            raise error.Abort(
                _(
                    b'bad obsolescence marker detected: '
                    b'invalid successors nullid'
                )
            )


class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with several mappings:
    - predecessors[x] -> set(markers on predecessors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on predecessors edges of children(x))
    """

    fields = (b'prec', b'succs', b'flag', b'meta', b'date', b'parents')
    # prec: nodeid, predecessor changeset
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob in UTF-8, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of predecessors
    #          None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related sets
        self.caches = {}
        self.svfs = svfs
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        if not self._cached(r'_all'):
            try:
                return self.svfs.stat(b'obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete markers are always on."""
        return self._readonly

    def create(
        self,
        transaction,
        prec,
        succs=(),
        flag=0,
        parents=None,
        date=None,
        metadata=None,
        ui=None,
    ):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating markers you want to use the
        `createmarkers` function in this module instead.

        Return True if a new marker has been added, False if the marker
        already existed (no-op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if b'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = dateutil.parsedate(metadata.pop(b'date'))
            elif ui is not None:
                date = ui.configdate(b'devel', b'default-date')
                if date is None:
                    date = dateutil.makedate()
            else:
                date = dateutil.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(
                r'in-marker cycle with %s' % pycompat.sysstr(node.hex(prec))
            )

        metadata = tuple(sorted(pycompat.iteritems(metadata)))
        for k, v in metadata:
            try:
                # might be better to reject non-ASCII keys
                k.decode('utf-8')
                v.decode('utf-8')
            except UnicodeDecodeError:
                raise error.ProgrammingError(
                    b'obsstore metadata must be valid UTF-8 sequence '
                    b'(key = %r, value = %r)'
                    % (pycompat.bytestr(k), pycompat.bytestr(v))
                )

        marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))
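
    # Usage sketch (illustrative): recording that `old` was rewritten as
    # `new`, inside a transaction:
    #
    #     with repo.lock(), repo.transaction(b'add-obsmarker') as tr:
    #         repo.obsstore.create(tr, old, (new,), ui=repo.ui)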

    def add(self, transaction, markers):
        """Add new markers to the store

        Takes care of filtering out duplicates.
        Returns the number of new markers."""
        if self._readonly:
            raise error.Abort(
                _(b'creating obsolete markers is not enabled on this repo')
            )
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs(b'obsstore', b'ab')
            try:
                offset = f.tell()
                transaction.add(b'obsstore', offset)
                # offset == 0: new file - add the version header
                data = b''.join(encodemarkers(new, offset == 0, self._version))
                f.write(data)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            addedmarkers = transaction.changes.get(b'obsmarkers')
            if addedmarkers is not None:
                addedmarkers.update(new)
            self._addmarkers(new, data)
            # new markers *may* have changed several sets. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get(b'new_obsmarkers', b'0'))
        transaction.hookargs[b'new_obsmarkers'] = b'%d' % (previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        return self.svfs.tryread(b'obsstore')

    @propertycache
    def _version(self):
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def predecessors(self):
        predecessors = {}
        _addpredecessors(predecessors, self._all)
        return predecessors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        return attr in self.__dict__

    def _addmarkers(self, markers, rawdata):
        markers = list(markers)  # to allow repeated iteration
        self._data = self._data + rawdata
        self._all.extend(markers)
        if self._cached(r'successors'):
            _addsuccessors(self.successors, markers)
        if self._cached(r'predecessors'):
            _addpredecessors(self.predecessors, markers)
        if self._cached(r'children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use one of these changesets as a successor
        - prune markers of direct children of these changesets
        - recursive application of the two rules on predecessors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.predecessors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = {m[0] for m in direct}
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers


def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    defaultformat = ui.configint(b'format', b'obsstore-version')
    # rely on obsstore class default when possible.
    kwargs = {}
    if defaultformat is not None:
        kwargs[r'defaultformat'] = defaultformat
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **kwargs)
    if store and readonly:
        ui.warn(
            _(b'obsolete feature not enabled but %i markers found!\n')
            % len(list(store))
        )
    return store


def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for the highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None
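
# Worked example (illustrative): with local formats {0, 1} as defined above,
# commonversion([2, 1, 0]) returns 1 (the newest version known on both
# sides), and commonversion([2, 3]) returns None.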


# arbitrarily picked to fit into the 8K limit from HTTP servers
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300


def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split into chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if len(nextdata) + currentlen > _maxpayload:
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = b''.join([_pack(b'>B', _fm0version)] + part)
        keys[b'dump%i' % idx] = util.b85encode(data)
    return keys
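
# Decode-side sketch (illustrative): a receiver reverses the escaping by
# base85-decoding each chunk and feeding it to the marker reader, which
# dispatches on the version-0 header packed above:
#
#     for key, value in sorted(keys.items()):
#         data = util.b85decode(value)
#         version, markers = _readmarkers(data)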


def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(sorted(repo.obsstore))


def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith(b'dump'):
        repo.ui.warn(_(b'unknown key: %r') % key)
        return False
    if old:
        repo.ui.warn(_(b'unexpected old value for %r') % key)
        return False
    data = util.b85decode(new)
    with repo.lock(), repo.transaction(b'pushkey: obsolete markers') as tr:
        repo.obsstore.mergemarkers(tr, data)
        repo.invalidatevolatilesets()
        return True


# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}


def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""

    def decorator(func):
        if name in cachefuncs:
            msg = b"duplicated registration for volatileset '%s' (existing: %r)"
            raise error.ProgrammingError(msg % (name, cachefuncs[name]))
        cachefuncs[name] = func
        return func

    return decorator


def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
-    if not repo.obsstore:
-        return frozenset()
-    if name not in repo.obsstore.caches:
-        repo.obsstore.caches[name] = cachefuncs[name](repo)
-    return repo.obsstore.caches[name]
+    with util.timedcm('getrevs %s', name):
+        if not repo.obsstore:
+            return frozenset()
+        if name not in repo.obsstore.caches:
+            repo.obsstore.caches[name] = cachefuncs[name](repo)
+        return repo.obsstore.caches[name]
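
# Note on the change above (sketch of intent, per the commit message):
# util.timedcm is a context manager that times the enclosed block and hooks
# into Mercurial's tracing support, so each volatile-set computation made
# through getrevs() now shows up as a trace point labelled with the set
# name. Illustrative stand-alone use, assuming a `stats` result object:
#
#     with util.timedcm('getrevs %s', name) as stats:
#         pass  # work to measure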


# To be simple we need to invalidate obsolescence caches when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence-related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists
    on the repo.

    (We could be smarter here given the exact event that triggered the cache
    clearing)"""
    # only clear the caches if there is obsstore data in this repo
    if b'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()


def _mutablerevs(repo):
    """the set of mutable revisions in the repository"""
    return repo._phasecache.getrevset(repo, phases.mutablephases)


@cachefor(b'obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    getnode = repo.changelog.node
    notpublic = _mutablerevs(repo)
    isobs = repo.obsstore.successors.__contains__
    obs = set(r for r in notpublic if isobs(getnode(r)))
    return obs
943 @cachefor(b'orphan')
944 @cachefor(b'orphan')
944 def _computeorphanset(repo):
945 def _computeorphanset(repo):
945 """the set of non obsolete revisions with obsolete parents"""
946 """the set of non obsolete revisions with obsolete parents"""
946 pfunc = repo.changelog.parentrevs
947 pfunc = repo.changelog.parentrevs
947 mutable = _mutablerevs(repo)
948 mutable = _mutablerevs(repo)
948 obsolete = getrevs(repo, b'obsolete')
949 obsolete = getrevs(repo, b'obsolete')
949 others = mutable - obsolete
950 others = mutable - obsolete
950 unstable = set()
951 unstable = set()
951 for r in sorted(others):
952 for r in sorted(others):
952 # A rev is unstable if one of its parent is obsolete or unstable
953 # A rev is unstable if one of its parent is obsolete or unstable
953 # this works since we traverse following growing rev order
954 # this works since we traverse following growing rev order
954 for p in pfunc(r):
955 for p in pfunc(r):
955 if p in obsolete or p in unstable:
956 if p in obsolete or p in unstable:
956 unstable.add(r)
957 unstable.add(r)
957 break
958 break
958 return unstable
959 return unstable
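
# Example (editor's sketch): why a single in-order pass is enough. With a
# toy graph where rev 1 has parent 0 and rev 2 has parent 1, and only rev 0
# is obsolete, visiting 1 before 2 lets 2 observe that its parent is already
# unstable:
#
#     obsolete, unstable = {0}, set()
#     parents = {1: (0,), 2: (1,)}
#     for r in sorted(parents):
#         if any(p in obsolete or p in unstable for p in parents[r]):
#             unstable.add(r)
#     assert unstable == {1, 2}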


@cachefor(b'suspended')
def _computesuspendedset(repo):
    """the set of obsolete revisions with non-obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, b'orphan'))
    return set(r for r in getrevs(repo, b'obsolete') if r in suspended)


@cachefor(b'extinct')
def _computeextinctset(repo):
    """the set of obsolete revisions without non-obsolete descendants"""
    return getrevs(repo, b'obsolete') - getrevs(repo, b'suspended')
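
# Example (editor's sketch): 'suspended' and 'extinct' partition 'obsolete'
# by construction, so this invariant should hold for any repo:
#
#     obs = getrevs(repo, b'obsolete')
#     assert obs == getrevs(repo, b'suspended') | getrevs(repo, b'extinct')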


@cachefor(b'phasedivergent')
def _computephasedivergentset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase  # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    tonode = cl.node
    obsstore = repo.obsstore
    for rev in repo.revs(b'(not public()) and (not obsolete())'):
        # We only evaluate mutable, non-obsolete revisions
        node = tonode(rev)
        # (future) A cache of predecessors may be worthwhile if splits are
        # very common
        for pnode in obsutil.allpredecessors(
            obsstore, [node], ignoreflags=bumpedfix
        ):
            prev = torev(pnode)  # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public predecessor
                bumped.add(rev)
                break  # Next draft!
    return bumped
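
# Example (editor's sketch): phase divergence typically appears when a
# changeset is rewritten (e.g. amended) after a copy of it was already
# published; the rewritten draft then has a public predecessor. The matching
# revset resolves through this cache:
#
#     repo.revs(b'phasedivergent()')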


@cachefor(b'contentdivergent')
def _computecontentdivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision."""
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    tonode = repo.changelog.node
    for rev in repo.revs(b'(not public()) - obsolete()'):
        node = tonode(rev)
        mark = obsstore.predecessors.get(node, ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue  # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                obsutil.successorssets(repo, prec, cache=newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(rev)
                break
            toprocess.update(obsstore.predecessors.get(prec, ()))
    return divergent
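
# Example (editor's sketch): content divergence arises when one changeset is
# rewritten twice independently, e.g. the markers (A, (B,)) and (A, (C,))
# with both B and C still alive; B and C then compete as final successors of
# A and both land in this set (revset: b'contentdivergent()').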


def makefoldid(relation, user):

    folddigest = hashlib.sha1(user)
    for p in relation[0] + relation[1]:
        folddigest.update(b'%d' % p.rev())
        folddigest.update(p.node())
    # Since a fold only has to compete against folds for the same successors,
    # it seems fine to use a small ID. Smaller IDs save space.
    return node.hex(folddigest.digest())[:8]
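
# Example (editor's sketch): for a fold of two predecessors into a single
# successor, createmarkers() below stamps each marker's metadata with this
# shared id (the hex value here is made up):
#
#     {b'fold-id': b'1a2b3c4d', b'fold-idx': b'1', b'fold-size': b'2'}
#     {b'fold-id': b'1a2b3c4d', b'fold-idx': b'2', b'fold-size': b'2'}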


def createmarkers(
    repo, relations, flag=0, date=None, metadata=None, operation=None
):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of ((<old>,...), (<new>, ...)[,{metadata}])
    tuples. `old` and `new` are changectxs. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function.
    Any string values in metadata must be UTF-8 bytes.

    Trying to obsolete a public changeset will raise an exception.

    The current user and date are used, unless specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if b'user' not in metadata:
        luser = (
            repo.ui.config(b'devel', b'user.obsmarker') or repo.ui.username()
        )
        metadata[b'user'] = encoding.fromlocal(luser)

    # Operation metadata handling
    useoperation = repo.ui.configbool(
        b'experimental', b'evolution.track-operation'
    )
    if useoperation and operation:
        metadata[b'operation'] = operation

    # Effect flag metadata handling
    saveeffectflag = repo.ui.configbool(
        b'experimental', b'evolution.effect-flags'
    )

    with repo.transaction(b'add-obsolescence-marker') as tr:
        markerargs = []
        for rel in relations:
            predecessors = rel[0]
            if not isinstance(predecessors, tuple):
                # preserve compat with old API until all callers are migrated
                predecessors = (predecessors,)
            if len(predecessors) > 1 and len(rel[1]) != 1:
                msg = b'Fold markers can only have 1 successor, not %d'
                raise error.ProgrammingError(msg % len(rel[1]))
            foldid = None
            foldsize = len(predecessors)
            if 1 < foldsize:
                foldid = makefoldid(rel, metadata[b'user'])
            for foldidx, prec in enumerate(predecessors, 1):
                sucs = rel[1]
                localmetadata = metadata.copy()
                if len(rel) > 2:
                    localmetadata.update(rel[2])
                if foldid is not None:
                    localmetadata[b'fold-id'] = foldid
                    localmetadata[b'fold-idx'] = b'%d' % foldidx
                    localmetadata[b'fold-size'] = b'%d' % foldsize

                if not prec.mutable():
                    raise error.Abort(
                        _(b"cannot obsolete public changeset: %s") % prec,
                        hint=b"see 'hg help phases' for details",
                    )
                nprec = prec.node()
                nsucs = tuple(s.node() for s in sucs)
                npare = None
                if not nsucs:
                    npare = tuple(p.node() for p in prec.parents())
                if nprec in nsucs:
                    raise error.Abort(
                        _(b"changeset %s cannot obsolete itself") % prec
                    )

                # Effect flag can be different by relation
                if saveeffectflag:
                    # The effect flag is saved in a versioned field name for
                    # future evolution
                    effectflag = obsutil.geteffectflag(prec, sucs)
                    localmetadata[obsutil.EFFECTFLAGFIELD] = b"%d" % effectflag

                # Creating the marker causes the hidden cache to become
                # invalid, which causes recomputation when we ask for
                # prec.parents() above. Resulting in n^2 behavior. So let's
                # prepare all of the args first, then create the markers.
                markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(
                tr,
                nprec,
                nsucs,
                flag,
                parents=npare,
                date=date,
                metadata=localmetadata,
                ui=repo.ui,
            )
            repo.filteredrevcache.clear()
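
# Example (editor's sketch): recording that changectx `old` was rewritten
# into changectx `new`. The function manages its own transaction but not the
# lock, so a caller would typically hold one; the operation name is only
# illustrative:
#
#     with repo.lock():
#         createmarkers(repo, [((old,), (new,))], operation=b'amend')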
@@ -1,337 +1,337 @@
# repoview.py - Filtered view of a localrepo object
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import copy
import weakref

from .node import nullrev
from .pycompat import (
    delattr,
    getattr,
    setattr,
)
from . import (
    obsolete,
    phases,
    pycompat,
    tags as tagsmod,
    util,
)
from .utils import repoviewutil


def hideablerevs(repo):
    """Revision candidates to be hidden

    This is a standalone function to allow extensions to wrap it.

    Because we use the set of immutable changesets as a fallback subset in
    branchmap (see mercurial.utils.repoviewutil.subsettable), you cannot set
    "public" changesets as "hideable". Doing so would break multiple code
    assertions and lead to crashes."""
    obsoletes = obsolete.getrevs(repo, b'obsolete')
    internals = repo._phasecache.getrevset(repo, phases.localhiddenphases)
    internals = frozenset(internals)
    return obsoletes | internals


def pinnedrevs(repo):
    """revisions blocking hidden changesets from being filtered
    """

    cl = repo.changelog
    pinned = set()
    pinned.update([par.rev() for par in repo[None].parents()])
    pinned.update([cl.rev(bm) for bm in repo._bookmarks.values()])

    tags = {}
    tagsmod.readlocaltags(repo.ui, repo, tags, {})
    if tags:
        rev, nodemap = cl.rev, cl.nodemap
        pinned.update(rev(t[0]) for t in tags.values() if t[0] in nodemap)
    return pinned


def _revealancestors(pfunc, hidden, revs):
    """reveals contiguous chains of hidden ancestors of 'revs' by removing
    them from 'hidden'

    - pfunc(r): a function returning the parents of 'r',
    - hidden: the (preliminary) hidden revisions, to be updated
    - revs: iterable of revnums,

    (Ancestors are revealed exclusively, i.e. the elements in 'revs' are
    *not* revealed)
    """
    stack = list(revs)
    while stack:
        for p in pfunc(stack.pop()):
            if p != nullrev and p in hidden:
                hidden.remove(p)
                stack.append(p)
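
# Example (editor's sketch): a toy linear graph where rev 1 has parent 0 and
# rev 2 has parent 1, with all three initially hidden; revealing the
# ancestors of {2} unhides 0 and 1 but, as documented, not 2 itself:
#
#     parents = {0: (nullrev,), 1: (0,), 2: (1,)}
#     hidden = {0, 1, 2}
#     _revealancestors(parents.__getitem__, hidden, {2})
#     assert hidden == {2}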


def computehidden(repo, visibilityexceptions=None):
    """compute the set of hidden revisions to filter

    During most operations hidden should be filtered."""
    assert not repo.changelog.filteredrevs

    hidden = hideablerevs(repo)
    if hidden:
        hidden = set(hidden - pinnedrevs(repo))
        if visibilityexceptions:
            hidden -= visibilityexceptions
        pfunc = repo.changelog.parentrevs
        mutable = repo._phasecache.getrevset(repo, phases.mutablephases)

        visible = mutable - hidden
        _revealancestors(pfunc, hidden, visible)
    return frozenset(hidden)
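
# Example (editor's sketch): in set terms the function computes roughly
#
#     hidden = hideablerevs(repo) - pinnedrevs(repo)
#     hidden -= ancestors(mutable - hidden) intersected with hidden
#
# i.e. a hideable changeset stays visible whenever a visible descendant
# still needs it as an ancestor.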


def computesecret(repo, visibilityexceptions=None):
    """compute the set of revisions that can never be exposed through hgweb

    Changesets in the secret phase (or above) should stay inaccessible."""
    assert not repo.changelog.filteredrevs
    secrets = repo._phasecache.getrevset(repo, phases.remotehiddenphases)
    return frozenset(secrets)


def computeunserved(repo, visibilityexceptions=None):
    """compute the set of revisions that should be filtered when the repo is
    used as a server

    Secret and hidden changesets should not pretend to be here."""
    assert not repo.changelog.filteredrevs
    # fast path in simple case to avoid impact of non optimised code
    hiddens = filterrevs(repo, b'visible')
    secrets = filterrevs(repo, b'served.hidden')
    if secrets:
        return frozenset(hiddens | secrets)
    else:
        return hiddens


def computemutable(repo, visibilityexceptions=None):
    assert not repo.changelog.filteredrevs
    # fast check to avoid revset call on huge repo
    if any(repo._phasecache.phaseroots[1:]):
        getphase = repo._phasecache.phase
        maymutable = filterrevs(repo, b'base')
        return frozenset(r for r in maymutable if getphase(repo, r))
    return frozenset()


def computeimpactable(repo, visibilityexceptions=None):
    """Everything impactable by a mutable revision

    The immutable filter still has some chance of being invalidated. This
    will happen when:

    - you garbage collect hidden changesets,
    - public phase is moved backward,
    - something is changed in the filtering (this could be fixed)

    This filters out any mutable changeset and any public changeset that may
    be impacted by something happening to a mutable revision.

    This is achieved by filtering everything with a revision number equal to
    or higher than that of the first mutable changeset."""
    assert not repo.changelog.filteredrevs
    cl = repo.changelog
    firstmutable = len(cl)
    for roots in repo._phasecache.phaseroots[1:]:
        if roots:
            firstmutable = min(firstmutable, min(cl.rev(r) for r in roots))
    # protect from nullrev root
    firstmutable = max(0, firstmutable)
    return frozenset(pycompat.xrange(firstmutable, len(cl)))
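
# Example (editor's sketch): with ten revisions where the first mutable one
# is rev 7, the 'base' filter hides the whole suffix that mutation could
# still impact:
#
#     filterrevs(repo, b'base') == frozenset({7, 8, 9})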


# functions to compute the filtered sets
#
# When adding a new filter you MUST update the table at:
#     mercurial.utils.repoviewutil.subsettable
# Otherwise your filter will have to recompute all its branch caches
# from scratch (very slow).
filtertable = {
    b'visible': computehidden,
    b'visible-hidden': computehidden,
    b'served.hidden': computesecret,
    b'served': computeunserved,
    b'immutable': computemutable,
    b'base': computeimpactable,
}

_basefiltername = list(filtertable)
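
# Example (editor's sketch): the companion table in
# mercurial.utils.repoviewutil chains each filter to a nearby superset so
# branch caches can be incrementally reused rather than rebuilt; its shape
# is roughly (from memory, not verbatim):
#
#     subsettable = {None: b'visible', b'visible': b'served',
#                    b'served': b'immutable', b'immutable': b'base'}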


def extrafilter(ui):
    """initialize extra filter and return its id

    If extra filtering is configured, we make sure the associated filtered
    views are declared and return the associated id.
    """
    frevs = ui.config(b'experimental', b'extra-filter-revs')
    if frevs is None:
        return None

    fid = pycompat.sysbytes(util.DIGESTS[b'sha1'](frevs).hexdigest())[:12]

    combine = lambda fname: fname + b'%' + fid

    subsettable = repoviewutil.subsettable

    if combine(b'base') not in filtertable:

        def makeextrafilteredrevs(basefilter):
            # bind the base filter at definition time; closing over the loop
            # variable below would make every entry use the last filter
            def extrafilteredrevs(repo, *args, **kwargs):
                baserevs = basefilter(repo, *args, **kwargs)
                extrarevs = frozenset(repo.revs(frevs))
                return baserevs | extrarevs

            return extrafilteredrevs

        for name in _basefiltername:
            filtertable[combine(name)] = makeextrafilteredrevs(
                filtertable[name]
            )
            if name in subsettable:
                subsettable[combine(name)] = combine(subsettable[name])
    return fid
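
# Example (editor's sketch): with a configuration such as
#
#     [experimental]
#     extra-filter-revs = not public()
#
# extrafilter(ui) registers variants like b'visible%<fid>' in filtertable,
# <fid> being the hash computed above, and the corresponding repo view hides
# the 'not public()' revisions on top of the base filter.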


def filterrevs(repo, filtername, visibilityexceptions=None):
    """returns the set of filtered revisions for this filter name

    visibilityexceptions is a set of revs which are exceptions to the
    hidden state and must be visible. They are dynamic, hence we should not
    cache their result"""
    if filtername not in repo.filteredrevcache:
        func = filtertable[filtername]
        if visibilityexceptions:
            return func(repo.unfiltered(), visibilityexceptions)
        repo.filteredrevcache[filtername] = func(repo.unfiltered())
    return repo.filteredrevcache[filtername]
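
# Example (editor's sketch): callers normally reach this through a repo view
# rather than calling it directly; the two spellings below rely on the same
# cached set:
#
#     hidden = filterrevs(repo, b'visible')
#     view = repo.filtered(b'visible')   # its changelog skips those revs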


class repoview(object):
    """Provide a read/write view of a repo through a filtered changelog

    This object is used to access a filtered version of a repository without
    altering the original repository object itself. We cannot alter the
    original object for two main reasons:
    - It prevents the use of a repo with multiple filters at the same time.
      In particular when multiple threads are involved.
    - It makes the scope of the filtering harder to control.

    This object behaves very much like the original repository. All attribute
    operations are done on the original repository:
    - An access to `repoview.someattr` actually returns `repo.someattr`,
    - A write to `repoview.someattr` actually sets the value of
      `repo.someattr`,
    - A deletion of `repoview.someattr` actually drops `someattr`
      from `repo.__dict__`.

    The only exception is the `changelog` property. It is overridden to
    return a (surface) copy of `repo.changelog` with some revisions filtered.
    The `filtername` attribute of the view controls the revisions that need
    to be filtered. (the fact the changelog is copied is an implementation
    detail).

    Unlike attributes, this object intercepts all method calls. This means
    that all methods are run on the `repoview` object with the filtered
    `changelog` property. For this purpose the simple `repoview` class must
    be mixed with the actual class of the repository. This ensures that the
    resulting `repoview` object has the very same methods as the repo
    object. This leads to the property below.

        repoview.method() --> repo.__class__.method(repoview)

    The inheritance has to be done dynamically because `repo` can be any
    subclass of `localrepo`. Eg: `bundlerepo` or `statichttprepo`.
    """

    def __init__(self, repo, filtername, visibilityexceptions=None):
        object.__setattr__(self, r'_unfilteredrepo', repo)
        object.__setattr__(self, r'filtername', filtername)
        object.__setattr__(self, r'_clcachekey', None)
        object.__setattr__(self, r'_clcache', None)
        # revs which are exceptions and must not be hidden
        object.__setattr__(self, r'_visibilityexceptions', visibilityexceptions)

    # not a propertycache on purpose; we shall implement a proper cache later
    @property
    def changelog(self):
        """return a filtered version of the changelog

        this changelog must not be used for writing"""
        # some cache may be implemented later
        unfi = self._unfilteredrepo
        unfichangelog = unfi.changelog
        # bypass call to changelog.method
        unfiindex = unfichangelog.index
        unfilen = len(unfiindex)
        unfinode = unfiindex[unfilen - 1][7]
-
-        revs = filterrevs(unfi, self.filtername, self._visibilityexceptions)
+        with util.timedcm('repo filter for %s', self.filtername):
+            revs = filterrevs(unfi, self.filtername, self._visibilityexceptions)
        cl = self._clcache
        newkey = (unfilen, unfinode, hash(revs), unfichangelog._delayed)
        # if cl.index is not unfiindex, unfi.changelog would be
        # recreated, and our clcache would refer to a garbage object
        if cl is not None and (
            cl.index is not unfiindex or newkey != self._clcachekey
        ):
            cl = None
        # could have been made None by the previous if
        if cl is None:
            cl = copy.copy(unfichangelog)
            cl.filteredrevs = revs
            object.__setattr__(self, r'_clcache', cl)
            object.__setattr__(self, r'_clcachekey', newkey)
        return cl

    def unfiltered(self):
        """Return an unfiltered version of a repo"""
        return self._unfilteredrepo

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        if name == self.filtername and not visibilityexceptions:
            return self
        return self.unfiltered().filtered(name, visibilityexceptions)

    def __repr__(self):
        return r'<%s:%s %r>' % (
            self.__class__.__name__,
            pycompat.sysstr(self.filtername),
            self.unfiltered(),
        )

    # every attribute access is forwarded to the proxied repo
    def __getattr__(self, attr):
        return getattr(self._unfilteredrepo, attr)

    def __setattr__(self, attr, value):
        return setattr(self._unfilteredrepo, attr, value)

    def __delattr__(self, attr):
        return delattr(self._unfilteredrepo, attr)


# Python <3.4 easily leaks types via __mro__. See
# https://bugs.python.org/issue17950. We cache dynamically created types
# so they won't be leaked on every invocation of repo.filtered().
_filteredrepotypes = weakref.WeakKeyDictionary()


def newtype(base):
    """Create a new type with the repoview mixin and the given base class"""
    if base not in _filteredrepotypes:

        class filteredrepo(repoview, base):
            pass

        _filteredrepotypes[base] = filteredrepo
    return _filteredrepotypes[base]
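
# Example (editor's sketch): localrepo builds its filtered views through
# this helper, along the lines of:
#
#     cls = newtype(repo.unfiltered().__class__)
#     view = cls(repo.unfiltered(), b'visible')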