configitems: register the 'format.obsstore-version' config
marmoute
r33241:fd50788a default
@@ -1,189 +1,192 @@
1 1 # configitems.py - centralized declaration of configuration options
2 2 #
3 3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11
12 12 from . import (
13 13 error,
14 14 )
15 15
16 16 def loadconfigtable(ui, extname, configtable):
17 17 """update config item known to the ui with the extension ones"""
18 18 for section, items in configtable.items():
19 19 knownitems = ui._knownconfig.setdefault(section, {})
20 20 knownkeys = set(knownitems)
21 21 newkeys = set(items)
22 22 for key in sorted(knownkeys & newkeys):
23 23 msg = "extension '%s' overwrite config item '%s.%s'"
24 24 msg %= (extname, section, key)
25 25 ui.develwarn(msg, config='warn-config')
26 26
27 27 knownitems.update(items)
28 28
29 29 class configitem(object):
30 30 """represent a known config item
31 31
32 32 :section: the official config section where to find this item,
33 33 :name: the official name within the section,
34 34 :default: default value for this item,
35 35 """
36 36
37 37 def __init__(self, section, name, default=None):
38 38 self.section = section
39 39 self.name = name
40 40 self.default = default
41 41
42 42 coreitems = {}
43 43
44 44 def _register(configtable, *args, **kwargs):
45 45 item = configitem(*args, **kwargs)
46 46 section = configtable.setdefault(item.section, {})
47 47 if item.name in section:
48 48 msg = "duplicated config item registration for '%s.%s'"
49 49 raise error.ProgrammingError(msg % (item.section, item.name))
50 50 section[item.name] = item
51 51
52 52 # Registering actual config items
53 53
54 54 def getitemregister(configtable):
55 55 return functools.partial(_register, configtable)
56 56
57 57 coreconfigitem = getitemregister(coreitems)
58 58
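# Illustrative sketch (not part of the change): an extension makes its items
# known by exposing a 'configtable' in the shape loadconfigtable() consumes,
# e.g. built with the helper above:
#
#     configtable = {}
#     extconfigitem = getitemregister(configtable)
#     extconfigitem('myext', 'some-option', default=False)
#
# 'myext' and 'some-option' are made-up names for illustration.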
59 59 coreconfigitem('auth', 'cookiefile',
60 60 default=None,
61 61 )
62 62 # bookmarks.pushing: internal hack for discovery
63 63 coreconfigitem('bookmarks', 'pushing',
64 64 default=list,
65 65 )
66 66 # bundle.mainreporoot: internal hack for bundlerepo
67 67 coreconfigitem('bundle', 'mainreporoot',
68 68 default='',
69 69 )
70 70 # bundle.reorder: experimental config
71 71 coreconfigitem('bundle', 'reorder',
72 72 default='auto',
73 73 )
74 74 coreconfigitem('color', 'mode',
75 75 default='auto',
76 76 )
77 77 coreconfigitem('devel', 'all-warnings',
78 78 default=False,
79 79 )
80 80 coreconfigitem('devel', 'bundle2.debug',
81 81 default=False,
82 82 )
83 83 coreconfigitem('devel', 'check-locks',
84 84 default=False,
85 85 )
86 86 coreconfigitem('devel', 'check-relroot',
87 87 default=False,
88 88 )
89 89 coreconfigitem('devel', 'disableloaddefaultcerts',
90 90 default=False,
91 91 )
92 92 coreconfigitem('devel', 'legacy.exchange',
93 93 default=list,
94 94 )
95 95 coreconfigitem('devel', 'servercafile',
96 96 default='',
97 97 )
98 98 coreconfigitem('devel', 'serverexactprotocol',
99 99 default='',
100 100 )
101 101 coreconfigitem('devel', 'serverrequirecert',
102 102 default=False,
103 103 )
104 104 coreconfigitem('devel', 'strip-obsmarkers',
105 105 default=True,
106 106 )
107 107 coreconfigitem('format', 'aggressivemergedeltas',
108 108 default=False,
109 109 )
110 110 coreconfigitem('format', 'chunkcachesize',
111 111 default=None,
112 112 )
113 113 coreconfigitem('format', 'dotencode',
114 114 default=True,
115 115 )
116 116 coreconfigitem('format', 'generaldelta',
117 117 default=False,
118 118 )
119 119 coreconfigitem('format', 'manifestcachesize',
120 120 default=None,
121 121 )
122 122 coreconfigitem('format', 'maxchainlen',
123 123 default=None,
124 124 )
125 coreconfigitem('format', 'obsstore-version',
126 default=None,
127 )
125 128 coreconfigitem('hostsecurity', 'ciphers',
126 129 default=None,
127 130 )
128 131 coreconfigitem('hostsecurity', 'disabletls10warning',
129 132 default=False,
130 133 )
131 134 coreconfigitem('patch', 'eol',
132 135 default='strict',
133 136 )
134 137 coreconfigitem('patch', 'fuzz',
135 138 default=2,
136 139 )
137 140 coreconfigitem('server', 'bundle1',
138 141 default=True,
139 142 )
140 143 coreconfigitem('server', 'bundle1gd',
141 144 default=None,
142 145 )
143 146 coreconfigitem('server', 'compressionengines',
144 147 default=list,
145 148 )
146 149 coreconfigitem('server', 'concurrent-push-mode',
147 150 default='strict',
148 151 )
149 152 coreconfigitem('server', 'disablefullbundle',
150 153 default=False,
151 154 )
152 155 coreconfigitem('server', 'maxhttpheaderlen',
153 156 default=1024,
154 157 )
155 158 coreconfigitem('server', 'preferuncompressed',
156 159 default=False,
157 160 )
158 161 coreconfigitem('server', 'uncompressedallowsecret',
159 162 default=False,
160 163 )
161 164 coreconfigitem('server', 'validate',
162 165 default=False,
163 166 )
164 167 coreconfigitem('server', 'zliblevel',
165 168 default=-1,
166 169 )
167 170 coreconfigitem('ui', 'clonebundleprefers',
168 171 default=list,
169 172 )
170 173 coreconfigitem('ui', 'interactive',
171 174 default=None,
172 175 )
173 176 coreconfigitem('ui', 'quiet',
174 177 default=False,
175 178 )
176 179 # Windows defaults to a limit of 512 open files. A buffer of 128
177 180 # should give us enough headway.
178 181 coreconfigitem('worker', 'backgroundclosemaxqueue',
179 182 default=384,
180 183 )
181 184 coreconfigitem('worker', 'backgroundcloseminfilecount',
182 185 default=2048,
183 186 )
184 187 coreconfigitem('worker', 'backgroundclosethreadcount',
185 188 default=4,
186 189 )
187 190 coreconfigitem('worker', 'numcpus',
188 191 default=None,
189 192 )
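# Illustrative sketch (not part of the diff): with 'format.obsstore-version'
# registered above, callers can drop the explicit default when reading it:
#
#     version = ui.configint('format', 'obsstore-version')   # None by default
#
# and a repository can opt into a specific on-disk format via its hgrc:
#
#     [format]
#     obsstore-version = 1
#
# This is what makestore() in obsolete.py below relies on.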
@@ -1,1031 +1,1031 @@
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 build new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "precursor" and possible
24 24 replacements are called "successors". Markers that use changeset X as
25 25 a precursor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
27 27 a successor are called "precursor markers of Y" because they hold
28 28 information about the precursors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
50 50 case. If two independent operations rewrite the same changeset A into A' and
51 51 A'', we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53
54 54 (A, (B,)) and (A, (C,))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are stored in an append-only file stored in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
66 66 The header is followed by the markers. The marker format depends on the
67 67 version; see the comment associated with each format for details.
68 68
69 69 """
70 70 from __future__ import absolute_import
71 71
72 72 import errno
73 73 import struct
74 74
75 75 from .i18n import _
76 76 from . import (
77 77 error,
78 78 node,
79 79 obsutil,
80 80 phases,
81 81 policy,
82 82 util,
83 83 )
84 84
85 85 parsers = policy.importmod(r'parsers')
86 86
87 87 _pack = struct.pack
88 88 _unpack = struct.unpack
89 89 _calcsize = struct.calcsize
90 90 propertycache = util.propertycache
91 91
92 92 # the obsolete feature is not mature enough to be enabled by default.
93 93 # you have to rely on a third party extension to enable this.
94 94 _enabled = False
95 95
96 96 # Options for obsolescence
97 97 createmarkersopt = 'createmarkers'
98 98 allowunstableopt = 'allowunstable'
99 99 exchangeopt = 'exchange'
100 100
101 101 def isenabled(repo, option):
102 102 """Returns True if the given repository has the given obsolete option
103 103 enabled.
104 104 """
105 105 result = set(repo.ui.configlist('experimental', 'evolution'))
106 106 if 'all' in result:
107 107 return True
108 108
109 109 # For migration purposes, temporarily return true if the config hasn't been
110 110 # set but _enabled is true.
111 111 if len(result) == 0 and _enabled:
112 112 return True
113 113
114 114 # createmarkers must be enabled if other options are enabled
115 115 if ((allowunstableopt in result or exchangeopt in result) and
116 116 not createmarkersopt in result):
117 117 raise error.Abort(_("'createmarkers' obsolete option must be enabled "
118 118 "if other obsolete options are enabled"))
119 119
120 120 return option in result
121 121
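# Illustrative note (not part of the change): the option names above live in
# the 'experimental.evolution' config list, so an hgrc such as
#
#     [experimental]
#     evolution = createmarkers, exchange
#
# makes isenabled(repo, createmarkersopt) return True, and the special value
# 'all' enables every option at once.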
122 122 ### obsolescence marker flag
123 123
124 124 ## bumpedfix flag
125 125 #
126 126 # When a changeset A' succeeds a changeset A which became public, we call A'
127 127 # "bumped" because it is a successor of a public changeset.
128 128 #
129 129 # o A' (bumped)
130 130 # |`:
131 131 # | o A
132 132 # |/
133 133 # o Z
134 134 #
135 135 # The way to solve this situation is to create a new changeset Ad as a child
136 136 # of A. This changeset has the same content as A'. So the diff from A to A'
137 137 # is the same as the diff from A to Ad. Ad is marked as a successor of A'.
138 138 #
139 139 # o Ad
140 140 # |`:
141 141 # | x A'
142 142 # |'|
143 143 # o | A
144 144 # |/
145 145 # o Z
146 146 #
147 147 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
148 148 # as bumped too, we add the `bumpedfix` flag to the marker <A', (Ad,)>.
149 149 # This flag means that the successor expresses the changes between the public
150 150 # and bumped versions and fixes the situation, breaking the transitivity of
151 151 # "bumped" here.
152 152 bumpedfix = 1
153 153 usingsha256 = 2
154 154
155 155 ## Parsing and writing of version "0"
156 156 #
157 157 # The header is followed by the markers. Each marker is made of:
158 158 #
159 159 # - 1 uint8 : number of new changesets "N", can be zero.
160 160 #
161 161 # - 1 uint32: metadata size "M" in bytes.
162 162 #
163 163 # - 1 byte: a bit field. It is reserved for flags used in common
164 164 # obsolete marker operations, to avoid repeated decoding of metadata
165 165 # entries.
166 166 #
167 167 # - 20 bytes: obsoleted changeset identifier.
168 168 #
169 169 # - N*20 bytes: new changesets identifiers.
170 170 #
171 171 # - M bytes: metadata as a sequence of nul-terminated strings. Each
172 172 # string contains a key and a value, separated by a colon ':', without
173 173 # additional encoding. Keys cannot contain '\0' or ':' and values
174 174 # cannot contain '\0'.
175 175 _fm0version = 0
176 176 _fm0fixed = '>BIB20s'
177 177 _fm0node = '20s'
178 178 _fm0fsize = _calcsize(_fm0fixed)
179 179 _fm0fnodesize = _calcsize(_fm0node)
180 180
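# Illustrative sketch (not part of the change): hand-packing one version-0
# marker, mirroring _fm0encodeonemarker() further below; node ids are made up:
#
#     prec = '\x11' * 20                  # obsoleted changeset id
#     succ = '\x22' * 20                  # single successor id
#     meta = 'user:alice'                 # see _fm0encodemeta()
#     raw = _pack(_fm0fixed + _fm0node, 1, len(meta), 0, prec, succ) + meta
#
# i.e. numsuc=1, mdsize=len(meta), flags=0, then the nodes and the metadata.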
181 181 def _fm0readmarkers(data, off):
182 182 # Loop on markers
183 183 l = len(data)
184 184 while off + _fm0fsize <= l:
185 185 # read fixed part
186 186 cur = data[off:off + _fm0fsize]
187 187 off += _fm0fsize
188 188 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
189 189 # read replacement
190 190 sucs = ()
191 191 if numsuc:
192 192 s = (_fm0fnodesize * numsuc)
193 193 cur = data[off:off + s]
194 194 sucs = _unpack(_fm0node * numsuc, cur)
195 195 off += s
196 196 # read metadata
197 197 # (metadata will be decoded on demand)
198 198 metadata = data[off:off + mdsize]
199 199 if len(metadata) != mdsize:
200 200 raise error.Abort(_('parsing obsolete marker: metadata is too '
201 201 'short, %d bytes expected, got %d')
202 202 % (mdsize, len(metadata)))
203 203 off += mdsize
204 204 metadata = _fm0decodemeta(metadata)
205 205 try:
206 206 when, offset = metadata.pop('date', '0 0').split(' ')
207 207 date = float(when), int(offset)
208 208 except ValueError:
209 209 date = (0., 0)
210 210 parents = None
211 211 if 'p2' in metadata:
212 212 parents = (metadata.pop('p1', None), metadata.pop('p2', None))
213 213 elif 'p1' in metadata:
214 214 parents = (metadata.pop('p1', None),)
215 215 elif 'p0' in metadata:
216 216 parents = ()
217 217 if parents is not None:
218 218 try:
219 219 parents = tuple(node.bin(p) for p in parents)
220 220 # if parent content is not a nodeid, drop the data
221 221 for p in parents:
222 222 if len(p) != 20:
223 223 parents = None
224 224 break
225 225 except TypeError:
226 226 # if content cannot be translated to nodeid drop the data.
227 227 parents = None
228 228
229 229 metadata = tuple(sorted(metadata.iteritems()))
230 230
231 231 yield (pre, sucs, flags, metadata, date, parents)
232 232
233 233 def _fm0encodeonemarker(marker):
234 234 pre, sucs, flags, metadata, date, parents = marker
235 235 if flags & usingsha256:
236 236 raise error.Abort(_('cannot handle sha256 with old obsstore format'))
237 237 metadata = dict(metadata)
238 238 time, tz = date
239 239 metadata['date'] = '%r %i' % (time, tz)
240 240 if parents is not None:
241 241 if not parents:
242 242 # mark that we explicitly recorded no parents
243 243 metadata['p0'] = ''
244 244 for i, p in enumerate(parents, 1):
245 245 metadata['p%i' % i] = node.hex(p)
246 246 metadata = _fm0encodemeta(metadata)
247 247 numsuc = len(sucs)
248 248 format = _fm0fixed + (_fm0node * numsuc)
249 249 data = [numsuc, len(metadata), flags, pre]
250 250 data.extend(sucs)
251 251 return _pack(format, *data) + metadata
252 252
253 253 def _fm0encodemeta(meta):
254 254 """Return encoded metadata string to string mapping.
255 255
256 256 Assume no ':' in keys and no '\0' in either keys or values."""
257 257 for key, value in meta.iteritems():
258 258 if ':' in key or '\0' in key:
259 259 raise ValueError("':' and '\0' are forbidden in metadata keys")
260 260 if '\0' in value:
261 261 raise ValueError("'\0' is forbidden in metadata values")
262 262 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
263 263
264 264 def _fm0decodemeta(data):
265 265 """Return string to string dictionary from encoded version."""
266 266 d = {}
267 267 for l in data.split('\0'):
268 268 if l:
269 269 key, value = l.split(':')
270 270 d[key] = value
271 271 return d
272 272
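# Illustrative note (not part of the change): the two helpers above form a
# tiny codec for flat string-to-string mappings and round-trip cleanly:
#
#     _fm0decodemeta(_fm0encodemeta({'user': 'alice'})) == {'user': 'alice'}
#
# provided keys contain neither ':' nor '\0' and values contain no '\0'.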
273 273 ## Parsing and writing of version "1"
274 274 #
275 275 # The header is followed by the markers. Each marker is made of:
276 276 #
277 277 # - uint32: total size of the marker (including this field)
278 278 #
279 279 # - float64: date in seconds since epoch
280 280 #
281 281 # - int16: timezone offset in minutes
282 282 #
283 283 # - uint16: a bit field. It is reserved for flags used in common
284 284 # obsolete marker operations, to avoid repeated decoding of metadata
285 285 # entries.
286 286 #
287 287 # - uint8: number of successors "N", can be zero.
288 288 #
289 289 # - uint8: number of parents "P", can be zero.
290 290 #
291 291 # 0: parents data stored but no parent,
292 292 # 1: one parent stored,
293 293 # 2: two parents stored,
294 294 # 3: no parent data stored
295 295 #
296 296 # - uint8: number of metadata entries M
297 297 #
298 298 # - 20 or 32 bytes: precursor changeset identifier.
299 299 #
300 300 # - N*(20 or 32) bytes: successors changesets identifiers.
301 301 #
302 302 # - P*(20 or 32) bytes: parents of the precursors changesets.
303 303 #
304 304 # - M*(uint8, uint8): size of all metadata entries (key and value)
305 305 #
306 306 # - remaining bytes: the metadata, each (key, value) pair after the other.
307 307 _fm1version = 1
308 308 _fm1fixed = '>IdhHBBB20s'
309 309 _fm1nodesha1 = '20s'
310 310 _fm1nodesha256 = '32s'
311 311 _fm1nodesha1size = _calcsize(_fm1nodesha1)
312 312 _fm1nodesha256size = _calcsize(_fm1nodesha256)
313 313 _fm1fsize = _calcsize(_fm1fixed)
314 314 _fm1parentnone = 3
315 315 _fm1parentshift = 14
316 316 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
317 317 _fm1metapair = 'BB'
318 318 _fm1metapairsize = _calcsize('BB')
319 319
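# Illustrative sketch (not part of the change): the fixed part of a
# version-1 marker unpacks in a single struct call, as the pure reader
# below does:
#
#     (totalsize, secs, tz, flags, numsuc,
#      numpar, nummeta, prec) = _unpack(_fm1fixed, data[off:off + _fm1fsize])
#
# with numpar == _fm1parentnone (3) meaning "no parent data stored".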
320 320 def _fm1purereadmarkers(data, off):
321 321 # make some global constants local for performance
322 322 noneflag = _fm1parentnone
323 323 sha2flag = usingsha256
324 324 sha1size = _fm1nodesha1size
325 325 sha2size = _fm1nodesha256size
326 326 sha1fmt = _fm1nodesha1
327 327 sha2fmt = _fm1nodesha256
328 328 metasize = _fm1metapairsize
329 329 metafmt = _fm1metapair
330 330 fsize = _fm1fsize
331 331 unpack = _unpack
332 332
333 333 # Loop on markers
334 334 stop = len(data) - _fm1fsize
335 335 ufixed = struct.Struct(_fm1fixed).unpack
336 336
337 337 while off <= stop:
338 338 # read fixed part
339 339 o1 = off + fsize
340 340 t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])
341 341
342 342 if flags & sha2flag:
343 343 # FIXME: prec was read as a SHA1, needs to be amended
344 344
345 345 # read 0 or more successors
346 346 if numsuc == 1:
347 347 o2 = o1 + sha2size
348 348 sucs = (data[o1:o2],)
349 349 else:
350 350 o2 = o1 + sha2size * numsuc
351 351 sucs = unpack(sha2fmt * numsuc, data[o1:o2])
352 352
353 353 # read parents
354 354 if numpar == noneflag:
355 355 o3 = o2
356 356 parents = None
357 357 elif numpar == 1:
358 358 o3 = o2 + sha2size
359 359 parents = (data[o2:o3],)
360 360 else:
361 361 o3 = o2 + sha2size * numpar
362 362 parents = unpack(sha2fmt * numpar, data[o2:o3])
363 363 else:
364 364 # read 0 or more successors
365 365 if numsuc == 1:
366 366 o2 = o1 + sha1size
367 367 sucs = (data[o1:o2],)
368 368 else:
369 369 o2 = o1 + sha1size * numsuc
370 370 sucs = unpack(sha1fmt * numsuc, data[o1:o2])
371 371
372 372 # read parents
373 373 if numpar == noneflag:
374 374 o3 = o2
375 375 parents = None
376 376 elif numpar == 1:
377 377 o3 = o2 + sha1size
378 378 parents = (data[o2:o3],)
379 379 else:
380 380 o3 = o2 + sha1size * numpar
381 381 parents = unpack(sha1fmt * numpar, data[o2:o3])
382 382
383 383 # read metadata
384 384 off = o3 + metasize * nummeta
385 385 metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
386 386 metadata = []
387 387 for idx in xrange(0, len(metapairsize), 2):
388 388 o1 = off + metapairsize[idx]
389 389 o2 = o1 + metapairsize[idx + 1]
390 390 metadata.append((data[off:o1], data[o1:o2]))
391 391 off = o2
392 392
393 393 yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
394 394
395 395 def _fm1encodeonemarker(marker):
396 396 pre, sucs, flags, metadata, date, parents = marker
397 397 # determine node size
398 398 _fm1node = _fm1nodesha1
399 399 if flags & usingsha256:
400 400 _fm1node = _fm1nodesha256
401 401 numsuc = len(sucs)
402 402 numextranodes = numsuc
403 403 if parents is None:
404 404 numpar = _fm1parentnone
405 405 else:
406 406 numpar = len(parents)
407 407 numextranodes += numpar
408 408 formatnodes = _fm1node * numextranodes
409 409 formatmeta = _fm1metapair * len(metadata)
410 410 format = _fm1fixed + formatnodes + formatmeta
411 411 # tz is stored in minutes so we divide by 60
412 412 tz = date[1]//60
413 413 data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
414 414 data.extend(sucs)
415 415 if parents is not None:
416 416 data.extend(parents)
417 417 totalsize = _calcsize(format)
418 418 for key, value in metadata:
419 419 lk = len(key)
420 420 lv = len(value)
421 421 data.append(lk)
422 422 data.append(lv)
423 423 totalsize += lk + lv
424 424 data[0] = totalsize
425 425 data = [_pack(format, *data)]
426 426 for key, value in metadata:
427 427 data.append(key)
428 428 data.append(value)
429 429 return ''.join(data)
430 430
431 431 def _fm1readmarkers(data, off):
432 432 native = getattr(parsers, 'fm1readmarkers', None)
433 433 if not native:
434 434 return _fm1purereadmarkers(data, off)
435 435 stop = len(data) - _fm1fsize
436 436 return native(data, off, stop)
437 437
438 438 # mapping to read/write various marker formats
439 439 # <version> -> (decoder, encoder)
440 440 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
441 441 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
442 442
443 443 def _readmarkerversion(data):
444 444 return _unpack('>B', data[0:1])[0]
445 445
446 446 @util.nogc
447 447 def _readmarkers(data):
448 448 """Read and enumerate markers from raw data"""
449 449 diskversion = _readmarkerversion(data)
450 450 off = 1
451 451 if diskversion not in formats:
452 452 msg = _('parsing obsolete marker: unknown version %r') % diskversion
453 453 raise error.UnknownVersion(msg, version=diskversion)
454 454 return diskversion, formats[diskversion][0](data, off)
455 455
456 456 def encodeheader(version=_fm0version):
457 457 return _pack('>B', version)
458 458
459 459 def encodemarkers(markers, addheader=False, version=_fm0version):
460 460 # Kept separate from flushmarkers(), it will be reused for
461 461 # markers exchange.
462 462 encodeone = formats[version][1]
463 463 if addheader:
464 464 yield encodeheader(version)
465 465 for marker in markers:
466 466 yield encodeone(marker)
467 467
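# Illustrative sketch (not part of the change): producing a complete on-disk
# stream from an iterable of marker tuples with the helper above:
#
#     blob = ''.join(encodemarkers(markers, addheader=True,
#                                  version=_fm1version))
#
# _readmarkers(blob) above would recover the same markers.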
468 468 @util.nogc
469 469 def _addsuccessors(successors, markers):
470 470 for mark in markers:
471 471 successors.setdefault(mark[0], set()).add(mark)
472 472
473 473 @util.nogc
474 474 def _addprecursors(precursors, markers):
475 475 for mark in markers:
476 476 for suc in mark[1]:
477 477 precursors.setdefault(suc, set()).add(mark)
478 478
479 479 @util.nogc
480 480 def _addchildren(children, markers):
481 481 for mark in markers:
482 482 parents = mark[5]
483 483 if parents is not None:
484 484 for p in parents:
485 485 children.setdefault(p, set()).add(mark)
486 486
487 487 def _checkinvalidmarkers(markers):
488 488 """search for marker with invalid data and raise error if needed
489 489
490 490 Exists as a separate function to allow the evolve extension to provide
491 491 more subtle handling.
492 492 """
493 493 for mark in markers:
494 494 if node.nullid in mark[1]:
495 495 raise error.Abort(_('bad obsolescence marker detected: '
496 496 'invalid successors nullid'))
497 497
498 498 class obsstore(object):
499 499 """Store obsolete markers
500 500
501 501 Markers can be accessed with three mappings:
502 502 - precursors[x] -> set(markers on precursor edges of x)
503 503 - successors[x] -> set(markers on successor edges of x)
504 504 - children[x] -> set(markers on precursor edges of children(x))
505 505 """
506 506
507 507 fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
508 508 # prec: nodeid, precursor changesets
509 509 # succs: tuple of nodeid, successor changesets (0-N length)
510 510 # flag: integer, flag field carrying modifier for the markers (see doc)
511 511 # meta: binary blob, encoded metadata dictionary
512 512 # date: (float, int) tuple, date of marker creation
513 513 # parents: (tuple of nodeid) or None, parents of precursors
514 514 # None is used when no data has been recorded
515 515
516 516 def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
517 517 # caches for various obsolescence related data
518 518 self.caches = {}
519 519 self.svfs = svfs
520 520 self._defaultformat = defaultformat
521 521 self._readonly = readonly
522 522
523 523 def __iter__(self):
524 524 return iter(self._all)
525 525
526 526 def __len__(self):
527 527 return len(self._all)
528 528
529 529 def __nonzero__(self):
530 530 if not self._cached('_all'):
531 531 try:
532 532 return self.svfs.stat('obsstore').st_size > 1
533 533 except OSError as inst:
534 534 if inst.errno != errno.ENOENT:
535 535 raise
536 536 # just build an empty _all list if no obsstore exists, which
537 537 # avoids further stat() syscalls
538 538 pass
539 539 return bool(self._all)
540 540
541 541 __bool__ = __nonzero__
542 542
543 543 @property
544 544 def readonly(self):
545 545 """True if marker creation is disabled
546 546
547 547 Remove me in the future when obsolete markers are always on."""
548 548 return self._readonly
549 549
550 550 def create(self, transaction, prec, succs=(), flag=0, parents=None,
551 551 date=None, metadata=None, ui=None):
552 552 """obsolete: add a new obsolete marker
553 553
554 554 * ensure it is hashable
555 555 * check mandatory metadata
556 556 * encode metadata
557 557
558 558 If you are a human writing code that creates markers, you want to use the
559 559 `createmarkers` function in this module instead.
560 560
561 561 Return True if a new marker has been added, False if the marker
562 562 already existed (no-op).
563 563 """
564 564 if metadata is None:
565 565 metadata = {}
566 566 if date is None:
567 567 if 'date' in metadata:
568 568 # as a courtesy for out-of-tree extensions
569 569 date = util.parsedate(metadata.pop('date'))
570 570 elif ui is not None:
571 571 date = ui.configdate('devel', 'default-date')
572 572 if date is None:
573 573 date = util.makedate()
574 574 else:
575 575 date = util.makedate()
576 576 if len(prec) != 20:
577 577 raise ValueError(prec)
578 578 for succ in succs:
579 579 if len(succ) != 20:
580 580 raise ValueError(succ)
581 581 if prec in succs:
582 582 raise ValueError(_('in-marker cycle with %s') % node.hex(prec))
583 583
584 584 metadata = tuple(sorted(metadata.iteritems()))
585 585
586 586 marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
587 587 return bool(self.add(transaction, [marker]))
588 588
589 589 def add(self, transaction, markers):
590 590 """Add new markers to the store
591 591
592 592 Takes care of filtering duplicates.
593 593 Returns the number of new markers."""
594 594 if self._readonly:
595 595 raise error.Abort(_('creating obsolete markers is not enabled on '
596 596 'this repo'))
597 597 known = set()
598 598 getsuccessors = self.successors.get
599 599 new = []
600 600 for m in markers:
601 601 if m not in getsuccessors(m[0], ()) and m not in known:
602 602 known.add(m)
603 603 new.append(m)
604 604 if new:
605 605 f = self.svfs('obsstore', 'ab')
606 606 try:
607 607 offset = f.tell()
608 608 transaction.add('obsstore', offset)
609 609 # offset == 0: new file - add the version header
610 610 for bytes in encodemarkers(new, offset == 0, self._version):
611 611 f.write(bytes)
612 612 finally:
613 613 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
614 614 # call 'filecacheentry.refresh()' here
615 615 f.close()
616 616 self._addmarkers(new)
617 617 # new markers *may* have changed several sets. invalidate the cache.
618 618 self.caches.clear()
619 619 # records the number of new markers for the transaction hooks
620 620 previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
621 621 transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
622 622 return len(new)
623 623
624 624 def mergemarkers(self, transaction, data):
625 625 """merge a binary stream of markers inside the obsstore
626 626
627 627 Returns the number of new markers added."""
628 628 version, markers = _readmarkers(data)
629 629 return self.add(transaction, markers)
630 630
631 631 @propertycache
632 632 def _data(self):
633 633 return self.svfs.tryread('obsstore')
634 634
635 635 @propertycache
636 636 def _version(self):
637 637 if len(self._data) >= 1:
638 638 return _readmarkerversion(self._data)
639 639 else:
640 640 return self._defaultformat
641 641
642 642 @propertycache
643 643 def _all(self):
644 644 data = self._data
645 645 if not data:
646 646 return []
647 647 self._version, markers = _readmarkers(data)
648 648 markers = list(markers)
649 649 _checkinvalidmarkers(markers)
650 650 return markers
651 651
652 652 @propertycache
653 653 def successors(self):
654 654 successors = {}
655 655 _addsuccessors(successors, self._all)
656 656 return successors
657 657
658 658 @propertycache
659 659 def precursors(self):
660 660 precursors = {}
661 661 _addprecursors(precursors, self._all)
662 662 return precursors
663 663
664 664 @propertycache
665 665 def children(self):
666 666 children = {}
667 667 _addchildren(children, self._all)
668 668 return children
669 669
670 670 def _cached(self, attr):
671 671 return attr in self.__dict__
672 672
673 673 def _addmarkers(self, markers):
674 674 markers = list(markers) # to allow repeated iteration
675 675 self._all.extend(markers)
676 676 if self._cached('successors'):
677 677 _addsuccessors(self.successors, markers)
678 678 if self._cached('precursors'):
679 679 _addprecursors(self.precursors, markers)
680 680 if self._cached('children'):
681 681 _addchildren(self.children, markers)
682 682 _checkinvalidmarkers(markers)
683 683
684 684 def relevantmarkers(self, nodes):
685 685 """return a set of all obsolescence markers relevant to a set of nodes.
686 686
687 687 "relevant" to a set of nodes mean:
688 688
689 689 - marker that use this changeset as successor
690 690 - prune marker of direct children on this changeset
691 691 - recursive application of the two rules on precursors of these markers
692 692
693 693 It is a set so you cannot rely on order."""
694 694
695 695 pendingnodes = set(nodes)
696 696 seenmarkers = set()
697 697 seennodes = set(pendingnodes)
698 698 precursorsmarkers = self.precursors
699 699 succsmarkers = self.successors
700 700 children = self.children
701 701 while pendingnodes:
702 702 direct = set()
703 703 for current in pendingnodes:
704 704 direct.update(precursorsmarkers.get(current, ()))
705 705 pruned = [m for m in children.get(current, ()) if not m[1]]
706 706 direct.update(pruned)
707 707 pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
708 708 direct.update(pruned)
709 709 direct -= seenmarkers
710 710 pendingnodes = set([m[0] for m in direct])
711 711 seenmarkers |= direct
712 712 pendingnodes -= seennodes
713 713 seennodes |= pendingnodes
714 714 return seenmarkers
715 715
716 716 def makestore(ui, repo):
717 717 """Create an obsstore instance from a repo."""
718 718 # read default format for new obsstore.
719 719 # developer config: format.obsstore-version
720 defaultformat = ui.configint('format', 'obsstore-version', None)
720 defaultformat = ui.configint('format', 'obsstore-version')
721 721 # rely on obsstore class default when possible.
722 722 kwargs = {}
723 723 if defaultformat is not None:
724 724 kwargs['defaultformat'] = defaultformat
725 725 readonly = not isenabled(repo, createmarkersopt)
726 726 store = obsstore(repo.svfs, readonly=readonly, **kwargs)
727 727 if store and readonly:
728 728 ui.warn(_('obsolete feature not enabled but %i markers found!\n')
729 729 % len(list(store)))
730 730 return store
731 731
732 732 def commonversion(versions):
733 733 """Return the newest version listed in both versions and our local formats.
734 734
735 735 Returns None if no common version exists.
736 736 """
737 737 versions.sort(reverse=True)
738 738 # search for highest version known on both side
739 739 for v in versions:
740 740 if v in formats:
741 741 return v
742 742 return None
743 743
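# Illustrative example (not part of the change): commonversion() returns the
# newest format known on both sides, given that we support formats 0 and 1:
#
#     commonversion([2, 1, 0]) == 1
#     commonversion([2, 3]) is None
#
# Note it sorts (and therefore mutates) the list it is given.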
744 744 # arbitrarily picked to fit into the 8K limit of HTTP servers
745 745 # you have to take into account:
746 746 # - the version header
747 747 # - the base85 encoding
748 748 _maxpayload = 5300
749 749
750 750 def _pushkeyescape(markers):
751 751 """encode markers into a dict suitable for pushkey exchange
752 752
753 753 - binary data is base85 encoded
754 754 - split into chunks smaller than 5300 bytes"""
755 755 keys = {}
756 756 parts = []
757 757 currentlen = _maxpayload * 2 # ensure we create a new part
758 758 for marker in markers:
759 759 nextdata = _fm0encodeonemarker(marker)
760 760 if (len(nextdata) + currentlen > _maxpayload):
761 761 currentpart = []
762 762 currentlen = 0
763 763 parts.append(currentpart)
764 764 currentpart.append(nextdata)
765 765 currentlen += len(nextdata)
766 766 for idx, part in enumerate(reversed(parts)):
767 767 data = ''.join([_pack('>B', _fm0version)] + part)
768 768 keys['dump%i' % idx] = util.b85encode(data)
769 769 return keys
770 770
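# Illustrative note (not part of the change): each 'dumpN' value decodes back
# to a self-contained version-0 stream, so a receiver can, per key, do:
#
#     version, markers = _readmarkers(util.b85decode(keys['dump0']))
#
# which is what pushmarker() below does through mergemarkers(). 'dump0' only
# exists when at least one marker was encoded.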
771 771 def listmarkers(repo):
772 772 """List markers over pushkey"""
773 773 if not repo.obsstore:
774 774 return {}
775 775 return _pushkeyescape(sorted(repo.obsstore))
776 776
777 777 def pushmarker(repo, key, old, new):
778 778 """Push markers over pushkey"""
779 779 if not key.startswith('dump'):
780 780 repo.ui.warn(_('unknown key: %r') % key)
781 781 return False
782 782 if old:
783 783 repo.ui.warn(_('unexpected old value for %r') % key)
784 784 return False
785 785 data = util.b85decode(new)
786 786 lock = repo.lock()
787 787 try:
788 788 tr = repo.transaction('pushkey: obsolete markers')
789 789 try:
790 790 repo.obsstore.mergemarkers(tr, data)
791 791 repo.invalidatevolatilesets()
792 792 tr.close()
793 793 return True
794 794 finally:
795 795 tr.release()
796 796 finally:
797 797 lock.release()
798 798
799 799 # keep compatibility for the 4.3 cycle
800 800 def allprecursors(obsstore, nodes, ignoreflags=0):
801 801 movemsg = 'obsolete.allprecursors moved to obsutil.allprecursors'
802 802 util.nouideprecwarn(movemsg, '4.3')
803 803 return obsutil.allprecursors(obsstore, nodes, ignoreflags)
804 804
805 805 def allsuccessors(obsstore, nodes, ignoreflags=0):
806 806 movemsg = 'obsolete.allsuccessors moved to obsutil.allsuccessors'
807 807 util.nouideprecwarn(movemsg, '4.3')
808 808 return obsutil.allsuccessors(obsstore, nodes, ignoreflags)
809 809
810 810 def marker(repo, data):
811 811 movemsg = 'obsolete.marker moved to obsutil.marker'
812 812 repo.ui.deprecwarn(movemsg, '4.3')
813 813 return obsutil.marker(repo, data)
814 814
815 815 def getmarkers(repo, nodes=None, exclusive=False):
816 816 movemsg = 'obsolete.getmarkers moved to obsutil.getmarkers'
817 817 repo.ui.deprecwarn(movemsg, '4.3')
818 818 return obsutil.getmarkers(repo, nodes=nodes, exclusive=exclusive)
819 819
820 820 def exclusivemarkers(repo, nodes):
821 821 movemsg = 'obsolete.exclusivemarkers moved to obsutil.exclusivemarkers'
822 822 repo.ui.deprecwarn(movemsg, '4.3')
823 823 return obsutil.exclusivemarkers(repo, nodes)
824 824
825 825 def foreground(repo, nodes):
826 826 movemsg = 'obsolete.foreground moved to obsutil.foreground'
827 827 repo.ui.deprecwarn(movemsg, '4.3')
828 828 return obsutil.foreground(repo, nodes)
829 829
830 830 def successorssets(repo, initialnode, cache=None):
831 831 movemsg = 'obsolete.successorssets moved to obsutil.successorssets'
832 832 repo.ui.deprecwarn(movemsg, '4.3')
833 833 return obsutil.successorssets(repo, initialnode, cache=cache)
834 834
835 835 # mapping of 'set-name' -> <function to compute this set>
836 836 cachefuncs = {}
837 837 def cachefor(name):
838 838 """Decorator to register a function as computing the cache for a set"""
839 839 def decorator(func):
840 840 if name in cachefuncs:
841 841 msg = "duplicated registration for volatileset '%s' (existing: %r)"
842 842 raise error.ProgrammingError(msg % (name, cachefuncs[name]))
843 843 cachefuncs[name] = func
844 844 return func
845 845 return decorator
846 846
847 847 def getrevs(repo, name):
848 848 """Return the set of revision that belong to the <name> set
849 849
850 850 Such access may compute the set and cache it for future use"""
851 851 repo = repo.unfiltered()
852 852 if not repo.obsstore:
853 853 return frozenset()
854 854 if name not in repo.obsstore.caches:
855 855 repo.obsstore.caches[name] = cachefuncs[name](repo)
856 856 return repo.obsstore.caches[name]
857 857
858 858 # To keep it simple we need to invalidate the obsolescence cache when:
859 859 #
860 860 # - a new changeset is added
861 861 # - the public phase is changed
862 862 # - obsolescence markers are added
863 863 # - strip is used on a repo
864 864 def clearobscaches(repo):
865 865 """Remove all obsolescence related cache from a repo
866 866
867 867 This remove all cache in obsstore is the obsstore already exist on the
868 868 repo.
869 869
870 870 (We could be smarter here given the exact event that trigger the cache
871 871 clearing)"""
872 872 # only clear cache is there is obsstore data in this repo
873 873 if 'obsstore' in repo._filecache:
874 874 repo.obsstore.caches.clear()
875 875
876 876 def _mutablerevs(repo):
877 877 """the set of mutable revision in the repository"""
878 878 return repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
879 879
880 880 @cachefor('obsolete')
881 881 def _computeobsoleteset(repo):
882 882 """the set of obsolete revisions"""
883 883 getnode = repo.changelog.node
884 884 notpublic = _mutablerevs(repo)
885 885 isobs = repo.obsstore.successors.__contains__
886 886 obs = set(r for r in notpublic if isobs(getnode(r)))
887 887 return obs
888 888
889 889 @cachefor('unstable')
890 890 def _computeunstableset(repo):
891 891 """the set of non obsolete revisions with obsolete parents"""
892 892 pfunc = repo.changelog.parentrevs
893 893 mutable = _mutablerevs(repo)
894 894 obsolete = getrevs(repo, 'obsolete')
895 895 others = mutable - obsolete
896 896 unstable = set()
897 897 for r in sorted(others):
898 898 # A rev is unstable if one of its parents is obsolete or unstable;
899 899 # this works since we traverse in increasing rev order
900 900 for p in pfunc(r):
901 901 if p in obsolete or p in unstable:
902 902 unstable.add(r)
903 903 break
904 904 return unstable
905 905
906 906 @cachefor('suspended')
907 907 def _computesuspendedset(repo):
908 908 """the set of obsolete parents with non obsolete descendants"""
909 909 suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
910 910 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
911 911
912 912 @cachefor('extinct')
913 913 def _computeextinctset(repo):
914 914 """the set of obsolete parents without non obsolete descendants"""
915 915 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
916 916
917 917
918 918 @cachefor('bumped')
919 919 def _computebumpedset(repo):
920 920 """the set of revs trying to obsolete public revisions"""
921 921 bumped = set()
922 922 # util function (avoid attribute lookup in the loop)
923 923 phase = repo._phasecache.phase # would be faster to grab the full list
924 924 public = phases.public
925 925 cl = repo.changelog
926 926 torev = cl.nodemap.get
927 927 for ctx in repo.set('(not public()) and (not obsolete())'):
928 928 rev = ctx.rev()
929 929 # We only evaluate mutable, non-obsolete revisions
930 930 node = ctx.node()
931 931 # (future) A cache of precursors may be worthwhile if splits are very common
932 932 for pnode in obsutil.allprecursors(repo.obsstore, [node],
933 933 ignoreflags=bumpedfix):
934 934 prev = torev(pnode) # unfiltered! but so is phasecache
935 935 if (prev is not None) and (phase(repo, prev) <= public):
936 936 # we have a public precursor
937 937 bumped.add(rev)
938 938 break # Next draft!
939 939 return bumped
940 940
941 941 @cachefor('divergent')
942 942 def _computedivergentset(repo):
943 943 """the set of rev that compete to be the final successors of some revision.
944 944 """
945 945 divergent = set()
946 946 obsstore = repo.obsstore
947 947 newermap = {}
948 948 for ctx in repo.set('(not public()) - obsolete()'):
949 949 mark = obsstore.precursors.get(ctx.node(), ())
950 950 toprocess = set(mark)
951 951 seen = set()
952 952 while toprocess:
953 953 prec = toprocess.pop()[0]
954 954 if prec in seen:
955 955 continue # emergency cycle hanging prevention
956 956 seen.add(prec)
957 957 if prec not in newermap:
958 958 obsutil.successorssets(repo, prec, newermap)
959 959 newer = [n for n in newermap[prec] if n]
960 960 if len(newer) > 1:
961 961 divergent.add(ctx.rev())
962 962 break
963 963 toprocess.update(obsstore.precursors.get(prec, ()))
964 964 return divergent
965 965
966 966
967 967 def createmarkers(repo, relations, flag=0, date=None, metadata=None,
968 968 operation=None):
969 969 """Add obsolete markers between changesets in a repo
970 970
971 971 <relations> must be an iterable of (<old>, (<new>, ...)[, {metadata}])
972 972 tuples. `old` and `new` are changectx. metadata is an optional dictionary
973 973 containing metadata for this marker only. It is merged with the global
974 974 metadata specified through the `metadata` argument of this function.
975 975
976 976 Trying to obsolete a public changeset will raise an exception.
977 977
978 978 Current user and date are used except if specified otherwise in the
979 979 metadata attribute.
980 980
981 981 This function operates within a transaction of its own, but does
982 982 not take any lock on the repo.
983 983 """
984 984 # prepare metadata
985 985 if metadata is None:
986 986 metadata = {}
987 987 if 'user' not in metadata:
988 988 metadata['user'] = repo.ui.username()
989 989 useoperation = repo.ui.configbool('experimental',
990 990 'evolution.track-operation',
991 991 False)
992 992 if useoperation and operation:
993 993 metadata['operation'] = operation
994 994 tr = repo.transaction('add-obsolescence-marker')
995 995 try:
996 996 markerargs = []
997 997 for rel in relations:
998 998 prec = rel[0]
999 999 sucs = rel[1]
1000 1000 localmetadata = metadata.copy()
1001 1001 if 2 < len(rel):
1002 1002 localmetadata.update(rel[2])
1003 1003
1004 1004 if not prec.mutable():
1005 1005 raise error.Abort(_("cannot obsolete public changeset: %s")
1006 1006 % prec,
1007 1007 hint="see 'hg help phases' for details")
1008 1008 nprec = prec.node()
1009 1009 nsucs = tuple(s.node() for s in sucs)
1010 1010 npare = None
1011 1011 if not nsucs:
1012 1012 npare = tuple(p.node() for p in prec.parents())
1013 1013 if nprec in nsucs:
1014 1014 raise error.Abort(_("changeset %s cannot obsolete itself")
1015 1015 % prec)
1016 1016
1017 1017 # Creating the marker causes the hidden cache to become invalid,
1018 1018 # which causes recomputation when we ask for prec.parents() above.
1019 1019 # Resulting in n^2 behavior. So let's prepare all of the args
1020 1020 # first, then create the markers.
1021 1021 markerargs.append((nprec, nsucs, npare, localmetadata))
1022 1022
1023 1023 for args in markerargs:
1024 1024 nprec, nsucs, npare, localmetadata = args
1025 1025 repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
1026 1026 date=date, metadata=localmetadata,
1027 1027 ui=repo.ui)
1028 1028 repo.filteredrevcache.clear()
1029 1029 tr.close()
1030 1030 finally:
1031 1031 tr.release()
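# Illustrative sketch (not part of the change): recording that changectx
# 'old' was rewritten into changectx 'new' (hypothetical variables):
#
#     createmarkers(repo, [(old, (new,))], operation='amend')
#
# A relation with an empty successor tuple, (old, ()), records a prune.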