##// END OF EJS Templates
obsolete: refactor function for getting obsolete options...
Gregory Szorc -
r37149:d30810d0 default
parent child Browse files
Show More
@@ -1,1014 +1,1023 b''
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 building new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "predecessor" and possible
24 24 replacements are called "successors". Markers that used changeset X as
25 25 a predecessor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
27 27 a successor are called "predecessor markers of Y" because they hold
28 28 information about the predecessors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
50 50 case. If two independent operations rewrite the same changeset A in to A' and
51 51 A'', we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53
54 54 (A, (B,)) and (A, (C,))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are stored in an append-only file stored in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
66 66 The header is followed by the markers. Marker format depend of the version. See
67 67 comment associated with each format for details.
68 68
69 69 """
70 70 from __future__ import absolute_import
71 71
72 72 import errno
73 73 import struct
74 74
75 75 from .i18n import _
76 76 from . import (
77 77 error,
78 78 node,
79 79 obsutil,
80 80 phases,
81 81 policy,
82 82 util,
83 83 )
84 84 from .utils import dateutil
85 85
86 86 parsers = policy.importmod(r'parsers')
87 87
88 88 _pack = struct.pack
89 89 _unpack = struct.unpack
90 90 _calcsize = struct.calcsize
91 91 propertycache = util.propertycache
92 92
93 93 # the obsolete feature is not mature enough to be enabled by default.
94 94 # you have to rely on a third party extension to enable this.
95 95 _enabled = False
96 96
97 97 # Options for obsolescence
98 98 createmarkersopt = 'createmarkers'
99 99 allowunstableopt = 'allowunstable'
100 100 exchangeopt = 'exchange'
101 101
def _getoptionvalue(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    # Modern spelling: experimental.evolution.<option>
    value = repo.ui.configbool('experimental', 'evolution.%s' % option)

    # Return the value only if defined
    if value is not None:
        return value

    # Fallback on the generic boolean form of the option
    try:
        return repo.ui.configbool('experimental', 'evolution')
    except (error.ConfigError, AttributeError):
        # Fallback on old-fashioned list config
        # inconsistent config: experimental.evolution
        legacy = set(repo.ui.configlist('experimental', 'evolution'))

        if 'all' in legacy:
            return True

        # For migration purposes, temporarily return true if the config hasn't
        # been set but _enabled is true.
        if not legacy and _enabled:
            return True

        # Temporary hack for next check
        if repo.ui.config('experimental', 'evolution.createmarkers'):
            legacy.add('createmarkers')

        return option in legacy
135 135
def getoptions(repo):
    """Returns dicts showing state of obsolescence features."""

    createmarkers = _getoptionvalue(repo, createmarkersopt)
    allowunstable = _getoptionvalue(repo, allowunstableopt)
    exchange = _getoptionvalue(repo, exchangeopt)

    # createmarkers must be enabled if other options are enabled
    if (allowunstable or exchange) and not createmarkers:
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return {
        createmarkersopt: createmarkers,
        allowunstableopt: allowunstable,
        exchangeopt: exchange,
    }
153
def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.

    Delegates to getoptions(), which performs the cross-option validation
    (e.g. 'createmarkers' must be on when other options are on).
    """
    # NOTE: the previous revision of this function duplicated the option
    # gathering and validation now living in getoptions(); that leftover
    # body (including the misspelled 'unstabluevalue' local and an
    # unreachable second return) has been removed.
    return getoptions(repo)[option]
150 159
151 160 # Creating aliases for marker flags because evolve extension looks for
152 161 # bumpedfix in obsolete.py
153 162 bumpedfix = obsutil.bumpedfix
154 163 usingsha256 = obsutil.usingsha256
155 164
156 165 ## Parsing and writing of version "0"
157 166 #
158 167 # The header is followed by the markers. Each marker is made of:
159 168 #
160 169 # - 1 uint8 : number of new changesets "N", can be zero.
161 170 #
162 171 # - 1 uint32: metadata size "M" in bytes.
163 172 #
164 173 # - 1 byte: a bit field. It is reserved for flags used in common
165 174 # obsolete marker operations, to avoid repeated decoding of metadata
166 175 # entries.
167 176 #
168 177 # - 20 bytes: obsoleted changeset identifier.
169 178 #
170 179 # - N*20 bytes: new changesets identifiers.
171 180 #
172 181 # - M bytes: metadata as a sequence of nul-terminated strings. Each
173 182 # string contains a key and a value, separated by a colon ':', without
174 183 # additional encoding. Keys cannot contain '\0' or ':' and values
175 184 # cannot contain '\0'.
176 185 _fm0version = 0
177 186 _fm0fixed = '>BIB20s'
178 187 _fm0node = '20s'
179 188 _fm0fsize = _calcsize(_fm0fixed)
180 189 _fm0fnodesize = _calcsize(_fm0node)
181 190
def _fm0readmarkers(data, off, stop):
    """Yield version-0 markers decoded from ``data[off:stop]``.

    Each yielded value is a 6-tuple
    ``(pre, sucs, flags, metadata, date, parents)`` matching
    ``obsstore.fields``.  Dates and parents are recovered from the
    metadata dictionary, where format 0 stores them.
    """
    # Loop on markers
    while off < stop:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            # format 0 keeps the date inside the metadata as "<when> <offset>"
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        # parents are stored as metadata keys p0/p1/p2; 'p0' alone means
        # "explicitly recorded as having no parents"
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        # sorted tuple keeps the marker hashable and canonical
        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)
232 241
def _fm0encodeonemarker(marker):
    """Encode one marker 6-tuple into the version-0 binary format.

    Date and parents have no dedicated fields in format 0, so they are
    folded back into the metadata dictionary before encoding.  Raises
    error.Abort for sha256 markers, which format 0 cannot represent.
    """
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents, 1):
            metadata['p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata
252 261
253 262 def _fm0encodemeta(meta):
254 263 """Return encoded metadata string to string mapping.
255 264
256 265 Assume no ':' in key and no '\0' in both key and value."""
257 266 for key, value in meta.iteritems():
258 267 if ':' in key or '\0' in key:
259 268 raise ValueError("':' and '\0' are forbidden in metadata key'")
260 269 if '\0' in value:
261 270 raise ValueError("':' is forbidden in metadata value'")
262 271 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
263 272
264 273 def _fm0decodemeta(data):
265 274 """Return string to string dictionary from encoded version."""
266 275 d = {}
267 276 for l in data.split('\0'):
268 277 if l:
269 278 key, value = l.split(':')
270 279 d[key] = value
271 280 return d
272 281
273 282 ## Parsing and writing of version "1"
274 283 #
275 284 # The header is followed by the markers. Each marker is made of:
276 285 #
277 286 # - uint32: total size of the marker (including this field)
278 287 #
279 288 # - float64: date in seconds since epoch
280 289 #
281 290 # - int16: timezone offset in minutes
282 291 #
283 292 # - uint16: a bit field. It is reserved for flags used in common
284 293 # obsolete marker operations, to avoid repeated decoding of metadata
285 294 # entries.
286 295 #
287 296 # - uint8: number of successors "N", can be zero.
288 297 #
289 298 # - uint8: number of parents "P", can be zero.
290 299 #
291 300 # 0: parents data stored but no parent,
292 301 # 1: one parent stored,
293 302 # 2: two parents stored,
294 303 # 3: no parent data stored
295 304 #
296 305 # - uint8: number of metadata entries M
297 306 #
298 307 # - 20 or 32 bytes: predecessor changeset identifier.
299 308 #
300 309 # - N*(20 or 32) bytes: successors changesets identifiers.
301 310 #
302 311 # - P*(20 or 32) bytes: parents of the predecessors changesets.
303 312 #
304 313 # - M*(uint8, uint8): size of all metadata entries (key and value)
305 314 #
306 315 # - remaining bytes: the metadata, each (key, value) pair after the other.
307 316 _fm1version = 1
308 317 _fm1fixed = '>IdhHBBB20s'
309 318 _fm1nodesha1 = '20s'
310 319 _fm1nodesha256 = '32s'
311 320 _fm1nodesha1size = _calcsize(_fm1nodesha1)
312 321 _fm1nodesha256size = _calcsize(_fm1nodesha256)
313 322 _fm1fsize = _calcsize(_fm1fixed)
314 323 _fm1parentnone = 3
315 324 _fm1parentshift = 14
316 325 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
317 326 _fm1metapair = 'BB'
318 327 _fm1metapairsize = _calcsize(_fm1metapair)
319 328
def _fm1purereadmarkers(data, off, stop):
    """Pure-python decoder for version-1 markers in ``data[off:stop]``.

    Yields the same 6-tuples as _fm0readmarkers.  Kept as a fallback for
    when the compiled parser (parsers.fm1readmarkers) is unavailable.
    """
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    ufixed = struct.Struct(_fm1fixed).unpack

    while off < stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        # the two branches below differ only in the node size used
        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata: first the (keysize, valuesize) pairs, then the
        # concatenated key/value payload
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        # tz was stored in minutes; callers expect seconds
        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
393 402
def _fm1encodeonemarker(marker):
    """Encode one marker 6-tuple into the version-1 binary format.

    Raises error.ProgrammingError when a metadata key or value exceeds
    255 bytes (sizes are stored in single bytes).
    """
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        # special numpar value meaning "no parent data recorded"
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    # data[0] is the total size, filled in below once known
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        if lk > 255:
            msg = ('obsstore metadata key cannot be longer than 255 bytes'
                   ' (key "%s" is %u bytes)') % (key, lk)
            raise error.ProgrammingError(msg)
        if lv > 255:
            msg = ('obsstore metadata value cannot be longer than 255 bytes'
                   ' (value "%s" for key "%s" is %u bytes)') % (value, key, lv)
            raise error.ProgrammingError(msg)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    # metadata payload follows the packed fixed/nodes/sizes section
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)
437 446
def _fm1readmarkers(data, off, stop):
    """Decode version-1 markers, preferring the compiled parser."""
    native = getattr(parsers, 'fm1readmarkers', None)
    if native:
        return native(data, off, stop)
    # no C extension available: use the pure-python fallback
    return _fm1purereadmarkers(data, off, stop)
443 452
444 453 # mapping to read/write various marker formats
445 454 # <version> -> (decoder, encoder)
446 455 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
447 456 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
448 457
def _readmarkerversion(data):
    """Return the format version stored in the first byte of ``data``."""
    (version,) = _unpack('>B', data[0:1])
    return version
451 460
@util.nogc
def _readmarkers(data, off=None, stop=None):
    """Read and enumerate markers from raw data"""
    diskversion = _readmarkerversion(data)
    if diskversion not in formats:
        msg = _('parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    if not off:
        off = 1  # skip 1 byte version number
    if stop is None:
        stop = len(data)
    decoder = formats[diskversion][0]
    return diskversion, decoder(data, off, stop)
464 473
def encodeheader(version=_fm0version):
    """Return the one-byte obsstore file header for ``version``."""
    return _pack('>B', version)
467 476
def encodemarkers(markers, addheader=False, version=_fm0version):
    """Generate the binary chunks encoding ``markers``.

    Kept separate from flushmarkers(), it will be reused for
    markers exchange.
    """
    encode = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for marker in markers:
        yield encode(marker)
476 485
@util.nogc
def _addsuccessors(successors, markers):
    """Index each marker by its predecessor node into ``successors``."""
    record = successors.setdefault
    for mark in markers:
        record(mark[0], set()).add(mark)
481 490
@util.nogc
def _addpredecessors(predecessors, markers):
    """Index each marker by each of its successor nodes."""
    for mark in markers:
        for succnode in mark[1]:
            predecessors.setdefault(succnode, set()).add(mark)
487 496
@util.nogc
def _addchildren(children, markers):
    """Index each marker by each recorded parent of its predecessor."""
    for mark in markers:
        if mark[5] is None:
            # no parent data recorded for this marker
            continue
        for parent in mark[5]:
            children.setdefault(parent, set()).add(mark)
495 504
def _checkinvalidmarkers(markers):
    """search for marker with invalid data and raise error if needed

    Exist as a separated function to allow the evolve extension for a more
    subtle handling.
    """
    if any(node.nullid in mark[1] for mark in markers):
        raise error.Abort(_('bad obsolescence marker detected: '
                            'invalid successors nullid'))
506 515
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - predecessors[x] -> set(markers on predecessors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on predecessors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec: nodeid, predecessors changesets
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of predecessors
    # None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related cache
        self.caches = {}
        self.svfs = svfs
        # format used when the on-disk store is empty (see _version)
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        # when markers are not parsed yet, answer from the file size: more
        # than the 1-byte version header means at least one marker exists
        if not self._cached(r'_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating marker you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker have been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = dateutil.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = dateutil.makedate()
            else:
                date = dateutil.makedate()
        # nodes must be full 20-byte binary nodeids
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        # sorted tuple makes the marker hashable and canonical
        metadata = tuple(sorted(metadata.iteritems()))

        marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicate.
        Return the number of new marker."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        # filter out markers already present (on disk, via the successors
        # index) or duplicated within this batch (via 'known')
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                data = b''.join(encodemarkers(new, offset == 0, self._version))
                f.write(data)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            addedmarkers = transaction.changes.get('obsmarkers')
            if addedmarkers is not None:
                addedmarkers.update(new)
            self._addmarkers(new, data)
            # new marker *may* have changed several set. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = '%d' % (previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        # raw on-disk bytes of the obsstore file ('' when absent)
        return self.svfs.tryread('obsstore')

    @propertycache
    def _version(self):
        # version read from disk when data exists, default format otherwise
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        # list of all markers parsed from disk, validated once
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        # predecessor-node -> set of markers
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def predecessors(self):
        # successor-node -> set of markers
        predecessors = {}
        _addpredecessors(predecessors, self._all)
        return predecessors

    @propertycache
    def children(self):
        # parent-node -> set of markers recorded on its children
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        # True when the named propertycache has already been computed
        return attr in self.__dict__

    def _addmarkers(self, markers, rawdata):
        # incrementally update _data, _all and any already-computed index
        markers = list(markers) # to allow repeated iteration
        self._data = self._data + rawdata
        self._all.extend(markers)
        if self._cached(r'successors'):
            _addsuccessors(self.successors, markers)
        if self._cached(r'predecessors'):
            _addpredecessors(self.predecessors, markers)
        if self._cached(r'children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes mean:

        - marker that use this changeset as successor
        - prune marker of direct children on this changeset
        - recursive application of the two rules on predecessors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.predecessors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                # prune markers (empty successor list) on direct children
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            # walk to the predecessors of the markers found in this round
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers
728 737
def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    defaultformat = ui.configint('format', 'obsstore-version')
    kwargs = {}
    if defaultformat is not None:
        kwargs[r'defaultformat'] = defaultformat
    # rely on obsstore class default when possible.
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **kwargs)
    if readonly and store:
        ui.warn(_('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
    return store
744 753
def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    # note: sorts the caller's list in place, highest version first
    versions.sort(reverse=True)
    return next((v for v in versions if v in formats), None)
756 765
757 766 # arbitrary picked to fit into 8K limit from HTTP server
758 767 # you have to take in account:
759 768 # - the version header
760 769 # - the base85 encoding
761 770 _maxpayload = 5300
762 771
def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    parts = []
    currentpart = None
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if len(nextdata) + currentlen > _maxpayload:
            # start a fresh chunk once the current one would overflow
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    keys = {}
    # chunks are numbered from the last part backwards
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(data)
    return keys
783 792
def listmarkers(repo):
    """List markers over pushkey"""
    store = repo.obsstore
    if not store:
        return {}
    return _pushkeyescape(sorted(store))
789 798
def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    # guard clauses: only 'dump*' keys with no previous value are accepted
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return False
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return False
    data = util.b85decode(new)
    with repo.lock(), repo.transaction('pushkey: obsolete markers') as tr:
        repo.obsstore.mergemarkers(tr, data)
        repo.invalidatevolatilesets()
        return True
803 812
804 813 # mapping of 'set-name' -> <function to compute this set>
805 814 cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def register(func):
        if name in cachefuncs:
            msg = "duplicated registration for volatileset '%s' (existing: %r)"
            raise error.ProgrammingError(msg % (name, cachefuncs[name]))
        cachefuncs[name] = func
        return func
    return register
815 824
def getrevs(repo, name):
    """Return the set of revision that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    caches = repo.obsstore.caches
    if name not in caches:
        # lazily compute and memoize the requested volatile set
        caches[name] = cachefuncs[name](repo)
    return caches[name]
826 835
827 836 # To be simple we need to invalidate obsolescence cache when:
828 837 #
829 838 # - new changeset is added:
830 839 # - public phase is changed
831 840 # - obsolescence marker are added
832 841 # - strip is used a repo
def clearobscaches(repo):
    """Remove all obsolescence related cache from a repo

    This removes all caches in the obsstore if the obsstore already exists
    on the repo.

    (We could be smarter here given the exact event that trigger the cache
    clearing)"""
    # only clear caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()
844 853
def _mutablerevs(repo):
    """the set of mutable revision in the repository"""
    mutablephases = (phases.draft, phases.secret)
    return repo._phasecache.getrevset(repo, mutablephases)
848 857
@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    getnode = repo.changelog.node
    # a rev is obsolete when a marker uses its node as predecessor
    isobs = repo.obsstore.successors.__contains__
    return set(r for r in _mutablerevs(repo) if isobs(getnode(r)))
857 866
@cachefor('orphan')
def _computeorphanset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    getparents = repo.changelog.parentrevs
    obsolete = getrevs(repo, 'obsolete')
    candidates = _mutablerevs(repo) - obsolete
    unstable = set()
    # A rev is unstable if one of its parents is obsolete or unstable;
    # traversing in growing rev order means parents are classified first
    for rev in sorted(candidates):
        for parentrev in getparents(rev):
            if parentrev in obsolete or parentrev in unstable:
                unstable.add(rev)
                break
    return unstable
874 883
@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    # an obsolete revision is "suspended" when an orphan descends from it
    orphanancestors = repo.changelog.ancestors(getrevs(repo, 'orphan'))
    return set(r for r in getrevs(repo, 'obsolete') if r in orphanancestors)
880 889
@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    # extinct = obsolete revisions that are not suspended
    obsolete = getrevs(repo, 'obsolete')
    return obsolete - getrevs(repo, 'suspended')
885 894
@cachefor('phasedivergent')
def _computephasedivergentset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # utility aliases (avoid attribute lookups inside the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    tonode = cl.node
    # We only evaluate mutable, non-obsolete revisions
    for rev in repo.revs('(not public()) and (not obsolete())'):
        # (future) A cache of predecessors may worth if split is very common
        precs = obsutil.allpredecessors(repo.obsstore, [tonode(rev)],
                                        ignoreflags=bumpedfix)
        for pnode in precs:
            prev = torev(pnode) # unfiltered! but so is phasecache
            if prev is not None and phase(repo, prev) <= public:
                # we have a public predecessor
                bumped.add(rev)
                break # Next draft!
    return bumped
908 917
@cachefor('contentdivergent')
def _computecontentdivergentset(repo):
    """the set of rev that compete to be the final successors of some revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    tonode = repo.changelog.node
    for rev in repo.revs('(not public()) - obsolete()'):
        # walk all predecessors (transitively); a rev is divergent when
        # one of its predecessors has more than one non-empty successors
        # set competing with it
        toprocess = set(obsstore.predecessors.get(tonode(rev), ()))
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                # emergency cycle hanging prevention
                continue
            seen.add(prec)
            if prec not in newermap:
                obsutil.successorssets(repo, prec, cache=newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(rev)
                break
            toprocess.update(obsstore.predecessors.get(prec, ()))
    return divergent
935 944
936 945
def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                  operation=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuple. `old` and `news` are changectx. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function,

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        # 'devel.user.obsmarker' overrides the recorded user (used by tests)
        develuser = repo.ui.config('devel', 'user.obsmarker')
        if develuser:
            metadata['user'] = develuser
        else:
            metadata['user'] = repo.ui.username()

    # Operation metadata handling
    useoperation = repo.ui.configbool('experimental',
                                      'evolution.track-operation')
    if useoperation and operation:
        metadata['operation'] = operation

    # Effect flag metadata handling
    saveeffectflag = repo.ui.configbool('experimental',
                                        'evolution.effect-flags')

    with repo.transaction('add-obsolescence-marker') as tr:
        markerargs = []
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            # per-relation metadata (optional third tuple item) is layered
            # on top of the global metadata
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise error.Abort(_("cannot obsolete public changeset: %s")
                                  % prec,
                                  hint="see 'hg help phases' for details")
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                # prune marker (no successors): record the parents so the
                # pruned changeset's location in the graph is preserved
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise error.Abort(_("changeset %s cannot obsolete itself")
                                  % prec)

            # Effect flag can be different by relation
            if saveeffectflag:
                # The effect flag is saved in a versioned field name for future
                # evolution
                effectflag = obsutil.geteffectflag(rel)
                localmetadata[obsutil.EFFECTFLAGFIELD] = "%d" % effectflag

            # Creating the marker causes the hidden cache to become invalid,
            # which causes recomputation when we ask for prec.parents() above.
            # Resulting in n^2 behavior. So let's prepare all of the args
            # first, then create the markers.
            markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata,
                                 ui=repo.ui)
        # markers change which revisions are hidden; drop the filtered
        # revision caches so views are recomputed
        repo.filteredrevcache.clear()
General Comments 0
You need to be logged in to leave comments. Login now