##// END OF EJS Templates
obsolete: use context manager for transaction in pushmarker()...
Martin von Zweigbergk -
r35592:09285733 default
parent child Browse files
Show More
@@ -1,1121 +1,1116 b''
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 building new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides old and news changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "predecessor" and possible
24 24 replacements are called "successors". Markers that used changeset X as
25 25 a predecessor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
27 27 a successors are call "predecessor markers of Y" because they hold
28 28 information about the predecessors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
50 50 case. If two independent operations rewrite the same changeset A in to A' and
51 51 A'', we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53
54 54 (A, (B,)) and (A, (C,))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are stored in an append-only file stored in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
66 66 The header is followed by the markers. Marker format depend of the version. See
67 67 comment associated with each format for details.
68 68
69 69 """
70 70 from __future__ import absolute_import
71 71
72 72 import errno
73 73 import struct
74 74
75 75 from .i18n import _
76 76 from . import (
77 77 error,
78 78 node,
79 79 obsutil,
80 80 phases,
81 81 policy,
82 82 util,
83 83 )
84 84
85 85 parsers = policy.importmod(r'parsers')
86 86
87 87 _pack = struct.pack
88 88 _unpack = struct.unpack
89 89 _calcsize = struct.calcsize
90 90 propertycache = util.propertycache
91 91
92 92 # the obsolete feature is not mature enough to be enabled by default.
93 93 # you have to rely on third party extension extension to enable this.
94 94 _enabled = False
95 95
96 96 # Options for obsolescence
97 97 createmarkersopt = 'createmarkers'
98 98 allowunstableopt = 'allowunstable'
99 99 exchangeopt = 'exchange'
100 100
def _getoptionvalue(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.

    Lookup order: the specific 'experimental.evolution.<option>' boolean,
    then the generic 'experimental.evolution' boolean, then (when the
    generic value is not a boolean) the legacy list-valued form of
    'experimental.evolution'.
    """
    configkey = 'evolution.%s' % option
    newconfig = repo.ui.configbool('experimental', configkey)

    # Return the value only if defined
    if newconfig is not None:
        return newconfig

    # Fallback on generic option
    try:
        return repo.ui.configbool('experimental', 'evolution')
    except (error.ConfigError, AttributeError):
        # Fallback on old-fashion config
        # inconsistent config: experimental.evolution
        result = set(repo.ui.configlist('experimental', 'evolution'))

        if 'all' in result:
            return True

        # For migration purposes, temporarily return true if the config hasn't
        # been set but _enabled is true.
        if len(result) == 0 and _enabled:
            return True

        # Temporary hack for next check
        newconfig = repo.ui.config('experimental', 'evolution.createmarkers')
        if newconfig:
            result.add('createmarkers')

        return option in result
def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.

    Raises Abort when dependent options are enabled without 'createmarkers'.
    """
    createmarkers = _getoptionvalue(repo, createmarkersopt)
    allowunstable = _getoptionvalue(repo, allowunstableopt)
    exchange = _getoptionvalue(repo, exchangeopt)

    # createmarkers must be enabled if other options are enabled
    if (allowunstable or exchange) and not createmarkers:
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return _getoptionvalue(repo, option)
149 149
150 150 ### obsolescence marker flag
151 151
152 152 ## bumpedfix flag
153 153 #
154 154 # When a changeset A' succeed to a changeset A which became public, we call A'
155 155 # "bumped" because it's a successors of a public changesets
156 156 #
157 157 # o A' (bumped)
158 158 # |`:
159 159 # | o A
160 160 # |/
161 161 # o Z
162 162 #
163 163 # The way to solve this situation is to create a new changeset Ad as children
164 164 # of A. This changeset have the same content than A'. So the diff from A to A'
165 165 # is the same than the diff from A to Ad. Ad is marked as a successors of A'
166 166 #
167 167 # o Ad
168 168 # |`:
169 169 # | x A'
170 170 # |'|
171 171 # o | A
172 172 # |/
173 173 # o Z
174 174 #
175 175 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
176 176 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
177 177 # This flag mean that the successors express the changes between the public and
178 178 # bumped version and fix the situation, breaking the transitivity of
179 179 # "bumped" here.
180 180 bumpedfix = 1
181 181 usingsha256 = 2
182 182
183 183 ## Parsing and writing of version "0"
184 184 #
185 185 # The header is followed by the markers. Each marker is made of:
186 186 #
187 187 # - 1 uint8 : number of new changesets "N", can be zero.
188 188 #
189 189 # - 1 uint32: metadata size "M" in bytes.
190 190 #
191 191 # - 1 byte: a bit field. It is reserved for flags used in common
192 192 # obsolete marker operations, to avoid repeated decoding of metadata
193 193 # entries.
194 194 #
195 195 # - 20 bytes: obsoleted changeset identifier.
196 196 #
197 197 # - N*20 bytes: new changesets identifiers.
198 198 #
199 199 # - M bytes: metadata as a sequence of nul-terminated strings. Each
200 200 # string contains a key and a value, separated by a colon ':', without
201 201 # additional encoding. Keys cannot contain '\0' or ':' and values
202 202 # cannot contain '\0'.
_fm0version = 0
# fixed part: numsuc (uint8), metadata size (uint32), flags (uint8),
# 20-byte predecessor node
_fm0fixed = '>BIB20s'
_fm0node = '20s'  # one sha1 node id
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)
208 208
def _fm0readmarkers(data, off, stop):
    """Decode version-0 markers from *data* between offsets *off* and *stop*.

    Yields (prec, sucs, flags, metadata, date, parents) tuples.
    """
    # Loop on markers
    while off < stop:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        # v0 stores the date inside the metadata as "<secs> <tzoffset>"
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        # parents are stored as 'p1'/'p2' metadata keys; 'p0' marks an
        # explicitly-recorded empty parent list
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)
259 259
def _fm0encodeonemarker(marker):
    """Encode one marker tuple into the version-0 binary format.

    Date and parents are folded into the metadata dictionary since the v0
    wire format has no dedicated fields for them.
    """
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents, 1):
            metadata['p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata
279 279
280 280 def _fm0encodemeta(meta):
281 281 """Return encoded metadata string to string mapping.
282 282
283 283 Assume no ':' in key and no '\0' in both key and value."""
284 284 for key, value in meta.iteritems():
285 285 if ':' in key or '\0' in key:
286 286 raise ValueError("':' and '\0' are forbidden in metadata key'")
287 287 if '\0' in value:
288 288 raise ValueError("':' is forbidden in metadata value'")
289 289 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
290 290
291 291 def _fm0decodemeta(data):
292 292 """Return string to string dictionary from encoded version."""
293 293 d = {}
294 294 for l in data.split('\0'):
295 295 if l:
296 296 key, value = l.split(':')
297 297 d[key] = value
298 298 return d
299 299
300 300 ## Parsing and writing of version "1"
301 301 #
302 302 # The header is followed by the markers. Each marker is made of:
303 303 #
304 304 # - uint32: total size of the marker (including this field)
305 305 #
306 306 # - float64: date in seconds since epoch
307 307 #
308 308 # - int16: timezone offset in minutes
309 309 #
310 310 # - uint16: a bit field. It is reserved for flags used in common
311 311 # obsolete marker operations, to avoid repeated decoding of metadata
312 312 # entries.
313 313 #
314 314 # - uint8: number of successors "N", can be zero.
315 315 #
316 316 # - uint8: number of parents "P", can be zero.
317 317 #
318 318 # 0: parents data stored but no parent,
319 319 # 1: one parent stored,
320 320 # 2: two parents stored,
321 321 # 3: no parent data stored
322 322 #
323 323 # - uint8: number of metadata entries M
324 324 #
325 325 # - 20 or 32 bytes: predecessor changeset identifier.
326 326 #
327 327 # - N*(20 or 32) bytes: successors changesets identifiers.
328 328 #
329 329 # - P*(20 or 32) bytes: parents of the predecessors changesets.
330 330 #
331 331 # - M*(uint8, uint8): size of all metadata entries (key and value)
332 332 #
333 333 # - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
# fixed part: total size (uint32), date secs (float64), tz minutes (int16),
# flags (uint16), numsuc (uint8), numpar (uint8), nummeta (uint8), sha1 node
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
# sentinel value of the parent-count field meaning "no parent data stored"
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
# per metadata entry: key length (uint8), value length (uint8)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize(_fm1metapair)
346 346
def _fm1purereadmarkers(data, off, stop):
    """Pure-Python decoder for version-1 markers.

    Yields (prec, sucs, flags, metadata, date, parents) tuples for every
    marker stored between offsets *off* and *stop* in *data*. Used as a
    fallback when the C parser is unavailable.
    """
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    ufixed = struct.Struct(_fm1fixed).unpack

    while off < stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata: first the (keysize, valuesize) pair table, then the
        # concatenated key/value bytes
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        # tz is stored in minutes on disk; the in-memory date uses seconds
        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
420 420
def _fm1encodeonemarker(marker):
    """Encode one marker tuple into the version-1 binary format.

    Raises ProgrammingError when a metadata key or value exceeds the
    255-byte limit imposed by the uint8 size fields.
    """
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    # data[0] is a placeholder for the total size, patched in below once the
    # metadata lengths are known
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        if lk > 255:
            msg = ('obsstore metadata key cannot be longer than 255 bytes'
                   ' (key "%s" is %u bytes)') % (key, lk)
            raise error.ProgrammingError(msg)
        if lv > 255:
            msg = ('obsstore metadata value cannot be longer than 255 bytes'
                   ' (value "%s" for key "%s" is %u bytes)') % (value, key, lv)
            raise error.ProgrammingError(msg)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)
464 464
def _fm1readmarkers(data, off, stop):
    """Decode fm1 markers, preferring the C parser when it is available."""
    native = getattr(parsers, 'fm1readmarkers', None)
    if native:
        return native(data, off, stop)
    # fall back to the pure-python implementation
    return _fm1purereadmarkers(data, off, stop)
470 470
# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
# decoder: callable(data, off, stop) -> iterator of marker tuples
# encoder: callable(marker) -> encoded bytes for a single marker
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
475 475
476 476 def _readmarkerversion(data):
477 477 return _unpack('>B', data[0:1])[0]
478 478
@util.nogc
def _readmarkers(data, off=None, stop=None):
    """Read and enumerate markers from raw data

    Returns a (version, iterator-of-markers) pair.

    Raises UnknownVersion when the first byte names a format we cannot
    decode.
    """
    diskversion = _readmarkerversion(data)
    if not off:
        off = 1 # skip 1 byte version number
    if stop is None:
        stop = len(data)
    if diskversion not in formats:
        msg = _('parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    return diskversion, formats[diskversion][0](data, off, stop)
491 491
def encodeheader(version=_fm0version):
    """Return the one-byte obsstore file header for *version*."""
    return struct.pack('>B', version)
494 494
def encodemarkers(markers, addheader=False, version=_fm0version):
    """Lazily yield the binary encoding of *markers* in format *version*.

    When *addheader* is true, the one-byte version header is yielded first.
    Kept separate from flushmarkers(); it will be reused for markers
    exchange.
    """
    encode = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for m in markers:
        yield encode(m)
503 503
@util.nogc
def _addsuccessors(successors, markers):
    """Index *markers* into *successors* (predecessor node -> marker set)."""
    for m in markers:
        successors.setdefault(m[0], set()).add(m)
508 508
def _addprecursors(*args, **kwargs):
    """Deprecated alias kept for the 4.4 cycle; forwards to _addpredecessors."""
    msg = ("'obsolete._addprecursors' is deprecated, "
           "use 'obsolete._addpredecessors'")
    util.nouideprecwarn(msg, '4.4')
    return _addpredecessors(*args, **kwargs)
515 515
@util.nogc
def _addpredecessors(predecessors, markers):
    """Index *markers* into *predecessors* (successor node -> marker set)."""
    for m in markers:
        for succ in m[1]:
            predecessors.setdefault(succ, set()).add(m)
521 521
@util.nogc
def _addchildren(children, markers):
    """Index *markers* into *children* (parent node -> marker set).

    Markers that recorded no parent information (parents is None) are
    skipped.
    """
    for m in markers:
        parents = m[5]
        if parents is None:
            continue
        for p in parents:
            children.setdefault(p, set()).add(m)
529 529
def _checkinvalidmarkers(markers):
    """search for marker with invalid data and raise error if needed

    Exist as a separated function to allow the evolve extension for a more
    subtle handling.
    """
    if any(node.nullid in m[1] for m in markers):
        raise error.Abort(_('bad obsolescence marker detected: '
                            'invalid successors nullid'))
540 540
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with two mappings:
    - predecessors[x] -> set(markers on predecessors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on predecessors edges of children(x)
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec: nodeid, predecessors changesets
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of predecessors
    #          None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related cache
        self.caches = {}
        self.svfs = svfs
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        # iterate over every known marker tuple
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        # Cheap truthiness: when markers have not been parsed yet, stat the
        # on-disk file instead of loading it (> 1 because the file always
        # starts with a one-byte version header).
        if not self._cached('_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating marker you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker have been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = util.makedate()
            else:
                date = util.makedate()
        # nodes must be full 20-byte binary ids
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicate.
        Return the number of new marker."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        # drop markers already present in the store or duplicated in the
        # incoming batch
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                data = b''.join(encodemarkers(new, offset == 0, self._version))
                f.write(data)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()'  here
                f.close()
            addedmarkers = transaction.changes.get('obsmarkers')
            if addedmarkers is not None:
                addedmarkers.update(new)
            self._addmarkers(new, data)
            # new marker *may* have changed several set. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        # raw bytes of the on-disk obsstore file ('' when absent)
        return self.svfs.tryread('obsstore')

    @propertycache
    def _version(self):
        # format version of the existing store, or the configured default
        # for an empty/new store
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        # every marker parsed from disk, as a list
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        # predecessor node -> set of markers rewriting it
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @property
    def precursors(self):
        # deprecated alias for 'predecessors' (kept for the 4.4 cycle)
        msg = ("'obsstore.precursors' is deprecated, "
               "use 'obsstore.predecessors'")
        util.nouideprecwarn(msg, '4.4')

        return self.predecessors

    @propertycache
    def predecessors(self):
        # successor node -> set of markers producing it
        predecessors = {}
        _addpredecessors(predecessors, self._all)
        return predecessors

    @propertycache
    def children(self):
        # parent node -> set of markers whose predecessor is a child of it
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        # True when the named propertycache has already been computed
        return attr in self.__dict__

    def _addmarkers(self, markers, rawdata):
        # incrementally update the raw data and every already-computed index
        markers = list(markers) # to allow repeated iteration
        self._data = self._data + rawdata
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('predecessors'):
            _addpredecessors(self.predecessors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes mean:

        - marker that use this changeset as successor
        - prune marker of direct children on this changeset
        - recursive application of the two rules on predecessors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.predecessors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                # prune markers are markers with an empty successors tuple
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers
770 770
def makestore(ui, repo):
    """Create an obsstore instance from a repo.

    The store is read-only unless the 'createmarkers' obsolete option is
    enabled; a warning is emitted if markers exist while the feature is off.
    """
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    defaultformat = ui.configint('format', 'obsstore-version')
    # rely on obsstore class default when possible.
    kwargs = {}
    if defaultformat is not None:
        kwargs[r'defaultformat'] = defaultformat
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **kwargs)
    if store and readonly:
        ui.warn(_('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
    return store
786 786
def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists. Note: sorts *versions* in
    place (descending), matching the historical behavior.
    """
    versions.sort(reverse=True)
    # search for highest version known on both side
    return next((v for v in versions if v in formats), None)
798 798
# arbitrary picked to fit into 8K limit from HTTP server
# you have to take in account:
# - the version header
# - the base85 encoding
# (5300 raw bytes stay safely under 8K once base85-expanded)
_maxpayload = 5300
804 804
def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    parts = []
    currentpart = None
    # start above the threshold so the first marker opens a new part
    currentlen = _maxpayload * 2
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if len(nextdata) + currentlen > _maxpayload:
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    keys = {}
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(data)
    return keys
825 825
def listmarkers(repo):
    """List markers over pushkey"""
    if repo.obsstore:
        return _pushkeyescape(sorted(repo.obsstore))
    return {}
831 831
def pushmarker(repo, key, old, new):
    """Push markers over pushkey

    Decodes the base85 payload in *new* and merges the markers into the
    local obsstore under the repo lock and a transaction. Returns True on
    success, False (with a warning) for an unexpected key or old value.

    Note: the source region contained both the pre- and post-refactor
    bodies of this function (a scraped diff hunk); only the final,
    context-manager form is kept. The `with` form also guarantees the
    transaction is released on every error path.
    """
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return False
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return False
    data = util.b85decode(new)
    with repo.lock(), repo.transaction('pushkey: obsolete markers') as tr:
        repo.obsstore.mergemarkers(tr, data)
        repo.invalidatevolatilesets()
        return True
850 845
851 846 # keep compatibility for the 4.3 cycle
def allprecursors(obsstore, nodes, ignoreflags=0):
    """Deprecated: use obsutil.allprecursors instead (since 4.3)."""
    util.nouideprecwarn(
        'obsolete.allprecursors moved to obsutil.allprecursors', '4.3')
    return obsutil.allprecursors(obsstore, nodes, ignoreflags)
856 851
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Deprecated: use obsutil.allsuccessors instead (since 4.3)."""
    util.nouideprecwarn(
        'obsolete.allsuccessors moved to obsutil.allsuccessors', '4.3')
    return obsutil.allsuccessors(obsstore, nodes, ignoreflags)
861 856
def marker(repo, data):
    """Deprecated: use obsutil.marker instead (since 4.3)."""
    repo.ui.deprecwarn('obsolete.marker moved to obsutil.marker', '4.3')
    return obsutil.marker(repo, data)
866 861
def getmarkers(repo, nodes=None, exclusive=False):
    """Deprecated: use obsutil.getmarkers instead (since 4.3)."""
    repo.ui.deprecwarn('obsolete.getmarkers moved to obsutil.getmarkers',
                       '4.3')
    return obsutil.getmarkers(repo, nodes=nodes, exclusive=exclusive)
871 866
def exclusivemarkers(repo, nodes):
    """Deprecated: use obsutil.exclusivemarkers instead (since 4.3)."""
    repo.ui.deprecwarn(
        'obsolete.exclusivemarkers moved to obsutil.exclusivemarkers', '4.3')
    return obsutil.exclusivemarkers(repo, nodes)
876 871
def foreground(repo, nodes):
    """Deprecated: use obsutil.foreground instead (since 4.3)."""
    repo.ui.deprecwarn('obsolete.foreground moved to obsutil.foreground',
                       '4.3')
    return obsutil.foreground(repo, nodes)
881 876
def successorssets(repo, initialnode, cache=None):
    """Deprecated: use obsutil.successorssets instead (since 4.3)."""
    repo.ui.deprecwarn(
        'obsolete.successorssets moved to obsutil.successorssets', '4.3')
    return obsutil.successorssets(repo, initialnode, cache=cache)
886 881
# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        # double registration is a programming error, not a user error
        previous = cachefuncs.get(name)
        if previous is not None:
            msg = "duplicated registration for volatileset '%s' (existing: %r)"
            raise error.ProgrammingError(msg % (name, previous))
        cachefuncs[name] = func
        return func
    return decorator
898 893
def getrevs(repo, name):
    """Return the set of revision that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    # volatile sets are computed on the unfiltered repo
    unfi = repo.unfiltered()
    if not unfi.obsstore:
        # no markers at all: every volatile set is trivially empty
        return frozenset()
    caches = unfi.obsstore.caches
    if name not in caches:
        caches[name] = cachefuncs[name](unfi)
    return caches[name]
909 904
# To keep things simple, the obsolescence caches must be invalidated when:
#
# - a new changeset is added
# - the public phase is changed
# - an obsolescence marker is added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove every obsolescence-related cache from a repo.

    All cached volatile sets stored on the obsstore are discarded, but only
    if the obsstore has already been loaded for this repo.

    (We could be smarter here given the exact event that triggers the cache
    clearing)"""
    # only clear caches when obsstore data exists for this repo; checking
    # _filecache avoids loading the obsstore just to clear its caches
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()
927 922
def _mutablerevs(repo):
    """the set of mutable revision in the repository"""
    mutablephases = (phases.draft, phases.secret)
    return repo._phasecache.getrevset(repo, mutablephases)
931 926
@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    tonode = repo.changelog.node
    # bound method avoids an attribute lookup per revision in the loop
    hasmarker = repo.obsstore.successors.__contains__
    # public changesets are immutable, so only mutable revs are candidates
    return set(r for r in _mutablerevs(repo) if hasmarker(tonode(r)))
940 935
@cachefor('unstable')
def _computeunstableset(repo):
    """deprecated alias for the 'orphan' volatile set"""
    repo.ui.deprecwarn("'unstable' volatile set is deprecated, "
                       "use 'orphan'", '4.4')
    return _computeorphanset(repo)
948 943
@cachefor('orphan')
def _computeorphanset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    parentrevs = repo.changelog.parentrevs
    obsolete = getrevs(repo, 'obsolete')
    candidates = _mutablerevs(repo) - obsolete
    orphans = set()
    # A rev is an orphan if one of its parents is obsolete or an orphan
    # itself; traversing in growing rev order guarantees that a parent's
    # status is settled before any of its children are examined.
    for rev in sorted(candidates):
        if any(p in obsolete or p in orphans for p in parentrevs(rev)):
            orphans.add(rev)
    return orphans
965 960
@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    orphanancestors = repo.changelog.ancestors(getrevs(repo, 'orphan'))
    return set(r for r in getrevs(repo, 'obsolete') if r in orphanancestors)
971 966
@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    obsolete = getrevs(repo, 'obsolete')
    suspended = getrevs(repo, 'suspended')
    return obsolete - suspended
976 971
@cachefor('bumped')
def _computebumpedset(repo):
    """deprecated alias for the 'phasedivergent' volatile set"""
    repo.ui.deprecwarn("'bumped' volatile set is deprecated, "
                       "use 'phasedivergent'", '4.4')
    return _computephasedivergentset(repo)
984 979
@cachefor('phasedivergent')
def _computephasedivergentset(repo):
    """the set of revs trying to obsolete public revisions"""
    phasedivergent = set()
    cl = repo.changelog
    # local aliases avoid attribute lookups inside the loops
    getphase = repo._phasecache.phase  # would be faster to grab the full list
    public = phases.public
    torev = cl.nodemap.get
    tonode = cl.node
    # only mutable, non-obsolete revisions can be phase-divergent
    for rev in repo.revs('(not public()) and (not obsolete())'):
        node = tonode(rev)
        # (future) A cache of predecessors may be worthwhile if split is common
        for pnode in obsutil.allpredecessors(repo.obsstore, [node],
                                             ignoreflags=bumpedfix):
            prev = torev(pnode)  # unfiltered! but so is phasecache
            if prev is not None and getphase(repo, prev) <= public:
                # this revision rewrites a public predecessor
                phasedivergent.add(rev)
                break  # next candidate
    return phasedivergent
1007 1002
@cachefor('divergent')
def _computedivergentset(repo):
    """deprecated alias for the 'contentdivergent' volatile set"""
    repo.ui.deprecwarn("'divergent' volatile set is deprecated, "
                       "use 'contentdivergent'", '4.4')
    return _computecontentdivergentset(repo)
1015 1010
@cachefor('contentdivergent')
def _computecontentdivergentset(repo):
    """the set of rev that compete to be the final successors of some revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    sscache = {}
    tonode = repo.changelog.node
    for rev in repo.revs('(not public()) - obsolete()'):
        node = tonode(rev)
        # walk all (transitive) predecessors of this revision
        pending = set(obsstore.predecessors.get(node, ()))
        visited = set()
        while pending:
            prec = pending.pop()[0]
            if prec in visited:
                continue  # emergency cycle hanging prevention
            visited.add(prec)
            if prec not in sscache:
                obsutil.successorssets(repo, prec, cache=sscache)
            # more than one non-empty successors set means the predecessor
            # was rewritten in competing ways: this rev is content-divergent
            if len([sset for sset in sscache[prec] if sset]) > 1:
                divergent.add(rev)
                break
            pending.update(obsstore.predecessors.get(prec, ()))
    return divergent
1042 1037
1043 1038
def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                  operation=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuple. `old` and `news` are changectx. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function,

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        # the 'devel.user.obsmarker' config allows overriding the recorded
        # user (developer option); otherwise the configured username is used
        develuser = repo.ui.config('devel', 'user.obsmarker')
        if develuser:
            metadata['user'] = develuser
        else:
            metadata['user'] = repo.ui.username()

    # Operation metadata handling
    useoperation = repo.ui.configbool('experimental',
                                      'evolution.track-operation')
    if useoperation and operation:
        metadata['operation'] = operation

    # Effect flag metadata handling
    saveeffectflag = repo.ui.configbool('experimental',
                                        'evolution.effect-flags')

    with repo.transaction('add-obsolescence-marker') as tr:
        markerargs = []
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            # per-relation metadata (optional third tuple item) overrides the
            # global metadata for this marker only
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise error.Abort(_("cannot obsolete public changeset: %s")
                                  % prec,
                                  hint="see 'hg help phases' for details")
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                # a marker with no successors is a prune; record the parents
                # of the pruned changeset so it can be located in the graph
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise error.Abort(_("changeset %s cannot obsolete itself")
                                  % prec)

            # Effect flag can be different by relation
            if saveeffectflag:
                # The effect flag is saved in a versioned field name for future
                # evolution
                effectflag = obsutil.geteffectflag(rel)
                localmetadata[obsutil.EFFECTFLAGFIELD] = "%d" % effectflag

            # Creating the marker causes the hidden cache to become invalid,
            # which causes recomputation when we ask for prec.parents() above.
            # Resulting in n^2 behavior. So let's prepare all of the args
            # first, then create the markers.
            markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata,
                                 ui=repo.ui)
            # new markers change which revisions are hidden, so drop the
            # filtered revision caches
            repo.filteredrevcache.clear()
General Comments 0
You need to be logged in to leave comments. Login now