##// END OF EJS Templates
obsolete: don't translate internal error message...
Yuya Nishihara -
r40291:4f1f0243 default
parent child Browse files
Show More
@@ -1,1059 +1,1059 b''
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 building new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides old and news changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "predecessor" and possible
24 24 replacements are called "successors". Markers that used changeset X as
25 25 a predecessor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
27 27 a successors are call "predecessor markers of Y" because they hold
28 28 information about the predecessors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
50 50 case. If two independent operations rewrite the same changeset A in to A' and
51 51 A'', we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53
54 54 (A, (B,)) and (A, (C,))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are stored in an append-only file stored in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
66 66 The header is followed by the markers. Marker format depend of the version. See
67 67 comment associated with each format for details.
68 68
69 69 """
70 70 from __future__ import absolute_import
71 71
72 72 import errno
73 73 import hashlib
74 74 import struct
75 75
76 76 from .i18n import _
77 77 from . import (
78 78 encoding,
79 79 error,
80 80 node,
81 81 obsutil,
82 82 phases,
83 83 policy,
84 84 pycompat,
85 85 util,
86 86 )
87 87 from .utils import dateutil
88 88
# C implementation of the marker parsers, when available
parsers = policy.importmod(r'parsers')

# local aliases for the struct helpers used throughout this module
_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# Options for obsolescence
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'
104 104
def _getoptionvalue(repo, option):
    """Tell whether the given obsolescence `option` is enabled for `repo`.

    The dedicated boolean config 'experimental.evolution.<option>' wins
    when set; otherwise we fall back on the generic
    'experimental.evolution' knob, which historically could also be a
    list of feature names.
    """
    ui = repo.ui
    explicit = ui.configbool('experimental', 'evolution.%s' % option)

    # an explicitly configured per-option value takes precedence
    if explicit is not None:
        return explicit

    # Fallback on generic option
    try:
        return ui.configbool('experimental', 'evolution')
    except (error.ConfigError, AttributeError):
        # Fallback on old-fashion config
        # inconsistent config: experimental.evolution
        enabledfeatures = set(ui.configlist('experimental', 'evolution'))

        if 'all' in enabledfeatures:
            return True

        # For migration purposes, temporarily return true if the config
        # hasn't been set but _enabled is true.
        if not enabledfeatures and _enabled:
            return True

        # Temporary hack: also honour the dedicated createmarkers knob
        if ui.config('experimental', 'evolution.createmarkers'):
            enabledfeatures.add('createmarkers')

        return option in enabledfeatures
def getoptions(repo):
    """Returns dicts showing state of obsolescence features."""

    createmarkers = _getoptionvalue(repo, createmarkersopt)
    allowunstable = _getoptionvalue(repo, allowunstableopt)
    exchange = _getoptionvalue(repo, exchangeopt)

    # the other obsolescence features are meaningless without marker
    # creation, so reject inconsistent configurations early
    if (allowunstable or exchange) and not createmarkers:
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return {
        createmarkersopt: createmarkers,
        allowunstableopt: allowunstable,
        exchangeopt: exchange,
    }
156 156
def isenabled(repo, option):
    """Tell whether obsolescence option `option` is enabled for `repo`."""
    options = getoptions(repo)
    return options[option]
162 162
# Creating aliases for marker flags because evolve extension looks for
# bumpedfix in obsolete.py
bumpedfix = obsutil.bumpedfix  # marker fixes a phase-divergent changeset
usingsha256 = obsutil.usingsha256  # marker stores 32-byte sha256 node ids
167 167
168 168 ## Parsing and writing of version "0"
169 169 #
170 170 # The header is followed by the markers. Each marker is made of:
171 171 #
172 172 # - 1 uint8 : number of new changesets "N", can be zero.
173 173 #
174 174 # - 1 uint32: metadata size "M" in bytes.
175 175 #
176 176 # - 1 byte: a bit field. It is reserved for flags used in common
177 177 # obsolete marker operations, to avoid repeated decoding of metadata
178 178 # entries.
179 179 #
180 180 # - 20 bytes: obsoleted changeset identifier.
181 181 #
182 182 # - N*20 bytes: new changesets identifiers.
183 183 #
184 184 # - M bytes: metadata as a sequence of nul-terminated strings. Each
185 185 # string contains a key and a value, separated by a colon ':', without
186 186 # additional encoding. Keys cannot contain '\0' or ':' and values
187 187 # cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'  # numsuc (B), metadata size (I), flags (B), prec (20s)
_fm0node = '20s'       # sha1 node id
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)
193 193
def _fm0readmarkers(data, off, stop):
    """Yield markers decoded from version-0 binary ``data[off:stop]``.

    Each marker is yielded as a (prec, sucs, flags, metadata, date,
    parents) tuple matching ``obsstore.fields``.
    """
    # Loop on markers
    while off < stop:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            # the creation date is stored as "<seconds> <tz offset>" text
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            # 'p0' marks that parents were explicitly recorded as empty
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)
244 244
def _fm0encodeonemarker(marker):
    """Serialize one marker tuple into the version-0 binary format.

    Date and parents have no dedicated fields in format 0, so they are
    folded into the metadata dictionary before encoding.
    """
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    meta = dict(metadata)
    secs, offset = date
    meta['date'] = '%r %i' % (secs, offset)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            meta['p0'] = ''
        for idx, parent in enumerate(parents, 1):
            meta['p%i' % idx] = node.hex(parent)
    encodedmeta = _fm0encodemeta(meta)
    fmt = _fm0fixed + (_fm0node * len(sucs))
    fields = [len(sucs), len(encodedmeta), flags, pre]
    fields.extend(sucs)
    return _pack(fmt, *fields) + encodedmeta
264 264
265 265 def _fm0encodemeta(meta):
266 266 """Return encoded metadata string to string mapping.
267 267
268 268 Assume no ':' in key and no '\0' in both key and value."""
269 269 for key, value in meta.iteritems():
270 270 if ':' in key or '\0' in key:
271 271 raise ValueError("':' and '\0' are forbidden in metadata key'")
272 272 if '\0' in value:
273 273 raise ValueError("':' is forbidden in metadata value'")
274 274 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
275 275
276 276 def _fm0decodemeta(data):
277 277 """Return string to string dictionary from encoded version."""
278 278 d = {}
279 279 for l in data.split('\0'):
280 280 if l:
281 281 key, value = l.split(':', 1)
282 282 d[key] = value
283 283 return d
284 284
285 285 ## Parsing and writing of version "1"
286 286 #
287 287 # The header is followed by the markers. Each marker is made of:
288 288 #
289 289 # - uint32: total size of the marker (including this field)
290 290 #
291 291 # - float64: date in seconds since epoch
292 292 #
293 293 # - int16: timezone offset in minutes
294 294 #
295 295 # - uint16: a bit field. It is reserved for flags used in common
296 296 # obsolete marker operations, to avoid repeated decoding of metadata
297 297 # entries.
298 298 #
299 299 # - uint8: number of successors "N", can be zero.
300 300 #
301 301 # - uint8: number of parents "P", can be zero.
302 302 #
303 303 # 0: parents data stored but no parent,
304 304 # 1: one parent stored,
305 305 # 2: two parents stored,
306 306 # 3: no parent data stored
307 307 #
308 308 # - uint8: number of metadata entries M
309 309 #
310 310 # - 20 or 32 bytes: predecessor changeset identifier.
311 311 #
312 312 # - N*(20 or 32) bytes: successors changesets identifiers.
313 313 #
314 314 # - P*(20 or 32) bytes: parents of the predecessors changesets.
315 315 #
316 316 # - M*(uint8, uint8): size of all metadata entries (key and value)
317 317 #
318 318 # - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
# size (I), date (d), tz (h), flags (H), numsuc (B), numpar (B),
# nummeta (B), prec (20s)
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3  # parent-count value meaning "no parent data stored"
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'  # (key size, value size) pair for one metadata entry
_fm1metapairsize = _calcsize(_fm1metapair)
331 331
def _fm1purereadmarkers(data, off, stop):
    """Yield version-1 markers decoded from ``data[off:stop]``.

    Pure python implementation, used as a fallback when the C parser is
    unavailable (see _fm1readmarkers).  Each marker is yielded as a
    (prec, sucs, flags, metadata, date, parents) tuple.
    """
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    ufixed = struct.Struct(_fm1fixed).unpack

    while off < stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata
        # sizes come first (one (key size, value size) pair per entry),
        # followed by the concatenated key/value bytes
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in pycompat.xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        # tz was stored in minutes; convert back to seconds
        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
405 405
def _fm1encodeonemarker(marker):
    """Serialize one marker tuple into the version-1 binary format.

    Raises ProgrammingError when a metadata key or value exceeds the
    255-byte limit imposed by the uint8 size fields.
    """
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    # data[0] is the total size, patched in once metadata is accounted for
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        if lk > 255:
            msg = ('obsstore metadata key cannot be longer than 255 bytes'
                   ' (key "%s" is %u bytes)') % (key, lk)
            raise error.ProgrammingError(msg)
        if lv > 255:
            msg = ('obsstore metadata value cannot be longer than 255 bytes'
                   ' (value "%s" for key "%s" is %u bytes)') % (value, key, lv)
            raise error.ProgrammingError(msg)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    # raw key/value bytes follow the packed fixed part and size table
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)
449 449
def _fm1readmarkers(data, off, stop):
    """Decode version-1 markers, using the C parser when available."""
    native = getattr(parsers, 'fm1readmarkers', None)
    if native:
        return native(data, off, stop)
    # fall back on the pure python implementation
    return _fm1purereadmarkers(data, off, stop)
455 455
# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
# decoder(data, off, stop) yields marker tuples; encoder(marker) returns bytes
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
460 460
def _readmarkerversion(data):
    """Return the format version stored in the first byte of ``data``."""
    return _unpack('>B', data[0:1])[0]
463 463
@util.nogc
def _readmarkers(data, off=None, stop=None):
    """Return (version, iterator of markers) parsed from raw ``data``.

    ``off`` and ``stop`` bound the byte range to decode; by default
    everything after the one-byte version header is read.
    """
    diskversion = _readmarkerversion(data)
    if not off:
        off = 1 # skip 1 byte version number
    if stop is None:
        stop = len(data)
    if diskversion not in formats:
        msg = _('parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    readerfn = formats[diskversion][0]
    return diskversion, readerfn(data, off, stop)
476 476
def encodeheader(version=_fm0version):
    """Return the one-byte version header for an obsstore stream."""
    return _pack('>B', version)
479 479
def encodemarkers(markers, addheader=False, version=_fm0version):
    """Generate the binary encoding of ``markers``.

    Kept separate from flushmarkers(), it will be reused for markers
    exchange.  When ``addheader`` is true the version header is yielded
    first.
    """
    encoder = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for onemarker in markers:
        yield encoder(onemarker)
488 488
@util.nogc
def _addsuccessors(successors, markers):
    """Index ``markers`` by predecessor node into ``successors``."""
    for marker in markers:
        prec = marker[0]
        successors.setdefault(prec, set()).add(marker)
493 493
@util.nogc
def _addpredecessors(predecessors, markers):
    """Index ``markers`` by each successor node into ``predecessors``."""
    for marker in markers:
        for succnode in marker[1]:
            predecessors.setdefault(succnode, set()).add(marker)
499 499
@util.nogc
def _addchildren(children, markers):
    """Index ``markers`` by each recorded parent node into ``children``."""
    for marker in markers:
        recordedparents = marker[5]
        if recordedparents is None:
            # marker carries no parent information
            continue
        for parentnode in recordedparents:
            children.setdefault(parentnode, set()).add(marker)
507 507
def _checkinvalidmarkers(markers):
    """search for marker with invalid data and raise error if needed

    Exist as a separated function to allow the evolve extension for a more
    subtle handling.
    """
    for marker in markers:
        # obsoleting something in favor of the null node makes no sense
        if node.nullid in marker[1]:
            raise error.Abort(_('bad obsolescence marker detected: '
                                'invalid successors nullid'))
518 518
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - predecessors[x] -> set(markers on predecessors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on predecessors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec: nodeid, predecessors changesets
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob in UTF-8, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of predecessors
    #          None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related cache
        self.caches = {}
        self.svfs = svfs
        # format used when writing a brand new obsstore file
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        # before markers are parsed, a cheap stat() is enough to answer
        if not self._cached(r'_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating marker you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker have been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = dateutil.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = dateutil.makedate()
            else:
                date = dateutil.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            # internal error message: deliberately not translated, and kept
            # as a native str on both Python 2 and 3
            raise ValueError(
                r'in-marker cycle with %s' % pycompat.sysstr(node.hex(prec)))

        metadata = tuple(sorted(metadata.iteritems()))
        for k, v in metadata:
            try:
                # might be better to reject non-ASCII keys
                k.decode('utf-8')
                v.decode('utf-8')
            except UnicodeDecodeError:
                raise error.ProgrammingError(
                    'obsstore metadata must be valid UTF-8 sequence '
                    '(key = %r, value = %r)'
                    % (pycompat.bytestr(k), pycompat.bytestr(v)))

        marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicate.
        Return the number of new marker."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        # filter out markers already on disk or duplicated in the input
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                data = b''.join(encodemarkers(new, offset == 0, self._version))
                f.write(data)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            addedmarkers = transaction.changes.get('obsmarkers')
            if addedmarkers is not None:
                addedmarkers.update(new)
            self._addmarkers(new, data)
            # new marker *may* have changed several set. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = '%d' % (previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        # raw file content; empty string when the obsstore does not exist
        return self.svfs.tryread('obsstore')

    @propertycache
    def _version(self):
        # version found on disk, or the default format for a new store
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        # list of all markers, parsed lazily from the raw data
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        # predecessor node -> set of markers obsoleting it
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def predecessors(self):
        # successor node -> set of markers producing it
        predecessors = {}
        _addpredecessors(predecessors, self._all)
        return predecessors

    @propertycache
    def children(self):
        # recorded parent node -> set of markers about its children
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        # tell whether a propertycache attribute was computed already
        return attr in self.__dict__

    def _addmarkers(self, markers, rawdata):
        """Update the in-memory state with freshly written markers."""
        markers = list(markers) # to allow repeated iteration
        self._data = self._data + rawdata
        self._all.extend(markers)
        # only update the index mappings that were already computed
        if self._cached(r'successors'):
            _addsuccessors(self.successors, markers)
        if self._cached(r'predecessors'):
            _addpredecessors(self.predecessors, markers)
        if self._cached(r'children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes mean:

        - marker that use this changeset as successor
        - prune marker of direct children on this changeset
        - recursive application of the two rules on predecessors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.predecessors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers
751 751
def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    defaultformat = ui.configint('format', 'obsstore-version')
    kwargs = {}
    # rely on obsstore class default when possible.
    if defaultformat is not None:
        kwargs[r'defaultformat'] = defaultformat
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **kwargs)
    # warn when markers exist but the feature is off
    if store and readonly:
        ui.warn(_('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
    return store
767 767
def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    # search for highest version known on both side (sorts the list in place)
    versions.sort(reverse=True)
    for candidate in versions:
        if candidate in formats:
            return candidate
    return None
779 779
# arbitrary picked to fit into 8K limit from HTTP server
# you have to take in account:
# - the version header
# - the base85 encoding
_maxpayload = 5300
785 785
def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    chunks = []
    chunk = None
    chunklen = _maxpayload * 2 # force the creation of a first chunk
    for marker in markers:
        encoded = _fm0encodeonemarker(marker)
        if chunklen + len(encoded) > _maxpayload:
            # start a fresh chunk
            chunk = []
            chunklen = 0
            chunks.append(chunk)
        chunk.append(encoded)
        chunklen += len(encoded)
    keys = {}
    # each chunk becomes its own 'dumpN' key, newest chunk first
    for idx, chunk in enumerate(reversed(chunks)):
        payload = ''.join([_pack('>B', _fm0version)] + chunk)
        keys['dump%i' % idx] = util.b85encode(payload)
    return keys
806 806
def listmarkers(repo):
    """List markers over pushkey"""
    store = repo.obsstore
    if not store:
        return {}
    return _pushkeyescape(sorted(store))
812 812
def pushmarker(repo, key, old, new):
    """Push markers over pushkey

    Only keys of the form 'dumpN' with an empty old value are accepted.
    Returns True when the payload has been merged into the obsstore.
    """
    ui = repo.ui
    if not key.startswith('dump'):
        ui.warn(_('unknown key: %r') % key)
        return False
    if old:
        ui.warn(_('unexpected old value for %r') % key)
        return False
    markerdata = util.b85decode(new)
    with repo.lock(), repo.transaction('pushkey: obsolete markers') as tr:
        repo.obsstore.mergemarkers(tr, markerdata)
        repo.invalidatevolatilesets()
        return True
826 826
# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def register(func):
        # each volatile set may have only one compute function
        if name in cachefuncs:
            msg = "duplicated registration for volatileset '%s' (existing: %r)"
            raise error.ProgrammingError(msg % (name, cachefuncs[name]))
        cachefuncs[name] = func
        return func
    return register
838 838
def getrevs(repo, name):
    """Return the set of revision that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    caches = repo.obsstore.caches
    if name not in caches:
        # compute on first access, memoized on the obsstore
        caches[name] = cachefuncs[name](repo)
    return caches[name]
849 849
# To be simple we need to invalidate obsolescence cache when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - the repo is stripped
def clearobscaches(repo):
    """Remove all obsolescence related caches from a repo

    All caches on the obsstore are dropped, but only if the obsstore
    already exists on the repo.

    (We could be smarter here given the exact event that triggered the
    cache clearing)"""
    # nothing to do unless this repo has obsstore data loaded
    if 'obsstore' not in repo._filecache:
        return
    repo.obsstore.caches.clear()
867 867
def _mutablerevs(repo):
    """the set of mutable (non-public) revisions in the repository"""
    return repo._phasecache.getrevset(repo, phases.mutablephases)
871 871
@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    tonode = repo.changelog.node
    # a revision is obsolete when some marker uses it as a predecessor
    obsoleted = repo.obsstore.successors
    return set(r for r in _mutablerevs(repo) if tonode(r) in obsoleted)
880 880
@cachefor('orphan')
def _computeorphanset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    parentrevs = repo.changelog.parentrevs
    obsolete = getrevs(repo, 'obsolete')
    candidates = _mutablerevs(repo) - obsolete
    orphans = set()
    # ascending traversal guarantees a parent is classified before its
    # children, so a single pass suffices
    for rev in sorted(candidates):
        # a rev is unstable if one of its parents is obsolete or unstable
        if any(p in obsolete or p in orphans for p in parentrevs(rev)):
            orphans.add(rev)
    return orphans
897 897
@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    # Ancestors of orphans are exactly the obsolete revs that still have
    # non-obsolete descendants.
    orphanancestors = repo.changelog.ancestors(getrevs(repo, 'orphan'))
    return set(r for r in getrevs(repo, 'obsolete') if r in orphanancestors)
903 903
@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    # Extinct = obsolete and not suspended.
    obsolete = getrevs(repo, 'obsolete')
    suspended = getrevs(repo, 'suspended')
    return obsolete - suspended
908 908
@cachefor('phasedivergent')
def _computephasedivergentset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # Hoist attribute lookups out of the loop.
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    tonode = cl.node
    # We only evaluate mutable, non-obsolete revisions.
    for rev in repo.revs('(not public()) and (not obsolete())'):
        node = tonode(rev)
        # (future) A cache of predecessors may worth if split is very common
        preds = obsutil.allpredecessors(repo.obsstore, [node],
                                        ignoreflags=bumpedfix)
        for pnode in preds:
            prev = torev(pnode) # unfiltered! but so is phasecache
            if prev is not None and phase(repo, prev) <= public:
                # this revision has a public predecessor
                bumped.add(rev)
                break # Next draft!
    return bumped
931 931
@cachefor('contentdivergent')
def _computecontentdivergentset(repo):
    """the set of rev that compete to be the final successors of some revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}  # shared cache for obsutil.successorssets() results
    tonode = repo.changelog.node
    for rev in repo.revs('(not public()) - obsolete()'):
        node = tonode(rev)
        mark = obsstore.predecessors.get(node, ())
        # walk predecessor markers transitively; if any (transitive)
        # predecessor has more than one set of live successors, <rev>
        # competes with another rev and is content-divergent
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                obsutil.successorssets(repo, prec, cache=newermap)
            # drop empty successors sets (prunes): they do not compete
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(rev)
                break
            toprocess.update(obsstore.predecessors.get(prec, ()))
    return divergent
958 958
def makefoldid(relation, user):
    """Build a short identifier shared by all markers of one fold

    The digest covers the user plus the rev number and node of every
    changeset involved in the relation (predecessors and successors).
    """
    digest = hashlib.sha1(user)
    for ctx in relation[0] + relation[1]:
        digest.update('%d' % ctx.rev())
        digest.update(ctx.node())
    # Since fold only has to compete against fold for the same successors, it
    # seems fine to use a small ID. Smaller ID save space.
    return node.hex(digest.digest())[:8]
968 968
def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                  operation=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of ((<old>,...), (<new>, ...)[,{metadata}])
    tuple. `old` and `news` are changectx. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function.
    Any string values in metadata must be UTF-8 bytes.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        # allow tests/tools to force a stable marker author via devel config
        luser = repo.ui.config('devel', 'user.obsmarker') or repo.ui.username()
        metadata['user'] = encoding.fromlocal(luser)

    # Operation metadata handling
    useoperation = repo.ui.configbool('experimental',
                                      'evolution.track-operation')
    if useoperation and operation:
        metadata['operation'] = operation

    # Effect flag metadata handling
    saveeffectflag = repo.ui.configbool('experimental',
                                        'evolution.effect-flags')

    with repo.transaction('add-obsolescence-marker') as tr:
        markerargs = []
        for rel in relations:
            predecessors = rel[0]
            if not isinstance(predecessors, tuple):
                # preserve compat with old API until all caller are migrated
                predecessors = (predecessors,)
            if len(predecessors) > 1 and len(rel[1]) != 1:
                # internal error message, deliberately not translated
                msg = 'Fold markers can only have 1 successors, not %d'
                raise error.ProgrammingError(msg % len(rel[1]))
            foldid = None
            foldsize = len(predecessors)
            if 1 < foldsize:
                # a fold: all its markers share one short id (see makefoldid)
                foldid = makefoldid(rel, metadata['user'])
            for foldidx, prec in enumerate(predecessors, 1):
                sucs = rel[1]
                localmetadata = metadata.copy()
                if len(rel) > 2:
                    # per-relation metadata overrides the global metadata
                    localmetadata.update(rel[2])
                if foldid is not None:
                    localmetadata['fold-id'] = foldid
                    localmetadata['fold-idx'] = '%d' % foldidx
                    localmetadata['fold-size'] = '%d' % foldsize

                if not prec.mutable():
                    raise error.Abort(_("cannot obsolete public changeset: %s")
                                      % prec,
                                      hint="see 'hg help phases' for details")
                nprec = prec.node()
                nsucs = tuple(s.node() for s in sucs)
                npare = None
                if not nsucs:
                    # a prune marker (no successors) records the parents of
                    # the pruned changeset
                    npare = tuple(p.node() for p in prec.parents())
                if nprec in nsucs:
                    raise error.Abort(_("changeset %s cannot obsolete itself")
                                      % prec)

                # Effect flag can be different by relation
                if saveeffectflag:
                    # The effect flag is saved in a versioned field name for
                    # future evolution
                    effectflag = obsutil.geteffectflag(prec, sucs)
                    localmetadata[obsutil.EFFECTFLAGFIELD] = "%d" % effectflag

                # Creating the marker causes the hidden cache to become
                # invalid, which causes recomputation when we ask for
                # prec.parents() above. Resulting in n^2 behavior. So let's
                # prepare all of the args first, then create the markers.
                markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata,
                                 ui=repo.ui)
        repo.filteredrevcache.clear()
General Comments 0
You need to be logged in to leave comments. Login now