obsutil: move 'exclusivemarkers' to the new modules...
marmoute
r33144:d09ae850 default
@@ -1,1267 +1,1147
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 building new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "precursor" and possible
24 24 replacements are called "successors". Markers that use changeset X as
25 25 a precursor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
27 27 a successor are called "precursor markers of Y" because they hold
28 28 information about the precursors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
50 50 case. If two independent operations rewrite the same changeset A into A' and
51 51 A'', we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53
54 54 (A, (B,)) and (A, (C,))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are stored in an append-only file stored in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
66 66 The header is followed by the markers. The marker format depends on the version.
67 67 See the comment associated with each format for details.
68 68
69 69 """
70 70 from __future__ import absolute_import
71 71
72 72 import errno
73 73 import struct
74 74
75 75 from .i18n import _
76 76 from . import (
77 77 error,
78 78 node,
79 79 obsutil,
80 80 phases,
81 81 policy,
82 82 util,
83 83 )
84 84
85 85 parsers = policy.importmod(r'parsers')
86 86
87 87 _pack = struct.pack
88 88 _unpack = struct.unpack
89 89 _calcsize = struct.calcsize
90 90 propertycache = util.propertycache
91 91
92 92 # the obsolete feature is not mature enough to be enabled by default.
93 93 # you have to rely on a third party extension to enable this.
94 94 _enabled = False
95 95
96 96 # Options for obsolescence
97 97 createmarkersopt = 'createmarkers'
98 98 allowunstableopt = 'allowunstable'
99 99 exchangeopt = 'exchange'
100 100
101 101 def isenabled(repo, option):
102 102 """Returns True if the given repository has the given obsolete option
103 103 enabled.
104 104 """
105 105 result = set(repo.ui.configlist('experimental', 'evolution'))
106 106 if 'all' in result:
107 107 return True
108 108
109 109 # For migration purposes, temporarily return true if the config hasn't been
110 110 # set but _enabled is true.
111 111 if len(result) == 0 and _enabled:
112 112 return True
113 113
114 114 # createmarkers must be enabled if other options are enabled
115 115 if ((allowunstableopt in result or exchangeopt in result) and
116 116 not createmarkersopt in result):
117 117 raise error.Abort(_("'createmarkers' obsolete option must be enabled "
118 118 "if other obsolete options are enabled"))
119 119
120 120 return option in result
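A rough standalone illustration of the option handling in `isenabled` (this omits the `_enabled` migration shortcut; the configured lists are hypothetical, and real code reads them via `repo.ui.configlist`):

```python
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'

def check(configured, option):
    # mimic the decision logic above: 'all' enables everything, and the
    # dependent options require 'createmarkers' to be present
    result = set(configured)
    if 'all' in result:
        return True
    if ((allowunstableopt in result or exchangeopt in result)
            and createmarkersopt not in result):
        raise ValueError("'createmarkers' must be enabled if other "
                         "obsolete options are enabled")
    return option in result

print(check(['createmarkers', 'exchange'], 'exchange'))  # True
print(check([], 'createmarkers'))                        # False
```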
121 121
122 122 ### obsolescence marker flag
123 123
124 124 ## bumpedfix flag
125 125 #
126 126 # When a changeset A' succeeds a changeset A which became public, we call A'
127 127 # "bumped" because it is a successor of a public changeset
128 128 #
129 129 # o A' (bumped)
130 130 # |`:
131 131 # | o A
132 132 # |/
133 133 # o Z
134 134 #
135 135 # The way to solve this situation is to create a new changeset Ad as a child
136 136 # of A. This changeset has the same content as A'. So the diff from A to A'
137 137 # is the same as the diff from A to Ad. Ad is marked as a successor of A'.
138 138 #
139 139 # o Ad
140 140 # |`:
141 141 # | x A'
142 142 # |'|
143 143 # o | A
144 144 # |/
145 145 # o Z
146 146 #
147 147 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
148 148 # as bumped too, we add the `bumpedfix` flag to the marker <A', (Ad,)>.
149 149 # This flag means that the successor expresses the changes between the public
150 150 # and bumped versions and fixes the situation, breaking the transitivity of
151 151 # "bumped" here.
152 152 bumpedfix = 1
153 153 usingsha256 = 2
154 154
155 155 ## Parsing and writing of version "0"
156 156 #
157 157 # The header is followed by the markers. Each marker is made of:
158 158 #
159 159 # - 1 uint8 : number of new changesets "N", can be zero.
160 160 #
161 161 # - 1 uint32: metadata size "M" in bytes.
162 162 #
163 163 # - 1 byte: a bit field. It is reserved for flags used in common
164 164 # obsolete marker operations, to avoid repeated decoding of metadata
165 165 # entries.
166 166 #
167 167 # - 20 bytes: obsoleted changeset identifier.
168 168 #
169 169 # - N*20 bytes: new changesets identifiers.
170 170 #
171 171 # - M bytes: metadata as a sequence of nul-terminated strings. Each
172 172 # string contains a key and a value, separated by a colon ':', without
173 173 # additional encoding. Keys cannot contain '\0' or ':' and values
174 174 # cannot contain '\0'.
175 175 _fm0version = 0
176 176 _fm0fixed = '>BIB20s'
177 177 _fm0node = '20s'
178 178 _fm0fsize = _calcsize(_fm0fixed)
179 179 _fm0fnodesize = _calcsize(_fm0node)
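For illustration, a standalone sketch of how the version-0 fixed part packs and unpacks with `struct` (the node id and field values here are made up):

```python
import struct

_fm0fixed = '>BIB20s'      # numsuc (uint8), mdsize (uint32),
                           # flags (1 byte), precursor (20 bytes)
prec = b'\x11' * 20        # hypothetical 20-byte node id
record = struct.pack(_fm0fixed, 2, 10, 0, prec)
assert len(record) == struct.calcsize(_fm0fixed) == 26

numsuc, mdsize, flags, p = struct.unpack(_fm0fixed, record)
print(numsuc, mdsize, flags)    # 2 10 0
# in the real file, N * 20 bytes of successor ids and M bytes of
# metadata would follow this fixed part
```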
180 180
181 181 def _fm0readmarkers(data, off):
182 182 # Loop on markers
183 183 l = len(data)
184 184 while off + _fm0fsize <= l:
185 185 # read fixed part
186 186 cur = data[off:off + _fm0fsize]
187 187 off += _fm0fsize
188 188 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
189 189 # read replacement
190 190 sucs = ()
191 191 if numsuc:
192 192 s = (_fm0fnodesize * numsuc)
193 193 cur = data[off:off + s]
194 194 sucs = _unpack(_fm0node * numsuc, cur)
195 195 off += s
196 196 # read metadata
197 197 # (metadata will be decoded on demand)
198 198 metadata = data[off:off + mdsize]
199 199 if len(metadata) != mdsize:
200 200 raise error.Abort(_('parsing obsolete marker: metadata is too '
201 201 'short, %d bytes expected, got %d')
202 202 % (mdsize, len(metadata)))
203 203 off += mdsize
204 204 metadata = _fm0decodemeta(metadata)
205 205 try:
206 206 when, offset = metadata.pop('date', '0 0').split(' ')
207 207 date = float(when), int(offset)
208 208 except ValueError:
209 209 date = (0., 0)
210 210 parents = None
211 211 if 'p2' in metadata:
212 212 parents = (metadata.pop('p1', None), metadata.pop('p2', None))
213 213 elif 'p1' in metadata:
214 214 parents = (metadata.pop('p1', None),)
215 215 elif 'p0' in metadata:
216 216 parents = ()
217 217 if parents is not None:
218 218 try:
219 219 parents = tuple(node.bin(p) for p in parents)
220 220 # if parent content is not a nodeid, drop the data
221 221 for p in parents:
222 222 if len(p) != 20:
223 223 parents = None
224 224 break
225 225 except TypeError:
226 226 # if content cannot be translated to nodeid drop the data.
227 227 parents = None
228 228
229 229 metadata = tuple(sorted(metadata.iteritems()))
230 230
231 231 yield (pre, sucs, flags, metadata, date, parents)
232 232
233 233 def _fm0encodeonemarker(marker):
234 234 pre, sucs, flags, metadata, date, parents = marker
235 235 if flags & usingsha256:
236 236 raise error.Abort(_('cannot handle sha256 with old obsstore format'))
237 237 metadata = dict(metadata)
238 238 time, tz = date
239 239 metadata['date'] = '%r %i' % (time, tz)
240 240 if parents is not None:
241 241 if not parents:
242 242 # mark that we explicitly recorded no parents
243 243 metadata['p0'] = ''
244 244 for i, p in enumerate(parents, 1):
245 245 metadata['p%i' % i] = node.hex(p)
246 246 metadata = _fm0encodemeta(metadata)
247 247 numsuc = len(sucs)
248 248 format = _fm0fixed + (_fm0node * numsuc)
249 249 data = [numsuc, len(metadata), flags, pre]
250 250 data.extend(sucs)
251 251 return _pack(format, *data) + metadata
252 252
253 253 def _fm0encodemeta(meta):
254 254 """Return encoded metadata string to string mapping.
255 255
256 256 Assume no ':' in key and no '\0' in both key and value."""
257 257 for key, value in meta.iteritems():
258 258 if ':' in key or '\0' in key:
259 259 raise ValueError("':' and '\0' are forbidden in metadata keys")
260 260 if '\0' in value:
261 261 raise ValueError("'\0' is forbidden in metadata values")
262 262 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
263 263
264 264 def _fm0decodemeta(data):
265 265 """Return string to string dictionary from encoded version."""
266 266 d = {}
267 267 for l in data.split('\0'):
268 268 if l:
269 269 key, value = l.split(':')
270 270 d[key] = value
271 271 return d
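A quick round-trip of the metadata wire format described above, as a standalone restatement (not the module's actual helpers; `split(':', 1)` here also tolerates colons in values):

```python
def encodemeta(meta):
    # key:value pairs joined by NUL bytes, keys sorted for determinism
    return '\0'.join('%s:%s' % (k, meta[k]) for k in sorted(meta))

def decodemeta(data):
    d = {}
    for entry in data.split('\0'):
        if entry:
            key, value = entry.split(':', 1)
            d[key] = value
    return d

meta = {'user': 'alice', 'date': '0 0'}
blob = encodemeta(meta)
print(repr(blob))                # 'date:0 0\x00user:alice'
assert decodemeta(blob) == meta
```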
272 272
273 273 ## Parsing and writing of version "1"
274 274 #
275 275 # The header is followed by the markers. Each marker is made of:
276 276 #
277 277 # - uint32: total size of the marker (including this field)
278 278 #
279 279 # - float64: date in seconds since epoch
280 280 #
281 281 # - int16: timezone offset in minutes
282 282 #
283 283 # - uint16: a bit field. It is reserved for flags used in common
284 284 # obsolete marker operations, to avoid repeated decoding of metadata
285 285 # entries.
286 286 #
287 287 # - uint8: number of successors "N", can be zero.
288 288 #
289 289 # - uint8: number of parents "P", can be zero.
290 290 #
291 291 # 0: parents data stored but no parent,
292 292 # 1: one parent stored,
293 293 # 2: two parents stored,
294 294 # 3: no parent data stored
295 295 #
296 296 # - uint8: number of metadata entries M
297 297 #
298 298 # - 20 or 32 bytes: precursor changeset identifier.
299 299 #
300 300 # - N*(20 or 32) bytes: successors changesets identifiers.
301 301 #
302 302 # - P*(20 or 32) bytes: parents of the precursors changesets.
303 303 #
304 304 # - M*(uint8, uint8): size of all metadata entries (key and value)
305 305 #
306 306 # - remaining bytes: the metadata, each (key, value) pair after the other.
307 307 _fm1version = 1
308 308 _fm1fixed = '>IdhHBBB20s'
309 309 _fm1nodesha1 = '20s'
310 310 _fm1nodesha256 = '32s'
311 311 _fm1nodesha1size = _calcsize(_fm1nodesha1)
312 312 _fm1nodesha256size = _calcsize(_fm1nodesha256)
313 313 _fm1fsize = _calcsize(_fm1fixed)
314 314 _fm1parentnone = 3
315 315 _fm1parentshift = 14
316 316 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
317 317 _fm1metapair = 'BB'
318 318 _fm1metapairsize = _calcsize('BB')
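To illustrate the version-1 fixed part, a standalone sketch (all values hypothetical); note how `numpar == 3` encodes "no parent data stored", per the list above:

```python
import struct

_fm1fixed = '>IdhHBBB20s'   # size, date, tz, flags, numsuc, numpar,
                            # nummeta, precursor
_fm1parentnone = 3          # numpar == 3 means "no parent data stored"

fixedsize = struct.calcsize(_fm1fixed)
print(fixedsize)            # 39

prec = b'\x22' * 20         # hypothetical node id
# a pruned marker: no successors, no recorded parents, no metadata
record = struct.pack(_fm1fixed, fixedsize, 0.0, 0, 0,
                     0, _fm1parentnone, 0, prec)
_, secs, tz, flags, numsuc, numpar, nummeta, p = \
    struct.unpack(_fm1fixed, record)
assert numpar == _fm1parentnone   # parents were not recorded
```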
319 319
320 320 def _fm1purereadmarkers(data, off):
321 321 # make some global constants local for performance
322 322 noneflag = _fm1parentnone
323 323 sha2flag = usingsha256
324 324 sha1size = _fm1nodesha1size
325 325 sha2size = _fm1nodesha256size
326 326 sha1fmt = _fm1nodesha1
327 327 sha2fmt = _fm1nodesha256
328 328 metasize = _fm1metapairsize
329 329 metafmt = _fm1metapair
330 330 fsize = _fm1fsize
331 331 unpack = _unpack
332 332
333 333 # Loop on markers
334 334 stop = len(data) - _fm1fsize
335 335 ufixed = struct.Struct(_fm1fixed).unpack
336 336
337 337 while off <= stop:
338 338 # read fixed part
339 339 o1 = off + fsize
340 340 t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])
341 341
342 342 if flags & sha2flag:
343 343 # FIXME: prec was read as a SHA1, needs to be amended
344 344
345 345 # read 0 or more successors
346 346 if numsuc == 1:
347 347 o2 = o1 + sha2size
348 348 sucs = (data[o1:o2],)
349 349 else:
350 350 o2 = o1 + sha2size * numsuc
351 351 sucs = unpack(sha2fmt * numsuc, data[o1:o2])
352 352
353 353 # read parents
354 354 if numpar == noneflag:
355 355 o3 = o2
356 356 parents = None
357 357 elif numpar == 1:
358 358 o3 = o2 + sha2size
359 359 parents = (data[o2:o3],)
360 360 else:
361 361 o3 = o2 + sha2size * numpar
362 362 parents = unpack(sha2fmt * numpar, data[o2:o3])
363 363 else:
364 364 # read 0 or more successors
365 365 if numsuc == 1:
366 366 o2 = o1 + sha1size
367 367 sucs = (data[o1:o2],)
368 368 else:
369 369 o2 = o1 + sha1size * numsuc
370 370 sucs = unpack(sha1fmt * numsuc, data[o1:o2])
371 371
372 372 # read parents
373 373 if numpar == noneflag:
374 374 o3 = o2
375 375 parents = None
376 376 elif numpar == 1:
377 377 o3 = o2 + sha1size
378 378 parents = (data[o2:o3],)
379 379 else:
380 380 o3 = o2 + sha1size * numpar
381 381 parents = unpack(sha1fmt * numpar, data[o2:o3])
382 382
383 383 # read metadata
384 384 off = o3 + metasize * nummeta
385 385 metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
386 386 metadata = []
387 387 for idx in xrange(0, len(metapairsize), 2):
388 388 o1 = off + metapairsize[idx]
389 389 o2 = o1 + metapairsize[idx + 1]
390 390 metadata.append((data[off:o1], data[o1:o2]))
391 391 off = o2
392 392
393 393 yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
394 394
395 395 def _fm1encodeonemarker(marker):
396 396 pre, sucs, flags, metadata, date, parents = marker
397 397 # determine node size
398 398 _fm1node = _fm1nodesha1
399 399 if flags & usingsha256:
400 400 _fm1node = _fm1nodesha256
401 401 numsuc = len(sucs)
402 402 numextranodes = numsuc
403 403 if parents is None:
404 404 numpar = _fm1parentnone
405 405 else:
406 406 numpar = len(parents)
407 407 numextranodes += numpar
408 408 formatnodes = _fm1node * numextranodes
409 409 formatmeta = _fm1metapair * len(metadata)
410 410 format = _fm1fixed + formatnodes + formatmeta
411 411 # tz is stored in minutes so we divide by 60
412 412 tz = date[1]//60
413 413 data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
414 414 data.extend(sucs)
415 415 if parents is not None:
416 416 data.extend(parents)
417 417 totalsize = _calcsize(format)
418 418 for key, value in metadata:
419 419 lk = len(key)
420 420 lv = len(value)
421 421 data.append(lk)
422 422 data.append(lv)
423 423 totalsize += lk + lv
424 424 data[0] = totalsize
425 425 data = [_pack(format, *data)]
426 426 for key, value in metadata:
427 427 data.append(key)
428 428 data.append(value)
429 429 return ''.join(data)
430 430
431 431 def _fm1readmarkers(data, off):
432 432 native = getattr(parsers, 'fm1readmarkers', None)
433 433 if not native:
434 434 return _fm1purereadmarkers(data, off)
435 435 stop = len(data) - _fm1fsize
436 436 return native(data, off, stop)
437 437
438 438 # mapping to read/write various marker formats
439 439 # <version> -> (decoder, encoder)
440 440 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
441 441 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
442 442
443 443 def _readmarkerversion(data):
444 444 return _unpack('>B', data[0:1])[0]
445 445
446 446 @util.nogc
447 447 def _readmarkers(data):
448 448 """Read and enumerate markers from raw data"""
449 449 diskversion = _readmarkerversion(data)
450 450 off = 1
451 451 if diskversion not in formats:
452 452 msg = _('parsing obsolete marker: unknown version %r') % diskversion
453 453 raise error.UnknownVersion(msg, version=diskversion)
454 454 return diskversion, formats[diskversion][0](data, off)
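The version dispatch can be sketched standalone (the reader lambdas are hypothetical stand-ins for `_fm0readmarkers`/`_fm1readmarkers`):

```python
import struct

def readversion(data):
    # the first byte of the obsstore file is the format version
    return struct.unpack('>B', data[0:1])[0]

readers = {0: lambda data, off: iter(()),   # stand-in v0 reader
           1: lambda data, off: iter(())}   # stand-in v1 reader

data = struct.pack('>B', 1)                 # a header-only store, v1
version = readversion(data)
if version not in readers:
    raise ValueError('unknown obsstore version %r' % version)
markers = readers[version](data, 1)         # markers start at offset 1
print(version, list(markers))               # 1 []
```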
455 455
456 456 def encodeheader(version=_fm0version):
457 457 return _pack('>B', version)
458 458
459 459 def encodemarkers(markers, addheader=False, version=_fm0version):
460 460 # Kept separate from flushmarkers(), it will be reused for
461 461 # markers exchange.
462 462 encodeone = formats[version][1]
463 463 if addheader:
464 464 yield encodeheader(version)
465 465 for marker in markers:
466 466 yield encodeone(marker)
467 467
468 468
469 469 class marker(object):
470 470 """Wrap obsolete marker raw data"""
471 471
472 472 def __init__(self, repo, data):
473 473 # the repo argument will be used to create changectx in later version
474 474 self._repo = repo
475 475 self._data = data
476 476 self._decodedmeta = None
477 477
478 478 def __hash__(self):
479 479 return hash(self._data)
480 480
481 481 def __eq__(self, other):
482 482 if type(other) != type(self):
483 483 return False
484 484 return self._data == other._data
485 485
486 486 def precnode(self):
487 487 """Precursor changeset node identifier"""
488 488 return self._data[0]
489 489
490 490 def succnodes(self):
491 491 """List of successor changesets node identifiers"""
492 492 return self._data[1]
493 493
494 494 def parentnodes(self):
495 495 """Parents of the precursors (None if not recorded)"""
496 496 return self._data[5]
497 497
498 498 def metadata(self):
499 499 """Decoded metadata dictionary"""
500 500 return dict(self._data[3])
501 501
502 502 def date(self):
503 503 """Creation date as (unixtime, offset)"""
504 504 return self._data[4]
505 505
506 506 def flags(self):
507 507 """The flags field of the marker"""
508 508 return self._data[2]
509 509
510 510 @util.nogc
511 511 def _addsuccessors(successors, markers):
512 512 for mark in markers:
513 513 successors.setdefault(mark[0], set()).add(mark)
514 514
515 515 @util.nogc
516 516 def _addprecursors(precursors, markers):
517 517 for mark in markers:
518 518 for suc in mark[1]:
519 519 precursors.setdefault(suc, set()).add(mark)
520 520
521 521 @util.nogc
522 522 def _addchildren(children, markers):
523 523 for mark in markers:
524 524 parents = mark[5]
525 525 if parents is not None:
526 526 for p in parents:
527 527 children.setdefault(p, set()).add(mark)
528 528
529 529 def _checkinvalidmarkers(markers):
530 530 """search for marker with invalid data and raise error if needed
531 531
532 532 Exist as a separated function to allow the evolve extension for a more
533 533 subtle handling.
534 534 """
535 535 for mark in markers:
536 536 if node.nullid in mark[1]:
537 537 raise error.Abort(_('bad obsolescence marker detected: '
538 538 'invalid successors nullid'))
539 539
540 540 class obsstore(object):
541 541 """Store obsolete markers
542 542
543 543 Markers can be accessed with three mappings:
544 544 - precursors[x] -> set(markers on precursor edges of x)
545 545 - successors[x] -> set(markers on successor edges of x)
546 546 - children[x] -> set(markers on precursor edges of children(x))
547 547 """
548 548
549 549 fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
550 550 # prec: nodeid, precursor changesets
551 551 # succs: tuple of nodeid, successor changesets (0-N length)
552 552 # flag: integer, flag field carrying modifier for the markers (see doc)
553 553 # meta: binary blob, encoded metadata dictionary
554 554 # date: (float, int) tuple, date of marker creation
555 555 # parents: (tuple of nodeid) or None, parents of precursors
556 556 # None is used when no data has been recorded
557 557
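A standalone sketch of the three index mappings described above, built from marker tuples of the documented shape (node ids abbreviated to single letters for illustration):

```python
from collections import defaultdict

# (prec, succs, flag, meta, date, parents) tuples, fields as documented
markers = [
    ('A', ('B',), 0, (), (0.0, 0), ('Z',)),
    ('B', (), 0, (), (0.0, 0), ('A',)),    # prune marker for B
]

successors = defaultdict(set)   # prec -> markers rewriting it
precursors = defaultdict(set)   # succ -> markers producing it
children = defaultdict(set)     # parent -> markers of its children

for m in markers:
    successors[m[0]].add(m)
    for suc in m[1]:
        precursors[suc].add(m)
    if m[5] is not None:
        for p in m[5]:
            children[p].add(m)

print(sorted(successors))   # ['A', 'B']
print(sorted(precursors))   # ['B']
print(sorted(children))     # ['A', 'Z']
```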
558 558 def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
559 559 # caches for various obsolescence related cache
560 560 self.caches = {}
561 561 self.svfs = svfs
562 562 self._defaultformat = defaultformat
563 563 self._readonly = readonly
564 564
565 565 def __iter__(self):
566 566 return iter(self._all)
567 567
568 568 def __len__(self):
569 569 return len(self._all)
570 570
571 571 def __nonzero__(self):
572 572 if not self._cached('_all'):
573 573 try:
574 574 return self.svfs.stat('obsstore').st_size > 1
575 575 except OSError as inst:
576 576 if inst.errno != errno.ENOENT:
577 577 raise
578 578 # just build an empty _all list if no obsstore exists, which
579 579 # avoids further stat() syscalls
580 580 pass
581 581 return bool(self._all)
582 582
583 583 __bool__ = __nonzero__
584 584
585 585 @property
586 586 def readonly(self):
587 587 """True if marker creation is disabled
588 588
589 589 Remove me in the future when obsolete markers are always on."""
590 590 return self._readonly
591 591
592 592 def create(self, transaction, prec, succs=(), flag=0, parents=None,
593 593 date=None, metadata=None, ui=None):
594 594 """obsolete: add a new obsolete marker
595 595
596 596 * ensure it is hashable
597 597 * check mandatory metadata
598 598 * encode metadata
599 599 
600 600 If you are a human writing code that creates markers, you want to use
601 601 the `createmarkers` function in this module instead.
602 602 
603 603 Return True if a new marker has been added, False if the marker
604 604 already existed (no-op).
605 605 """
606 606 if metadata is None:
607 607 metadata = {}
608 608 if date is None:
609 609 if 'date' in metadata:
610 610 # as a courtesy for out-of-tree extensions
611 611 date = util.parsedate(metadata.pop('date'))
612 612 elif ui is not None:
613 613 date = ui.configdate('devel', 'default-date')
614 614 if date is None:
615 615 date = util.makedate()
616 616 else:
617 617 date = util.makedate()
618 618 if len(prec) != 20:
619 619 raise ValueError(prec)
620 620 for succ in succs:
621 621 if len(succ) != 20:
622 622 raise ValueError(succ)
623 623 if prec in succs:
624 624 raise ValueError(_('in-marker cycle with %s') % node.hex(prec))
625 625
626 626 metadata = tuple(sorted(metadata.iteritems()))
627 627
628 628 marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
629 629 return bool(self.add(transaction, [marker]))
630 630
631 631 def add(self, transaction, markers):
632 632 """Add new markers to the store
633 633
634 634 Take care of filtering duplicates.
635 635 Return the number of new markers."""
636 636 if self._readonly:
637 637 raise error.Abort(_('creating obsolete markers is not enabled on '
638 638 'this repo'))
639 639 known = set()
640 640 getsuccessors = self.successors.get
641 641 new = []
642 642 for m in markers:
643 643 if m not in getsuccessors(m[0], ()) and m not in known:
644 644 known.add(m)
645 645 new.append(m)
646 646 if new:
647 647 f = self.svfs('obsstore', 'ab')
648 648 try:
649 649 offset = f.tell()
650 650 transaction.add('obsstore', offset)
651 651 # offset == 0: new file - add the version header
652 652 for bytes in encodemarkers(new, offset == 0, self._version):
653 653 f.write(bytes)
654 654 finally:
655 655 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
656 656 # call 'filecacheentry.refresh()' here
657 657 f.close()
658 658 self._addmarkers(new)
659 659 # new markers *may* have changed several sets. invalidate the cache.
660 660 self.caches.clear()
661 661 # records the number of new markers for the transaction hooks
662 662 previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
663 663 transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
664 664 return len(new)
665 665
666 666 def mergemarkers(self, transaction, data):
667 667 """merge a binary stream of markers inside the obsstore
668 668
669 669 Returns the number of new markers added."""
670 670 version, markers = _readmarkers(data)
671 671 return self.add(transaction, markers)
672 672
673 673 @propertycache
674 674 def _data(self):
675 675 return self.svfs.tryread('obsstore')
676 676
677 677 @propertycache
678 678 def _version(self):
679 679 if len(self._data) >= 1:
680 680 return _readmarkerversion(self._data)
681 681 else:
682 682 return self._defaultformat
683 683
684 684 @propertycache
685 685 def _all(self):
686 686 data = self._data
687 687 if not data:
688 688 return []
689 689 self._version, markers = _readmarkers(data)
690 690 markers = list(markers)
691 691 _checkinvalidmarkers(markers)
692 692 return markers
693 693
694 694 @propertycache
695 695 def successors(self):
696 696 successors = {}
697 697 _addsuccessors(successors, self._all)
698 698 return successors
699 699
700 700 @propertycache
701 701 def precursors(self):
702 702 precursors = {}
703 703 _addprecursors(precursors, self._all)
704 704 return precursors
705 705
706 706 @propertycache
707 707 def children(self):
708 708 children = {}
709 709 _addchildren(children, self._all)
710 710 return children
711 711
712 712 def _cached(self, attr):
713 713 return attr in self.__dict__
714 714
715 715 def _addmarkers(self, markers):
716 716 markers = list(markers) # to allow repeated iteration
717 717 self._all.extend(markers)
718 718 if self._cached('successors'):
719 719 _addsuccessors(self.successors, markers)
720 720 if self._cached('precursors'):
721 721 _addprecursors(self.precursors, markers)
722 722 if self._cached('children'):
723 723 _addchildren(self.children, markers)
724 724 _checkinvalidmarkers(markers)
725 725
726 726 def relevantmarkers(self, nodes):
727 727 """return a set of all obsolescence markers relevant to a set of nodes.
728 728
729 729 "relevant" to a set of nodes mean:
730 730
731 731 - marker that use this changeset as successor
732 732 - prune marker of direct children on this changeset
733 733 - recursive application of the two rules on precursors of these markers
734 734
735 735 It is a set so you cannot rely on order."""
736 736
737 737 pendingnodes = set(nodes)
738 738 seenmarkers = set()
739 739 seennodes = set(pendingnodes)
740 740 precursorsmarkers = self.precursors
741 741 succsmarkers = self.successors
742 742 children = self.children
743 743 while pendingnodes:
744 744 direct = set()
745 745 for current in pendingnodes:
746 746 direct.update(precursorsmarkers.get(current, ()))
747 747 pruned = [m for m in children.get(current, ()) if not m[1]]
748 748 direct.update(pruned)
749 749 pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
750 750 direct.update(pruned)
751 751 direct -= seenmarkers
752 752 pendingnodes = set([m[0] for m in direct])
753 753 seenmarkers |= direct
754 754 pendingnodes -= seennodes
755 755 seennodes |= pendingnodes
756 756 return seenmarkers
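A condensed standalone sketch of this transitive walk over the precursor mapping (prune handling omitted for brevity; `precursors` maps a node to the set of markers that produced it, and the markers/nodes are made up):

```python
def relevant(precursors, nodes):
    # walk precursor markers transitively, collecting every marker
    # reachable from the initial nodes
    pending, seen_nodes, seen_markers = set(nodes), set(nodes), set()
    while pending:
        direct = set()
        for current in pending:
            direct.update(precursors.get(current, ()))
        direct -= seen_markers
        seen_markers |= direct
        pending = set(m[0] for m in direct) - seen_nodes
        seen_nodes |= pending
    return seen_markers

precursors = {'B': {('A', ('B',))}, 'A': {('O', ('A',))}}
print(sorted(relevant(precursors, ['B'])))
# [('A', ('B',)), ('O', ('A',))]
```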
757 757
758 758 def makestore(ui, repo):
759 759 """Create an obsstore instance from a repo."""
760 760 # read default format for new obsstore.
761 761 # developer config: format.obsstore-version
762 762 defaultformat = ui.configint('format', 'obsstore-version', None)
763 763 # rely on obsstore class default when possible.
764 764 kwargs = {}
765 765 if defaultformat is not None:
766 766 kwargs['defaultformat'] = defaultformat
767 767 readonly = not isenabled(repo, createmarkersopt)
768 768 store = obsstore(repo.svfs, readonly=readonly, **kwargs)
769 769 if store and readonly:
770 770 ui.warn(_('obsolete feature not enabled but %i markers found!\n')
771 771 % len(list(store)))
772 772 return store
773 773
774 def _filterprunes(markers):
775 """return a set with no prune markers"""
776 return set(m for m in markers if m[1])
777
778 def exclusivemarkers(repo, nodes):
779 """set of markers relevant to "nodes" but no other locally-known nodes
780
781 This function computes the set of markers "exclusive" to a locally-known
782 node. This means we walk the markers starting from <nodes> until we reach a
783 locally-known precursor outside of <nodes>. Elements of <nodes> with
784 locally-known successors outside of <nodes> are ignored (since their
785 precursor markers are also relevant to these successors).
786
787 For example:
788
789 # (A0 rewritten as A1)
790 #
791 # A0 <-1- A1 # Marker "1" is exclusive to A1
792
793 or
794
795 # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
796 #
797 # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1
798
799 or
800
801 # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
802 #
803 # <-2- A1 # Marker "2" is exclusive to A0,A1
804 # /
805 # <-1- A0
806 # \
807 # <-3- A2 # Marker "3" is exclusive to A0,A2
808 #
809 # in addition:
810 #
811 # Markers "2,3" are exclusive to A1,A2
812 # Markers "1,2,3" are exclusive to A0,A1,A2
813
814 See test/test-obsolete-bundle-strip.t for more examples.
815
816 An example usage is strip. When stripping a changeset, we also want to
817 strip the markers exclusive to this changeset. Otherwise we would have
818 "dangling"" obsolescence markers from its precursors: Obsolescence markers
819 marking a node as obsolete without any successors available locally.
820
821 As for relevant markers, the prune markers for children will be followed.
822 Of course, they will only be followed if the pruned children is
823 locally-known. Since the prune markers are relevant to the pruned node.
824 However, while prune markers are considered relevant to the parent of the
825 pruned changesets, prune markers for locally-known changeset (with no
826 successors) are considered exclusive to the pruned nodes. This allows
827 to strip the prune markers (with the rest of the exclusive chain) alongside
828 the pruned changesets.
829 """
830 # running on a filtered repository would be dangerous as markers could be
831 # reported as exclusive when they are relevant for other filtered nodes.
832 unfi = repo.unfiltered()
833
834 # shortcut to various useful item
835 nm = unfi.changelog.nodemap
836 precursorsmarkers = unfi.obsstore.precursors
837 successormarkers = unfi.obsstore.successors
838 childrenmarkers = unfi.obsstore.children
839
840 # exclusive markers (return of the function)
841 exclmarkers = set()
842 # we need fast membership testing
843 nodes = set(nodes)
844 # looking for head in the obshistory
845 #
846 # XXX we are ignoring all issues in regard with cycle for now.
847 stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
848 stack.sort()
849 # nodes already stacked
850 seennodes = set(stack)
851 while stack:
852 current = stack.pop()
853 # fetch precursors markers
854 markers = list(precursorsmarkers.get(current, ()))
855 # extend the list with prune markers
856 for mark in successormarkers.get(current, ()):
857 if not mark[1]:
858 markers.append(mark)
859 # and markers from children (looking for prune)
860 for mark in childrenmarkers.get(current, ()):
861 if not mark[1]:
862 markers.append(mark)
863 # traverse the markers
864 for mark in markers:
865 if mark in exclmarkers:
866 # markers already selected
867 continue
868
869 # If the marker is about the current node, select it
870 #
871 # (this delays the addition of markers from children)
872 if mark[1] or mark[0] == current:
873 exclmarkers.add(mark)
874
875 # should we keep traversing through the precursors?
876 prec = mark[0]
877
878 # nodes in the stack or already processed
879 if prec in seennodes:
880 continue
881
882 # is this a locally known node ?
883 known = prec in nm
884 # if locally-known and not in the <nodes> set, the traversal
885 # stops here.
886 if known and prec not in nodes:
887 continue
888
889 # do not keep going if there are unselected markers pointing to this
890 # node. If we end up traversing these unselected markers later the
891 # node will be taken care of at that point.
892 precmarkers = _filterprunes(successormarkers.get(prec))
893 if precmarkers.issubset(exclmarkers):
894 seennodes.add(prec)
895 stack.append(prec)
896
897 return exclmarkers
898
899 774 def commonversion(versions):
900 775 """Return the newest version listed in both versions and our local formats.
901 776
902 777 Returns None if no common version exists.
903 778 """
904 779 versions.sort(reverse=True)
905 780 # search for highest version known on both side
906 781 for v in versions:
907 782 if v in formats:
908 783 return v
909 784 return None
910 785
911 786 # arbitrarily picked to fit into the 8K limit from the HTTP server
912 787 # you have to take into account:
913 788 # - the version header
914 789 # - the base85 encoding
915 790 _maxpayload = 5300
916 791
917 792 def _pushkeyescape(markers):
918 793 """encode markers into a dict suitable for pushkey exchange
919 794
920 795 - binary data is base85 encoded
921 796 - split in chunks smaller than 5300 bytes"""
922 797 keys = {}
923 798 parts = []
924 799 currentlen = _maxpayload * 2 # ensure we create a new part
925 800 for marker in markers:
926 801 nextdata = _fm0encodeonemarker(marker)
927 802 if (len(nextdata) + currentlen > _maxpayload):
928 803 currentpart = []
929 804 currentlen = 0
930 805 parts.append(currentpart)
931 806 currentpart.append(nextdata)
932 807 currentlen += len(nextdata)
933 808 for idx, part in enumerate(reversed(parts)):
934 809 data = ''.join([_pack('>B', _fm0version)] + part)
935 810 keys['dump%i' % idx] = util.b85encode(data)
936 811 return keys
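The chunking strategy can be sketched standalone, using the stdlib's `base64.b85encode` (Python 3) in place of `util.b85encode`; the payload limit and blobs are illustrative, and the part ordering is simplified compared to the function above:

```python
import base64

MAXPAYLOAD = 5300

def chunk(blobs):
    # greedily pack encoded markers into parts below the payload limit
    parts, current, currentlen = [], None, MAXPAYLOAD * 2
    for blob in blobs:
        if len(blob) + currentlen > MAXPAYLOAD:
            current, currentlen = [], 0    # start a new part
            parts.append(current)
        current.append(blob)
        currentlen += len(blob)
    return {'dump%i' % i: base64.b85encode(b''.join(part))
            for i, part in enumerate(parts)}

keys = chunk([b'x' * 3000, b'y' * 3000, b'z' * 100])
print(sorted(keys))            # ['dump0', 'dump1']
print(len(keys['dump0']))      # base85 expands 4 bytes to 5 chars
```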
937 812
938 813 def listmarkers(repo):
939 814 """List markers over pushkey"""
940 815 if not repo.obsstore:
941 816 return {}
942 817 return _pushkeyescape(sorted(repo.obsstore))
943 818
944 819 def pushmarker(repo, key, old, new):
945 820 """Push markers over pushkey"""
946 821 if not key.startswith('dump'):
947 822 repo.ui.warn(_('unknown key: %r') % key)
948 823 return False
949 824 if old:
950 825 repo.ui.warn(_('unexpected old value for %r') % key)
951 826 return False
952 827 data = util.b85decode(new)
953 828 lock = repo.lock()
954 829 try:
955 830 tr = repo.transaction('pushkey: obsolete markers')
956 831 try:
957 832 repo.obsstore.mergemarkers(tr, data)
958 833 repo.invalidatevolatilesets()
959 834 tr.close()
960 835 return True
961 836 finally:
962 837 tr.release()
963 838 finally:
964 839 lock.release()
965 840
966 841 def getmarkers(repo, nodes=None, exclusive=False):
967 842 """returns markers known in a repository
968 843
969 844 If <nodes> is specified, only markers "relevant" to those nodes are
970 845 returned"""
971 846 if nodes is None:
972 847 rawmarkers = repo.obsstore
973 848 elif exclusive:
974 rawmarkers = exclusivemarkers(repo, nodes)
849 rawmarkers = obsutil.exclusivemarkers(repo, nodes)
975 850 else:
976 851 rawmarkers = repo.obsstore.relevantmarkers(nodes)
977 852
978 853 for markerdata in rawmarkers:
979 854 yield marker(repo, markerdata)
980 855
981 856 def relevantmarkers(repo, node):
982 857 """all obsolete markers relevant to some revision"""
983 858 for markerdata in repo.obsstore.relevantmarkers(node):
984 859 yield marker(repo, markerdata)
985 860
986 861
987 862 def precursormarkers(ctx):
988 863 """obsolete marker marking this changeset as a successors"""
989 864 for data in ctx.repo().obsstore.precursors.get(ctx.node(), ()):
990 865 yield marker(ctx.repo(), data)
991 866
992 867 def successormarkers(ctx):
993 868 """obsolete marker making this changeset obsolete"""
994 869 for data in ctx.repo().obsstore.successors.get(ctx.node(), ()):
995 870 yield marker(ctx.repo(), data)
996 871
997 872 def allsuccessors(obsstore, nodes, ignoreflags=0):
998 873 """Yield node for every successor of <nodes>.
999 874
1000 875 Some successors may be unknown locally.
1001 876
1002 877 This is a linear yield unsuited to detecting split changesets. It includes
1003 878 initial nodes too."""
1004 879 remaining = set(nodes)
1005 880 seen = set(remaining)
1006 881 while remaining:
1007 882 current = remaining.pop()
1008 883 yield current
1009 884 for mark in obsstore.successors.get(current, ()):
1010 885 # ignore marker flagged with specified flag
1011 886 if mark[2] & ignoreflags:
1012 887 continue
1013 888 for suc in mark[1]:
1014 889 if suc not in seen:
1015 890 seen.add(suc)
1016 891 remaining.add(suc)
1017 892
1018 893 def allprecursors(obsstore, nodes, ignoreflags=0):
1019 894 """Yield node for every precursors of <nodes>.
1020 895
1021 896 Some precursors may be unknown locally.
1022 897
1023 898 This is a linear yield unsuited to detecting folded changesets. It includes
1024 899 initial nodes too."""
1025 900
1026 901 remaining = set(nodes)
1027 902 seen = set(remaining)
1028 903 while remaining:
1029 904 current = remaining.pop()
1030 905 yield current
1031 906 for mark in obsstore.precursors.get(current, ()):
1032 907 # ignore marker flagged with specified flag
1033 908 if mark[2] & ignoreflags:
1034 909 continue
1035 910 suc = mark[0]
1036 911 if suc not in seen:
1037 912 seen.add(suc)
1038 913 remaining.add(suc)
1039 914
1040 915 def foreground(repo, nodes):
1041 916 """return all nodes in the "foreground" of other node
1042 917
1043 918 The foreground of a revision is anything reachable using parent -> children
1044 919 or precursor -> successor relation. It is very similar to "descendant" but
1045 920 augmented with obsolescence information.
1046 921
1047 922 Beware that obsolescence cycles may arise in complex situations.
1048 923 """
1049 924 repo = repo.unfiltered()
1050 925 foreground = set(repo.set('%ln::', nodes))
1051 926 if repo.obsstore:
1052 927 # We only need this complicated logic if there is obsolescence
1053 928 # XXX will probably deserve an optimised revset.
1054 929 nm = repo.changelog.nodemap
1055 930 plen = -1
1056 931 # compute the whole set of successors or descendants
1057 932 while len(foreground) != plen:
1058 933 plen = len(foreground)
1059 934 succs = set(c.node() for c in foreground)
1060 935 mutable = [c.node() for c in foreground if c.mutable()]
1061 936 succs.update(allsuccessors(repo.obsstore, mutable))
1062 937 known = (n for n in succs if n in nm)
1063 938 foreground = set(repo.set('%ln::', known))
1064 939 return set(c.node() for c in foreground)
1065 940
941 def exclusivemarkers(repo, nodes):
942 movemsg = 'obsolete.exclusivemarkers moved to obsutil.exclusivemarkers'
943 repo.ui.deprecwarn(movemsg, '4.3')
944 return obsutil.exclusivemarkers(repo, nodes)
945
1066 946 def successorssets(repo, initialnode, cache=None):
1067 947 movemsg = 'obsolete.successorssets moved to obsutil.successorssets'
1068 948 repo.ui.deprecwarn(movemsg, '4.3')
1069 949 return obsutil.successorssets(repo, initialnode, cache=cache)
1070 950
1071 951 # mapping of 'set-name' -> <function to compute this set>
1072 952 cachefuncs = {}
1073 953 def cachefor(name):
1074 954 """Decorator to register a function as computing the cache for a set"""
1075 955 def decorator(func):
1076 956 if name in cachefuncs:
1077 957 msg = "duplicated registration for volatileset '%s' (existing: %r)"
1078 958 raise error.ProgrammingError(msg % (name, cachefuncs[name]))
1079 959 cachefuncs[name] = func
1080 960 return func
1081 961 return decorator
1082 962
1083 963 def getrevs(repo, name):
1084 964 """Return the set of revision that belong to the <name> set
1085 965
1086 966 Such access may compute the set and cache it for future use"""
1087 967 repo = repo.unfiltered()
1088 968 if not repo.obsstore:
1089 969 return frozenset()
1090 970 if name not in repo.obsstore.caches:
1091 971 repo.obsstore.caches[name] = cachefuncs[name](repo)
1092 972 return repo.obsstore.caches[name]
1093 973
1094 974 # To be simple we need to invalidate the obsolescence caches when:
1095 975 #
1096 976 # - a new changeset is added
1097 977 # - the public phase is changed
1098 978 # - obsolescence markers are added
1099 979 # - strip is used on a repo
1100 980 def clearobscaches(repo):
1101 981 """Remove all obsolescence related cache from a repo
1102 982
1103 983 This remove all cache in obsstore is the obsstore already exist on the
1104 984 repo.
1105 985
1106 986 (We could be smarter here given the exact event that trigger the cache
1107 987 clearing)"""
1108 988 # only clear cache is there is obsstore data in this repo
1109 989 if 'obsstore' in repo._filecache:
1110 990 repo.obsstore.caches.clear()
1111 991
1112 992 def _mutablerevs(repo):
1113 993 """the set of mutable revision in the repository"""
1114 994 return repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
1115 995
1116 996 @cachefor('obsolete')
1117 997 def _computeobsoleteset(repo):
1118 998 """the set of obsolete revisions"""
1119 999 getnode = repo.changelog.node
1120 1000 notpublic = _mutablerevs(repo)
1121 1001 isobs = repo.obsstore.successors.__contains__
1122 1002 obs = set(r for r in notpublic if isobs(getnode(r)))
1123 1003 return obs
1124 1004
1125 1005 @cachefor('unstable')
1126 1006 def _computeunstableset(repo):
1127 1007 """the set of non obsolete revisions with obsolete parents"""
1128 1008 pfunc = repo.changelog.parentrevs
1129 1009 mutable = _mutablerevs(repo)
1130 1010 obsolete = getrevs(repo, 'obsolete')
1131 1011 others = mutable - obsolete
1132 1012 unstable = set()
1133 1013 for r in sorted(others):
1134 1014 # A rev is unstable if one of its parents is obsolete or unstable
1135 1015 # this works since we traverse following growing rev order
1136 1016 for p in pfunc(r):
1137 1017 if p in obsolete or p in unstable:
1138 1018 unstable.add(r)
1139 1019 break
1140 1020 return unstable
1141 1021
1142 1022 @cachefor('suspended')
1143 1023 def _computesuspendedset(repo):
1144 1024 """the set of obsolete parents with non obsolete descendants"""
1145 1025 suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
1146 1026 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
1147 1027
1148 1028 @cachefor('extinct')
1149 1029 def _computeextinctset(repo):
1150 1030 """the set of obsolete parents without non obsolete descendants"""
1151 1031 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
1152 1032
1153 1033
1154 1034 @cachefor('bumped')
1155 1035 def _computebumpedset(repo):
1156 1036 """the set of revs trying to obsolete public revisions"""
1157 1037 bumped = set()
1158 1038 # util function (avoid attribute lookup in the loop)
1159 1039 phase = repo._phasecache.phase # would be faster to grab the full list
1160 1040 public = phases.public
1161 1041 cl = repo.changelog
1162 1042 torev = cl.nodemap.get
1163 1043 for ctx in repo.set('(not public()) and (not obsolete())'):
1164 1044 rev = ctx.rev()
1165 1045 # We only evaluate mutable, non-obsolete revisions
1166 1046 node = ctx.node()
1167 1047 # (future) A cache of precursors may be worthwhile if splits are very common
1168 1048 for pnode in allprecursors(repo.obsstore, [node],
1169 1049 ignoreflags=bumpedfix):
1170 1050 prev = torev(pnode) # unfiltered! but so is phasecache
1171 1051 if (prev is not None) and (phase(repo, prev) <= public):
1172 1052 # we have a public precursor
1173 1053 bumped.add(rev)
1174 1054 break # Next draft!
1175 1055 return bumped
1176 1056
1177 1057 @cachefor('divergent')
1178 1058 def _computedivergentset(repo):
1179 1059 """the set of rev that compete to be the final successors of some revision.
1180 1060 """
1181 1061 divergent = set()
1182 1062 obsstore = repo.obsstore
1183 1063 newermap = {}
1184 1064 for ctx in repo.set('(not public()) - obsolete()'):
1185 1065 mark = obsstore.precursors.get(ctx.node(), ())
1186 1066 toprocess = set(mark)
1187 1067 seen = set()
1188 1068 while toprocess:
1189 1069 prec = toprocess.pop()[0]
1190 1070 if prec in seen:
1191 1071 continue # emergency cycle hanging prevention
1192 1072 seen.add(prec)
1193 1073 if prec not in newermap:
1194 1074 obsutil.successorssets(repo, prec, newermap)
1195 1075 newer = [n for n in newermap[prec] if n]
1196 1076 if len(newer) > 1:
1197 1077 divergent.add(ctx.rev())
1198 1078 break
1199 1079 toprocess.update(obsstore.precursors.get(prec, ()))
1200 1080 return divergent
1201 1081
1202 1082
1203 1083 def createmarkers(repo, relations, flag=0, date=None, metadata=None,
1204 1084 operation=None):
1205 1085 """Add obsolete markers between changesets in a repo
1206 1086
1207 1087 <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
1208 1088 tuples. `old` and `new` are changectxs. metadata is an optional dictionary
1209 1089 containing metadata for this marker only. It is merged with the global
1210 1090 metadata specified through the `metadata` argument of this function.
1211 1091
1212 1092 Trying to obsolete a public changeset will raise an exception.
1213 1093
1214 1094 Current user and date are used except if specified otherwise in the
1215 1095 metadata attribute.
1216 1096
1217 1097 This function operates within a transaction of its own, but does
1218 1098 not take any lock on the repo.
1219 1099 """
1220 1100 # prepare metadata
1221 1101 if metadata is None:
1222 1102 metadata = {}
1223 1103 if 'user' not in metadata:
1224 1104 metadata['user'] = repo.ui.username()
1225 1105 useoperation = repo.ui.configbool('experimental',
1226 1106 'evolution.track-operation',
1227 1107 False)
1228 1108 if useoperation and operation:
1229 1109 metadata['operation'] = operation
1230 1110 tr = repo.transaction('add-obsolescence-marker')
1231 1111 try:
1232 1112 markerargs = []
1233 1113 for rel in relations:
1234 1114 prec = rel[0]
1235 1115 sucs = rel[1]
1236 1116 localmetadata = metadata.copy()
1237 1117 if 2 < len(rel):
1238 1118 localmetadata.update(rel[2])
1239 1119
1240 1120 if not prec.mutable():
1241 1121 raise error.Abort(_("cannot obsolete public changeset: %s")
1242 1122 % prec,
1243 1123 hint="see 'hg help phases' for details")
1244 1124 nprec = prec.node()
1245 1125 nsucs = tuple(s.node() for s in sucs)
1246 1126 npare = None
1247 1127 if not nsucs:
1248 1128 npare = tuple(p.node() for p in prec.parents())
1249 1129 if nprec in nsucs:
1250 1130 raise error.Abort(_("changeset %s cannot obsolete itself")
1251 1131 % prec)
1252 1132
1253 1133 # Creating the marker causes the hidden cache to become invalid,
1254 1134 # which causes recomputation when we ask for prec.parents() above.
1255 1135 # Resulting in n^2 behavior. So let's prepare all of the args
1256 1136 # first, then create the markers.
1257 1137 markerargs.append((nprec, nsucs, npare, localmetadata))
1258 1138
1259 1139 for args in markerargs:
1260 1140 nprec, nsucs, npare, localmetadata = args
1261 1141 repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
1262 1142 date=date, metadata=localmetadata,
1263 1143 ui=repo.ui)
1264 1144 repo.filteredrevcache.clear()
1265 1145 tr.close()
1266 1146 finally:
1267 1147 tr.release()
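To make the `<relations>` shape concrete, a standalone sketch of how each relation tuple is interpreted, including the per-marker metadata merge (plain strings stand in for changectxs; names and values are illustrative):

```python
# global metadata applied to every marker, per-marker entries win
globalmeta = {'user': 'alice'}

relations = [
    ('A', ('A1',)),                          # plain rewrite
    ('B', ('B1', 'B2'), {'note': 'split'}),  # split, extra metadata
    ('C', ()),                               # prune
]

for rel in relations:
    prec, sucs = rel[0], rel[1]
    localmeta = dict(globalmeta)
    if len(rel) > 2:                         # optional third element
        localmeta.update(rel[2])
    print(prec, sucs, sorted(localmeta.items()))
```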
@@ -1,241 +1,366
1 1 # obsutil.py - utility functions for obsolescence
2 2 #
3 3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 def closestpredecessors(repo, nodeid):
11 11 """yield the list of next predecessors pointing on visible changectx nodes
12 12
13 13 This function respect the repoview filtering, filtered revision will be
14 14 considered missing.
15 15 """
16 16
17 17 precursors = repo.obsstore.precursors
18 18 stack = [nodeid]
19 19 seen = set(stack)
20 20
21 21 while stack:
22 22 current = stack.pop()
23 23 currentpreccs = precursors.get(current, ())
24 24
25 25 for prec in currentpreccs:
26 26 precnodeid = prec[0]
27 27
28 28 # Basic cycle protection
29 29 if precnodeid in seen:
30 30 continue
31 31 seen.add(precnodeid)
32 32
33 33 if precnodeid in repo:
34 34 yield precnodeid
35 35 else:
36 36 stack.append(precnodeid)
37 37
38 def _filterprunes(markers):
39 """return a set with no prune markers"""
40 return set(m for m in markers if m[1])
41
42 def exclusivemarkers(repo, nodes):
43 """set of markers relevant to "nodes" but no other locally-known nodes
44
45 This function computes the set of markers "exclusive" to a locally-known
46 node. This means we walk the markers starting from <nodes> until we reach a
47 locally-known precursor outside of <nodes>. Elements of <nodes> with
48 locally-known successors outside of <nodes> are ignored (since their
49 precursor markers are also relevant to these successors).
50
51 For example:
52
53 # (A0 rewritten as A1)
54 #
55 # A0 <-1- A1 # Marker "1" is exclusive to A1
56
57 or
58
59 # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
60 #
61 # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1
62
63 or
64
65 # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
66 #
67 # <-2- A1 # Marker "2" is exclusive to A0,A1
68 # /
69 # <-1- A0
70 # \
71 # <-3- A2 # Marker "3" is exclusive to A0,A2
72 #
73 # in addition:
74 #
75 # Markers "2,3" are exclusive to A1,A2
76 # Markers "1,2,3" are exclusive to A0,A1,A2
77
78 See test/test-obsolete-bundle-strip.t for more examples.
79
80 An example usage is strip. When stripping a changeset, we also want to
81 strip the markers exclusive to this changeset. Otherwise we would have
82 "dangling"" obsolescence markers from its precursors: Obsolescence markers
83 marking a node as obsolete without any successors available locally.
84
85 As for relevant markers, the prune markers for children will be followed.
86 Of course, they will only be followed if the pruned children is
87 locally-known. Since the prune markers are relevant to the pruned node.
88 However, while prune markers are considered relevant to the parent of the
89 pruned changesets, prune markers for locally-known changeset (with no
90 successors) are considered exclusive to the pruned nodes. This allows
91 to strip the prune markers (with the rest of the exclusive chain) alongside
92 the pruned changesets.
93 """
94 # running on a filtered repository would be dangerous as markers could be
95 # reported as exclusive when they are relevant for other filtered nodes.
96 unfi = repo.unfiltered()
97
98 # shortcut to various useful item
99 nm = unfi.changelog.nodemap
100 precursorsmarkers = unfi.obsstore.precursors
101 successormarkers = unfi.obsstore.successors
102 childrenmarkers = unfi.obsstore.children
103
104 # exclusive markers (return of the function)
105 exclmarkers = set()
106 # we need fast membership testing
107 nodes = set(nodes)
108 # looking for head in the obshistory
109 #
110 # XXX we are ignoring all issues in regard with cycle for now.
111 stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
112 stack.sort()
113 # nodes already stacked
114 seennodes = set(stack)
115 while stack:
116 current = stack.pop()
117 # fetch precursors markers
118 markers = list(precursorsmarkers.get(current, ()))
119 # extend the list with prune markers
120 for mark in successormarkers.get(current, ()):
121 if not mark[1]:
122 markers.append(mark)
123 # and markers from children (looking for prune)
124 for mark in childrenmarkers.get(current, ()):
125 if not mark[1]:
126 markers.append(mark)
127 # traverse the markers
128 for mark in markers:
129 if mark in exclmarkers:
130 # markers already selected
131 continue
132
133 # If the marker is about the current node, select it
134 #
135 # (this delays the addition of markers from children)
136 if mark[1] or mark[0] == current:
137 exclmarkers.add(mark)
138
139 # should we keep traversing through the precursors?
140 prec = mark[0]
141
142 # nodes in the stack or already processed
143 if prec in seennodes:
144 continue
145
146 # is this a locally known node ?
147 known = prec in nm
148 # if locally-known and not in the <nodes> set, the traversal
149 # stops here.
150 if known and prec not in nodes:
151 continue
152
153 # do not keep going if there are unselected markers pointing to this
154 # node. If we end up traversing these unselected markers later the
155 # node will be taken care of at that point.
156 precmarkers = _filterprunes(successormarkers.get(prec))
157 if precmarkers.issubset(exclmarkers):
158 seennodes.add(prec)
159 stack.append(prec)
160
161 return exclmarkers
162
38 163 def successorssets(repo, initialnode, cache=None):
39 164 """Return set of all latest successors of initial nodes
40 165
41 166 The successors set of a changeset A are the group of revisions that succeed
42 167 A. It succeeds A as a consistent whole, each revision being only a partial
43 168 replacement. The successors set contains non-obsolete changesets only.
44 169
45 170 This function returns the full list of successor sets which is why it
46 171 returns a list of tuples and not just a single tuple. Each tuple is a valid
47 172 successors set. Note that (A,) may be a valid successors set for changeset A
48 173 (see below).
49 174
50 175 In most cases, a changeset A will have a single element (e.g. the changeset
51 176 A is replaced by A') in its successors set. Though, it is also common for a
52 177 changeset A to have no elements in its successor set (e.g. the changeset
53 178 has been pruned). Therefore, the returned list of successors sets will be
54 179 [(A',)] or [], respectively.
55 180
56 181 When a changeset A is split into A' and B', however, it will result in a
57 182 successors set containing more than a single element, i.e. [(A',B')].
58 183 Divergent changesets will result in multiple successors sets, i.e. [(A',),
59 184 (A'')].
60 185
61 186 If a changeset A is not obsolete, then it will conceptually have no
62 187 successors set. To distinguish this from a pruned changeset, the successor
63 188 set will contain itself only, i.e. [(A,)].
64 189
65 190 Finally, successors unknown locally are considered to be pruned (obsoleted
66 191 without any successors).
67 192
68 193 The optional `cache` parameter is a dictionary that may contain precomputed
69 194 successors sets. It is meant to reuse the computation of a previous call to
70 195 `successorssets` when multiple calls are made at the same time. The cache
71 196 dictionary is updated in place. The caller is responsible for its life
72 197 span. Code that makes multiple calls to `successorssets` *must* use this
73 198 cache mechanism or suffer terrible performance.
74 199 """
75 200
76 201 succmarkers = repo.obsstore.successors
77 202
78 203 # Stack of nodes we search successors sets for
79 204 toproceed = [initialnode]
80 205 # set version of above list for fast loop detection
81 206 # element added to "toproceed" must be added here
82 207 stackedset = set(toproceed)
83 208 if cache is None:
84 209 cache = {}
85 210
86 211 # This while loop is the flattened version of a recursive search for
87 212 # successors sets
88 213 #
89 214 # def successorssets(x):
90 215 # successors = directsuccessors(x)
91 216 # ss = [[]]
92 217 # for succ in directsuccessors(x):
93 218 # # product as in itertools cartesian product
94 219 # ss = product(ss, successorssets(succ))
95 220 # return ss
96 221 #
97 222 # But we can not use plain recursive calls here:
98 223 # - that would blow the python call stack
99 224 # - obsolescence markers may have cycles, we need to handle them.
100 225 #
101 226 # The `toproceed` list acts as our call stack. Every node we search
102 227 # successors sets for is stacked there.
103 228 #
104 229 # The `stackedset` is the set version of this stack, used to check if a node
105 230 # is already stacked. This check is used to detect cycles and prevent
106 231 # infinite loops.
107 232 #
108 233 # successors sets of all nodes are stored in the `cache` dictionary.
109 234 #
110 235 # After this while loop ends we use the cache to return the successors sets
111 236 # for the node requested by the caller.
112 237 while toproceed:
113 238 # Every iteration tries to compute the successors sets of the topmost
114 239 # node of the stack: CURRENT.
115 240 #
116 241 # There are four possible outcomes:
117 242 #
118 243 # 1) We already know the successors sets of CURRENT:
119 244 # -> mission accomplished, pop it from the stack.
120 245 # 2) Node is not obsolete:
121 246 # -> the node is its own successors sets. Add it to the cache.
122 247 # 3) We do not know successors set of direct successors of CURRENT:
123 248 # -> We add those successors to the stack.
124 249 # 4) We know successors sets of all direct successors of CURRENT:
125 250 # -> We can compute CURRENT successors set and add it to the
126 251 # cache.
127 252 #
128 253 current = toproceed[-1]
129 254 if current in cache:
130 255 # case (1): We already know the successors sets
131 256 stackedset.remove(toproceed.pop())
132 257 elif current not in succmarkers:
133 258 # case (2): The node is not obsolete.
134 259 if current in repo:
135 260 # We have a valid last successors.
136 261 cache[current] = [(current,)]
137 262 else:
138 263 # Final obsolete version is unknown locally.
139 264 # Do not count that as a valid successors
140 265 cache[current] = []
141 266 else:
142 267 # cases (3) and (4)
143 268 #
144 269 # We proceed in two phases. Phase 1 aims to distinguish case (3)
145 270 # from case (4):
146 271 #
147 272 # For each direct successors of CURRENT, we check whether its
148 273 # successors sets are known. If they are not, we stack the
149 274 # unknown node and proceed to the next iteration of the while
150 275 # loop. (case 3)
151 276 #
152 277 # During this step, we may detect obsolescence cycles: a node
153 278 # with unknown successors sets but already in the call stack.
154 279 # In such a situation, we arbitrarily set the successors sets of
155 280 # the node to nothing (node pruned) to break the cycle.
156 281 #
157 282 # If no break was encountered we proceed to phase 2.
158 283 #
159 284 # Phase 2 computes successors sets of CURRENT (case 4); see details
160 285 # in phase 2 itself.
161 286 #
162 287 # Note the two levels of iteration in each phase.
163 288 # - The first one handles obsolescence markers using CURRENT as
164 289 # precursor (successors markers of CURRENT).
165 290 #
166 291 # Having multiple entries here means divergence.
167 292 #
168 293 # - The second one handles successors defined in each marker.
169 294 #
170 295 # Having none means a pruned node, multiple successors mean a split,
171 296 # and a single successor is a standard replacement.
172 297 #
173 298 for mark in sorted(succmarkers[current]):
174 299 for suc in mark[1]:
175 300 if suc not in cache:
176 301 if suc in stackedset:
177 302 # cycle breaking
178 303 cache[suc] = []
179 304 else:
180 305 # case (3): if we have not computed the successors
181 306 # sets of one of those successors, we add it to the
182 307 # `toproceed` stack and stop all work for this
183 308 # iteration.
184 309 toproceed.append(suc)
185 310 stackedset.add(suc)
186 311 break
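# for/else break-out: if the inner loop finished without
# stacking anything, 'continue' moves on to the next marker;
# if it did break (a successor was pushed), the outer 'break'
# aborts the whole scan and CURRENT is retried on a later
# iteration. The outer for/else (case 4) runs only when no
# marker caused a break.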
187 312 else:
188 313 continue
189 314 break
190 315 else:
191 316 # case (4): we know all successors sets of all direct
192 317 # successors
193 318 #
194 319 # The successors set contributed by each marker depends on the
195 320 # successors sets of all its "successors" nodes.
196 321 #
197 322 # Each different marker is a divergence in the obsolescence
198 323 # history. It contributes successors sets distinct from those of
199 324 # other markers.
200 325 #
201 326 # Within a marker, a successor may have divergent successors
202 327 # sets. In such a case, the marker will contribute multiple
203 328 # divergent successors sets. If multiple successors have
204 329 # divergent successors sets, a Cartesian product is used.
205 330 #
206 331 # At the end we post-process the successors sets to remove
207 332 # duplicated entries and successors sets that are strict subsets of
208 333 # another one.
209 334 succssets = []
210 335 for mark in sorted(succmarkers[current]):
211 336 # successors sets contributed by this marker
212 337 markss = [[]]
213 338 for suc in mark[1]:
214 339 # Cartesian product with previous successors
215 340 productresult = []
216 341 for prefix in markss:
217 342 for suffix in cache[suc]:
218 343 newss = list(prefix)
219 344 for part in suffix:
220 345 # do not duplicate entries in a successors set;
221 346 # the first entry wins.
222 347 if part not in newss:
223 348 newss.append(part)
224 349 productresult.append(newss)
225 350 markss = productresult
226 351 succssets.extend(markss)
227 352 # remove duplicates and subsets
228 353 seen = []
229 354 final = []
230 355 candidate = sorted(((set(s), s) for s in succssets if s),
231 356 key=lambda x: len(x[1]), reverse=True)
232 357 for setversion, listversion in candidate:
233 358 for seenset in seen:
234 359 if setversion.issubset(seenset):
235 360 break
236 361 else:
237 362 final.append(listversion)
238 363 seen.append(setversion)
239 364 final.reverse() # put small successors sets first
240 365 cache[current] = final
241 366 return cache[initialnode]
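As an aside, the per-marker Cartesian product and the final duplicate/subset pruning above can be exercised in isolation. A minimal standalone sketch, assuming markers are (precursor, successors-tuple) pairs and `cache` maps each successor to its already-computed successors sets; `combine_markers` is a hypothetical helper, not part of this module:

    def combine_markers(markers, cache):
        result = []
        for _prec, succs in markers:
            # each marker contributes the Cartesian product of the
            # successors sets of its successors
            markss = [[]]
            for suc in succs:
                markss = [prefix + [p for p in suffix if p not in prefix]
                          for prefix in markss
                          for suffix in cache[suc]]
            result.extend(markss)
        # drop duplicates and strict subsets, scanning from the
        # largest set to the smallest, as the code above does
        final, seen = [], []
        for s in sorted((x for x in result if x), key=len, reverse=True):
            sset = set(s)
            if not any(sset.issubset(other) for other in seen):
                final.append(s)
                seen.append(sset)
        final.reverse()  # small successors sets first
        return final

    # split marker: A -> (B, C) yields one two-element successors set
    assert combine_markers([('A', ('B', 'C'))],
                           {'B': [['B']], 'C': [['C']]}) == [['B', 'C']]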
@@ -1,433 +1,434
1 1 # repair.py - functions for repository repair for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 # Copyright 2007 Matt Mackall
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import hashlib
13 13
14 14 from .i18n import _
15 15 from .node import short
16 16 from . import (
17 17 bundle2,
18 18 changegroup,
19 19 discovery,
20 20 error,
21 21 exchange,
22 22 obsolete,
23 obsutil,
23 24 util,
24 25 )
25 26
26 27 def _bundle(repo, bases, heads, node, suffix, compress=True, obsolescence=True):
27 28 """create a bundle with the specified revisions as a backup"""
28 29
29 30 backupdir = "strip-backup"
30 31 vfs = repo.vfs
31 32 if not vfs.isdir(backupdir):
32 33 vfs.mkdir(backupdir)
33 34
34 35 # Include a hash of all the nodes in the filename for uniqueness
35 36 allcommits = repo.set('%ln::%ln', bases, heads)
36 37 allhashes = sorted(c.hex() for c in allcommits)
37 38 totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
38 39 name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)
39 40
40 41 cgversion = changegroup.safeversion(repo)
41 42 comp = None
42 43 if cgversion != '01':
43 44 bundletype = "HG20"
44 45 if compress:
45 46 comp = 'BZ'
46 47 elif compress:
47 48 bundletype = "HG10BZ"
48 49 else:
49 50 bundletype = "HG10UN"
50 51
51 52 outgoing = discovery.outgoing(repo, missingroots=bases, missingheads=heads)
52 53 contentopts = {
53 54 'cg.version': cgversion,
54 55 'obsolescence': obsolescence,
55 56 'phases': True,
56 57 }
57 58 return bundle2.writenewbundle(repo.ui, repo, 'strip', name, bundletype,
58 59 outgoing, contentopts, vfs, compression=comp)
59 60
60 61 def _collectfiles(repo, striprev):
61 62 """find out the filelogs affected by the strip"""
62 63 files = set()
63 64
64 65 for x in xrange(striprev, len(repo)):
65 66 files.update(repo[x].files())
66 67
67 68 return sorted(files)
68 69
69 70 def _collectbrokencsets(repo, files, striprev):
70 71 """return the changesets which will be broken by the truncation"""
71 72 s = set()
72 73 def collectone(revlog):
73 74 _, brokenset = revlog.getstrippoint(striprev)
74 75 s.update([revlog.linkrev(r) for r in brokenset])
75 76
76 77 collectone(repo.manifestlog._revlog)
77 78 for fname in files:
78 79 collectone(repo.file(fname))
79 80
80 81 return s
81 82
82 83 def strip(ui, repo, nodelist, backup=True, topic='backup'):
83 84 # This function requires the caller to lock the repo, but it operates
84 85 # within a transaction of its own, and thus requires there to be no current
85 86 # transaction when it is called.
86 87 if repo.currenttransaction() is not None:
87 88 raise error.ProgrammingError('cannot strip from inside a transaction')
88 89
89 90 # Simple way to maintain backwards compatibility for this
90 91 # argument.
91 92 if backup in ['none', 'strip']:
92 93 backup = False
93 94
94 95 repo = repo.unfiltered()
95 96 repo.destroying()
96 97
97 98 cl = repo.changelog
98 99 # TODO handle undo of merge sets
99 100 if isinstance(nodelist, str):
100 101 nodelist = [nodelist]
101 102 striplist = [cl.rev(node) for node in nodelist]
102 103 striprev = min(striplist)
103 104
104 105 files = _collectfiles(repo, striprev)
105 106 saverevs = _collectbrokencsets(repo, files, striprev)
106 107
107 108 # Some revisions with rev > striprev may not be descendants of striprev.
108 109 # We have to find these revisions and put them in a bundle, so that
109 110 # we can restore them after the truncations.
110 111 # To create the bundle we use repo.changegroupsubset which requires
111 112 # the list of heads and bases of the set of interesting revisions.
112 113 # (head = revision in the set that has no descendant in the set;
113 114 # base = revision in the set that has no ancestor in the set)
114 115 tostrip = set(striplist)
115 116 saveheads = set(saverevs)
116 117 for r in cl.revs(start=striprev + 1):
117 118 if any(p in tostrip for p in cl.parentrevs(r)):
118 119 tostrip.add(r)
119 120
120 121 if r not in tostrip:
121 122 saverevs.add(r)
122 123 saveheads.difference_update(cl.parentrevs(r))
123 124 saveheads.add(r)
124 125 saveheads = [cl.node(r) for r in saveheads]
125 126
126 127 # compute base nodes
127 128 if saverevs:
128 129 descendants = set(cl.descendants(saverevs))
129 130 saverevs.difference_update(descendants)
130 131 savebases = [cl.node(r) for r in saverevs]
131 132 stripbases = [cl.node(r) for r in tostrip]
132 133
133 134 stripobsidx = obsmarkers = ()
134 135 if repo.ui.configbool('devel', 'strip-obsmarkers', True):
135 obsmarkers = obsolete.exclusivemarkers(repo, stripbases)
136 obsmarkers = obsutil.exclusivemarkers(repo, stripbases)
136 137 if obsmarkers:
137 138 stripobsidx = [i for i, m in enumerate(repo.obsstore)
138 139 if m in obsmarkers]
139 140
140 141 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
141 142 # is much faster
142 143 newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
143 144 if newbmtarget:
144 145 newbmtarget = repo[newbmtarget.first()].node()
145 146 else:
146 147 newbmtarget = '.'
147 148
148 149 bm = repo._bookmarks
149 150 updatebm = []
150 151 for m in bm:
151 152 rev = repo[bm[m]].rev()
152 153 if rev in tostrip:
153 154 updatebm.append(m)
154 155
155 156 # create a changegroup for all the branches we need to keep
156 157 backupfile = None
157 158 vfs = repo.vfs
158 159 node = nodelist[-1]
159 160 if backup:
160 161 backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
161 162 repo.ui.status(_("saved backup bundle to %s\n") %
162 163 vfs.join(backupfile))
163 164 repo.ui.log("backupbundle", "saved backup bundle to %s\n",
164 165 vfs.join(backupfile))
165 166 tmpbundlefile = None
166 167 if saveheads:
167 168 # do not compress the temporary bundle if we remove it from disk later
168 169 #
169 170 # We do not include obsolescence markers, as they might re-introduce
170 171 # the prune markers we are trying to strip. This is harmless since the
171 172 # stripped markers are already backed up and we did not touch the
172 173 # markers for the saved changesets.
173 174 tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
174 175 compress=False, obsolescence=False)
175 176
176 177 mfst = repo.manifestlog._revlog
177 178
178 179 try:
179 180 with repo.transaction("strip") as tr:
180 181 offset = len(tr.entries)
181 182
182 183 tr.startgroup()
183 184 cl.strip(striprev, tr)
184 185 mfst.strip(striprev, tr)
185 186 striptrees(repo, tr, striprev, files)
186 187
187 188 for fn in files:
188 189 repo.file(fn).strip(striprev, tr)
189 190 tr.endgroup()
190 191
191 192 for i in xrange(offset, len(tr.entries)):
192 193 file, troffset, ignore = tr.entries[i]
193 194 with repo.svfs(file, 'a', checkambig=True) as fp:
194 195 fp.truncate(troffset)
195 196 if troffset == 0:
196 197 repo.store.markremoved(file)
197 198
198 199 deleteobsmarkers(repo.obsstore, stripobsidx)
199 200 del repo.obsstore
200 201
201 202 repo._phasecache.filterunknown(repo)
202 203 if tmpbundlefile:
203 204 ui.note(_("adding branch\n"))
204 205 f = vfs.open(tmpbundlefile, "rb")
205 206 gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
206 207 if not repo.ui.verbose:
207 208 # silence internal shuffling chatter
208 209 repo.ui.pushbuffer()
209 210 tmpbundleurl = 'bundle:' + vfs.join(tmpbundlefile)
210 211 txnname = 'strip'
211 212 if not isinstance(gen, bundle2.unbundle20):
212 213 txnname = "strip\n%s" % util.hidepassword(tmpbundleurl)
213 214 with repo.transaction(txnname) as tr:
214 215 bundle2.applybundle(repo, gen, tr, source='strip',
215 216 url=tmpbundleurl, emptyok=True)
216 217 if not repo.ui.verbose:
217 218 repo.ui.popbuffer()
218 219 f.close()
219 220 repo._phasecache.invalidate()
220 221
221 222 for m in updatebm:
222 223 bm[m] = repo[newbmtarget].node()
223 224
224 225 with repo.transaction('repair') as tr:
225 226 bm.recordchange(tr)
226 227
227 228 # remove undo files
228 229 for undovfs, undofile in repo.undofiles():
229 230 try:
230 231 undovfs.unlink(undofile)
231 232 except OSError as e:
232 233 if e.errno != errno.ENOENT:
233 234 ui.warn(_('error removing %s: %s\n') %
234 235 (undovfs.join(undofile), str(e)))
235 236
236 237 except: # re-raises
237 238 if backupfile:
238 239 ui.warn(_("strip failed, backup bundle stored in '%s'\n")
239 240 % vfs.join(backupfile))
240 241 if tmpbundlefile:
241 242 ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
242 243 % vfs.join(tmpbundlefile))
243 244 ui.warn(_("(fix the problem, then recover the changesets with "
244 245 "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
245 246 raise
246 247 else:
247 248 if tmpbundlefile:
248 249 # Remove temporary bundle only if there were no exceptions
249 250 vfs.unlink(tmpbundlefile)
250 251
251 252 repo.destroyed()
252 253 # return the backup file path (or None if 'backup' was False) so
253 254 # extensions can use it
254 255 return backupfile
255 256
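As a side note, the head/base definitions used when `strip` saves unrelated revisions above (head = revision in the set with no descendant in the set; base = revision in the set with no ancestor in the set) can be illustrated on a toy DAG. A minimal, quadratic sketch; `parents` is assumed to be a callable mapping a revision to its parent revisions, and both helpers are hypothetical:

    def _ancestors(parents, rev):
        # transitive ancestors of rev, excluding rev itself
        seen, todo = set(), list(parents(rev))
        while todo:
            p = todo.pop()
            if p not in seen:
                seen.add(p)
                todo.extend(parents(p))
        return seen

    def headsandbases(parents, revs):
        revs = set(revs)
        heads = set(r for r in revs
                    if not any(r in _ancestors(parents, o)
                               for o in revs if o != r))
        bases = set(r for r in revs
                    if not (_ancestors(parents, r) & revs))
        return heads, bases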
256 257 def safestriproots(ui, repo, nodes):
257 258 """return list of roots of nodes where descendants are covered by nodes"""
258 259 torev = repo.unfiltered().changelog.rev
259 260 revs = set(torev(n) for n in nodes)
260 261 # tostrip = wanted - unsafe = wanted - ancestors(orphaned)
261 262 # orphaned = affected - wanted
262 263 # affected = descendants(roots(wanted))
263 264 # wanted = revs
264 265 tostrip = set(repo.revs('%ld-(::((roots(%ld)::)-%ld))', revs, revs, revs))
265 266 notstrip = revs - tostrip
266 267 if notstrip:
267 268 nodestr = ', '.join(sorted(short(repo[n].node()) for n in notstrip))
268 269 ui.warn(_('warning: orphaned descendants detected, '
269 270 'not stripping %s\n') % nodestr)
270 271 return [c.node() for c in repo.set('roots(%ld)', tostrip)]
271 272
272 273 class stripcallback(object):
273 274 """used as a transaction postclose callback"""
274 275
275 276 def __init__(self, ui, repo, backup, topic):
276 277 self.ui = ui
277 278 self.repo = repo
278 279 self.backup = backup
279 280 self.topic = topic or 'backup'
280 281 self.nodelist = []
281 282
282 283 def addnodes(self, nodes):
283 284 self.nodelist.extend(nodes)
284 285
285 286 def __call__(self, tr):
286 287 roots = safestriproots(self.ui, self.repo, self.nodelist)
287 288 if roots:
288 289 strip(self.ui, self.repo, roots, self.backup, self.topic)
289 290
290 291 def delayedstrip(ui, repo, nodelist, topic=None):
291 292 """like strip, but works inside transaction and won't strip irreverent revs
292 293
293 294 nodelist must explicitly contain all descendants. Otherwise a warning will
294 295 be printed that some nodes are not stripped.
295 296
296 297 Always do a backup. The last non-None "topic" will be used as the backup
297 298 topic name. The default backup topic name is "backup".
298 299 """
299 300 tr = repo.currenttransaction()
300 301 if not tr:
301 302 nodes = safestriproots(ui, repo, nodelist)
302 303 return strip(ui, repo, nodes, True, topic)
303 304 # transaction postclose callbacks are called in alphabetical order.
304 305 # use '\xff' as prefix so we are likely to be called last.
305 306 callback = tr.getpostclose('\xffstrip')
306 307 if callback is None:
307 308 callback = stripcallback(ui, repo, True, topic)
308 309 tr.addpostclose('\xffstrip', callback)
309 310 if topic:
310 311 callback.topic = topic
311 312 callback.addnodes(nodelist)
312 313
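A usage sketch: an extension that already holds a transaction can request a deferred strip, and the nodes are actually stripped when the transaction closes. Here `obsoletednodes` and the 'myext' topic are hypothetical:

    from mercurial import repair

    def cleanup(ui, repo, obsoletednodes):
        # nodelist must include all descendants, or delayedstrip
        # will warn about orphans and skip them
        repair.delayedstrip(ui, repo, obsoletednodes, topic='myext')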
313 314 def striptrees(repo, tr, striprev, files):
314 315 if 'treemanifest' in repo.requirements: # safe but unnecessary
315 316 # otherwise
316 317 for unencoded, encoded, size in repo.store.datafiles():
317 318 if (unencoded.startswith('meta/') and
318 319 unencoded.endswith('00manifest.i')):
319 320 dir = unencoded[5:-12]
320 321 repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)
321 322
322 323 def rebuildfncache(ui, repo):
323 324 """Rebuilds the fncache file from repo history.
324 325
325 326 Missing entries will be added. Extra entries will be removed.
326 327 """
327 328 repo = repo.unfiltered()
328 329
329 330 if 'fncache' not in repo.requirements:
330 331 ui.warn(_('(not rebuilding fncache because repository does not '
331 332 'support fncache)\n'))
332 333 return
333 334
334 335 with repo.lock():
335 336 fnc = repo.store.fncache
336 337 # Trigger load of fncache.
337 338 if 'irrelevant' in fnc:
338 339 pass
339 340
340 341 oldentries = set(fnc.entries)
341 342 newentries = set()
342 343 seenfiles = set()
343 344
344 345 repolen = len(repo)
345 346 for rev in repo:
346 347 ui.progress(_('rebuilding'), rev, total=repolen,
347 348 unit=_('changesets'))
348 349
349 350 ctx = repo[rev]
350 351 for f in ctx.files():
351 352 # This is to minimize I/O.
352 353 if f in seenfiles:
353 354 continue
354 355 seenfiles.add(f)
355 356
356 357 i = 'data/%s.i' % f
357 358 d = 'data/%s.d' % f
358 359
359 360 if repo.store._exists(i):
360 361 newentries.add(i)
361 362 if repo.store._exists(d):
362 363 newentries.add(d)
363 364
364 365 ui.progress(_('rebuilding'), None)
365 366
366 367 if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
367 368 for dir in util.dirs(seenfiles):
368 369 i = 'meta/%s/00manifest.i' % dir
369 370 d = 'meta/%s/00manifest.d' % dir
370 371
371 372 if repo.store._exists(i):
372 373 newentries.add(i)
373 374 if repo.store._exists(d):
374 375 newentries.add(d)
375 376
376 377 addcount = len(newentries - oldentries)
377 378 removecount = len(oldentries - newentries)
378 379 for p in sorted(oldentries - newentries):
379 380 ui.write(_('removing %s\n') % p)
380 381 for p in sorted(newentries - oldentries):
381 382 ui.write(_('adding %s\n') % p)
382 383
383 384 if addcount or removecount:
384 385 ui.write(_('%d items added, %d removed from fncache\n') %
385 386 (addcount, removecount))
386 387 fnc.entries = newentries
387 388 fnc._dirty = True
388 389
389 390 with repo.transaction('fncache') as tr:
390 391 fnc.write(tr)
391 392 else:
392 393 ui.write(_('fncache already up to date\n'))
393 394
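For reference, this rebuild can be invoked directly from an extension; the repository lock is taken inside the function. A sketch, assuming `ui` and `repo` are supplied by the command infrastructure:

    from mercurial import repair

    def fixfncache(ui, repo):
        # reports added/removed entries and writes the fncache in a
        # 'fncache' transaction only when something changed
        repair.rebuildfncache(ui, repo)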
394 395 def stripbmrevset(repo, mark):
395 396 """
396 397 The revset to strip when strip is called with -B mark
397 398
398 399 Needs to live here so extensions can use it and wrap it even when strip is
399 400 not enabled or not present on a box.
400 401 """
401 402 return repo.revs("ancestors(bookmark(%s)) - "
402 403 "ancestors(head() and not bookmark(%s)) - "
403 404 "ancestors(bookmark() and not bookmark(%s))",
404 405 mark, mark, mark)
405 406
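For example, 'hg strip -B mark' resolves the bookmark through this revset; a wrapper could preview the doomed revisions before stripping. A hypothetical helper that could live alongside this function:

    def previewstrip(ui, repo, mark):
        # list what 'hg strip -B mark' would remove, without stripping
        for rev in stripbmrevset(repo, mark):
            ui.write('%d:%s\n' % (rev, short(repo[rev].node())))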
406 407 def deleteobsmarkers(obsstore, indices):
407 408 """Delete some obsmarkers from obsstore and return how many were deleted
408 409
409 410 'indices' is a list of ints which are the indices
410 411 of the markers to be deleted.
411 412
412 413 Every invocation of this function completely rewrites the obsstore file,
413 414 skipping the markers we want removed. A new temporary file is created,
414 415 the remaining markers are written there, and on .close() the file is
415 416 atomically renamed to obsstore, thus guaranteeing consistency."""
416 417 if not indices:
417 418 # we don't want to rewrite the obsstore with the same content
418 419 return
419 420
420 421 left = []
421 422 current = obsstore._all
422 423 n = 0
423 424 for i, m in enumerate(current):
424 425 if i in indices:
425 426 n += 1
426 427 continue
427 428 left.append(m)
428 429
429 430 newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
430 431 for bytes in obsolete.encodemarkers(left, True, obsstore._version):
431 432 newobsstorefile.write(bytes)
432 433 newobsstorefile.close()
433 434 return n
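The append-only store is rewritten wholesale because markers have no per-entry deletion; the same write-then-rename pattern can be shown with plain files. A generic sketch, not Mercurial's vfs API:

    import os
    import tempfile

    def rewriteatomically(path, keepchunks):
        # write the surviving content to a temporary file in the same
        # directory, then rename it over the original; readers never
        # observe a half-written file (rename is atomic on POSIX)
        directory = os.path.dirname(path) or '.'
        fd, tmp = tempfile.mkstemp(dir=directory)
        with os.fdopen(fd, 'wb') as f:
            for chunk in keepchunks:
                f.write(chunk)
        os.rename(tmp, path)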