phases: define an official tuple of mutable phases...
Boris Feld
r38174:02f992ac default
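
The change itself is small: phases.py gains an official `mutablephases`
tuple, and obsolete.py's `_mutablerevs()` switches to it instead of
hand-building `(phases.draft, phases.secret)`. A minimal sketch of the idea,
using only names visible in the diff below:

    # phases.py: allphases = public, draft, secret = range(3)
    mutablephases = tuple(allphases[1:])   # == (draft, secret) == (1, 2)

    # obsolete.py: ask the phase cache for every mutable revision
    repo._phasecache.getrevset(repo, phases.mutablephases)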
@@ -1,1023 +1,1023 b''
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 building new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "predecessor" and possible
24 24 replacements are called "successors". Markers that used changeset X as
25 25 a predecessor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
27 27 a successor are called "predecessor markers of Y" because they hold
28 28 information about the predecessors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
50 50 case. If two independent operations rewrite the same changeset A into B
51 51 and C, we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53
54 54 (A, (B,)) and (A, (C,))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are stored in an append-only file stored in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
66 66 The header is followed by the markers. The marker format depends on the
67 67 version. See the comment associated with each format for details.
68 68
69 69 """
70 70 from __future__ import absolute_import
71 71
72 72 import errno
73 73 import struct
74 74
75 75 from .i18n import _
76 76 from . import (
77 77 error,
78 78 node,
79 79 obsutil,
80 80 phases,
81 81 policy,
82 82 util,
83 83 )
84 84 from .utils import dateutil
85 85
86 86 parsers = policy.importmod(r'parsers')
87 87
88 88 _pack = struct.pack
89 89 _unpack = struct.unpack
90 90 _calcsize = struct.calcsize
91 91 propertycache = util.propertycache
92 92
93 93 # the obsolete feature is not mature enough to be enabled by default.
94 94 # you have to rely on a third party extension to enable this.
95 95 _enabled = False
96 96
97 97 # Options for obsolescence
98 98 createmarkersopt = 'createmarkers'
99 99 allowunstableopt = 'allowunstable'
100 100 exchangeopt = 'exchange'
101 101
102 102 def _getoptionvalue(repo, option):
103 103 """Returns True if the given repository has the given obsolete option
104 104 enabled.
105 105 """
106 106 configkey = 'evolution.%s' % option
107 107 newconfig = repo.ui.configbool('experimental', configkey)
108 108
109 109 # Return the value only if defined
110 110 if newconfig is not None:
111 111 return newconfig
112 112
113 113 # Fallback on generic option
114 114 try:
115 115 return repo.ui.configbool('experimental', 'evolution')
116 116 except (error.ConfigError, AttributeError):
117 117 # Fallback on the old-fashioned config
118 118 # inconsistent config: experimental.evolution
119 119 result = set(repo.ui.configlist('experimental', 'evolution'))
120 120
121 121 if 'all' in result:
122 122 return True
123 123
124 124 # For migration purposes, temporarily return true if the config hasn't
125 125 # been set but _enabled is true.
126 126 if len(result) == 0 and _enabled:
127 127 return True
128 128
129 129 # Temporary hack for next check
130 130 newconfig = repo.ui.config('experimental', 'evolution.createmarkers')
131 131 if newconfig:
132 132 result.add('createmarkers')
133 133
134 134 return option in result
135 135
136 136 def getoptions(repo):
137 137 """Returns dicts showing state of obsolescence features."""
138 138
139 139 createmarkersvalue = _getoptionvalue(repo, createmarkersopt)
140 140 unstablevalue = _getoptionvalue(repo, allowunstableopt)
141 141 exchangevalue = _getoptionvalue(repo, exchangeopt)
142 142
143 143 # createmarkers must be enabled if other options are enabled
144 144 if ((unstablevalue or exchangevalue) and not createmarkersvalue):
145 145 raise error.Abort(_("'createmarkers' obsolete option must be enabled "
146 146 "if other obsolete options are enabled"))
147 147
148 148 return {
149 149 createmarkersopt: createmarkersvalue,
150 150 allowunstableopt: unstablevalue,
151 151 exchangeopt: exchangevalue,
152 152 }
153 153
154 154 def isenabled(repo, option):
155 155 """Returns True if the given repository has the given obsolete option
156 156 enabled.
157 157 """
158 158 return getoptions(repo)[option]
159 159
160 160 # Creating aliases for marker flags because the evolve extension looks
161 161 # for bumpedfix in obsolete.py
162 162 bumpedfix = obsutil.bumpedfix
163 163 usingsha256 = obsutil.usingsha256
164 164
165 165 ## Parsing and writing of version "0"
166 166 #
167 167 # The header is followed by the markers. Each marker is made of:
168 168 #
169 169 # - 1 uint8 : number of new changesets "N", can be zero.
170 170 #
171 171 # - 1 uint32: metadata size "M" in bytes.
172 172 #
173 173 # - 1 byte: a bit field. It is reserved for flags used in common
174 174 # obsolete marker operations, to avoid repeated decoding of metadata
175 175 # entries.
176 176 #
177 177 # - 20 bytes: obsoleted changeset identifier.
178 178 #
179 179 # - N*20 bytes: new changesets identifiers.
180 180 #
181 181 # - M bytes: metadata as a sequence of nul-terminated strings. Each
182 182 # string contains a key and a value, separated by a colon ':', without
183 183 # additional encoding. Keys cannot contain '\0' or ':' and values
184 184 # cannot contain '\0'.
185 185 _fm0version = 0
186 186 _fm0fixed = '>BIB20s'
187 187 _fm0node = '20s'
188 188 _fm0fsize = _calcsize(_fm0fixed)
189 189 _fm0fnodesize = _calcsize(_fm0node)
190 190
191 191 def _fm0readmarkers(data, off, stop):
192 192 # Loop on markers
193 193 while off < stop:
194 194 # read fixed part
195 195 cur = data[off:off + _fm0fsize]
196 196 off += _fm0fsize
197 197 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
198 198 # read replacement
199 199 sucs = ()
200 200 if numsuc:
201 201 s = (_fm0fnodesize * numsuc)
202 202 cur = data[off:off + s]
203 203 sucs = _unpack(_fm0node * numsuc, cur)
204 204 off += s
205 205 # read metadata
206 206 # (metadata will be decoded on demand)
207 207 metadata = data[off:off + mdsize]
208 208 if len(metadata) != mdsize:
209 209 raise error.Abort(_('parsing obsolete marker: metadata is too '
210 210 'short, %d bytes expected, got %d')
211 211 % (mdsize, len(metadata)))
212 212 off += mdsize
213 213 metadata = _fm0decodemeta(metadata)
214 214 try:
215 215 when, offset = metadata.pop('date', '0 0').split(' ')
216 216 date = float(when), int(offset)
217 217 except ValueError:
218 218 date = (0., 0)
219 219 parents = None
220 220 if 'p2' in metadata:
221 221 parents = (metadata.pop('p1', None), metadata.pop('p2', None))
222 222 elif 'p1' in metadata:
223 223 parents = (metadata.pop('p1', None),)
224 224 elif 'p0' in metadata:
225 225 parents = ()
226 226 if parents is not None:
227 227 try:
228 228 parents = tuple(node.bin(p) for p in parents)
229 229 # if parent content is not a nodeid, drop the data
230 230 for p in parents:
231 231 if len(p) != 20:
232 232 parents = None
233 233 break
234 234 except TypeError:
235 235 # if content cannot be translated to nodeid drop the data.
236 236 parents = None
237 237
238 238 metadata = tuple(sorted(metadata.iteritems()))
239 239
240 240 yield (pre, sucs, flags, metadata, date, parents)
241 241
242 242 def _fm0encodeonemarker(marker):
243 243 pre, sucs, flags, metadata, date, parents = marker
244 244 if flags & usingsha256:
245 245 raise error.Abort(_('cannot handle sha256 with old obsstore format'))
246 246 metadata = dict(metadata)
247 247 time, tz = date
248 248 metadata['date'] = '%r %i' % (time, tz)
249 249 if parents is not None:
250 250 if not parents:
251 251 # mark that we explicitly recorded no parents
252 252 metadata['p0'] = ''
253 253 for i, p in enumerate(parents, 1):
254 254 metadata['p%i' % i] = node.hex(p)
255 255 metadata = _fm0encodemeta(metadata)
256 256 numsuc = len(sucs)
257 257 format = _fm0fixed + (_fm0node * numsuc)
258 258 data = [numsuc, len(metadata), flags, pre]
259 259 data.extend(sucs)
260 260 return _pack(format, *data) + metadata
261 261
262 262 def _fm0encodemeta(meta):
263 263 """Return encoded metadata string to string mapping.
264 264
265 265 Assumes no ':' in keys and no '\0' in either keys or values."""
266 266 for key, value in meta.iteritems():
267 267 if ':' in key or '\0' in key:
268 268 raise ValueError("':' and '\0' are forbidden in metadata keys")
269 269 if '\0' in value:
270 270 raise ValueError("'\0' is forbidden in metadata values")
271 271 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
272 272
273 273 def _fm0decodemeta(data):
274 274 """Return string to string dictionary from encoded version."""
275 275 d = {}
276 276 for l in data.split('\0'):
277 277 if l:
278 278 key, value = l.split(':')
279 279 d[key] = value
280 280 return d
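
# Illustrative round-trip through the v0 metadata codec (a sketch, not
# part of this change; assumes plain ASCII keys and values):
#
#   >>> meta = {'user': 'alice', 'operation': 'amend'}
#   >>> _fm0decodemeta(_fm0encodemeta(meta)) == meta
#   True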
281 281
282 282 ## Parsing and writing of version "1"
283 283 #
284 284 # The header is followed by the markers. Each marker is made of:
285 285 #
286 286 # - uint32: total size of the marker (including this field)
287 287 #
288 288 # - float64: date in seconds since epoch
289 289 #
290 290 # - int16: timezone offset in minutes
291 291 #
292 292 # - uint16: a bit field. It is reserved for flags used in common
293 293 # obsolete marker operations, to avoid repeated decoding of metadata
294 294 # entries.
295 295 #
296 296 # - uint8: number of successors "N", can be zero.
297 297 #
298 298 # - uint8: number of parents "P", can be zero.
299 299 #
300 300 # 0: parents data stored but no parent,
301 301 # 1: one parent stored,
302 302 # 2: two parents stored,
303 303 # 3: no parent data stored
304 304 #
305 305 # - uint8: number of metadata entries M
306 306 #
307 307 # - 20 or 32 bytes: predecessor changeset identifier.
308 308 #
309 309 # - N*(20 or 32) bytes: successors changesets identifiers.
310 310 #
311 311 # - P*(20 or 32) bytes: parents of the predecessors changesets.
312 312 #
313 313 # - M*(uint8, uint8): size of all metadata entries (key and value)
314 314 #
315 315 # - remaining bytes: the metadata, each (key, value) pair after the other.
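#
# Worked size check (a sketch, not part of this change): '>' means
# big-endian with no padding, so the fixed part below occupies
# 4+8+2+2+1+1+1+20 = 39 bytes:
#
#   >>> struct.calcsize('>IdhHBBB20s')
#   39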
316 316 _fm1version = 1
317 317 _fm1fixed = '>IdhHBBB20s'
318 318 _fm1nodesha1 = '20s'
319 319 _fm1nodesha256 = '32s'
320 320 _fm1nodesha1size = _calcsize(_fm1nodesha1)
321 321 _fm1nodesha256size = _calcsize(_fm1nodesha256)
322 322 _fm1fsize = _calcsize(_fm1fixed)
323 323 _fm1parentnone = 3
324 324 _fm1parentshift = 14
325 325 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
326 326 _fm1metapair = 'BB'
327 327 _fm1metapairsize = _calcsize(_fm1metapair)
328 328
329 329 def _fm1purereadmarkers(data, off, stop):
330 330 # make some global constants local for performance
331 331 noneflag = _fm1parentnone
332 332 sha2flag = usingsha256
333 333 sha1size = _fm1nodesha1size
334 334 sha2size = _fm1nodesha256size
335 335 sha1fmt = _fm1nodesha1
336 336 sha2fmt = _fm1nodesha256
337 337 metasize = _fm1metapairsize
338 338 metafmt = _fm1metapair
339 339 fsize = _fm1fsize
340 340 unpack = _unpack
341 341
342 342 # Loop on markers
343 343 ufixed = struct.Struct(_fm1fixed).unpack
344 344
345 345 while off < stop:
346 346 # read fixed part
347 347 o1 = off + fsize
348 348 t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])
349 349
350 350 if flags & sha2flag:
351 351 # FIXME: prec was read as a SHA1, needs to be amended
352 352
353 353 # read 0 or more successors
354 354 if numsuc == 1:
355 355 o2 = o1 + sha2size
356 356 sucs = (data[o1:o2],)
357 357 else:
358 358 o2 = o1 + sha2size * numsuc
359 359 sucs = unpack(sha2fmt * numsuc, data[o1:o2])
360 360
361 361 # read parents
362 362 if numpar == noneflag:
363 363 o3 = o2
364 364 parents = None
365 365 elif numpar == 1:
366 366 o3 = o2 + sha2size
367 367 parents = (data[o2:o3],)
368 368 else:
369 369 o3 = o2 + sha2size * numpar
370 370 parents = unpack(sha2fmt * numpar, data[o2:o3])
371 371 else:
372 372 # read 0 or more successors
373 373 if numsuc == 1:
374 374 o2 = o1 + sha1size
375 375 sucs = (data[o1:o2],)
376 376 else:
377 377 o2 = o1 + sha1size * numsuc
378 378 sucs = unpack(sha1fmt * numsuc, data[o1:o2])
379 379
380 380 # read parents
381 381 if numpar == noneflag:
382 382 o3 = o2
383 383 parents = None
384 384 elif numpar == 1:
385 385 o3 = o2 + sha1size
386 386 parents = (data[o2:o3],)
387 387 else:
388 388 o3 = o2 + sha1size * numpar
389 389 parents = unpack(sha1fmt * numpar, data[o2:o3])
390 390
391 391 # read metadata
392 392 off = o3 + metasize * nummeta
393 393 metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
394 394 metadata = []
395 395 for idx in xrange(0, len(metapairsize), 2):
396 396 o1 = off + metapairsize[idx]
397 397 o2 = o1 + metapairsize[idx + 1]
398 398 metadata.append((data[off:o1], data[o1:o2]))
399 399 off = o2
400 400
401 401 yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
402 402
403 403 def _fm1encodeonemarker(marker):
404 404 pre, sucs, flags, metadata, date, parents = marker
405 405 # determine node size
406 406 _fm1node = _fm1nodesha1
407 407 if flags & usingsha256:
408 408 _fm1node = _fm1nodesha256
409 409 numsuc = len(sucs)
410 410 numextranodes = numsuc
411 411 if parents is None:
412 412 numpar = _fm1parentnone
413 413 else:
414 414 numpar = len(parents)
415 415 numextranodes += numpar
416 416 formatnodes = _fm1node * numextranodes
417 417 formatmeta = _fm1metapair * len(metadata)
418 418 format = _fm1fixed + formatnodes + formatmeta
419 419 # tz is stored in minutes so we divide by 60
420 420 tz = date[1]//60
421 421 data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
422 422 data.extend(sucs)
423 423 if parents is not None:
424 424 data.extend(parents)
425 425 totalsize = _calcsize(format)
426 426 for key, value in metadata:
427 427 lk = len(key)
428 428 lv = len(value)
429 429 if lk > 255:
430 430 msg = ('obsstore metadata key cannot be longer than 255 bytes'
431 431 ' (key "%s" is %u bytes)') % (key, lk)
432 432 raise error.ProgrammingError(msg)
433 433 if lv > 255:
434 434 msg = ('obsstore metadata value cannot be longer than 255 bytes'
435 435 ' (value "%s" for key "%s" is %u bytes)') % (value, key, lv)
436 436 raise error.ProgrammingError(msg)
437 437 data.append(lk)
438 438 data.append(lv)
439 439 totalsize += lk + lv
440 440 data[0] = totalsize
441 441 data = [_pack(format, *data)]
442 442 for key, value in metadata:
443 443 data.append(key)
444 444 data.append(value)
445 445 return ''.join(data)
446 446
447 447 def _fm1readmarkers(data, off, stop):
448 448 native = getattr(parsers, 'fm1readmarkers', None)
449 449 if not native:
450 450 return _fm1purereadmarkers(data, off, stop)
451 451 return native(data, off, stop)
452 452
453 453 # mapping to read/write various marker formats
454 454 # <version> -> (decoder, encoder)
455 455 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
456 456 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
457 457
458 458 def _readmarkerversion(data):
459 459 return _unpack('>B', data[0:1])[0]
460 460
461 461 @util.nogc
462 462 def _readmarkers(data, off=None, stop=None):
463 463 """Read and enumerate markers from raw data"""
464 464 diskversion = _readmarkerversion(data)
465 465 if not off:
466 466 off = 1 # skip 1 byte version number
467 467 if stop is None:
468 468 stop = len(data)
469 469 if diskversion not in formats:
470 470 msg = _('parsing obsolete marker: unknown version %r') % diskversion
471 471 raise error.UnknownVersion(msg, version=diskversion)
472 472 return diskversion, formats[diskversion][0](data, off, stop)
473 473
474 474 def encodeheader(version=_fm0version):
475 475 return _pack('>B', version)
476 476
477 477 def encodemarkers(markers, addheader=False, version=_fm0version):
478 478 # Kept separate from flushmarkers(), it will be reused for
479 479 # markers exchange.
480 480 encodeone = formats[version][1]
481 481 if addheader:
482 482 yield encodeheader(version)
483 483 for marker in markers:
484 484 yield encodeone(marker)
485 485
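# Illustrative (a sketch, not part of this change): encoding an empty
# iterable with a header yields just the version byte:
#
#   >>> b''.join(encodemarkers([], addheader=True, version=_fm1version))
#   '\x01'
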
486 486 @util.nogc
487 487 def _addsuccessors(successors, markers):
488 488 for mark in markers:
489 489 successors.setdefault(mark[0], set()).add(mark)
490 490
491 491 @util.nogc
492 492 def _addpredecessors(predecessors, markers):
493 493 for mark in markers:
494 494 for suc in mark[1]:
495 495 predecessors.setdefault(suc, set()).add(mark)
496 496
497 497 @util.nogc
498 498 def _addchildren(children, markers):
499 499 for mark in markers:
500 500 parents = mark[5]
501 501 if parents is not None:
502 502 for p in parents:
503 503 children.setdefault(p, set()).add(mark)
504 504
505 505 def _checkinvalidmarkers(markers):
506 506 """search for marker with invalid data and raise error if needed
507 507
508 508 Exist as a separated function to allow the evolve extension for a more
509 509 subtle handling.
510 510 """
511 511 for mark in markers:
512 512 if node.nullid in mark[1]:
513 513 raise error.Abort(_('bad obsolescence marker detected: '
514 514 'invalid successors nullid'))
515 515
516 516 class obsstore(object):
517 517 """Store obsolete markers
518 518
519 519 Markers can be accessed with the following mappings:
520 520 - predecessors[x] -> set(markers on predecessors edges of x)
521 521 - successors[x] -> set(markers on successors edges of x)
522 522 - children[x] -> set(markers on predecessors edges of children(x))
523 523 """
524 524
525 525 fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
526 526 # prec: nodeid, predecessor changeset
527 527 # succs: tuple of nodeid, successor changesets (0-N length)
528 528 # flag: integer, flag field carrying modifier for the markers (see doc)
529 529 # meta: binary blob, encoded metadata dictionary
530 530 # date: (float, int) tuple, date of marker creation
531 531 # parents: (tuple of nodeid) or None, parents of predecessors
532 532 # None is used when no data has been recorded
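#
# An illustrative marker tuple (hypothetical values; real node ids are
# 20-byte binary strings, abbreviated here):
#
#   (b'<prec>', (b'<succ>',), 0,
#    (('operation', 'amend'), ('user', 'alice')),
#    (1529000000.0, 0), None)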
533 533
534 534 def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
535 535 # caches for various obsolescence related data
536 536 self.caches = {}
537 537 self.svfs = svfs
538 538 self._defaultformat = defaultformat
539 539 self._readonly = readonly
540 540
541 541 def __iter__(self):
542 542 return iter(self._all)
543 543
544 544 def __len__(self):
545 545 return len(self._all)
546 546
547 547 def __nonzero__(self):
548 548 if not self._cached(r'_all'):
549 549 try:
550 550 return self.svfs.stat('obsstore').st_size > 1
551 551 except OSError as inst:
552 552 if inst.errno != errno.ENOENT:
553 553 raise
554 554 # just build an empty _all list if no obsstore exists, which
555 555 # avoids further stat() syscalls
556 556 return bool(self._all)
557 557
558 558 __bool__ = __nonzero__
559 559
560 560 @property
561 561 def readonly(self):
562 562 """True if marker creation is disabled
563 563
564 564 Remove me in the future when obsolete markers are always on."""
565 565 return self._readonly
566 566
567 567 def create(self, transaction, prec, succs=(), flag=0, parents=None,
568 568 date=None, metadata=None, ui=None):
569 569 """obsolete: add a new obsolete marker
570 570
571 571 * ensure it is hashable
572 572 * check mandatory metadata
573 573 * encode metadata
574 574 
575 575 If you are a human writing code that creates markers, you want to use
576 576 the `createmarkers` function in this module instead.
577 577 
578 578 Return True if a new marker has been added, False if the marker
579 579 already existed (no-op).
580 580 """
581 581 if metadata is None:
582 582 metadata = {}
583 583 if date is None:
584 584 if 'date' in metadata:
585 585 # as a courtesy for out-of-tree extensions
586 586 date = dateutil.parsedate(metadata.pop('date'))
587 587 elif ui is not None:
588 588 date = ui.configdate('devel', 'default-date')
589 589 if date is None:
590 590 date = dateutil.makedate()
591 591 else:
592 592 date = dateutil.makedate()
593 593 if len(prec) != 20:
594 594 raise ValueError(prec)
595 595 for succ in succs:
596 596 if len(succ) != 20:
597 597 raise ValueError(succ)
598 598 if prec in succs:
599 599 raise ValueError(_('in-marker cycle with %s') % node.hex(prec))
600 600
601 601 metadata = tuple(sorted(metadata.iteritems()))
602 602
603 603 marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
604 604 return bool(self.add(transaction, [marker]))
605 605
606 606 def add(self, transaction, markers):
607 607 """Add new markers to the store
608 608
609 609 Takes care of filtering duplicates.
610 610 Returns the number of new markers."""
611 611 if self._readonly:
612 612 raise error.Abort(_('creating obsolete markers is not enabled on '
613 613 'this repo'))
614 614 known = set()
615 615 getsuccessors = self.successors.get
616 616 new = []
617 617 for m in markers:
618 618 if m not in getsuccessors(m[0], ()) and m not in known:
619 619 known.add(m)
620 620 new.append(m)
621 621 if new:
622 622 f = self.svfs('obsstore', 'ab')
623 623 try:
624 624 offset = f.tell()
625 625 transaction.add('obsstore', offset)
626 626 # offset == 0: new file - add the version header
627 627 data = b''.join(encodemarkers(new, offset == 0, self._version))
628 628 f.write(data)
629 629 finally:
630 630 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
631 631 # call 'filecacheentry.refresh()' here
632 632 f.close()
633 633 addedmarkers = transaction.changes.get('obsmarkers')
634 634 if addedmarkers is not None:
635 635 addedmarkers.update(new)
636 636 self._addmarkers(new, data)
637 637 # new markers *may* have changed several sets. invalidate the caches.
638 638 self.caches.clear()
639 639 # records the number of new markers for the transaction hooks
640 640 previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
641 641 transaction.hookargs['new_obsmarkers'] = '%d' % (previous + len(new))
642 642 return len(new)
643 643
644 644 def mergemarkers(self, transaction, data):
645 645 """merge a binary stream of markers inside the obsstore
646 646
647 647 Returns the number of new markers added."""
648 648 version, markers = _readmarkers(data)
649 649 return self.add(transaction, markers)
650 650
651 651 @propertycache
652 652 def _data(self):
653 653 return self.svfs.tryread('obsstore')
654 654
655 655 @propertycache
656 656 def _version(self):
657 657 if len(self._data) >= 1:
658 658 return _readmarkerversion(self._data)
659 659 else:
660 660 return self._defaultformat
661 661
662 662 @propertycache
663 663 def _all(self):
664 664 data = self._data
665 665 if not data:
666 666 return []
667 667 self._version, markers = _readmarkers(data)
668 668 markers = list(markers)
669 669 _checkinvalidmarkers(markers)
670 670 return markers
671 671
672 672 @propertycache
673 673 def successors(self):
674 674 successors = {}
675 675 _addsuccessors(successors, self._all)
676 676 return successors
677 677
678 678 @propertycache
679 679 def predecessors(self):
680 680 predecessors = {}
681 681 _addpredecessors(predecessors, self._all)
682 682 return predecessors
683 683
684 684 @propertycache
685 685 def children(self):
686 686 children = {}
687 687 _addchildren(children, self._all)
688 688 return children
689 689
690 690 def _cached(self, attr):
691 691 return attr in self.__dict__
692 692
693 693 def _addmarkers(self, markers, rawdata):
694 694 markers = list(markers) # to allow repeated iteration
695 695 self._data = self._data + rawdata
696 696 self._all.extend(markers)
697 697 if self._cached(r'successors'):
698 698 _addsuccessors(self.successors, markers)
699 699 if self._cached(r'predecessors'):
700 700 _addpredecessors(self.predecessors, markers)
701 701 if self._cached(r'children'):
702 702 _addchildren(self.children, markers)
703 703 _checkinvalidmarkers(markers)
704 704
705 705 def relevantmarkers(self, nodes):
706 706 """return a set of all obsolescence markers relevant to a set of nodes.
707 707
708 708 "relevant" to a set of nodes mean:
709 709
710 710 - marker that use this changeset as successor
711 711 - prune marker of direct children on this changeset
712 712 - recursive application of the two rules on predecessors of these
713 713 markers
714 714
715 715 It is a set so you cannot rely on order."""
716 716
717 717 pendingnodes = set(nodes)
718 718 seenmarkers = set()
719 719 seennodes = set(pendingnodes)
720 720 precursorsmarkers = self.predecessors
721 721 succsmarkers = self.successors
722 722 children = self.children
723 723 while pendingnodes:
724 724 direct = set()
725 725 for current in pendingnodes:
726 726 direct.update(precursorsmarkers.get(current, ()))
727 727 pruned = [m for m in children.get(current, ()) if not m[1]]
728 728 direct.update(pruned)
729 729 pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
730 730 direct.update(pruned)
731 731 direct -= seenmarkers
732 732 pendingnodes = set([m[0] for m in direct])
733 733 seenmarkers |= direct
734 734 pendingnodes -= seennodes
735 735 seennodes |= pendingnodes
736 736 return seenmarkers
737 737
738 738 def makestore(ui, repo):
739 739 """Create an obsstore instance from a repo."""
740 740 # read default format for new obsstore.
741 741 # developer config: format.obsstore-version
742 742 defaultformat = ui.configint('format', 'obsstore-version')
743 743 # rely on obsstore class default when possible.
744 744 kwargs = {}
745 745 if defaultformat is not None:
746 746 kwargs[r'defaultformat'] = defaultformat
747 747 readonly = not isenabled(repo, createmarkersopt)
748 748 store = obsstore(repo.svfs, readonly=readonly, **kwargs)
749 749 if store and readonly:
750 750 ui.warn(_('obsolete feature not enabled but %i markers found!\n')
751 751 % len(list(store)))
752 752 return store
753 753
754 754 def commonversion(versions):
755 755 """Return the newest version listed in both versions and our local formats.
756 756
757 757 Returns None if no common version exists.
758 758 """
759 759 versions.sort(reverse=True)
760 760 # search for the highest version known on both sides
761 761 for v in versions:
762 762 if v in formats:
763 763 return v
764 764 return None
765 765
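# Illustrative behavior (a sketch), given the local formats {0, 1}
# registered above:
#
#   >>> commonversion([2, 1, 0])
#   1
#   >>> commonversion([5, 4]) is None
#   True
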
766 766 # arbitrarily picked to fit into the 8K limit of HTTP servers
767 767 # you have to take into account:
768 768 # - the version header
769 769 # - the base85 encoding
770 770 _maxpayload = 5300
771 771
772 772 def _pushkeyescape(markers):
773 773 """encode markers into a dict suitable for pushkey exchange
774 774
775 775 - binary data is base85 encoded
776 776 - split into chunks smaller than 5300 bytes"""
777 777 keys = {}
778 778 parts = []
779 779 currentlen = _maxpayload * 2 # ensure we create a new part
780 780 for marker in markers:
781 781 nextdata = _fm0encodeonemarker(marker)
782 782 if (len(nextdata) + currentlen > _maxpayload):
783 783 currentpart = []
784 784 currentlen = 0
785 785 parts.append(currentpart)
786 786 currentpart.append(nextdata)
787 787 currentlen += len(nextdata)
788 788 for idx, part in enumerate(reversed(parts)):
789 789 data = ''.join([_pack('>B', _fm0version)] + part)
790 790 keys['dump%i' % idx] = util.b85encode(data)
791 791 return keys
792 792
793 793 def listmarkers(repo):
794 794 """List markers over pushkey"""
795 795 if not repo.obsstore:
796 796 return {}
797 797 return _pushkeyescape(sorted(repo.obsstore))
798 798
799 799 def pushmarker(repo, key, old, new):
800 800 """Push markers over pushkey"""
801 801 if not key.startswith('dump'):
802 802 repo.ui.warn(_('unknown key: %r') % key)
803 803 return False
804 804 if old:
805 805 repo.ui.warn(_('unexpected old value for %r') % key)
806 806 return False
807 807 data = util.b85decode(new)
808 808 with repo.lock(), repo.transaction('pushkey: obsolete markers') as tr:
809 809 repo.obsstore.mergemarkers(tr, data)
810 810 repo.invalidatevolatilesets()
811 811 return True
812 812
813 813 # mapping of 'set-name' -> <function to compute this set>
814 814 cachefuncs = {}
815 815 def cachefor(name):
816 816 """Decorator to register a function as computing the cache for a set"""
817 817 def decorator(func):
818 818 if name in cachefuncs:
819 819 msg = "duplicated registration for volatileset '%s' (existing: %r)"
820 820 raise error.ProgrammingError(msg % (name, cachefuncs[name]))
821 821 cachefuncs[name] = func
822 822 return func
823 823 return decorator
824 824
825 825 def getrevs(repo, name):
826 826 """Return the set of revision that belong to the <name> set
827 827
828 828 Such access may compute the set and cache it for future use"""
829 829 repo = repo.unfiltered()
830 830 if not repo.obsstore:
831 831 return frozenset()
832 832 if name not in repo.obsstore.caches:
833 833 repo.obsstore.caches[name] = cachefuncs[name](repo)
834 834 return repo.obsstore.caches[name]
835 835
836 836 # To be simple we need to invalidate obsolescence caches when:
837 837 #
838 838 # - a new changeset is added
839 839 # - the public phase is changed
840 840 # - obsolescence markers are added
841 841 # - strip is used on a repo
842 842 def clearobscaches(repo):
843 843 """Remove all obsolescence related cache from a repo
844 844
845 845 This remove all cache in obsstore is the obsstore already exist on the
846 846 repo.
847 847
848 848 (We could be smarter here given the exact event that trigger the cache
849 849 clearing)"""
850 850 # only clear caches if there is obsstore data in this repo
851 851 if 'obsstore' in repo._filecache:
852 852 repo.obsstore.caches.clear()
853 853
854 854 def _mutablerevs(repo):
855 855 """the set of mutable revision in the repository"""
856 return repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
856 return repo._phasecache.getrevset(repo, phases.mutablephases)
857 857
858 858 @cachefor('obsolete')
859 859 def _computeobsoleteset(repo):
860 860 """the set of obsolete revisions"""
861 861 getnode = repo.changelog.node
862 862 notpublic = _mutablerevs(repo)
863 863 isobs = repo.obsstore.successors.__contains__
864 864 obs = set(r for r in notpublic if isobs(getnode(r)))
865 865 return obs
866 866
867 867 @cachefor('orphan')
868 868 def _computeorphanset(repo):
869 869 """the set of non obsolete revisions with obsolete parents"""
870 870 pfunc = repo.changelog.parentrevs
871 871 mutable = _mutablerevs(repo)
872 872 obsolete = getrevs(repo, 'obsolete')
873 873 others = mutable - obsolete
874 874 unstable = set()
875 875 for r in sorted(others):
876 876 # A rev is unstable if one of its parents is obsolete or unstable;
877 877 # this works since we traverse in growing rev order
878 878 for p in pfunc(r):
879 879 if p in obsolete or p in unstable:
880 880 unstable.add(r)
881 881 break
882 882 return unstable
883 883
884 884 @cachefor('suspended')
885 885 def _computesuspendedset(repo):
886 886 """the set of obsolete parents with non obsolete descendants"""
887 887 suspended = repo.changelog.ancestors(getrevs(repo, 'orphan'))
888 888 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
889 889
890 890 @cachefor('extinct')
891 891 def _computeextinctset(repo):
892 892 """the set of obsolete parents without non obsolete descendants"""
893 893 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
894 894
895 895 @cachefor('phasedivergent')
896 896 def _computephasedivergentset(repo):
897 897 """the set of revs trying to obsolete public revisions"""
898 898 bumped = set()
899 899 # util function (avoid attribute lookup in the loop)
900 900 phase = repo._phasecache.phase # would be faster to grab the full list
901 901 public = phases.public
902 902 cl = repo.changelog
903 903 torev = cl.nodemap.get
904 904 tonode = cl.node
905 905 for rev in repo.revs('(not public()) and (not obsolete())'):
906 906 # We only evaluate mutable, non-obsolete revisions
907 907 node = tonode(rev)
908 908 # (future) A cache of predecessors may be worth it if split is very common
909 909 for pnode in obsutil.allpredecessors(repo.obsstore, [node],
910 910 ignoreflags=bumpedfix):
911 911 prev = torev(pnode) # unfiltered! but so is phasecache
912 912 if (prev is not None) and (phase(repo, prev) <= public):
913 913 # we have a public predecessor
914 914 bumped.add(rev)
915 915 break # Next draft!
916 916 return bumped
917 917
918 918 @cachefor('contentdivergent')
919 919 def _computecontentdivergentset(repo):
920 920 """the set of rev that compete to be the final successors of some revision.
921 921 """
922 922 divergent = set()
923 923 obsstore = repo.obsstore
924 924 newermap = {}
925 925 tonode = repo.changelog.node
926 926 for rev in repo.revs('(not public()) - obsolete()'):
927 927 node = tonode(rev)
928 928 mark = obsstore.predecessors.get(node, ())
929 929 toprocess = set(mark)
930 930 seen = set()
931 931 while toprocess:
932 932 prec = toprocess.pop()[0]
933 933 if prec in seen:
934 934 continue # emergency cycle hanging prevention
935 935 seen.add(prec)
936 936 if prec not in newermap:
937 937 obsutil.successorssets(repo, prec, cache=newermap)
938 938 newer = [n for n in newermap[prec] if n]
939 939 if len(newer) > 1:
940 940 divergent.add(rev)
941 941 break
942 942 toprocess.update(obsstore.predecessors.get(prec, ()))
943 943 return divergent
944 944
945 945
946 946 def createmarkers(repo, relations, flag=0, date=None, metadata=None,
947 947 operation=None):
948 948 """Add obsolete markers between changesets in a repo
949 949
950 950 <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
951 951 tuples. `old` and each `new` are changectx objects. metadata is an optional
952 952 dictionary containing metadata for this marker only. It is merged with the
953 953 global metadata specified through the `metadata` argument of this function.
954 954
955 955 Trying to obsolete a public changeset will raise an exception.
956 956
957 957 Current user and date are used unless specified otherwise in the
958 958 metadata attribute.
959 959
960 960 This function operates within a transaction of its own, but does
961 961 not take any lock on the repo.
962 962 """
963 963 # prepare metadata
964 964 if metadata is None:
965 965 metadata = {}
966 966 if 'user' not in metadata:
967 967 develuser = repo.ui.config('devel', 'user.obsmarker')
968 968 if develuser:
969 969 metadata['user'] = develuser
970 970 else:
971 971 metadata['user'] = repo.ui.username()
972 972
973 973 # Operation metadata handling
974 974 useoperation = repo.ui.configbool('experimental',
975 975 'evolution.track-operation')
976 976 if useoperation and operation:
977 977 metadata['operation'] = operation
978 978
979 979 # Effect flag metadata handling
980 980 saveeffectflag = repo.ui.configbool('experimental',
981 981 'evolution.effect-flags')
982 982
983 983 with repo.transaction('add-obsolescence-marker') as tr:
984 984 markerargs = []
985 985 for rel in relations:
986 986 prec = rel[0]
987 987 sucs = rel[1]
988 988 localmetadata = metadata.copy()
989 989 if 2 < len(rel):
990 990 localmetadata.update(rel[2])
991 991
992 992 if not prec.mutable():
993 993 raise error.Abort(_("cannot obsolete public changeset: %s")
994 994 % prec,
995 995 hint="see 'hg help phases' for details")
996 996 nprec = prec.node()
997 997 nsucs = tuple(s.node() for s in sucs)
998 998 npare = None
999 999 if not nsucs:
1000 1000 npare = tuple(p.node() for p in prec.parents())
1001 1001 if nprec in nsucs:
1002 1002 raise error.Abort(_("changeset %s cannot obsolete itself")
1003 1003 % prec)
1004 1004
1005 1005 # The effect flag can differ between relations
1006 1006 if saveeffectflag:
1007 1007 # The effect flag is saved in a versioned field name for future
1008 1008 # evolution
1009 1009 effectflag = obsutil.geteffectflag(rel)
1010 1010 localmetadata[obsutil.EFFECTFLAGFIELD] = "%d" % effectflag
1011 1011
1012 1012 # Creating the marker causes the hidden cache to become invalid,
1013 1013 # which causes recomputation when we ask for prec.parents() above,
1014 1014 # resulting in n^2 behavior. So let's prepare all of the args
1015 1015 # first, then create the markers.
1016 1016 markerargs.append((nprec, nsucs, npare, localmetadata))
1017 1017
1018 1018 for args in markerargs:
1019 1019 nprec, nsucs, npare, localmetadata = args
1020 1020 repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
1021 1021 date=date, metadata=localmetadata,
1022 1022 ui=repo.ui)
1023 1023 repo.filteredrevcache.clear()
@@ -1,680 +1,681 b''
1 1 """ Mercurial phases support code
2 2
3 3 ---
4 4
5 5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
6 6 Logilab SA <contact@logilab.fr>
7 7 Augie Fackler <durin42@gmail.com>
8 8
9 9 This software may be used and distributed according to the terms
10 10 of the GNU General Public License version 2 or any later version.
11 11
12 12 ---
13 13
14 14 This module implements most phase logic in mercurial.
15 15
16 16
17 17 Basic Concept
18 18 =============
19 19
20 20 A 'changeset phase' is an indicator that tells us how a changeset is
21 21 manipulated and communicated. The details of each phase are described
22 22 below; here we describe the properties they have in common.
23 23
24 24 Like bookmarks, phases are not stored in history and thus are not
25 25 permanent and leave no audit trail.
26 26
27 27 First, no changeset can be in two phases at once. Phases are ordered,
28 28 so they can be considered from lowest to highest. The default, lowest
29 29 phase is 'public' - this is the normal phase of existing changesets. A
30 30 child changeset cannot be in a lower phase than its parents.
31 31
32 32 These phases share a hierarchy of traits:
33 33
34 34 immutable shared
35 35 public: X X
36 36 draft: X
37 37 secret:
38 38
39 39 Local commits are draft by default.
40 40
41 41 Phase Movement and Exchange
42 42 ===========================
43 43
44 44 Phase data is exchanged by pushkey on pull and push. Some servers have
45 45 a publish option set; we call such a server a "publishing server".
46 46 Pushing a draft changeset to a publishing server changes the phase to
47 47 public.
48 48
49 49 A small list of facts/rules defines the exchange of phases:
50 50
51 51 * old client never changes server states
52 52 * pull never changes server states
53 53 * publish and old server changesets are seen as public by client
54 54 * any secret changeset seen in another repository is lowered to at
55 55 least draft
56 56
57 57 Here is the final table summing up the 49 possible use cases of phase
58 58 exchange:
59 59
60 60 server
61 61 old publish non-publish
62 62 N X N D P N D P
63 63 old client
64 64 pull
65 65 N - X/X - X/D X/P - X/D X/P
66 66 X - X/X - X/D X/P - X/D X/P
67 67 push
68 68 X X/X X/X X/P X/P X/P X/D X/D X/P
69 69 new client
70 70 pull
71 71 N - P/X - P/D P/P - D/D P/P
72 72 D - P/X - P/D P/P - D/D P/P
73 73 P - P/X - P/D P/P - P/D P/P
74 74 push
75 75 D P/X P/X P/P P/P P/P D/D D/D P/P
76 76 P P/X P/X P/P P/P P/P P/P P/P P/P
77 77
78 78 Legend:
79 79
80 80 A/B = final state on client / state on server
81 81
82 82 * N = new/not present,
83 83 * P = public,
84 84 * D = draft,
85 85 * X = not tracked (i.e., the old client or server has no internal
86 86 way of recording the phase.)
87 87
88 88 passive = only pushes
89 89
90 90
91 91 A cell here can be read like this:
92 92
93 93 "When a new client pushes a draft changeset (D) to a publishing
94 94 server where it's not present (N), it's marked public on both
95 95 sides (P/P)."
96 96
97 97 Note: old clients behave as a publishing server with draft-only content
98 98 - other people see it as public
99 99 - content is pushed as draft
100 100
101 101 """
102 102
103 103 from __future__ import absolute_import
104 104
105 105 import errno
106 106 import struct
107 107
108 108 from .i18n import _
109 109 from .node import (
110 110 bin,
111 111 hex,
112 112 nullid,
113 113 nullrev,
114 114 short,
115 115 )
116 116 from . import (
117 117 error,
118 118 pycompat,
119 119 smartset,
120 120 txnutil,
121 121 util,
122 122 )
123 123
124 124 _fphasesentry = struct.Struct('>i20s')
125 125
126 126 allphases = public, draft, secret = range(3)
127 127 trackedphases = allphases[1:]
128 128 phasenames = ['public', 'draft', 'secret']
129 mutablephases = tuple(allphases[1:])
129 130
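# Note (illustrative, not in the original file): mutablephases evaluates
# to (draft, secret) == (1, 2); obsolete._mutablerevs() in the other half
# of this diff is the caller this tuple was introduced for.
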
130 131 def _readroots(repo, phasedefaults=None):
131 132 """Read phase roots from disk
132 133
133 134 phasedefaults is a list of fn(repo, roots) callables, which are
134 135 executed if the phase roots file does not exist. When phases are
135 136 being initialized on an existing repository, this could be used to
136 137 set selected changesets' phase to something other than public.
137 138
138 139 Return (roots, dirty) where dirty is true if roots differ from
139 140 what is being stored.
140 141 """
141 142 repo = repo.unfiltered()
142 143 dirty = False
143 144 roots = [set() for i in allphases]
144 145 try:
145 146 f, pending = txnutil.trypending(repo.root, repo.svfs, 'phaseroots')
146 147 try:
147 148 for line in f:
148 149 phase, nh = line.split()
149 150 roots[int(phase)].add(bin(nh))
150 151 finally:
151 152 f.close()
152 153 except IOError as inst:
153 154 if inst.errno != errno.ENOENT:
154 155 raise
155 156 if phasedefaults:
156 157 for f in phasedefaults:
157 158 roots = f(repo, roots)
158 159 dirty = True
159 160 return roots, dirty
160 161
161 162 def binaryencode(phasemapping):
162 163 """encode a 'phase -> nodes' mapping into a binary stream
163 164
164 165 Since phases are integers the mapping is actually a python list:
165 166 [[PUBLIC_HEADS], [DRAFTS_HEADS], [SECRET_HEADS]]
166 167 """
167 168 binarydata = []
168 169 for phase, nodes in enumerate(phasemapping):
169 170 for head in nodes:
170 171 binarydata.append(_fphasesentry.pack(phase, head))
171 172 return ''.join(binarydata)
172 173
173 174 def binarydecode(stream):
174 175 """decode a binary stream into a 'phase -> nodes' mapping
175 176
176 177 Since phases are integers the mapping is actually a python list."""
177 178 headsbyphase = [[] for i in allphases]
178 179 entrysize = _fphasesentry.size
179 180 while True:
180 181 entry = stream.read(entrysize)
181 182 if len(entry) < entrysize:
182 183 if entry:
183 184 raise error.Abort(_('bad phase-heads stream'))
184 185 break
185 186 phase, node = _fphasesentry.unpack(entry)
186 187 headsbyphase[phase].append(node)
187 188 return headsbyphase
188 189
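# Illustrative round-trip (a sketch; uses an in-memory stream, and each
# packed entry is _fphasesentry.size == 24 bytes):
#
#   >>> import io
#   >>> heads = [[], ['x' * 20], []]        # one draft head
#   >>> binarydecode(io.BytesIO(binaryencode(heads))) == heads
#   True
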
189 190 def _trackphasechange(data, rev, old, new):
190 191 """add a phase move the <data> dictionnary
191 192
192 193 If data is None, nothing happens.
193 194 """
194 195 if data is None:
195 196 return
196 197 existing = data.get(rev)
197 198 if existing is not None:
198 199 old = existing[0]
199 200 data[rev] = (old, new)
200 201
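# Illustrative behavior (a sketch): two successive moves of the same rev
# collapse into one entry that preserves the original "old" phase:
#
#   >>> moves = {}
#   >>> _trackphasechange(moves, 7, public, draft)
#   >>> _trackphasechange(moves, 7, draft, secret)
#   >>> moves[7] == (public, secret)
#   True
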
201 202 class phasecache(object):
202 203 def __init__(self, repo, phasedefaults, _load=True):
203 204 if _load:
204 205 # Cheap trick to allow shallow-copy without copy module
205 206 self.phaseroots, self.dirty = _readroots(repo, phasedefaults)
206 207 self._loadedrevslen = 0
207 208 self._phasesets = None
208 209 self.filterunknown(repo)
209 210 self.opener = repo.svfs
210 211
211 212 def getrevset(self, repo, phases, subset=None):
212 213 """return a smartset for the given phases"""
213 214 self.loadphaserevs(repo) # ensure phase's sets are loaded
214 215 phases = set(phases)
215 216 if public not in phases:
216 217 # fast path: _phasesets contains the interesting sets,
217 218 # might only need a union and post-filtering.
218 219 if len(phases) == 1:
219 220 [p] = phases
220 221 revs = self._phasesets[p]
221 222 else:
222 223 revs = set.union(*[self._phasesets[p] for p in phases])
223 224 if repo.changelog.filteredrevs:
224 225 revs = revs - repo.changelog.filteredrevs
225 226 if subset is None:
226 227 return smartset.baseset(revs)
227 228 else:
228 229 return subset & smartset.baseset(revs)
229 230 else:
230 231 phases = set(allphases).difference(phases)
231 232 if not phases:
232 233 return smartset.fullreposet(repo)
233 234 if len(phases) == 1:
234 235 [p] = phases
235 236 revs = self._phasesets[p]
236 237 else:
237 238 revs = set.union(*[self._phasesets[p] for p in phases])
238 239 if subset is None:
239 240 subset = smartset.fullreposet(repo)
240 241 if not revs:
241 242 return subset
242 243 return subset.filter(lambda r: r not in revs)
243 244
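# Illustrative usage (a sketch), as in obsolete._mutablerevs() after this
# change:
#
#   repo._phasecache.getrevset(repo, mutablephases)
#
# returns a smartset containing every draft and secret revision.
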
244 245 def copy(self):
245 246 # Shallow copy meant to ensure isolation in
246 247 # advance/retractboundary(), nothing more.
247 248 ph = self.__class__(None, None, _load=False)
248 249 ph.phaseroots = self.phaseroots[:]
249 250 ph.dirty = self.dirty
250 251 ph.opener = self.opener
251 252 ph._loadedrevslen = self._loadedrevslen
252 253 ph._phasesets = self._phasesets
253 254 return ph
254 255
255 256 def replace(self, phcache):
256 257 """replace all values in 'self' with content of phcache"""
257 258 for a in ('phaseroots', 'dirty', 'opener', '_loadedrevslen',
258 259 '_phasesets'):
259 260 setattr(self, a, getattr(phcache, a))
260 261
261 262 def _getphaserevsnative(self, repo):
262 263 repo = repo.unfiltered()
263 264 nativeroots = []
264 265 for phase in trackedphases:
265 266 nativeroots.append(pycompat.maplist(repo.changelog.rev,
266 267 self.phaseroots[phase]))
267 268 return repo.changelog.computephases(nativeroots)
268 269
269 270 def _computephaserevspure(self, repo):
270 271 repo = repo.unfiltered()
271 272 cl = repo.changelog
272 273 self._phasesets = [set() for phase in allphases]
273 274 roots = pycompat.maplist(cl.rev, self.phaseroots[secret])
274 275 if roots:
275 276 ps = set(cl.descendants(roots))
276 277 for root in roots:
277 278 ps.add(root)
278 279 self._phasesets[secret] = ps
279 280 roots = pycompat.maplist(cl.rev, self.phaseroots[draft])
280 281 if roots:
281 282 ps = set(cl.descendants(roots))
282 283 for root in roots:
283 284 ps.add(root)
284 285 ps.difference_update(self._phasesets[secret])
285 286 self._phasesets[draft] = ps
286 287 self._loadedrevslen = len(cl)
287 288
288 289 def loadphaserevs(self, repo):
289 290 """ensure phase information is loaded in the object"""
290 291 if self._phasesets is None:
291 292 try:
292 293 res = self._getphaserevsnative(repo)
293 294 self._loadedrevslen, self._phasesets = res
294 295 except AttributeError:
295 296 self._computephaserevspure(repo)
296 297
297 298 def invalidate(self):
298 299 self._loadedrevslen = 0
299 300 self._phasesets = None
300 301
301 302 def phase(self, repo, rev):
302 303 # We need a repo argument here to be able to build _phasesets
303 304 # if necessary. The repository instance is not stored in
304 305 # phasecache to avoid reference cycles. The changelog instance
305 306 # is not stored because it is a filecache() property and can
306 307 # be replaced without us being notified.
307 308 if rev == nullrev:
308 309 return public
309 310 if rev < nullrev:
310 311 raise ValueError(_('cannot lookup negative revision'))
311 312 if rev >= self._loadedrevslen:
312 313 self.invalidate()
313 314 self.loadphaserevs(repo)
314 315 for phase in trackedphases:
315 316 if rev in self._phasesets[phase]:
316 317 return phase
317 318 return public
318 319
319 320 def write(self):
320 321 if not self.dirty:
321 322 return
322 323 f = self.opener('phaseroots', 'w', atomictemp=True, checkambig=True)
323 324 try:
324 325 self._write(f)
325 326 finally:
326 327 f.close()
327 328
328 329 def _write(self, fp):
329 330 for phase, roots in enumerate(self.phaseroots):
330 331 for h in sorted(roots):
331 332 fp.write('%i %s\n' % (phase, hex(h)))
332 333 self.dirty = False
333 334
334 335 def _updateroots(self, phase, newroots, tr):
335 336 self.phaseroots[phase] = newroots
336 337 self.invalidate()
337 338 self.dirty = True
338 339
339 340 tr.addfilegenerator('phase', ('phaseroots',), self._write)
340 341 tr.hookargs['phases_moved'] = '1'
341 342
342 343 def registernew(self, repo, tr, targetphase, nodes):
343 344 repo = repo.unfiltered()
344 345 self._retractboundary(repo, tr, targetphase, nodes)
345 346 if tr is not None and 'phases' in tr.changes:
346 347 phasetracking = tr.changes['phases']
347 348 torev = repo.changelog.rev
348 349 phase = self.phase
349 350 for n in nodes:
350 351 rev = torev(n)
351 352 revphase = phase(repo, rev)
352 353 _trackphasechange(phasetracking, rev, None, revphase)
353 354 repo.invalidatevolatilesets()
354 355
355 356 def advanceboundary(self, repo, tr, targetphase, nodes):
356 357 """Set all 'nodes' to phase 'targetphase'
357 358
358 359 Nodes with a phase lower than 'targetphase' are not affected.
359 360 """
360 361 # Be careful to preserve shallow-copied values: do not update
361 362 # phaseroots values, replace them.
362 363 if tr is None:
363 364 phasetracking = None
364 365 else:
365 366 phasetracking = tr.changes.get('phases')
366 367
367 368 repo = repo.unfiltered()
368 369
369 370 delroots = [] # set of roots deleted by this path
370 371 for phase in xrange(targetphase + 1, len(allphases)):
371 372 # filter nodes that are not in a compatible phase already
372 373 nodes = [n for n in nodes
373 374 if self.phase(repo, repo[n].rev()) >= phase]
374 375 if not nodes:
375 376 break # no roots to move anymore
376 377
377 378 olds = self.phaseroots[phase]
378 379
379 380 affected = repo.revs('%ln::%ln', olds, nodes)
380 381 for r in affected:
381 382 _trackphasechange(phasetracking, r, self.phase(repo, r),
382 383 targetphase)
383 384
384 385 roots = set(ctx.node() for ctx in repo.set(
385 386 'roots((%ln::) - %ld)', olds, affected))
386 387 if olds != roots:
387 388 self._updateroots(phase, roots, tr)
388 389 # some roots may need to be declared for lower phases
389 390 delroots.extend(olds - roots)
390 391 # declare deleted roots in the target phase
391 392 if targetphase != 0:
392 393 self._retractboundary(repo, tr, targetphase, delroots)
393 394 repo.invalidatevolatilesets()
394 395
395 396 def retractboundary(self, repo, tr, targetphase, nodes):
396 397 oldroots = self.phaseroots[:targetphase + 1]
397 398 if tr is None:
398 399 phasetracking = None
399 400 else:
400 401 phasetracking = tr.changes.get('phases')
401 402 repo = repo.unfiltered()
402 403 if (self._retractboundary(repo, tr, targetphase, nodes)
403 404 and phasetracking is not None):
404 405
405 406 # find the affected revisions
406 407 new = self.phaseroots[targetphase]
407 408 old = oldroots[targetphase]
408 409 affected = set(repo.revs('(%ln::) - (%ln::)', new, old))
409 410
410 411 # find the phase of the affected revisions
411 412 for phase in xrange(targetphase, -1, -1):
412 413 if phase:
413 414 roots = oldroots[phase]
414 415 revs = set(repo.revs('%ln::%ld', roots, affected))
415 416 affected -= revs
416 417 else: # public phase
417 418 revs = affected
418 419 for r in revs:
419 420 _trackphasechange(phasetracking, r, phase, targetphase)
420 421 repo.invalidatevolatilesets()
421 422
422 423 def _retractboundary(self, repo, tr, targetphase, nodes):
423 424 # Be careful to preserve shallow-copied values: do not update
424 425 # phaseroots values, replace them.
425 426
426 427 repo = repo.unfiltered()
427 428 currentroots = self.phaseroots[targetphase]
428 429 finalroots = oldroots = set(currentroots)
429 430 newroots = [n for n in nodes
430 431 if self.phase(repo, repo[n].rev()) < targetphase]
431 432 if newroots:
432 433
433 434 if nullid in newroots:
434 435 raise error.Abort(_('cannot change null revision phase'))
435 436 currentroots = currentroots.copy()
436 437 currentroots.update(newroots)
437 438
438 439 # Only compute new roots for revs above the roots that are being
439 440 # retracted.
440 441 minnewroot = min(repo[n].rev() for n in newroots)
441 442 aboveroots = [n for n in currentroots
442 443 if repo[n].rev() >= minnewroot]
443 444 updatedroots = repo.set('roots(%ln::)', aboveroots)
444 445
445 446 finalroots = set(n for n in currentroots if repo[n].rev() <
446 447 minnewroot)
447 448 finalroots.update(ctx.node() for ctx in updatedroots)
448 449 if finalroots != oldroots:
449 450 self._updateroots(targetphase, finalroots, tr)
450 451 return True
451 452 return False
452 453
453 454 def filterunknown(self, repo):
454 455 """remove unknown nodes from the phase boundary
455 456
456 457 Nothing is lost as unknown nodes only hold data for their descendants.
457 458 """
458 459 filtered = False
459 460 nodemap = repo.changelog.nodemap # to filter unknown nodes
460 461 for phase, nodes in enumerate(self.phaseroots):
461 462 missing = sorted(node for node in nodes if node not in nodemap)
462 463 if missing:
463 464 for mnode in missing:
464 465 repo.ui.debug(
465 466 'removing unknown node %s from %i-phase boundary\n'
466 467 % (short(mnode), phase))
467 468 nodes.symmetric_difference_update(missing)
468 469 filtered = True
469 470 if filtered:
470 471 self.dirty = True
471 472 # filterunknown is called by repo.destroyed; we may have no changes in
472 473 # roots, but the _phasesets contents are certainly invalid (or at least
473 474 # we have no proper way to check that). related to issue 3858.
474 475 #
475 476 # The other caller is __init__, which has no _phasesets initialized
476 477 # anyway. If this changes we should consider adding a dedicated
477 478 # "destroyed" function to phasecache or a proper cache key mechanism
478 479 # (see the branchmap one)
479 480 self.invalidate()
480 481
481 482 def advanceboundary(repo, tr, targetphase, nodes):
482 483 """Add nodes to a phase changing other nodes phases if necessary.
483 484
484 485 This function moves the boundary *forward*; this means that all nodes
485 486 are set in the target phase or kept in a *lower* phase.
486 487 
487 488 Simplifies the boundary to contain phase roots only."""
488 489 phcache = repo._phasecache.copy()
489 490 phcache.advanceboundary(repo, tr, targetphase, nodes)
490 491 repo._phasecache.replace(phcache)
491 492
492 493 def retractboundary(repo, tr, targetphase, nodes):
493 494 """Set nodes back to a phase changing other nodes phases if
494 495 necessary.
495 496
496 497 This function moves the boundary *backward*; this means that all nodes
497 498 are set in the target phase or kept in a *higher* phase.
498 499 
499 500 Simplifies the boundary to contain phase roots only."""
500 501 phcache = repo._phasecache.copy()
501 502 phcache.retractboundary(repo, tr, targetphase, nodes)
502 503 repo._phasecache.replace(phcache)
503 504
504 505 def registernew(repo, tr, targetphase, nodes):
505 506 """register a new revision and its phase
506 507
507 508 Code adding revisions to the repository should use this function to
508 509 set new changesets in their target phase (or higher).
509 510 """
510 511 phcache = repo._phasecache.copy()
511 512 phcache.registernew(repo, tr, targetphase, nodes)
512 513 repo._phasecache.replace(phcache)
513 514
514 515 def listphases(repo):
515 516 """List phases root for serialization over pushkey"""
516 517 # Use ordered dictionary so behavior is deterministic.
517 518 keys = util.sortdict()
518 519 value = '%i' % draft
519 520 cl = repo.unfiltered().changelog
520 521 for root in repo._phasecache.phaseroots[draft]:
521 522 if repo._phasecache.phase(repo, cl.rev(root)) <= draft:
522 523 keys[hex(root)] = value
523 524
524 525 if repo.publishing():
525 526 # Add an extra data to let remote know we are a publishing
526 527 # repo. Publishing repo can't just pretend they are old repo.
527 528 # When pushing to a publishing repo, the client still need to
528 529 # push phase boundary
529 530 #
530 531 # Push do not only push changeset. It also push phase data.
531 532 # New phase data may apply to common changeset which won't be
532 533 # push (as they are common). Here is a very simple example:
533 534 #
534 535 # 1) repo A push changeset X as draft to repo B
535 536 # 2) repo B make changeset X public
536 537 # 3) repo B push to repo A. X is not pushed but the data that
537 538 # X as now public should
538 539 #
539 540 # The server can't handle it on it's own as it has no idea of
540 541 # client phase data.
541 542 keys['publishing'] = 'True'
542 543 return keys
543 544
544 545 def pushphase(repo, nhex, oldphasestr, newphasestr):
545 546 """Update the phase of a single node, as received over pushkey"""
546 547 repo = repo.unfiltered()
547 548 with repo.lock():
548 549 currentphase = repo[nhex].phase()
549 550 newphase = abs(int(newphasestr)) # let's avoid negative index surprise
550 551 oldphase = abs(int(oldphasestr)) # let's avoid negative index surprise
551 552 if currentphase == oldphase and newphase < oldphase:
552 553 with repo.transaction('pushkey-phase') as tr:
553 554 advanceboundary(repo, tr, newphase, [bin(nhex)])
554 555 return True
555 556 elif currentphase == newphase:
556 557 # raced, but got correct result
557 558 return True
558 559 else:
559 560 return False
560 561
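Over the wire this function sits behind the 'phases' pushkey namespace; a client publishing one node effectively performs something like the following sketch, where `remote` is a peer object and `nhex` a full hex node id (both assumed):

    # old value '1' (draft), new value '0' (public); the server side ends
    # up in pushphase() above, which advances the boundary if nothing raced
    ok = remote.pushkey('phases', nhex, '%d' % draft, '%d' % public)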
561 562 def subsetphaseheads(repo, subset):
562 563 """Finds the phase heads for a subset of history
563 564
564 565 Returns a list indexed by phase number where each item is a list of phase
565 566 head nodes.
566 567 """
567 568 cl = repo.changelog
568 569
569 570 headsbyphase = [[] for i in allphases]
570 571 # No need to keep track of secret phase; any heads in the subset that
571 572 # are not mentioned are implicitly secret.
572 573 for phase in allphases[:-1]:
573 574 revset = "heads(%%ln & %s())" % phasenames[phase]
574 575 headsbyphase[phase] = [cl.node(r) for r in repo.revs(revset, subset)]
575 576 return headsbyphase
576 577
577 578 def updatephases(repo, trgetter, headsbyphase):
578 579 """Updates the repo with the given phase heads"""
579 580 # Now advance phase boundaries of all but secret phase
580 581 #
581 582 # Run the update (and fetch the transaction) only if there are actually
582 583 # things to update; this avoids creating empty transactions for no-ops.
583 584
584 585 for phase in allphases[:-1]:
585 586 revset = '%%ln - %s()' % phasenames[phase]
586 587 heads = [c.node() for c in repo.set(revset, headsbyphase[phase])]
587 588 if heads:
588 589 advanceboundary(repo, trgetter(), phase, heads)
589 590
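These two functions form a round trip: one side summarizes its phase information as per-phase head lists, the other applies them. A sketch, assuming `outgoingnodes` is the subset of nodes being exchanged and `tr` an open transaction on the receiving side:

    headsbyphase = subsetphaseheads(repo, outgoingnodes)
    # ... ship headsbyphase to the other side ...
    updatephases(otherrepo, lambda: tr, headsbyphase)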
590 591 def analyzeremotephases(repo, subset, roots):
591 592 """Compute phase heads and roots in a subset of nodes from a root dict
592 593
593 594 * subset is the heads of the subset
594 595 * roots is a {<nodeid> => phase} mapping. Keys and values are strings.
595 596
596 597 Unknown elements in the input are accepted.
597 598 """
598 599 repo = repo.unfiltered()
599 600 # build list from dictionary
600 601 draftroots = []
601 602 nodemap = repo.changelog.nodemap # to filter unknown nodes
602 603 for nhex, phase in roots.iteritems():
603 604 if nhex == 'publishing': # ignore data related to publish option
604 605 continue
605 606 node = bin(nhex)
606 607 phase = int(phase)
607 608 if phase == public:
608 609 if node != nullid:
609 610 repo.ui.warn(_('ignoring inconsistent public root'
610 611 ' from remote: %s\n') % nhex)
611 612 elif phase == draft:
612 613 if node in nodemap:
613 614 draftroots.append(node)
614 615 else:
615 616 repo.ui.warn(_('ignoring unexpected root from remote: %i %s\n')
616 617 % (phase, nhex))
617 618 # compute heads
618 619 publicheads = newheads(repo, subset, draftroots)
619 620 return publicheads, draftroots
620 621
621 622 class remotephasessummary(object):
622 623 """summarize phase information on the remote side
623 624
624 625 :publishing: True if the remote is publishing
625 626 :publicheads: list of remote public phase heads (nodes)
626 627 :draftheads: list of remote draft phase heads (nodes)
627 628 :draftroots: list of remote draft phase roots (nodes)
628 629 """
629 630
630 631 def __init__(self, repo, remotesubset, remoteroots):
631 632 unfi = repo.unfiltered()
632 633 self._allremoteroots = remoteroots
633 634
634 635 self.publishing = remoteroots.get('publishing', False)
635 636
636 637 ana = analyzeremotephases(repo, remotesubset, remoteroots)
637 638 self.publicheads, self.draftroots = ana
638 639 # Get the list of all draft "heads" revs on the remote
639 640 dheads = unfi.set('heads(%ln::%ln)', self.draftroots, remotesubset)
640 641 self.draftheads = [c.node() for c in dheads]
641 642
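A sketch of how a push path might summarize the remote side; `remote` is assumed to be a peer object and `commonheads` the heads of the common subset:

    remoteroots = remote.listkeys('phases')
    summary = remotephasessummary(repo, commonheads, remoteroots)
    if summary.publishing:
        pass  # everything pushed will become public on the remote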
642 643 def newheads(repo, heads, roots):
643 644 """compute the new heads of a subset minus another
644 645
645 646 * `heads`: defines the first subset
646 647 * `roots`: defines the second one, which we subtract from the first"""
647 648 repo = repo.unfiltered()
648 649 revset = repo.set('heads((%ln + parents(%ln)) - (%ln::%ln))',
649 650 heads, roots, roots, heads)
650 651 return [c.node() for c in revset]
651 652
652 653
653 654 def newcommitphase(ui):
654 655 """helper to get the target phase of a new commit
655 656
656 657 Handles all possible values of the phases.new-commit option.
657 658
658 659 """
659 660 v = ui.config('phases', 'new-commit')
660 661 try:
661 662 return phasenames.index(v)
662 663 except ValueError:
663 664 try:
664 665 return int(v)
665 666 except ValueError:
666 667 msg = _("phases.new-commit: not a valid phase name ('%s')")
667 668 raise error.ConfigError(msg % v)
668 669
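Both spellings of the option resolve through this helper; a quick sketch:

    # [phases]
    # new-commit = secret        (or equivalently: new-commit = 2)
    ui.setconfig('phases', 'new-commit', 'secret')
    assert newcommitphase(ui) == secret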
669 670 def hassecret(repo):
670 671 """utility function that checks if a repo has any secret changesets."""
671 672 return bool(repo._phasecache.phaseroots[2])
672 673
673 674 def preparehookargs(node, old, new):
674 675 if old is None:
675 676 old = ''
676 677 else:
677 678 old = phasenames[old]
678 679 return {'node': node,
679 680 'oldphase': old,
680 681 'phase': phasenames[new]}
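These arguments feed phase-related hooks; a minimal sketch, assuming `n` is a hex node id:

    args = preparehookargs(n, draft, public)
    # -> {'node': n, 'oldphase': 'draft', 'phase': 'public'}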
@@ -1,269 +1,268 b''
1 1 # repoview.py - Filtered view of a localrepo object
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import copy
12 12 import weakref
13 13
14 14 from .node import nullrev
15 15 from . import (
16 16 obsolete,
17 17 phases,
18 18 pycompat,
19 19 tags as tagsmod,
20 20 )
21 21
22 22 def hideablerevs(repo):
23 23 """Revision candidates to be hidden
24 24
25 25 This is a standalone function to allow extensions to wrap it.
26 26
27 27 Because we use the set of immutable changesets as a fallback subset in
28 28 branchmap (see mercurial.branchmap.subsettable), you cannot set "public"
29 29 changesets as "hideable". Doing so would break multiple code assertions and
30 30 lead to crashes."""
31 31 return obsolete.getrevs(repo, 'obsolete')
32 32
33 33 def pinnedrevs(repo):
34 34 """revisions blocking hidden changesets from being filtered
35 35 """
36 36
37 37 cl = repo.changelog
38 38 pinned = set()
39 39 pinned.update([par.rev() for par in repo[None].parents()])
40 40 pinned.update([cl.rev(bm) for bm in repo._bookmarks.values()])
41 41
42 42 tags = {}
43 43 tagsmod.readlocaltags(repo.ui, repo, tags, {})
44 44 if tags:
45 45 rev, nodemap = cl.rev, cl.nodemap
46 46 pinned.update(rev(t[0]) for t in tags.values() if t[0] in nodemap)
47 47 return pinned
48 48
49 49
50 50 def _revealancestors(pfunc, hidden, revs):
51 51 """reveals contiguous chains of hidden ancestors of 'revs' by removing them
52 52 from 'hidden'
53 53
54 54 - pfunc(r): a function returning the parents of 'r',
55 55 - hidden: the (preliminary) hidden revisions, to be updated
56 56 - revs: an iterable of revnums,
57 57
58 58 (Ancestors are revealed exclusively, i.e. the elements in 'revs' are
59 59 *not* revealed)
60 60 """
61 61 stack = list(revs)
62 62 while stack:
63 63 for p in pfunc(stack.pop()):
64 64 if p != nullrev and p in hidden:
65 65 hidden.remove(p)
66 66 stack.append(p)
67 67
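A toy run over a linear history 0-1-2-3 (a sketch; the `parents` dict stands in for changelog.parentrevs):

    parents = {0: (nullrev, nullrev), 1: (0, nullrev),
               2: (1, nullrev), 3: (2, nullrev)}
    hidden = {1, 2, 3}
    _revealancestors(parents.__getitem__, hidden, [3])
    assert hidden == {3}  # 1 and 2 were revealed; 3 itself stays hidden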
68 68 def computehidden(repo, visibilityexceptions=None):
69 69 """compute the set of hidden revisions to filter
70 70
71 71 During most operations, hidden revisions should be filtered."""
72 72 assert not repo.changelog.filteredrevs
73 73
74 74 hidden = hideablerevs(repo)
75 75 if hidden:
76 76 hidden = set(hidden - pinnedrevs(repo))
77 77 if visibilityexceptions:
78 78 hidden -= visibilityexceptions
79 79 pfunc = repo.changelog.parentrevs
80 mutablephases = (phases.draft, phases.secret)
81 mutable = repo._phasecache.getrevset(repo, mutablephases)
80 mutable = repo._phasecache.getrevset(repo, phases.mutablephases)
82 81
83 82 visible = mutable - hidden
84 83 _revealancestors(pfunc, hidden, visible)
85 84 return frozenset(hidden)
86 85
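This hunk is the change the commit is about: the locally-built tuple is replaced by a module-level `phases.mutablephases`. Presumably the two are equivalent, i.e. the following invariant holds after this changeset:

    # expected invariant (a check, not library code):
    assert phases.mutablephases == (phases.draft, phases.secret)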
87 86 def computeunserved(repo, visibilityexceptions=None):
88 87 """compute the set of revisions to filter when the repo is used as a server
89 88
90 89 Secret and hidden changesets should not pretend to be present."""
91 90 assert not repo.changelog.filteredrevs
92 91 # fast path in the simple case, to avoid the impact of non-optimised code
93 92 hiddens = filterrevs(repo, 'visible')
94 93 if phases.hassecret(repo):
95 94 secrets = frozenset(repo._phasecache.getrevset(repo, (phases.secret,)))
96 95 return frozenset(hiddens | secrets)
97 96 else:
98 97 return hiddens
99 98
100 99 def computemutable(repo, visibilityexceptions=None):
101 100 assert not repo.changelog.filteredrevs
102 101 # fast check to avoid revset call on huge repo
103 102 if any(repo._phasecache.phaseroots[1:]):
104 103 getphase = repo._phasecache.phase
105 104 maymutable = filterrevs(repo, 'base')
106 105 return frozenset(r for r in maymutable if getphase(repo, r))
107 106 return frozenset()
108 107
109 108 def computeimpactable(repo, visibilityexceptions=None):
110 109 """Everything impactable by mutable revisions
111 110
112 111 The immutable filter still has some chance of being invalidated. This will
113 112 happen when:
114 113
115 114 - you garbage collect hidden changesets,
116 115 - the public phase is moved backward,
117 116 - something is changed in the filtering (this could be fixed)
118 117
119 118 This filters out any mutable changeset, and any public changeset that may
120 119 be impacted by something happening to a mutable revision.
121 120
122 121 This is achieved by filtering out everything with a revision number equal
123 122 to or higher than that of the first mutable changeset."""
124 123 assert not repo.changelog.filteredrevs
125 124 cl = repo.changelog
126 125 firstmutable = len(cl)
127 126 for roots in repo._phasecache.phaseroots[1:]:
128 127 if roots:
129 128 firstmutable = min(firstmutable, min(cl.rev(r) for r in roots))
130 129 # protect from nullrev root
131 130 firstmutable = max(0, firstmutable)
132 131 return frozenset(xrange(firstmutable, len(cl)))
133 132
134 133 # function to compute filtered set
135 134 #
136 135 # When adding a new filter you MUST update the table at:
137 136 # mercurial.branchmap.subsettable
138 137 # Otherwise your filter will have to recompute all its branches cache
139 138 # from scratch (very slow).
140 139 filtertable = {'visible': computehidden,
141 140 'visible-hidden': computehidden,
142 141 'served': computeunserved,
143 142 'immutable': computemutable,
144 143 'base': computeimpactable}
145 144
146 145 def filterrevs(repo, filtername, visibilityexceptions=None):
147 146 """returns the set of filtered revisions for this filter name
148 147
149 148 visibilityexceptions is a set of revs which are exceptions to the
150 149 hidden state and must be visible. They are dynamic, hence we should
151 150 not cache the result"""
152 151 if filtername not in repo.filteredrevcache:
153 152 func = filtertable[filtername]
154 153 if visibilityexceptions:
155 154 return func(repo.unfiltered(), visibilityexceptions)
156 155 repo.filteredrevcache[filtername] = func(repo.unfiltered())
157 156 return repo.filteredrevcache[filtername]
158 157
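Typical use, returning the cached frozenset of revs hidden from a given view (a sketch):

    hiddenrevs = filterrevs(repo, 'visible')   # computed via filtertable above
    unserved = filterrevs(repo, 'served')      # also filters secret revs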
159 158 class repoview(object):
160 159 """Provide a read/write view of a repo through a filtered changelog
161 160
162 161 This object is used to access a filtered version of a repository without
163 162 altering the original repository object itself. We cannot alter the
164 163 original object for two main reasons:
165 164 - It prevents the use of a repo with multiple filters at the same time. In
166 165 particular when multiple threads are involved.
167 166 - It makes scope of the filtering harder to control.
168 167
169 168 This object behaves very much like the original repository. All attribute
170 169 operations are done on the original repository:
171 170 - An access to `repoview.someattr` actually returns `repo.someattr`,
172 171 - A write to `repoview.someattr` actually sets value of `repo.someattr`,
173 172 - A deletion of `repoview.someattr` actually drops `someattr`
174 173 from `repo.__dict__`.
175 174
176 175 The only exception is the `changelog` property. It is overridden to return
177 176 a (surface) copy of `repo.changelog` with some revisions filtered. The
178 177 `filtername` attribute of the view controls the revisions that need to be
179 178 filtered. (the fact the changelog is copied is an implementation detail).
180 179
181 180 Unlike attributes, this object intercepts all method calls. This means that
182 181 all methods are run on the `repoview` object with the filtered `changelog`
183 182 property. For this purpose the simple `repoview` class must be mixed with
184 183 the actual class of the repository. This ensures that the resulting
185 184 `repoview` object has the very same methods as the repo object. This
186 185 leads to the property below.
187 186
188 187 repoview.method() --> repo.__class__.method(repoview)
189 188
190 189 The inheritance has to be done dynamically because `repo` can be of any
191 190 subclass of `localrepo`, e.g. `bundlerepo` or `statichttprepo`.
192 191 """
193 192
194 193 def __init__(self, repo, filtername, visibilityexceptions=None):
195 194 object.__setattr__(self, r'_unfilteredrepo', repo)
196 195 object.__setattr__(self, r'filtername', filtername)
197 196 object.__setattr__(self, r'_clcachekey', None)
198 197 object.__setattr__(self, r'_clcache', None)
199 198 # revs which are exceptions and must not be hidden
200 199 object.__setattr__(self, r'_visibilityexceptions',
201 200 visibilityexceptions)
202 201
203 202 # not a propertycache on purpose; we shall implement a proper cache later
204 203 @property
205 204 def changelog(self):
206 205 """return a filtered version of the changelog
207 206
208 207 this changelog must not be used for writing"""
209 208 # some cache may be implemented later
210 209 unfi = self._unfilteredrepo
211 210 unfichangelog = unfi.changelog
212 211 # bypass call to changelog.method
213 212 unfiindex = unfichangelog.index
214 213 unfilen = len(unfiindex) - 1
215 214 unfinode = unfiindex[unfilen - 1][7]
216 215
217 216 revs = filterrevs(unfi, self.filtername, self._visibilityexceptions)
218 217 cl = self._clcache
219 218 newkey = (unfilen, unfinode, hash(revs), unfichangelog._delayed)
220 219 # if cl.index is not unfiindex, unfi.changelog would be
221 220 # recreated, and our clcache refers to garbage object
222 221 if (cl is not None and
223 222 (cl.index is not unfiindex or newkey != self._clcachekey)):
224 223 cl = None
225 224 # could have been made None by the previous if
226 225 if cl is None:
227 226 cl = copy.copy(unfichangelog)
228 227 cl.filteredrevs = revs
229 228 object.__setattr__(self, r'_clcache', cl)
230 229 object.__setattr__(self, r'_clcachekey', newkey)
231 230 return cl
232 231
233 232 def unfiltered(self):
234 233 """Return an unfiltered version of a repo"""
235 234 return self._unfilteredrepo
236 235
237 236 def filtered(self, name, visibilityexceptions=None):
238 237 """Return a filtered version of a repository"""
239 238 if name == self.filtername and not visibilityexceptions:
240 239 return self
241 240 return self.unfiltered().filtered(name, visibilityexceptions)
242 241
243 242 def __repr__(self):
244 243 return r'<%s:%s %r>' % (self.__class__.__name__,
245 244 pycompat.sysstr(self.filtername),
246 245 self.unfiltered())
247 246
248 247 # every attribute access is forwarded to the proxied repo
249 248 def __getattr__(self, attr):
250 249 return getattr(self._unfilteredrepo, attr)
251 250
252 251 def __setattr__(self, attr, value):
253 252 return setattr(self._unfilteredrepo, attr, value)
254 253
255 254 def __delattr__(self, attr):
256 255 return delattr(self._unfilteredrepo, attr)
257 256
258 257 # Python <3.4 easily leaks types via __mro__. See
259 258 # https://bugs.python.org/issue17950. We cache dynamically created types
260 259 # so they won't be leaked on every invocation of repo.filtered().
261 260 _filteredrepotypes = weakref.WeakKeyDictionary()
262 261
263 262 def newtype(base):
264 263 """Create a new type with the repoview mixin and the given base class"""
265 264 if base not in _filteredrepotypes:
266 265 class filteredrepo(repoview, base):
267 266 pass
268 267 _filteredrepotypes[base] = filteredrepo
269 268 return _filteredrepotypes[base]
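Building a filtered view by hand then boils down to the following sketch (real callers go through `repo.filtered(name)`):

    unfi = repo.unfiltered()
    proxycls = newtype(unfi.__class__)
    view = proxycls(unfi, 'visible')   # repoview mixed with unfi's class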