##// END OF EJS Templates
obsstore: make the readonly attribute accessible...
Pierre-Yves David -
r26684:74ff350c default
parent child Browse files
Show More
@@ -1,1256 +1,1263 b''
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 building new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "precursor" and possible
24 24 replacements are called "successors". Markers that used changeset X as
25 25 a precursor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
27 27 a successor are called "precursor markers of Y" because they hold
28 28 information about the precursors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
50 50 case. If two independent operations rewrite the same changeset A in to A' and
51 51 A'', we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53
54 54 (A, (B,)) and (A, (C,))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are stored in an append-only file stored in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
66 66 The header is followed by the markers. Marker format depends on the version. See
67 67 comment associated with each format for details.
68 68
69 69 """
70 70 import errno, struct
71 71 import util, base85, node, parsers, error
72 72 import phases
73 73 from i18n import _
74 74
75 75 _pack = struct.pack
76 76 _unpack = struct.unpack
77 77 _calcsize = struct.calcsize
78 78 propertycache = util.propertycache
79 79
80 80 # the obsolete feature is not mature enough to be enabled by default.
81 81 # you have to rely on a third party extension to enable this.
82 82 _enabled = False
83 83
84 84 # Options for obsolescence
85 85 createmarkersopt = 'createmarkers'
86 86 allowunstableopt = 'allowunstable'
87 87 exchangeopt = 'exchange'
88 88
89 89 ### obsolescence marker flag
90 90
91 91 ## bumpedfix flag
92 92 #
93 93 # When a changeset A' succeed to a changeset A which became public, we call A'
94 94 # "bumped" because it's a successors of a public changesets
95 95 #
96 96 # o A' (bumped)
97 97 # |`:
98 98 # | o A
99 99 # |/
100 100 # o Z
101 101 #
102 102 # The way to solve this situation is to create a new changeset Ad as children
103 103 # of A. This changeset have the same content than A'. So the diff from A to A'
104 104 # is the same than the diff from A to Ad. Ad is marked as a successors of A'
105 105 #
106 106 # o Ad
107 107 # |`:
108 108 # | x A'
109 109 # |'|
110 110 # o | A
111 111 # |/
112 112 # o Z
113 113 #
114 114 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
115 115 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
116 116 # This flag mean that the successors express the changes between the public and
117 117 # bumped version and fix the situation, breaking the transitivity of
118 118 # "bumped" here.
119 119 bumpedfix = 1
120 120 usingsha256 = 2
121 121
122 122 ## Parsing and writing of version "0"
123 123 #
124 124 # The header is followed by the markers. Each marker is made of:
125 125 #
126 126 # - 1 uint8 : number of new changesets "N", can be zero.
127 127 #
128 128 # - 1 uint32: metadata size "M" in bytes.
129 129 #
130 130 # - 1 byte: a bit field. It is reserved for flags used in common
131 131 # obsolete marker operations, to avoid repeated decoding of metadata
132 132 # entries.
133 133 #
134 134 # - 20 bytes: obsoleted changeset identifier.
135 135 #
136 136 # - N*20 bytes: new changesets identifiers.
137 137 #
138 138 # - M bytes: metadata as a sequence of nul-terminated strings. Each
139 139 # string contains a key and a value, separated by a colon ':', without
140 140 # additional encoding. Keys cannot contain '\0' or ':' and values
141 141 # cannot contain '\0'.
_fm0version = 0
# fixed part of a fm0 marker: uint8 successor count, uint32 metadata size,
# uint8 flags, then the 20-byte precursor node
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)
147 147
def _fm0readmarkers(data, off):
    """Decode version-0 markers from ``data`` starting at offset ``off``.

    Yields (prec, sucs, flags, metadata, date, parents) tuples.  Iteration
    stops as soon as the remaining bytes cannot hold a full fixed-size
    record.
    """
    # Loop on markers
    l = len(data)
    while off + _fm0fsize <= l:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            # fm0 has no dedicated date field: it is smuggled through the
            # metadata as "<unixtime> <tz offset>"
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            # 'p0' records "parents explicitly known to be empty"
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)
199 199
def _fm0encodeonemarker(marker):
    """Return the version-0 binary encoding of a single marker tuple."""
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    meta = dict(metadata)
    # fm0 has no dedicated date field; store it inside the metadata
    time, tz = date
    meta['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            meta['p0'] = ''
        for idx, parent in enumerate(parents):
            meta['p%i' % (idx + 1)] = node.hex(parent)
    encodedmeta = _fm0encodemeta(meta)
    nbsuc = len(sucs)
    fmt = _fm0fixed + _fm0node * nbsuc
    fields = [nbsuc, len(encodedmeta), flags, pre]
    fields.extend(sucs)
    return _pack(fmt, *fields) + encodedmeta
219 219
220 220 def _fm0encodemeta(meta):
221 221 """Return encoded metadata string to string mapping.
222 222
223 223 Assume no ':' in key and no '\0' in both key and value."""
224 224 for key, value in meta.iteritems():
225 225 if ':' in key or '\0' in key:
226 226 raise ValueError("':' and '\0' are forbidden in metadata key'")
227 227 if '\0' in value:
228 228 raise ValueError("':' is forbidden in metadata value'")
229 229 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
230 230
231 231 def _fm0decodemeta(data):
232 232 """Return string to string dictionary from encoded version."""
233 233 d = {}
234 234 for l in data.split('\0'):
235 235 if l:
236 236 key, value = l.split(':')
237 237 d[key] = value
238 238 return d
239 239
240 240 ## Parsing and writing of version "1"
241 241 #
242 242 # The header is followed by the markers. Each marker is made of:
243 243 #
244 244 # - uint32: total size of the marker (including this field)
245 245 #
246 246 # - float64: date in seconds since epoch
247 247 #
248 248 # - int16: timezone offset in minutes
249 249 #
250 250 # - uint16: a bit field. It is reserved for flags used in common
251 251 # obsolete marker operations, to avoid repeated decoding of metadata
252 252 # entries.
253 253 #
254 254 # - uint8: number of successors "N", can be zero.
255 255 #
256 256 # - uint8: number of parents "P", can be zero.
257 257 #
258 258 # 0: parents data stored but no parent,
259 259 # 1: one parent stored,
260 260 # 2: two parents stored,
261 261 # 3: no parent data stored
262 262 #
263 263 # - uint8: number of metadata entries M
264 264 #
265 265 # - 20 or 32 bytes: precursor changeset identifier.
266 266 #
267 267 # - N*(20 or 32) bytes: successors changesets identifiers.
268 268 #
269 269 # - P*(20 or 32) bytes: parents of the precursors changesets.
270 270 #
271 271 # - M*(uint8, uint8): size of all metadata entries (key and value)
272 272 #
273 273 # - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
# fixed part of a fm1 marker: uint32 total size, float64 date, int16 tz
# (minutes), uint16 flags, uint8 successor count, uint8 parent count,
# uint8 metadata count, 20-byte precursor node
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
# parent-count value meaning "no parent data recorded"
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize('BB')
286 286
def _fm1purereadmarkers(data, off):
    """Pure Python decoder for version-1 markers.

    Yields (prec, sucs, flags, metadata, date, parents) tuples decoded from
    ``data`` starting at offset ``off``.  ``_fm1readmarkers`` prefers the C
    implementation when available.
    """
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    stop = len(data) - _fm1fsize
    ufixed = struct.Struct(_fm1fixed).unpack

    while off <= stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata: first the (key size, value size) pairs...
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        # ...then the key and value bytes, laid out back to back
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        # tz was stored in minutes; convert it back to seconds
        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
361 361
def _fm1encodeonemarker(marker):
    """Return the version-1 binary encoding of a single marker tuple."""
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    # data[0] is a placeholder for the total size, patched below once the
    # metadata lengths are known
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    # metadata keys and values trail the packed fixed-size part
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)
397 397
def _fm1readmarkers(data, off):
    """Decode fm1 markers, preferring the C implementation when available."""
    native = getattr(parsers, 'fm1readmarkers', None)
    if native is None:
        # fall back to the pure Python decoder
        return _fm1purereadmarkers(data, off)
    stop = len(data) - _fm1fsize
    return native(data, off, stop)
404 404
# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
# decoder: (data, offset) -> iterator of marker tuples
# encoder: marker tuple -> bytes
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
409 409
@util.nogc
def _readmarkers(data):
    """Read and enumerate markers from raw data.

    Returns a (version, marker-iterator) pair, aborting on an unknown
    on-disk format version.
    """
    # the first byte of the stream is the format version
    diskversion = _unpack('>B', data[0:1])[0]
    if diskversion not in formats:
        raise error.Abort(_('parsing obsolete marker: unknown version %r')
                          % diskversion)
    reader = formats[diskversion][0]
    return diskversion, reader(data, 1)
420 420
def encodemarkers(markers, addheader=False, version=_fm0version):
    """Yield the binary encoding of ``markers``, one chunk per marker.

    When ``addheader`` is true, the one-byte version header is emitted
    first.  Kept separate from flushmarkers(), it will be reused for
    markers exchange.
    """
    if addheader:
        yield _pack('>B', version)
    encodeone = formats[version][1]
    for m in markers:
        yield encodeone(m)
429 429
430 430
class marker(object):
    """Read-only convenience wrapper around a raw obsolete-marker tuple."""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    # accessors below follow the tuple layout: (prec, succs, flag, meta,
    # date, parents)

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def parentnodes(self):
        """Parents of the precursors (None if not recorded)"""
        return self._data[5]
471 471
@util.nogc
def _addsuccessors(successors, markers):
    """Index ``markers`` into the ``successors`` map, keyed by precursor."""
    for m in markers:
        successors.setdefault(m[0], set()).add(m)
476 476
@util.nogc
def _addprecursors(precursors, markers):
    """Index ``markers`` into the ``precursors`` map, keyed by successor."""
    for m in markers:
        for succnode in m[1]:
            precursors.setdefault(succnode, set()).add(m)
482 482
@util.nogc
def _addchildren(children, markers):
    """Index ``markers`` into the ``children`` map, keyed by each parent."""
    for m in markers:
        parentnodes = m[5]
        if parentnodes is None:
            # no parent data recorded for this marker
            continue
        for p in parentnodes:
            children.setdefault(p, set()).add(m)
490 490
def _checkinvalidmarkers(markers):
    """search for marker with invalid data and raise error if needed

    Exist as a separated function to allow the evolve extension for a more
    subtle handling.
    """
    # a marker whose successors include nullid is corrupt
    if any(node.nullid in m[1] for m in markers):
        raise error.Abort(_('bad obsolescence marker detected: '
                            'invalid successors nullid'))
501 501
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with two mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on precursors edges of children(x)
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec: nodeid, precursor changesets
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    # None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related cache
        self.caches = {}
        self.svfs = svfs
        # on-disk format version used when writing new markers
        self._version = defaultformat
        self._readonly = readonly

    def __iter__(self):
        # iterate over raw marker tuples
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        # fast path: when markers are not loaded yet, a single stat() is
        # enough -- any file larger than the 1-byte version header holds at
        # least one marker
        if not self._cached('_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
                pass
        return bool(self._all)

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating marker you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker have been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            else:
                date = util.makedate()
        # nodes must be full 20-byte binary nodeids
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        # sorted tuple: hashable and gives a deterministic encoding
        metadata = tuple(sorted(metadata.iteritems()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicate.
        Return the number of new marker."""
        if self._readonly:
            raise error.Abort('creating obsolete markers is not enabled on '
                              'this repo')
        # filter out markers already present in the store
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in encodemarkers(new, offset == 0, self._version):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._addmarkers(new)
            # new marker *may* have changed several set. invalidate the cache.
            self.caches.clear()
            # records the number of new markers for the transaction hooks
            previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
            transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _all(self):
        # list of all marker tuples, lazily read from disk
        data = self.svfs.tryread('obsstore')
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        # precursor node -> set of markers obsoleting it
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def precursors(self):
        # successor node -> set of markers producing it
        precursors = {}
        _addprecursors(precursors, self._all)
        return precursors

    @propertycache
    def children(self):
        # parent node -> set of markers whose precursor is a child of it
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        # True when the named propertycache attribute has been computed
        return attr in self.__dict__

    def _addmarkers(self, markers):
        # keep the in-memory list and any computed indexes in sync
        markers = list(markers) # to allow repeated iteration
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('precursors'):
            _addprecursors(self.precursors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes mean:

        - marker that use this changeset as successor
        - prune marker of direct children on this changeset
        - recursive application of the two rules on precursors of these markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                # prune markers (no successors) of direct children are
                # relevant even though they do not point at 'current'
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            # walk up through the precursors of the markers found so far
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers
691 698
def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    # newest first; note: this sorts the caller's list in place, as the
    # original implementation did
    versions.sort(reverse=True)
    # search for highest version known on both side
    for candidate in versions:
        if candidate in formats:
            return candidate
    return None
703 710
# arbitrary picked to fit into 8K limit from HTTP server
# you have to take in account:
# - the version header
# - the base85 encoding
# (base85 expands data by 5/4, so 5300 * 5/4 plus headers stays under 8K)
_maxpayload = 5300
709 716
def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    chunks = []
    current = None
    # start oversized so the first marker always opens a new chunk
    currentlen = _maxpayload * 2
    for m in markers:
        encoded = _fm0encodeonemarker(m)
        if len(encoded) + currentlen > _maxpayload:
            current = []
            currentlen = 0
            chunks.append(current)
        current.append(encoded)
        currentlen += len(encoded)
    keys = {}
    header = _pack('>B', _fm0version)
    # newest chunks get the lowest 'dumpN' indexes
    for idx, chunk in enumerate(reversed(chunks)):
        keys['dump%i' % idx] = base85.b85encode(''.join([header] + chunk))
    return keys
730 737
def listmarkers(repo):
    """List markers over pushkey"""
    store = repo.obsstore
    if not store:
        return {}
    return _pushkeyescape(sorted(store))
736 743
def pushmarker(repo, key, old, new):
    """Push markers over pushkey

    Returns 1 on success, 0 when the key or old value is unexpected.
    """
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            # release the transaction before the lock, in reverse
            # acquisition order
            tr.release()
    finally:
        lock.release()
757 764
def getmarkers(repo, nodes=None):
    """Yield marker objects known in a repository.

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned.
    """
    store = repo.obsstore
    rawmarkers = store if nodes is None else store.relevantmarkers(nodes)
    for raw in rawmarkers:
        yield marker(repo, raw)
770 777
def relevantmarkers(repo, node):
    """all obsolete markers relevant to some revision"""
    for raw in repo.obsstore.relevantmarkers(node):
        yield marker(repo, raw)
775 782
776 783
def precursormarkers(ctx):
    """Yield markers listing this changeset among their successors."""
    for raw in ctx.repo().obsstore.precursors.get(ctx.node(), ()):
        yield marker(ctx.repo(), raw)
781 788
def successormarkers(ctx):
    """Yield markers making this changeset obsolete."""
    for raw in ctx.repo().obsstore.successors.get(ctx.node(), ()):
        yield marker(ctx.repo(), raw)
786 793
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It
    includes initial nodes too."""
    seen = set(nodes)
    pending = set(seen)
    while pending:
        current = pending.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # skip markers flagged with any of the specified flags
            if mark[2] & ignoreflags:
                continue
            fresh = [s for s in mark[1] if s not in seen]
            seen.update(fresh)
            pending.update(fresh)
807 814
def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursors of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It
    includes initial nodes too."""
    seen = set(nodes)
    pending = set(seen)
    while pending:
        current = pending.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            # skip markers flagged with any of the specified flags
            if mark[2] & ignoreflags:
                continue
            precnode = mark[0]
            if precnode not in seen:
                seen.add(precnode)
                pending.add(precnode)
829 836
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycle may result if complex situation.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        # (fixpoint loop: stop once an iteration adds no new changeset)
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            # only changesets known locally can contribute descendants
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)
855 862
856 863
def successorssets(repo, initialnode, cache=None):
    """Return set of all latest successors of initial nodes

    The successors set of a changeset A are the group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. The successors set contains non-obsolete changesets only.

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will contain itself only, i.e. [(A,)].

    Finally, successors unknown locally are considered to be pruned (obsoleted
    without any successors).

    The optional `cache` parameter is a dictionary that may contain precomputed
    successors sets. It is meant to reuse the computation of a previous call to
    `successorssets` when multiple calls are made at the same time. The cache
    dictionary is updated in place. The caller is responsible for its life
    span. Code that makes multiple calls to `successorssets` *must* use this
    cache mechanism or suffer terrible performance.
    """

    # mapping: precursor node -> markers using that node as precursor
    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list act as our call stack. Every node we search
    # successors set for are stacked there.
    #
    # The `stackedset` is set version of this stack used to check if a node is
    # already stacked. This check is used to detect cycles and prevent infinite
    # loop.
    #
    # successors set of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        #     1) We already know the successors sets of CURRENT:
        #         -> mission accomplished, pop it from the stack.
        #     2) Node is not obsolete:
        #         -> the node is its own successors sets. Add it to the cache.
        #     3) We do not know successors set of direct successors of CURRENT:
        #         -> We add those successors to the stack.
        #     4) We know successors sets of all direct successors of CURRENT:
        #         -> We can compute CURRENT successors set and add it to the
        #            cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid last successors.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successors of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrary set the successors sets of
            #     the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entry here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   single successors are standard replacement.
            #
            # mark[1] is the tuple of successor nodes recorded by the marker
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" node.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entry and successors set that are strict subset of
                # another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicated entry in successors set
                                    # first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                # process bigger sets first so any subset seen later is dropped
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse() # put small successors set first
                cache[current] = final
    return cache[initialnode]
1061 1068
# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}

def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def register(func):
        # each set name may only be registered once
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return register
1071 1078
def getrevs(repo, name):
    """Return the set of revision that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    unfi = repo.unfiltered()
    store = unfi.obsstore
    if not store:
        # no obsolescence data at all: every named set is trivially empty
        return frozenset()
    caches = store.caches
    if name not in caches:
        # compute lazily and remember for subsequent calls
        caches[name] = cachefuncs[name](unfi)
    return caches[name]
1082 1089
# To be simple we need to invalidate obsolescence cache when:
#
# - new changeset is added:
# - public phase is changed
# - obsolescence marker are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence related cache from a repo

    This removes all caches in obsstore if the obsstore already exists on the
    repo.

    (We could be smarter here given the exact event that trigger the cache
    clearing)"""
    # only clear caches when obsstore data already exists for this repo
    if 'obsstore' not in repo._filecache:
        return
    repo.obsstore.caches.clear()
1100 1107
@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    # hoist attribute lookups out of the loop
    torev = repo.changelog.nodemap.get
    phaseof = repo._phasecache.phase
    obsolete = set()
    # every precursor node of a marker is a candidate; keep only nodes
    # known locally (rev is not None) whose phase is non-public (truthy)
    for node in repo.obsstore.successors:
        rev = torev(node)
        if rev is not None and phaseof(repo, rev):
            obsolete.add(rev)
    return obsolete
1112 1119
@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    # walk mutable, non-obsolete changesets in growing revision order so
    # parents are always classified before their children
    candidates = sorted(
        ((ctx.rev(), ctx) for ctx in
         repo.set('(not public()) and (not obsolete())')),
        key=lambda pair: pair[0])
    unstable = set()
    for rev, ctx in candidates:
        # A rev is unstable if one of its parents is obsolete or unstable
        if any(p.obsolete() or (p.rev() in unstable)
               for p in ctx.parents()):
            unstable.add(rev)
    return unstable
1127 1134
@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    # obsolete revisions that are ancestors of unstable changesets still
    # have live descendants, hence are "suspended"
    unstableancestors = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(rev for rev in getrevs(repo, 'obsolete')
               if rev in unstableancestors)
1133 1140
@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    # an obsolete revision that is not suspended has no live descendants
    obsolete = getrevs(repo, 'obsolete')
    suspended = getrevs(repo, 'suspended')
    return obsolete - suspended
1138 1145
1139 1146
@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    # hoist attribute lookups out of the loop
    getphase = repo._phasecache.phase # would be faster to grab the full list
    publicphase = phases.public
    nodetorev = repo.changelog.nodemap.get
    bumped = set()
    # only mutable, non-obsolete revisions can be bumped
    for ctx in repo.set('(not public()) and (not obsolete())'):
        node = ctx.node()
        # (future) a cache of precursors may be worthwhile if split is common
        for pnode in allprecursors(repo.obsstore, [node],
                                   ignoreflags=bumpedfix):
            precrev = nodetorev(pnode) # unfiltered! but so is phasecache
            if precrev is not None and getphase(repo, precrev) <= publicphase:
                # this draft has a public precursor: it is bumped
                bumped.add(ctx.rev())
                break # move on to the next draft
    return bumped
1162 1169
@cachefor('divergent')
def _computedivergentset(repo):
    """the set of rev that compete to be the final successors of some revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    # shared cache across successorssets() calls (see its docstring)
    succscache = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        # walk the precursor markers transitively
        pending = set(obsstore.precursors.get(ctx.node(), ()))
        visited = set()
        while pending:
            precnode = pending.pop()[0]
            if precnode in visited:
                continue # emergency cycle hanging prevention
            visited.add(precnode)
            if precnode not in succscache:
                successorssets(repo, precnode, succscache)
            surviving = [ss for ss in succscache[precnode] if ss]
            if len(surviving) > 1:
                # several competing successors sets: divergence
                divergent.add(ctx.rev())
                break
            pending.update(obsstore.precursors.get(precnode, ()))
    return divergent
1187 1194
1188 1195
def createmarkers(repo, relations, flag=0, date=None, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuple. `old` and `news` are changectx. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function,

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare the global metadata shared by every marker
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    tr = repo.transaction('add-obsolescence-marker')
    try:
        for rel in relations:
            prec, sucs = rel[0], rel[1]
            # per-marker metadata overrides the global one
            markermeta = metadata.copy()
            if len(rel) > 2:
                markermeta.update(rel[2])

            if not prec.mutable():
                raise error.Abort("cannot obsolete public changeset: %s"
                                  % prec,
                                  hint='see "hg help phases" for details')
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                # pruning: record the parents so the prune site is known
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise error.Abort("changeset %s cannot obsolete itself" % prec)
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=markermeta)
            repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
1236 1243
def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.

    `option` is one of the evolution option names (e.g. 'createmarkers');
    the special config value 'all' enables every option at once.

    Raises error.Abort when 'allowunstable' or 'exchange' is enabled
    without 'createmarkers'.
    """
    result = set(repo.ui.configlist('experimental', 'evolution'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't been
    # set but _enabled is true.
    if not result and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        createmarkersopt not in result):
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return option in result
General Comments 0
You need to be logged in to leave comments. Login now