changelog: fix handling of empty copy entries in changeset...
Martin von Zweigbergk
r42756:e3df1e15 default
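The four lines added below give decodecopies() and decodefileindices() an early return for empty entries. A minimal sketch of the difference, assuming those two definitions from this file are in scope (simplified to native strings here; the real code operates on bytes):

    files = ['dst.txt', 'other.txt']

    # An empty 'p1copies' or 'filesadded' value in extra previously fell
    # through to the parsing loop, raised ValueError, and decoded to None,
    # the same result as a malformed entry.  With the early return, an empty
    # entry decodes to an empty container instead:
    decodecopies(files, '')          # {} after this change (previously None)
    decodefileindices(files, '')     # [] after this change (previously None)

    # Non-empty entries are unaffected:
    decodecopies(files, '0\0src.txt')    # {'dst.txt': 'src.txt'}
    decodefileindices(files, '1')        # ['other.txt']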
@@ -1,665 +1,669 @@
1 1 # changelog.py - changelog class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from .i18n import _
11 11 from .node import (
12 12 bin,
13 13 hex,
14 14 nullid,
15 15 )
16 16 from .thirdparty import (
17 17 attr,
18 18 )
19 19
20 20 from . import (
21 21 encoding,
22 22 error,
23 23 pycompat,
24 24 revlog,
25 25 util,
26 26 )
27 27 from .utils import (
28 28 dateutil,
29 29 stringutil,
30 30 )
31 31
32 32 _defaultextra = {'branch': 'default'}
33 33
34 34 def _string_escape(text):
35 35 """
36 36 >>> from .pycompat import bytechr as chr
37 37 >>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
38 38 >>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)s12ab%(cr)scd%(bs)s%(nl)s" % d
39 39 >>> s
40 40 'ab\\ncd\\\\\\\\n\\x0012ab\\rcd\\\\\\n'
41 41 >>> res = _string_escape(s)
42 42 >>> s == _string_unescape(res)
43 43 True
44 44 """
45 45 # subset of the string_escape codec
46 46 text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
47 47 return text.replace('\0', '\\0')
48 48
49 49 def _string_unescape(text):
50 50 if '\\0' in text:
51 51 # fix up \0 without getting into trouble with \\0
52 52 text = text.replace('\\\\', '\\\\\n')
53 53 text = text.replace('\\0', '\0')
54 54 text = text.replace('\n', '')
55 55 return stringutil.unescapestr(text)
56 56
57 57 def decodeextra(text):
58 58 """
59 59 >>> from .pycompat import bytechr as chr
60 60 >>> sorted(decodeextra(encodeextra({b'foo': b'bar', b'baz': chr(0) + b'2'})
61 61 ... ).items())
62 62 [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
63 63 >>> sorted(decodeextra(encodeextra({b'foo': b'bar',
64 64 ... b'baz': chr(92) + chr(0) + b'2'})
65 65 ... ).items())
66 66 [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
67 67 """
68 68 extra = _defaultextra.copy()
69 69 for l in text.split('\0'):
70 70 if l:
71 71 k, v = _string_unescape(l).split(':', 1)
72 72 extra[k] = v
73 73 return extra
74 74
75 75 def encodeextra(d):
76 76 # keys must be sorted to produce a deterministic changelog entry
77 77 items = [
78 78 _string_escape('%s:%s' % (k, pycompat.bytestr(d[k])))
79 79 for k in sorted(d)
80 80 ]
81 81 return "\0".join(items)
82 82
83 83 def encodecopies(files, copies):
84 84 items = []
85 85 for i, dst in enumerate(files):
86 86 if dst in copies:
87 87 items.append('%d\0%s' % (i, copies[dst]))
88 88 if len(items) != len(copies):
89 89 raise error.ProgrammingError('some copy targets missing from file list')
90 90 return "\n".join(items)
91 91
92 92 def decodecopies(files, data):
93 93 try:
94 94 copies = {}
95 if not data:
96 return copies
95 97 for l in data.split('\n'):
96 98 strindex, src = l.split('\0')
97 99 i = int(strindex)
98 100 dst = files[i]
99 101 copies[dst] = src
100 102 return copies
101 103 except (ValueError, IndexError):
102 104 # Perhaps someone had chosen the same key name (e.g. "p1copies") and
103 105 # used different syntax for the value.
104 106 return None
105 107
106 108 def encodefileindices(files, subset):
107 109 subset = set(subset)
108 110 indices = []
109 111 for i, f in enumerate(files):
110 112 if f in subset:
111 113 indices.append('%d' % i)
112 114 return '\n'.join(indices)
113 115
114 116 def decodefileindices(files, data):
115 117 try:
116 118 subset = []
119 if not data:
120 return subset
117 121 for strindex in data.split('\n'):
118 122 i = int(strindex)
119 123 if i < 0 or i >= len(files):
120 124 return None
121 125 subset.append(files[i])
122 126 return subset
123 127 except (ValueError, IndexError):
124 128 # Perhaps someone had chosen the same key name (e.g. "added") and
125 129 # used different syntax for the value.
126 130 return None
127 131
128 132 def stripdesc(desc):
129 133 """strip trailing whitespace and leading and trailing empty lines"""
130 134 return '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n')
131 135
132 136 class appender(object):
133 137 '''the changelog index must be updated last on disk, so we use this class
134 138 to delay writes to it'''
135 139 def __init__(self, vfs, name, mode, buf):
136 140 self.data = buf
137 141 fp = vfs(name, mode)
138 142 self.fp = fp
139 143 self.offset = fp.tell()
140 144 self.size = vfs.fstat(fp).st_size
141 145 self._end = self.size
142 146
143 147 def end(self):
144 148 return self._end
145 149 def tell(self):
146 150 return self.offset
147 151 def flush(self):
148 152 pass
149 153
150 154 @property
151 155 def closed(self):
152 156 return self.fp.closed
153 157
154 158 def close(self):
155 159 self.fp.close()
156 160
157 161 def seek(self, offset, whence=0):
158 162 '''virtual file offset spans real file and data'''
159 163 if whence == 0:
160 164 self.offset = offset
161 165 elif whence == 1:
162 166 self.offset += offset
163 167 elif whence == 2:
164 168 self.offset = self.end() + offset
165 169 if self.offset < self.size:
166 170 self.fp.seek(self.offset)
167 171
168 172 def read(self, count=-1):
169 173 '''only trick here is reads that span real file and data'''
170 174 ret = ""
171 175 if self.offset < self.size:
172 176 s = self.fp.read(count)
173 177 ret = s
174 178 self.offset += len(s)
175 179 if count > 0:
176 180 count -= len(s)
177 181 if count != 0:
178 182 doff = self.offset - self.size
179 183 self.data.insert(0, "".join(self.data))
180 184 del self.data[1:]
181 185 s = self.data[0][doff:doff + count]
182 186 self.offset += len(s)
183 187 ret += s
184 188 return ret
185 189
186 190 def write(self, s):
187 191 self.data.append(bytes(s))
188 192 self.offset += len(s)
189 193 self._end += len(s)
190 194
191 195 def __enter__(self):
192 196 self.fp.__enter__()
193 197 return self
194 198
195 199 def __exit__(self, *args):
196 200 return self.fp.__exit__(*args)
197 201
198 202 def _divertopener(opener, target):
199 203 """build an opener that writes in 'target.a' instead of 'target'"""
200 204 def _divert(name, mode='r', checkambig=False):
201 205 if name != target:
202 206 return opener(name, mode)
203 207 return opener(name + ".a", mode)
204 208 return _divert
205 209
206 210 def _delayopener(opener, target, buf):
207 211 """build an opener that stores chunks in 'buf' instead of 'target'"""
208 212 def _delay(name, mode='r', checkambig=False):
209 213 if name != target:
210 214 return opener(name, mode)
211 215 return appender(opener, name, mode, buf)
212 216 return _delay
213 217
214 218 @attr.s
215 219 class _changelogrevision(object):
216 220 # Extensions might modify _defaultextra, so let the constructor below pass
217 221 # it in
218 222 extra = attr.ib()
219 223 manifest = attr.ib(default=nullid)
220 224 user = attr.ib(default='')
221 225 date = attr.ib(default=(0, 0))
222 226 files = attr.ib(default=attr.Factory(list))
223 227 filesadded = attr.ib(default=None)
224 228 filesremoved = attr.ib(default=None)
225 229 p1copies = attr.ib(default=None)
226 230 p2copies = attr.ib(default=None)
227 231 description = attr.ib(default='')
228 232
229 233 class changelogrevision(object):
230 234 """Holds results of a parsed changelog revision.
231 235
232 236 Changelog revisions consist of multiple pieces of data, including
233 237 the manifest node, user, and date. This object exposes a view into
234 238 the parsed object.
235 239 """
236 240
237 241 __slots__ = (
238 242 r'_offsets',
239 243 r'_text',
240 244 )
241 245
242 246 def __new__(cls, text):
243 247 if not text:
244 248 return _changelogrevision(extra=_defaultextra)
245 249
246 250 self = super(changelogrevision, cls).__new__(cls)
247 251 # We could return here and implement the following as an __init__.
248 252 # But doing it here is equivalent and saves an extra function call.
249 253
250 254 # format used:
251 255 # nodeid\n : manifest node in ascii
252 256 # user\n : user, no \n or \r allowed
253 257 # time tz extra\n : date (time is int or float, timezone is int)
254 258 # : extra is metadata, encoded and separated by '\0'
255 259 # : older versions ignore it
256 260 # files\n\n : files modified by the cset, no \n or \r allowed
257 261 # (.*) : comment (free text, ideally utf-8)
258 262 #
259 263 # changelog v0 doesn't use extra
260 264
261 265 nl1 = text.index('\n')
262 266 nl2 = text.index('\n', nl1 + 1)
263 267 nl3 = text.index('\n', nl2 + 1)
264 268
265 269 # The list of files may be empty. Which means nl3 is the first of the
266 270 # double newline that precedes the description.
267 271 if text[nl3 + 1:nl3 + 2] == '\n':
268 272 doublenl = nl3
269 273 else:
270 274 doublenl = text.index('\n\n', nl3 + 1)
271 275
272 276 self._offsets = (nl1, nl2, nl3, doublenl)
273 277 self._text = text
274 278
275 279 return self
276 280
277 281 @property
278 282 def manifest(self):
279 283 return bin(self._text[0:self._offsets[0]])
280 284
281 285 @property
282 286 def user(self):
283 287 off = self._offsets
284 288 return encoding.tolocal(self._text[off[0] + 1:off[1]])
285 289
286 290 @property
287 291 def _rawdate(self):
288 292 off = self._offsets
289 293 dateextra = self._text[off[1] + 1:off[2]]
290 294 return dateextra.split(' ', 2)[0:2]
291 295
292 296 @property
293 297 def _rawextra(self):
294 298 off = self._offsets
295 299 dateextra = self._text[off[1] + 1:off[2]]
296 300 fields = dateextra.split(' ', 2)
297 301 if len(fields) != 3:
298 302 return None
299 303
300 304 return fields[2]
301 305
302 306 @property
303 307 def date(self):
304 308 raw = self._rawdate
305 309 time = float(raw[0])
306 310 # Various tools did silly things with the timezone.
307 311 try:
308 312 timezone = int(raw[1])
309 313 except ValueError:
310 314 timezone = 0
311 315
312 316 return time, timezone
313 317
314 318 @property
315 319 def extra(self):
316 320 raw = self._rawextra
317 321 if raw is None:
318 322 return _defaultextra
319 323
320 324 return decodeextra(raw)
321 325
322 326 @property
323 327 def files(self):
324 328 off = self._offsets
325 329 if off[2] == off[3]:
326 330 return []
327 331
328 332 return self._text[off[2] + 1:off[3]].split('\n')
329 333
330 334 @property
331 335 def filesadded(self):
332 336 rawindices = self.extra.get('filesadded')
333 337 return rawindices and decodefileindices(self.files, rawindices)
334 338
335 339 @property
336 340 def filesremoved(self):
337 341 rawindices = self.extra.get('filesremoved')
338 342 return rawindices and decodefileindices(self.files, rawindices)
339 343
340 344 @property
341 345 def p1copies(self):
342 346 rawcopies = self.extra.get('p1copies')
343 347 return rawcopies and decodecopies(self.files, rawcopies)
344 348
345 349 @property
346 350 def p2copies(self):
347 351 rawcopies = self.extra.get('p2copies')
348 352 return rawcopies and decodecopies(self.files, rawcopies)
349 353
350 354 @property
351 355 def description(self):
352 356 return encoding.tolocal(self._text[self._offsets[3] + 2:])
353 357
354 358 class changelog(revlog.revlog):
355 359 def __init__(self, opener, trypending=False):
356 360 """Load a changelog revlog using an opener.
357 361
358 362 If ``trypending`` is true, we attempt to load the index from a
359 363 ``00changelog.i.a`` file instead of the default ``00changelog.i``.
360 364 The ``00changelog.i.a`` file contains index (and possibly inline
361 365 revision) data for a transaction that hasn't been finalized yet.
362 366 It exists in a separate file to facilitate readers (such as
363 367 hooks processes) accessing data before a transaction is finalized.
364 368 """
365 369 if trypending and opener.exists('00changelog.i.a'):
366 370 indexfile = '00changelog.i.a'
367 371 else:
368 372 indexfile = '00changelog.i'
369 373
370 374 datafile = '00changelog.d'
371 375 revlog.revlog.__init__(self, opener, indexfile, datafile=datafile,
372 376 checkambig=True, mmaplargeindex=True)
373 377
374 378 if self._initempty and (self.version & 0xFFFF == revlog.REVLOGV1):
375 379 # changelogs don't benefit from generaldelta.
376 380
377 381 self.version &= ~revlog.FLAG_GENERALDELTA
378 382 self._generaldelta = False
379 383
380 384 # Delta chains for changelogs tend to be very small because entries
381 385 # tend to be small and don't delta well with each other. So disable delta
382 386 # chains.
383 387 self._storedeltachains = False
384 388
385 389 self._realopener = opener
386 390 self._delayed = False
387 391 self._delaybuf = None
388 392 self._divert = False
389 393 self.filteredrevs = frozenset()
390 394
391 395 def tiprev(self):
392 396 for i in pycompat.xrange(len(self) -1, -2, -1):
393 397 if i not in self.filteredrevs:
394 398 return i
395 399
396 400 def tip(self):
397 401 """filtered version of revlog.tip"""
398 402 return self.node(self.tiprev())
399 403
400 404 def __contains__(self, rev):
401 405 """filtered version of revlog.__contains__"""
402 406 return (0 <= rev < len(self)
403 407 and rev not in self.filteredrevs)
404 408
405 409 def __iter__(self):
406 410 """filtered version of revlog.__iter__"""
407 411 if len(self.filteredrevs) == 0:
408 412 return revlog.revlog.__iter__(self)
409 413
410 414 def filterediter():
411 415 for i in pycompat.xrange(len(self)):
412 416 if i not in self.filteredrevs:
413 417 yield i
414 418
415 419 return filterediter()
416 420
417 421 def revs(self, start=0, stop=None):
418 422 """filtered version of revlog.revs"""
419 423 for i in super(changelog, self).revs(start, stop):
420 424 if i not in self.filteredrevs:
421 425 yield i
422 426
423 427 def _checknofilteredinrevs(self, revs):
424 428 """raise the appropriate error if 'revs' contains a filtered revision
425 429
426 430 This returns a version of 'revs' to be used thereafter by the caller.
427 431 In particular, if revs is an iterator, it is converted into a set.
428 432 """
429 433 safehasattr = util.safehasattr
430 434 if safehasattr(revs, '__next__'):
431 435 # Note that inspect.isgenerator() is not true for iterators, so check for __next__ instead.
432 436 revs = set(revs)
433 437
434 438 filteredrevs = self.filteredrevs
435 439 if safehasattr(revs, 'first'): # smartset
436 440 offenders = revs & filteredrevs
437 441 else:
438 442 offenders = filteredrevs.intersection(revs)
439 443
440 444 for rev in offenders:
441 445 raise error.FilteredIndexError(rev)
442 446 return revs
443 447
444 448 def headrevs(self, revs=None):
445 449 if revs is None and self.filteredrevs:
446 450 try:
447 451 return self.index.headrevsfiltered(self.filteredrevs)
448 452 # AttributeError covers non-c-extension environments and
449 453 # old c extensions without filter handling.
450 454 except AttributeError:
451 455 return self._headrevs()
452 456
453 457 if self.filteredrevs:
454 458 revs = self._checknofilteredinrevs(revs)
455 459 return super(changelog, self).headrevs(revs)
456 460
457 461 def strip(self, *args, **kwargs):
458 462 # XXX make something better than assert
459 463 # We can't expect proper strip behavior if we are filtered.
460 464 assert not self.filteredrevs
461 465 super(changelog, self).strip(*args, **kwargs)
462 466
463 467 def rev(self, node):
464 468 """filtered version of revlog.rev"""
465 469 r = super(changelog, self).rev(node)
466 470 if r in self.filteredrevs:
467 471 raise error.FilteredLookupError(hex(node), self.indexfile,
468 472 _('filtered node'))
469 473 return r
470 474
471 475 def node(self, rev):
472 476 """filtered version of revlog.node"""
473 477 if rev in self.filteredrevs:
474 478 raise error.FilteredIndexError(rev)
475 479 return super(changelog, self).node(rev)
476 480
477 481 def linkrev(self, rev):
478 482 """filtered version of revlog.linkrev"""
479 483 if rev in self.filteredrevs:
480 484 raise error.FilteredIndexError(rev)
481 485 return super(changelog, self).linkrev(rev)
482 486
483 487 def parentrevs(self, rev):
484 488 """filtered version of revlog.parentrevs"""
485 489 if rev in self.filteredrevs:
486 490 raise error.FilteredIndexError(rev)
487 491 return super(changelog, self).parentrevs(rev)
488 492
489 493 def flags(self, rev):
490 494 """filtered version of revlog.flags"""
491 495 if rev in self.filteredrevs:
492 496 raise error.FilteredIndexError(rev)
493 497 return super(changelog, self).flags(rev)
494 498
495 499 def delayupdate(self, tr):
496 500 "delay visibility of index updates to other readers"
497 501
498 502 if not self._delayed:
499 503 if len(self) == 0:
500 504 self._divert = True
501 505 if self._realopener.exists(self.indexfile + '.a'):
502 506 self._realopener.unlink(self.indexfile + '.a')
503 507 self.opener = _divertopener(self._realopener, self.indexfile)
504 508 else:
505 509 self._delaybuf = []
506 510 self.opener = _delayopener(self._realopener, self.indexfile,
507 511 self._delaybuf)
508 512 self._delayed = True
509 513 tr.addpending('cl-%i' % id(self), self._writepending)
510 514 tr.addfinalize('cl-%i' % id(self), self._finalize)
511 515
512 516 def _finalize(self, tr):
513 517 "finalize index updates"
514 518 self._delayed = False
515 519 self.opener = self._realopener
516 520 # move redirected index data back into place
517 521 if self._divert:
518 522 assert not self._delaybuf
519 523 tmpname = self.indexfile + ".a"
520 524 nfile = self.opener.open(tmpname)
521 525 nfile.close()
522 526 self.opener.rename(tmpname, self.indexfile, checkambig=True)
523 527 elif self._delaybuf:
524 528 fp = self.opener(self.indexfile, 'a', checkambig=True)
525 529 fp.write("".join(self._delaybuf))
526 530 fp.close()
527 531 self._delaybuf = None
528 532 self._divert = False
529 533 # split when we're done
530 534 self._enforceinlinesize(tr)
531 535
532 536 def _writepending(self, tr):
533 537 "create a file containing the unfinalized state for pretxnchangegroup"
534 538 if self._delaybuf:
535 539 # make a temporary copy of the index
536 540 fp1 = self._realopener(self.indexfile)
537 541 pendingfilename = self.indexfile + ".a"
538 542 # register as a temp file to ensure cleanup on failure
539 543 tr.registertmp(pendingfilename)
540 544 # write existing data
541 545 fp2 = self._realopener(pendingfilename, "w")
542 546 fp2.write(fp1.read())
543 547 # add pending data
544 548 fp2.write("".join(self._delaybuf))
545 549 fp2.close()
546 550 # switch modes so finalize can simply rename
547 551 self._delaybuf = None
548 552 self._divert = True
549 553 self.opener = _divertopener(self._realopener, self.indexfile)
550 554
551 555 if self._divert:
552 556 return True
553 557
554 558 return False
555 559
556 560 def _enforceinlinesize(self, tr, fp=None):
557 561 if not self._delayed:
558 562 revlog.revlog._enforceinlinesize(self, tr, fp)
559 563
560 564 def read(self, node):
561 565 """Obtain data from a parsed changelog revision.
562 566
563 567 Returns a 6-tuple of:
564 568
565 569 - manifest node in binary
566 570 - author/user as a localstr
567 571 - date as a 2-tuple of (time, timezone)
568 572 - list of files
569 573 - commit message as a localstr
570 574 - dict of extra metadata
571 575
572 576 Unless you need to access all fields, consider calling
573 577 ``changelogrevision`` instead, as it is faster for partial object
574 578 access.
575 579 """
576 580 c = changelogrevision(self.revision(node))
577 581 return (
578 582 c.manifest,
579 583 c.user,
580 584 c.date,
581 585 c.files,
582 586 c.description,
583 587 c.extra
584 588 )
585 589
586 590 def changelogrevision(self, nodeorrev):
587 591 """Obtain a ``changelogrevision`` for a node or revision."""
588 592 return changelogrevision(self.revision(nodeorrev))
589 593
590 594 def readfiles(self, node):
591 595 """
592 596 short version of read that only returns the files modified by the cset
593 597 """
594 598 text = self.revision(node)
595 599 if not text:
596 600 return []
597 601 last = text.index("\n\n")
598 602 l = text[:last].split('\n')
599 603 return l[3:]
600 604
601 605 def add(self, manifest, files, desc, transaction, p1, p2,
602 606 user, date=None, extra=None, p1copies=None, p2copies=None,
603 607 filesadded=None, filesremoved=None):
604 608 # Convert to UTF-8 encoded bytestrings as the very first
605 609 # thing: calling any method on a localstr object will turn it
606 610 # into a str object and the cached UTF-8 string is thus lost.
607 611 user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)
608 612
609 613 user = user.strip()
610 614 # An empty username or a username with a "\n" will make the
611 615 # revision text contain two "\n\n" sequences -> corrupt
612 616 # repository since read cannot unpack the revision.
613 617 if not user:
614 618 raise error.StorageError(_("empty username"))
615 619 if "\n" in user:
616 620 raise error.StorageError(_("username %r contains a newline")
617 621 % pycompat.bytestr(user))
618 622
619 623 desc = stripdesc(desc)
620 624
621 625 if date:
622 626 parseddate = "%d %d" % dateutil.parsedate(date)
623 627 else:
624 628 parseddate = "%d %d" % dateutil.makedate()
625 629 if extra:
626 630 branch = extra.get("branch")
627 631 if branch in ("default", ""):
628 632 del extra["branch"]
629 633 elif branch in (".", "null", "tip"):
630 634 raise error.StorageError(_('the name \'%s\' is reserved')
631 635 % branch)
632 636 extrasentries = p1copies, p2copies, filesadded, filesremoved
633 637 if extra is None and any(x is not None for x in extrasentries):
634 638 extra = {}
635 639 sortedfiles = sorted(files)
636 640 if p1copies is not None:
637 641 extra['p1copies'] = encodecopies(sortedfiles, p1copies)
638 642 if p2copies is not None:
639 643 extra['p2copies'] = encodecopies(sortedfiles, p2copies)
640 644 if filesadded is not None:
641 645 extra['filesadded'] = encodefileindices(sortedfiles, filesadded)
642 646 if filesremoved is not None:
643 647 extra['filesremoved'] = encodefileindices(sortedfiles, filesremoved)
644 648
645 649 if extra:
646 650 extra = encodeextra(extra)
647 651 parseddate = "%s %s" % (parseddate, extra)
648 652 l = [hex(manifest), user, parseddate] + sortedfiles + ["", desc]
649 653 text = "\n".join(l)
650 654 return self.addrevision(text, transaction, len(self), p1, p2)
651 655
652 656 def branchinfo(self, rev):
653 657 """return the branch name and open/close state of a revision
654 658
655 659 This function exists because creating a changectx object
656 660 just to access this is costly."""
657 661 extra = self.read(rev)[5]
658 662 return encoding.tolocal(extra.get("branch")), 'close' in extra
659 663
660 664 def _nodeduplicatecallback(self, transaction, node):
661 665 # keep track of revisions that got "re-added", eg: unbundle of a known rev.
662 666 #
663 667 # We track them in a list to preserve their order from the source bundle
664 668 duplicates = transaction.changes.setdefault('revduplicates', [])
665 669 duplicates.append(self.rev(node))
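For reference, a hedged sketch of the raw changeset text that add() assembles and changelogrevision.__new__() slices apart, following the "format used" comment above (all values are invented for illustration; a real entry uses the actual 40-hex manifest node, an encodeextra() blob, and bytes throughout):

    manifestnode = '1' * 40                       # stands in for hex(manifest)
    user = 'Example User <user@example.com>'
    datetzextra = '1565000000 0 branch:stable'    # time, tz offset, encoded extra
    files = ['copied-to.txt', 'other.txt']        # sorted file list
    desc = 'example: describe the change'

    text = '\n'.join([manifestnode, user, datetzextra] + files + ['', desc])
    # Resulting layout:
    #   <manifest hex>\n<user>\n<time tz extra>\n<file>\n<file>\n\n<description>
    # changelogrevision(text) would expose .manifest, .user, .date, .extra,
    # .files and .description by slicing at the newline offsets computed in
    # __new__ above.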