cvsps: kill some more trailing whitespace
Dirkjan Ochtman
r7573:8bea01a6 default
@@ -1,678 +1,678 @@
1 1 #
2 2 # Mercurial built-in replacement for cvsps.
3 3 #
4 4 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
5 5 #
6 6 # This software may be used and distributed according to the terms
7 7 # of the GNU General Public License, incorporated herein by reference.
8 8
9 9 import os
10 10 import re
11 11 import cPickle as pickle
12 12 from mercurial import util
13 13 from mercurial.i18n import _
14 14
15 15 def listsort(list, key):
16 16 "helper to sort by key in Python 2.3"
17 17 try:
18 18 list.sort(key=key)
19 19 except TypeError:
20 20 list.sort(lambda l, r: cmp(key(l), key(r)))
21 21
22 22 class logentry(object):
23 23 '''Class logentry has the following attributes:
24 24 .author - author name as CVS knows it
25 25 .branch - name of branch this revision is on
26 26 .branches - revision tuple of branches starting at this revision
27 27 .comment - commit message
28 28 .date - the commit date as a (time, tz) tuple
29 29 .dead - true if file revision is dead
30 30 .file - name of file
31 31 .lines - a tuple (+lines, -lines) or None
32 32 .parent - previous revision of this entry
33 33 .rcs - name of file as returned from CVS
34 34 .revision - revision number as tuple
35 35 .tags - list of tags on the file
36 36 '''
37 37 def __init__(self, **entries):
38 38 self.__dict__.update(entries)
39 39
40 40 class logerror(Exception):
41 41 pass
42 42
43 43 def getrepopath(cvspath):
44 44 """Return the repository path from a CVS path.
45 45
46 46 >>> getrepopath('/foo/bar')
47 47 '/foo/bar'
48 48 >>> getrepopath('c:/foo/bar')
49 49 'c:/foo/bar'
50 50 >>> getrepopath(':pserver:10/foo/bar')
51 51 '/foo/bar'
52 52 >>> getrepopath(':pserver:10c:/foo/bar')
53 53 '/foo/bar'
54 54 >>> getrepopath(':pserver:/foo/bar')
55 55 '/foo/bar'
56 56 >>> getrepopath(':pserver:c:/foo/bar')
57 57 'c:/foo/bar'
58 58 >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
59 59 '/foo/bar'
60 60 >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
61 61 'c:/foo/bar'
62 62 """
63 63 # According to CVS manual, CVS paths are expressed like:
64 64 # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
65 65 #
66 66 # Unfortunately, Windows absolute paths start with a drive letter
67 67 # like 'c:' making it harder to parse. Here we assume that drive
68 68 # letters are only one character long and any CVS component before
69 69 # the repository path is at least 2 characters long, and use this
70 70 # to disambiguate.
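    # A hypothetical illustration of the heuristic: ':pserver:anon@cvs.example.org:2401/cvsroot'
    # splits on ':' into ['', 'pserver', 'anon@cvs.example.org', '2401/cvsroot']; the
    # next-to-last part is longer than one character, so it cannot be a drive letter
    # and we return the last part with the port digits stripped, '/cvsroot'.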
71 71 parts = cvspath.split(':')
72 72 if len(parts) == 1:
73 73 return parts[0]
74 74 # Here there is an ambiguous case if we have a port number
75 75 # immediately followed by a Windows drive letter. We assume this
76 76 # never happens and decide it must be a CVS path component,
77 77 # therefore ignoring it.
78 78 if len(parts[-2]) > 1:
79 79 return parts[-1].lstrip('0123456789')
80 80 return parts[-2] + ':' + parts[-1]
81 81
82 82 def createlog(ui, directory=None, root="", rlog=True, cache=None):
83 83 '''Collect the CVS rlog'''
84 84
85 85 # Because we store many duplicate commit log messages, reusing strings
86 86 # saves a lot of memory and pickle storage space.
87 87 _scache = {}
88 88 def scache(s):
89 89 "return a shared version of a string"
90 90 return _scache.setdefault(s, s)
91 91
92 92 ui.status(_('collecting CVS rlog\n'))
93 93
94 94 log = [] # list of logentry objects containing the CVS state
95 95
96 96 # patterns to match in CVS (r)log output, by state of use
97 97 re_00 = re.compile('RCS file: (.+)$')
98 98 re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
99 99 re_02 = re.compile('cvs (r?log|server): (.+)\n$')
100 100 re_03 = re.compile("(Cannot access.+CVSROOT)|(can't create temporary directory.+)$")
101 101 re_10 = re.compile('Working file: (.+)$')
102 102 re_20 = re.compile('symbolic names:')
103 103 re_30 = re.compile('\t(.+): ([\\d.]+)$')
104 104 re_31 = re.compile('----------------------------$')
105 105 re_32 = re.compile('=============================================================================$')
106 106 re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
107 107 re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?')
108 108 re_70 = re.compile('branches: (.+);$')
109 109
110 110 prefix = '' # leading path to strip from what we get from CVS
111 111
112 112 if directory is None:
113 113 # Current working directory
114 114
115 115 # Get the real directory in the repository
116 116 try:
117 117 prefix = file(os.path.join('CVS','Repository')).read().strip()
118 118 if prefix == ".":
119 119 prefix = ""
120 120 directory = prefix
121 121 except IOError:
122 122 raise logerror('Not a CVS sandbox')
123 123
124 124 if prefix and not prefix.endswith(os.sep):
125 125 prefix += os.sep
126 126
127 127 # Use the Root file in the sandbox, if it exists
128 128 try:
129 129 root = file(os.path.join('CVS','Root')).read().strip()
130 130 except IOError:
131 131 pass
132 132
133 133 if not root:
134 134 root = os.environ.get('CVSROOT', '')
135 135
136 136 # read log cache if one exists
137 137 oldlog = []
138 138 date = None
139 139
140 140 if cache:
141 141 cachedir = os.path.expanduser('~/.hg.cvsps')
142 142 if not os.path.exists(cachedir):
143 143 os.mkdir(cachedir)
144 144
145 145 # The cvsps cache pickle needs a uniquified name, based on the
146 146 # repository location. The address may have all sorts of nasties
147 147 # in it, slashes, colons and such. So here we take just the
148 148 # alphanumerics, concatenated in a way that does not mix up the
149 149 # various components, so that
150 150 # :pserver:user@server:/path
151 151 # and
152 152 # /pserver/user/server/path
153 153 # are mapped to different cache file names.
154 154 cachefile = root.split(":") + [directory, "cache"]
155 155 cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
156 156 cachefile = os.path.join(cachedir,
157 157 '.'.join([s for s in cachefile if s]))
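        # With hypothetical values, root ':pserver:user@server:/path' and directory
        # 'proj' would yield a file named 'pserver.user-server.path.proj.cache',
        # whereas root '/pserver/user/server/path' would yield
        # 'pserver-user-server-path.proj.cache'.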
158 158
159 159 if cache == 'update':
160 160 try:
161 161 ui.note(_('reading cvs log cache %s\n') % cachefile)
162 162 oldlog = pickle.load(file(cachefile))
163 163 ui.note(_('cache has %d log entries\n') % len(oldlog))
164 164 except Exception, e:
165 165 ui.note(_('error reading cache: %r\n') % e)
166 166
167 167 if oldlog:
168 168 date = oldlog[-1].date # last commit date as a (time,tz) tuple
169 169 date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
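        # This date is passed to cvs below as '-d>DATE', so only entries newer than
        # the cache are fetched; the old and new logs are joined once parsing is done.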
170 170
171 171 # build the CVS commandline
172 172 cmd = ['cvs', '-q']
173 173 if root:
174 174 cmd.append('-d%s' % root)
175 175 p = util.normpath(getrepopath(root))
176 176 if not p.endswith('/'):
177 177 p += '/'
178 178 prefix = p + util.normpath(prefix)
179 179 cmd.append(['log', 'rlog'][rlog])
180 180 if date:
181 181 # no space between option and date string
182 182 cmd.append('-d>%s' % date)
183 183 cmd.append(directory)
184 184
185 185 # state machine begins here
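    # Overview of the states driven by the (r)log output:
    #   0: scan for 'RCS file:'             1: expect 'Working file:' (log only)
    #   2: expect 'symbolic names:'         3: read tag/revision pairs
    #   4: expect '-----' before revision   5: read a revision number
    #   6: read date/author/state/lines     7: read branch list or log message
    #   8: read log message until the next '-----' or '=====' separator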
186 186 tags = {} # dictionary of revisions on current file with their tags
187 187 state = 0
188 188 store = False # set when a new record can be appended
189 189
190 190 cmd = [util.shellquote(arg) for arg in cmd]
191 191 ui.note(_("running %s\n") % (' '.join(cmd)))
192 192 ui.debug(_("prefix=%r directory=%r root=%r\n") % (prefix, directory, root))
193 193
194 194 for line in util.popen(' '.join(cmd)):
195 195 if line.endswith('\n'):
196 196 line = line[:-1]
197 197 #ui.debug('state=%d line=%r\n' % (state, line))
198 198
199 199 if state == 0:
200 200 # initial state, consume input until we see 'RCS file'
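            # When parsing rlog output, the working file name is derived from the
            # RCS path by dropping the trailing ',v', the repository prefix, any
            # leading '/', and any 'Attic/' component (CVS moves removed files to
            # the Attic); with plain 'log' we read it from 'Working file:' instead.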
201 201 match = re_00.match(line)
202 202 if match:
203 203 rcs = match.group(1)
204 204 tags = {}
205 205 if rlog:
206 206 filename = util.normpath(rcs[:-2])
207 207 if filename.startswith(prefix):
208 208 filename = filename[len(prefix):]
209 209 if filename.startswith('/'):
210 210 filename = filename[1:]
211 211 if filename.startswith('Attic/'):
212 212 filename = filename[6:]
213 213 else:
214 214 filename = filename.replace('/Attic/', '/')
215 215 state = 2
216 216 continue
217 217 state = 1
218 218 continue
219 219 match = re_01.match(line)
220 220 if match:
221 221 raise Exception(match.group(1))
222 222 match = re_02.match(line)
223 223 if match:
224 224 raise Exception(match.group(2))
225 225 if re_03.match(line):
226 226 raise Exception(line)
227 227
228 228 elif state == 1:
229 229 # expect 'Working file' (only when using log instead of rlog)
230 230 match = re_10.match(line)
231 231 assert match, _('RCS file must be followed by working file')
232 232 filename = util.normpath(match.group(1))
233 233 state = 2
234 234
235 235 elif state == 2:
236 236 # expect 'symbolic names'
237 237 if re_20.match(line):
238 238 state = 3
239 239
240 240 elif state == 3:
241 241 # read the symbolic names and store as tags
242 242 match = re_30.match(line)
243 243 if match:
244 244 rev = [int(x) for x in match.group(2).split('.')]
245 245
246 246 # Convert magic branch number to an odd-numbered one
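                # CVS records a branch tag as a "magic" revision with an extra 0 in
                # the next-to-last position; dropping that 0 leaves the real branch
                # prefix with an odd number of parts, e.g. 1.2.0.4 becomes 1.2.4.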
247 247 revn = len(rev)
248 248 if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
249 249 rev = rev[:-2] + rev[-1:]
250 250 rev = tuple(rev)
251 251
252 252 if rev not in tags:
253 253 tags[rev] = []
254 254 tags[rev].append(match.group(1))
255 255
256 256 elif re_31.match(line):
257 257 state = 5
258 258 elif re_32.match(line):
259 259 state = 0
260 260
261 261 elif state == 4:
262 262 # expecting '------' separator before first revision
263 263 if re_31.match(line):
264 264 state = 5
265 265 else:
266 266 assert not re_32.match(line), _('Must have at least some revisions')
267 267
268 268 elif state == 5:
269 269 # expecting revision number and possibly (ignored) lock indication
270 270 # we create the logentry here from values stored in states 0 to 4,
271 271 # as this state is re-entered for subsequent revisions of a file.
272 272 match = re_50.match(line)
273 273 assert match, _('expected revision number')
274 274 e = logentry(rcs=scache(rcs), file=scache(filename),
275 275 revision=tuple([int(x) for x in match.group(1).split('.')]),
276 276 branches=[], parent=None)
277 277 state = 6
278 278
279 279 elif state == 6:
280 280 # expecting date, author, state, lines changed
281 281 match = re_60.match(line)
282 282 assert match, _('revision must be followed by date line')
283 283 d = match.group(1)
284 284 if d[2] == '/':
285 285 # Y2K
286 286 d = '19' + d
287 287
288 288 if len(d.split()) != 3:
289 289 # cvs log dates always in GMT
290 290 d = d + ' UTC'
291 291 e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S'])
292 292 e.author = scache(match.group(2))
293 293 e.dead = match.group(3).lower() == 'dead'
294 294
295 295 if match.group(5):
296 296 if match.group(6):
297 297 e.lines = (int(match.group(5)), int(match.group(6)))
298 298 else:
299 299 e.lines = (int(match.group(5)), 0)
300 300 elif match.group(6):
301 301 e.lines = (0, int(match.group(6)))
302 302 else:
303 303 e.lines = None
304 304 e.comment = []
305 305 state = 7
306 306
307 307 elif state == 7:
308 308 # read the revision numbers of branches that start at this revision
309 309 # or store the commit log message otherwise
310 310 m = re_70.match(line)
311 311 if m:
312 312 e.branches = [tuple([int(y) for y in x.strip().split('.')])
313 313 for x in m.group(1).split(';')]
314 314 state = 8
315 315 elif re_31.match(line):
316 316 state = 5
317 317 store = True
318 318 elif re_32.match(line):
319 319 state = 0
320 320 store = True
321 321 else:
322 322 e.comment.append(line)
323 323
324 324 elif state == 8:
325 325 # store commit log message
326 326 if re_31.match(line):
327 327 state = 5
328 328 store = True
329 329 elif re_32.match(line):
330 330 state = 0
331 331 store = True
332 332 else:
333 333 e.comment.append(line)
334 334
335 335 if store:
336 336 # clean up the results and save in the log.
337 337 store = False
338 338 e.tags = util.sort([scache(x) for x in tags.get(e.revision, [])])
339 339 e.comment = scache('\n'.join(e.comment))
340 340
341 341 revn = len(e.revision)
342 342 if revn > 3 and (revn % 2) == 0:
343 343 e.branch = tags.get(e.revision[:-1], [None])[0]
344 344 else:
345 345 e.branch = None
346 346
347 347 log.append(e)
348 348
349 349 if len(log) % 100 == 0:
350 350 ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')
351 351
352 352 listsort(log, key=lambda x:(x.rcs, x.revision))
353 353
354 354 # find parent revisions of individual files
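    # 'versions' remembers the last revision seen per (rcs file, branch prefix).
    # As the log is sorted by revision, an entry's parent is either that previous
    # revision or, for the first revision on a branch, the branch point; e.g. the
    # parent of 1.2.2.1 is 1.2.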
355 355 versions = {}
356 356 for e in log:
357 357 branch = e.revision[:-1]
358 358 p = versions.get((e.rcs, branch), None)
359 359 if p is None:
360 360 p = e.revision[:-2]
361 361 e.parent = p
362 362 versions[(e.rcs, branch)] = e.revision
363 363
364 364 # update the log cache
365 365 if cache:
366 366 if log:
367 367 # join up the old and new logs
368 368 listsort(log, key=lambda x:x.date)
369 369
370 370 if oldlog and oldlog[-1].date >= log[0].date:
371 371 raise logerror('Log cache overlaps with new log entries,'
372 372 ' re-run without cache.')
373 373
374 374 log = oldlog + log
375 375
376 376 # write the new cachefile
377 377 ui.note(_('writing cvs log cache %s\n') % cachefile)
378 378 pickle.dump(log, file(cachefile, 'w'))
379 379 else:
380 380 log = oldlog
381 381
382 382 ui.status(_('%d log entries\n') % len(log))
383 383
384 384 return log
385 385
386 386
387 387 class changeset(object):
388 388 '''Class changeset has the following attributes:
389 389 .author - author name as CVS knows it
390 390 .branch - name of branch this changeset is on, or None
391 391 .comment - commit message
392 392 .date - the commit date as a (time,tz) tuple
393 393 .entries - list of logentry objects in this changeset
394 394 .parents - list of one or two parent changesets
395 395 .tags - list of tags on this changeset
396 396 '''
397 397 def __init__(self, **entries):
398 398 self.__dict__.update(entries)
399 399
400 400 def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
401 401 '''Convert log into changesets.'''
402 402
403 403 ui.status(_('creating changesets\n'))
404 404
405 405 # Merge changesets
406 406
407 407 listsort(log, key=lambda x:(x.comment, x.author, x.branch, x.date))
408 408
409 409 changesets = []
410 410 files = {}
411 411 c = None
412 412 for i, e in enumerate(log):
413 413
414 414 # Check if log entry belongs to the current changeset or not.
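        # An entry is folded into the current changeset only if it has the same
        # comment, author and branch, is dated no more than 'fuzz' seconds after
        # the changeset's latest entry, and touches a file not already present in
        # it; otherwise a new changeset is started.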
415 415 if not (c and
416 416 e.comment == c.comment and
417 417 e.author == c.author and
418 418 e.branch == c.branch and
419 419 ((c.date[0] + c.date[1]) <=
420 420 (e.date[0] + e.date[1]) <=
421 421 (c.date[0] + c.date[1]) + fuzz) and
422 422 e.file not in files):
423 423 c = changeset(comment=e.comment, author=e.author,
424 424 branch=e.branch, date=e.date, entries=[])
425 425 changesets.append(c)
426 426 files = {}
427 427 if len(changesets) % 100 == 0:
428 428 t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
429 429 ui.status(util.ellipsis(t, 80) + '\n')
430 430
431 431 c.entries.append(e)
432 432 files[e.file] = True
433 433 c.date = e.date # changeset date is date of latest commit in it
434 434
435 435 # Sort files in each changeset
436 436
437 437 for c in changesets:
438 438 def pathcompare(l, r):
439 439 'Mimic cvsps sorting order'
440 440 l = l.split('/')
441 441 r = r.split('/')
442 442 nl = len(l)
443 443 nr = len(r)
444 444 n = min(nl, nr)
445 445 for i in range(n):
446 446 if i + 1 == nl and nl < nr:
447 447 return -1
448 448 elif i + 1 == nr and nl > nr:
449 449 return +1
450 450 elif l[i] < r[i]:
451 451 return -1
452 452 elif l[i] > r[i]:
453 453 return +1
454 454 return 0
455 455 def entitycompare(l, r):
456 456 return pathcompare(l.file, r.file)
457 457
458 458 c.entries.sort(entitycompare)
459 459
460 460 # Sort changesets by date
461 461
462 462 def cscmp(l, r):
463 463 d = sum(l.date) - sum(r.date)
464 464 if d:
465 465 return d
466 466
467 467 # detect vendor branches and initial commits on a branch
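        # When two changesets have equal dates, the one containing a file revision
        # that is the parent of a revision in the other is ordered first, so branch
        # points sort before the first commits on their branches.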
468 468 le = {}
469 469 for e in l.entries:
470 470 le[e.rcs] = e.revision
471 471 re = {}
472 472 for e in r.entries:
473 473 re[e.rcs] = e.revision
474 474
475 475 d = 0
476 476 for e in l.entries:
477 477 if re.get(e.rcs, None) == e.parent:
478 478 assert not d
479 479 d = 1
480 480 break
481 481
482 482 for e in r.entries:
483 483 if le.get(e.rcs, None) == e.parent:
484 484 assert not d
485 485 d = -1
486 486 break
487 487
488 488 return d
489 489
490 490 changesets.sort(cscmp)
491 491
492 492 # Collect tags
493 493
494 494 globaltags = {}
495 495 for c in changesets:
496 496 tags = {}
497 497 for e in c.entries:
498 498 for tag in e.tags:
499 499 # remember which is the latest changeset to have this tag
500 500 globaltags[tag] = c
501 501
502 502 for c in changesets:
503 503 tags = {}
504 504 for e in c.entries:
505 505 for tag in e.tags:
506 506 tags[tag] = True
507 507 # remember tags only if this is the latest changeset to have it
508 508 c.tags = util.sort([tag for tag in tags if globaltags[tag] is c])
509 509
510 510 # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
511 511 # by inserting dummy changesets with two parents, and handle
512 512 # {{mergefrombranch BRANCHNAME}} by setting two parents.
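    # For instance, a commit message containing '{{mergetobranch HEAD}}' (a
    # hypothetical example) makes us insert a synthetic changeset on that branch
    # with two parents, while '{{mergefrombranch BRANCHNAME}}' just adds the
    # latest changeset of the named branch as a second parent.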
513 513
514 514 if mergeto is None:
515 515 mergeto = r'{{mergetobranch ([-\w]+)}}'
516 516 if mergeto:
517 517 mergeto = re.compile(mergeto)
518 518
519 519 if mergefrom is None:
520 520 mergefrom = r'{{mergefrombranch ([-\w]+)}}'
521 521 if mergefrom:
522 522 mergefrom = re.compile(mergefrom)
523 523
524 524 versions = {} # changeset index where we saw any particular file version
525 525 branches = {} # changeset index where we saw a branch
526 526 n = len(changesets)
527 527 i = 0
528 528 while i<n:
529 529 c = changesets[i]
530 530
531 531 for f in c.entries:
532 532 versions[(f.rcs, f.revision)] = i
533 533
534 534 p = None
535 535 if c.branch in branches:
536 536 p = branches[c.branch]
537 537 else:
538 538 for f in c.entries:
539 539 p = max(p, versions.get((f.rcs, f.parent), None))
540 540
541 541 c.parents = []
542 542 if p is not None:
543 543 c.parents.append(changesets[p])
544 544
545 545 if mergefrom:
546 546 m = mergefrom.search(c.comment)
547 547 if m:
548 548 m = m.group(1)
549 549 if m == 'HEAD':
550 550 m = None
551 551 if m in branches and c.branch != m:
552 552 c.parents.append(changesets[branches[m]])
553 553
554 554 if mergeto:
555 555 m = mergeto.search(c.comment)
556 556 if m:
557 557 try:
558 558 m = m.group(1)
559 559 if m == 'HEAD':
560 560 m = None
561 561 except:
562 562 m = None # if no group found then merge to HEAD
563 563 if m in branches and c.branch != m:
564 564 # insert empty changeset for merge
565 565 cc = changeset(author=c.author, branch=m, date=c.date,
566 566 comment='convert-repo: CVS merge from branch %s' % c.branch,
567 567 entries=[], tags=[], parents=[changesets[branches[m]], c])
568 568 changesets.insert(i + 1, cc)
569 569 branches[m] = i + 1
570 570
571 571 # adjust our loop counters now we have inserted a new entry
572 572 n += 1
573 573 i += 2
574 574 continue
575 575
576 576 branches[c.branch] = i
577 577 i += 1
578 578
579 579 # Number changesets
580 580
581 581 for i, c in enumerate(changesets):
582 582 c.id = i + 1
583 583
584 584 ui.status(_('%d changeset entries\n') % len(changesets))
585 585
586 586 return changesets
587 587
588 588
589 589 def debugcvsps(ui, *args, **opts):
590 '''Read CVS rlog for current directory or named path in repository, and
590 '''Read CVS rlog for current directory or named path in repository, and
591 591 convert the log to changesets based on matching commit log entries and dates.'''
592 592
593 593 if opts["new_cache"]:
594 594 cache = "write"
595 595 elif opts["update_cache"]:
596 596 cache = "update"
597 597 else:
598 598 cache = None
599 599
600 600 revisions = opts["revisions"]
601 601
602 602 try:
603 603 if args:
604 604 log = []
605 605 for d in args:
606 606 log += createlog(ui, d, root=opts["root"], cache=cache)
607 607 else:
608 608 log = createlog(ui, root=opts["root"], cache=cache)
609 609 except logerror, e:
610 610 ui.write("%r\n"%e)
611 611 return
612 612
613 613 changesets = createchangeset(ui, log, opts["fuzz"])
614 614 del log
615 615
616 616 # Print changesets (optionally filtered)
617 617
618 618 off = len(revisions)
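    # Output is suppressed while 'off' is true, i.e. until the changeset matching
    # the first entry of opts["revisions"] has been seen; printing then stops once
    # the one matching the second entry (if any) has been written.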
619 619 branches = {} # latest version number in each branch
620 620 ancestors = {} # parent branch
621 621 for cs in changesets:
622 622
623 623 if opts["ancestors"]:
624 624 if cs.branch not in branches and cs.parents and cs.parents[0].id:
625 625 ancestors[cs.branch] = changesets[cs.parents[0].id-1].branch, cs.parents[0].id
626 626 branches[cs.branch] = cs.id
627 627
628 628 # limit by branches
629 629 if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
630 630 continue
631 631
632 632 if not off:
633 633 # Note: trailing spaces on several lines here are needed to have
634 634 # bug-for-bug compatibility with cvsps.
635 635 ui.write('---------------------\n')
636 636 ui.write('PatchSet %d \n' % cs.id)
637 637 ui.write('Date: %s\n' % util.datestr(cs.date, '%Y/%m/%d %H:%M:%S %1%2'))
638 638 ui.write('Author: %s\n' % cs.author)
639 639 ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
640 640 ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags)>1],
641 641 ','.join(cs.tags) or '(none)'))
642 642 if opts["parents"] and cs.parents:
643 643 if len(cs.parents)>1:
644 644 ui.write('Parents: %s\n' % (','.join([str(p.id) for p in cs.parents])))
645 645 else:
646 646 ui.write('Parent: %d\n' % cs.parents[0].id)
647 647
648 648 if opts["ancestors"]:
649 649 b = cs.branch
650 650 r = []
651 651 while b:
652 652 b, c = ancestors[b]
653 653 r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
654 654 if r:
655 655 ui.write('Ancestors: %s\n' % (','.join(r)))
656 656
657 657 ui.write('Log:\n')
658 658 ui.write('%s\n\n' % cs.comment)
659 659 ui.write('Members: \n')
660 660 for f in cs.entries:
661 661 fn = f.file
662 662 if fn.startswith(opts["prefix"]):
663 663 fn = fn[len(opts["prefix"]):]
664 664 ui.write('\t%s:%s->%s%s \n' % (fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
665 665 '.'.join([str(x) for x in f.revision]), ['', '(DEAD)'][f.dead]))
666 666 ui.write('\n')
667 667
668 668 # have we seen the start tag?
669 669 if revisions and off:
670 670 if revisions[0] == str(cs.id) or \
671 671 revisions[0] in cs.tags:
672 672 off = False
673 673
674 674 # see if we reached the end tag
675 675 if len(revisions)>1 and not off:
676 676 if revisions[1] == str(cs.id) or \
677 677 revisions[1] in cs.tags:
678 678 break