##// END OF EJS Templates
cvsps: cvs log loop uses lookahead to avoid misleading text...
David Champion -
r7593:9811cc67 default
parent child Browse files
Show More
@@ -1,678 +1,684 b''
1 1 #
2 2 # Mercurial built-in replacement for cvsps.
3 3 #
4 4 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
5 5 #
6 6 # This software may be used and distributed according to the terms
7 7 # of the GNU General Public License, incorporated herein by reference.
8 8
9 9 import os
10 10 import re
11 11 import cPickle as pickle
12 12 from mercurial import util
13 13 from mercurial.i18n import _
14 14
def listsort(list, key):
    "helper to sort by key in Python 2.3"
    # Python 2.4+ supports list.sort(key=...); Python 2.3 raises
    # TypeError for the keyword, so fall back to an equivalent cmp
    # function (the fallback is never reached on modern interpreters).
    try:
        list.sort(key=key)
    except TypeError:
        list.sort(lambda l, r: cmp(key(l), key(r)))
21 21
class logentry(object):
    '''Class logentry has the following attributes:
        .author    - author name as CVS knows it
        .branch    - name of branch this revision is on
        .branches  - revision tuple of branches starting at this revision
        .comment   - commit message
        .date      - the commit date as a (time, tz) tuple
        .dead      - true if file revision is dead
        .file      - Name of file
        .lines     - a tuple (+lines, -lines) or None
        .parent    - Previous revision of this entry
        .rcs       - name of file as returned from CVS
        .revision  - revision number as tuple
        .tags      - list of tags on the file
    '''
    def __init__(self, **entries):
        # Attributes are passed as keyword arguments and stored verbatim;
        # callers fill in the fields listed in the class docstring.
        self.__dict__.update(entries)
39 39
class logerror(Exception):
    '''Raised for unrecoverable problems while collecting or caching
    the CVS log (e.g. not a CVS sandbox, or a stale log cache).'''
    pass
42 42
def getrepopath(cvspath):
    """Return the repository path from a CVS path.

    >>> getrepopath('/foo/bar')
    '/foo/bar'
    >>> getrepopath('c:/foo/bar')
    'c:/foo/bar'
    >>> getrepopath(':pserver:10/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:10c:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:c:/foo/bar')
    'c:/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
    'c:/foo/bar'
    """
    # According to CVS manual, CVS paths are expressed like:
    # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
    #
    # Unfortunately, Windows absolute paths start with a drive letter
    # like 'c:' making it harder to parse. Here we assume that drive
    # letters are only one character long and any CVS component before
    # the repository path is at least 2 characters long, and use this
    # to disambiguate.
    parts = cvspath.split(':')
    if len(parts) == 1:
        return parts[0]
    # Here there is an ambiguous case if we have a port number
    # immediately followed by a Windows drive letter. We assume this
    # never happens and decide it must be CVS path component,
    # therefore ignoring it.
    if len(parts[-2]) > 1:
        return parts[-1].lstrip('0123456789')
    return parts[-2] + ':' + parts[-1]
81 81
82 82 def createlog(ui, directory=None, root="", rlog=True, cache=None):
83 83 '''Collect the CVS rlog'''
84 84
85 85 # Because we store many duplicate commit log messages, reusing strings
86 86 # saves a lot of memory and pickle storage space.
87 87 _scache = {}
88 88 def scache(s):
89 89 "return a shared version of a string"
90 90 return _scache.setdefault(s, s)
91 91
92 92 ui.status(_('collecting CVS rlog\n'))
93 93
94 94 log = [] # list of logentry objects containing the CVS state
95 95
96 96 # patterns to match in CVS (r)log output, by state of use
97 97 re_00 = re.compile('RCS file: (.+)$')
98 98 re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
99 99 re_02 = re.compile('cvs (r?log|server): (.+)\n$')
100 100 re_03 = re.compile("(Cannot access.+CVSROOT)|(can't create temporary directory.+)$")
101 101 re_10 = re.compile('Working file: (.+)$')
102 102 re_20 = re.compile('symbolic names:')
103 103 re_30 = re.compile('\t(.+): ([\\d.]+)$')
104 104 re_31 = re.compile('----------------------------$')
105 105 re_32 = re.compile('=============================================================================$')
106 106 re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
107 107 re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?')
108 108 re_70 = re.compile('branches: (.+);$')
109 109
110 110 prefix = '' # leading path to strip of what we get from CVS
111 111
112 112 if directory is None:
113 113 # Current working directory
114 114
115 115 # Get the real directory in the repository
116 116 try:
117 117 prefix = file(os.path.join('CVS','Repository')).read().strip()
118 118 if prefix == ".":
119 119 prefix = ""
120 120 directory = prefix
121 121 except IOError:
122 122 raise logerror('Not a CVS sandbox')
123 123
124 124 if prefix and not prefix.endswith(os.sep):
125 125 prefix += os.sep
126 126
127 127 # Use the Root file in the sandbox, if it exists
128 128 try:
129 129 root = file(os.path.join('CVS','Root')).read().strip()
130 130 except IOError:
131 131 pass
132 132
133 133 if not root:
134 134 root = os.environ.get('CVSROOT', '')
135 135
136 136 # read log cache if one exists
137 137 oldlog = []
138 138 date = None
139 139
140 140 if cache:
141 141 cachedir = os.path.expanduser('~/.hg.cvsps')
142 142 if not os.path.exists(cachedir):
143 143 os.mkdir(cachedir)
144 144
145 145 # The cvsps cache pickle needs a uniquified name, based on the
146 146 # repository location. The address may have all sort of nasties
147 147 # in it, slashes, colons and such. So here we take just the
148 148 # alphanumerics, concatenated in a way that does not mix up the
149 149 # various components, so that
150 150 # :pserver:user@server:/path
151 151 # and
152 152 # /pserver/user/server/path
153 153 # are mapped to different cache file names.
154 154 cachefile = root.split(":") + [directory, "cache"]
155 155 cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
156 156 cachefile = os.path.join(cachedir,
157 157 '.'.join([s for s in cachefile if s]))
158 158
159 159 if cache == 'update':
160 160 try:
161 161 ui.note(_('reading cvs log cache %s\n') % cachefile)
162 162 oldlog = pickle.load(file(cachefile))
163 163 ui.note(_('cache has %d log entries\n') % len(oldlog))
164 164 except Exception, e:
165 165 ui.note(_('error reading cache: %r\n') % e)
166 166
167 167 if oldlog:
168 168 date = oldlog[-1].date # last commit date as a (time,tz) tuple
169 169 date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
170 170
171 171 # build the CVS commandline
172 172 cmd = ['cvs', '-q']
173 173 if root:
174 174 cmd.append('-d%s' % root)
175 175 p = util.normpath(getrepopath(root))
176 176 if not p.endswith('/'):
177 177 p += '/'
178 178 prefix = p + util.normpath(prefix)
179 179 cmd.append(['log', 'rlog'][rlog])
180 180 if date:
181 181 # no space between option and date string
182 182 cmd.append('-d>%s' % date)
183 183 cmd.append(directory)
184 184
185 185 # state machine begins here
186 186 tags = {} # dictionary of revisions on current file with their tags
187 187 state = 0
188 188 store = False # set when a new record can be appended
189 189
190 190 cmd = [util.shellquote(arg) for arg in cmd]
191 191 ui.note(_("running %s\n") % (' '.join(cmd)))
192 192 ui.debug(_("prefix=%r directory=%r root=%r\n") % (prefix, directory, root))
193 193
194 for line in util.popen(' '.join(cmd)):
194 pfp = util.popen(' '.join(cmd))
195 peek = pfp.readline()
196 while True:
197 line = peek
198 if line == '':
199 break
200 peek = pfp.readline()
195 201 if line.endswith('\n'):
196 202 line = line[:-1]
197 203 #ui.debug('state=%d line=%r\n' % (state, line))
198 204
199 205 if state == 0:
200 206 # initial state, consume input until we see 'RCS file'
201 207 match = re_00.match(line)
202 208 if match:
203 209 rcs = match.group(1)
204 210 tags = {}
205 211 if rlog:
206 212 filename = util.normpath(rcs[:-2])
207 213 if filename.startswith(prefix):
208 214 filename = filename[len(prefix):]
209 215 if filename.startswith('/'):
210 216 filename = filename[1:]
211 217 if filename.startswith('Attic/'):
212 218 filename = filename[6:]
213 219 else:
214 220 filename = filename.replace('/Attic/', '/')
215 221 state = 2
216 222 continue
217 223 state = 1
218 224 continue
219 225 match = re_01.match(line)
220 226 if match:
221 227 raise Exception(match.group(1))
222 228 match = re_02.match(line)
223 229 if match:
224 230 raise Exception(match.group(2))
225 231 if re_03.match(line):
226 232 raise Exception(line)
227 233
228 234 elif state == 1:
229 235 # expect 'Working file' (only when using log instead of rlog)
230 236 match = re_10.match(line)
231 237 assert match, _('RCS file must be followed by working file')
232 238 filename = util.normpath(match.group(1))
233 239 state = 2
234 240
235 241 elif state == 2:
236 242 # expect 'symbolic names'
237 243 if re_20.match(line):
238 244 state = 3
239 245
240 246 elif state == 3:
241 247 # read the symbolic names and store as tags
242 248 match = re_30.match(line)
243 249 if match:
244 250 rev = [int(x) for x in match.group(2).split('.')]
245 251
246 252 # Convert magic branch number to an odd-numbered one
247 253 revn = len(rev)
248 254 if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
249 255 rev = rev[:-2] + rev[-1:]
250 256 rev = tuple(rev)
251 257
252 258 if rev not in tags:
253 259 tags[rev] = []
254 260 tags[rev].append(match.group(1))
255 261
256 262 elif re_31.match(line):
257 263 state = 5
258 264 elif re_32.match(line):
259 265 state = 0
260 266
261 267 elif state == 4:
262 268 # expecting '------' separator before first revision
263 269 if re_31.match(line):
264 270 state = 5
265 271 else:
266 272 assert not re_32.match(line), _('Must have at least some revisions')
267 273
268 274 elif state == 5:
269 275 # expecting revision number and possibly (ignored) lock indication
270 276 # we create the logentry here from values stored in states 0 to 4,
271 277 # as this state is re-entered for subsequent revisions of a file.
272 278 match = re_50.match(line)
273 279 assert match, _('expected revision number')
274 280 e = logentry(rcs=scache(rcs), file=scache(filename),
275 281 revision=tuple([int(x) for x in match.group(1).split('.')]),
276 282 branches=[], parent=None)
277 283 state = 6
278 284
279 285 elif state == 6:
280 286 # expecting date, author, state, lines changed
281 287 match = re_60.match(line)
282 288 assert match, _('revision must be followed by date line')
283 289 d = match.group(1)
284 290 if d[2] == '/':
285 291 # Y2K
286 292 d = '19' + d
287 293
288 294 if len(d.split()) != 3:
289 295 # cvs log dates always in GMT
290 296 d = d + ' UTC'
291 297 e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S'])
292 298 e.author = scache(match.group(2))
293 299 e.dead = match.group(3).lower() == 'dead'
294 300
295 301 if match.group(5):
296 302 if match.group(6):
297 303 e.lines = (int(match.group(5)), int(match.group(6)))
298 304 else:
299 305 e.lines = (int(match.group(5)), 0)
300 306 elif match.group(6):
301 307 e.lines = (0, int(match.group(6)))
302 308 else:
303 309 e.lines = None
304 310 e.comment = []
305 311 state = 7
306 312
307 313 elif state == 7:
308 314 # read the revision numbers of branches that start at this revision
309 315 # or store the commit log message otherwise
310 316 m = re_70.match(line)
311 317 if m:
312 318 e.branches = [tuple([int(y) for y in x.strip().split('.')])
313 319 for x in m.group(1).split(';')]
314 320 state = 8
315 elif re_31.match(line):
321 elif re_31.match(line) and re_50.match(peek):
316 322 state = 5
317 323 store = True
318 324 elif re_32.match(line):
319 325 state = 0
320 326 store = True
321 327 else:
322 328 e.comment.append(line)
323 329
324 330 elif state == 8:
325 331 # store commit log message
326 332 if re_31.match(line):
327 333 state = 5
328 334 store = True
329 335 elif re_32.match(line):
330 336 state = 0
331 337 store = True
332 338 else:
333 339 e.comment.append(line)
334 340
335 341 if store:
336 342 # clean up the results and save in the log.
337 343 store = False
338 344 e.tags = util.sort([scache(x) for x in tags.get(e.revision, [])])
339 345 e.comment = scache('\n'.join(e.comment))
340 346
341 347 revn = len(e.revision)
342 348 if revn > 3 and (revn % 2) == 0:
343 349 e.branch = tags.get(e.revision[:-1], [None])[0]
344 350 else:
345 351 e.branch = None
346 352
347 353 log.append(e)
348 354
349 355 if len(log) % 100 == 0:
350 356 ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')
351 357
352 358 listsort(log, key=lambda x:(x.rcs, x.revision))
353 359
354 360 # find parent revisions of individual files
355 361 versions = {}
356 362 for e in log:
357 363 branch = e.revision[:-1]
358 364 p = versions.get((e.rcs, branch), None)
359 365 if p is None:
360 366 p = e.revision[:-2]
361 367 e.parent = p
362 368 versions[(e.rcs, branch)] = e.revision
363 369
364 370 # update the log cache
365 371 if cache:
366 372 if log:
367 373 # join up the old and new logs
368 374 listsort(log, key=lambda x:x.date)
369 375
370 376 if oldlog and oldlog[-1].date >= log[0].date:
371 377 raise logerror('Log cache overlaps with new log entries,'
372 378 ' re-run without cache.')
373 379
374 380 log = oldlog + log
375 381
376 382 # write the new cachefile
377 383 ui.note(_('writing cvs log cache %s\n') % cachefile)
378 384 pickle.dump(log, file(cachefile, 'w'))
379 385 else:
380 386 log = oldlog
381 387
382 388 ui.status(_('%d log entries\n') % len(log))
383 389
384 390 return log
385 391
386 392
class changeset(object):
    '''Class changeset has the following attributes:
        .author   - author name as CVS knows it
        .branch   - name of branch this changeset is on, or None
        .comment  - commit message
        .date     - the commit date as a (time,tz) tuple
        .entries  - list of logentry objects in this changeset
        .parents  - list of one or two parent changesets
        .tags     - list of tags on this changeset
    '''
    def __init__(self, **entries):
        # Attributes are passed as keyword arguments and stored verbatim;
        # callers fill in the fields listed in the class docstring.
        self.__dict__.update(entries)
399 405
def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
    '''Convert log into changesets.

    Log entries sharing comment, author and branch whose dates lie
    within "fuzz" seconds of each other are merged into one changeset.
    mergefrom/mergeto are regex patterns (None selects the defaults)
    recognizing {{mergefrombranch BRANCH}} / {{mergetobranch BRANCH}}
    markers in commit messages, used to synthesize merge parents.
    '''

    ui.status(_('creating changesets\n'))

    # Merge changesets
    listsort(log, key=lambda x:(x.comment, x.author, x.branch, x.date))

    changesets = []
    files = {}
    c = None
    for i, e in enumerate(log):

        # Check if log entry belongs to the current changeset or not.
        if not (c and
                  e.comment == c.comment and
                  e.author == c.author and
                  e.branch == c.branch and
                  ((c.date[0] + c.date[1]) <=
                   (e.date[0] + e.date[1]) <=
                   (c.date[0] + c.date[1]) + fuzz) and
                  e.file not in files):
            c = changeset(comment=e.comment, author=e.author,
                          branch=e.branch, date=e.date, entries=[])
            changesets.append(c)
            files = {}
            if len(changesets) % 100 == 0:
                t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
                ui.status(util.ellipsis(t, 80) + '\n')

        c.entries.append(e)
        files[e.file] = True
        c.date = e.date       # changeset date is date of latest commit in it

    # Sort files in each changeset
    for c in changesets:
        def pathcompare(l, r):
            'Mimic cvsps sorting order'
            l = l.split('/')
            r = r.split('/')
            nl = len(l)
            nr = len(r)
            n = min(nl, nr)
            for i in range(n):
                if i + 1 == nl and nl < nr:
                    return -1
                elif i + 1 == nr and nl > nr:
                    return +1
                elif l[i] < r[i]:
                    return -1
                elif l[i] > r[i]:
                    return +1
            return 0
        def entitycompare(l, r):
            return pathcompare(l.file, r.file)

        c.entries.sort(entitycompare)

    # Sort changesets by date
    def cscmp(l, r):
        d = sum(l.date) - sum(r.date)
        if d:
            return d

        # detect vendor branches and initial commits on a branch
        # (lrevs/rrevs map rcs file -> revision for each side; the dict on
        # the right-hand side was formerly named 're', shadowing the module)
        lrevs = {}
        for e in l.entries:
            lrevs[e.rcs] = e.revision
        rrevs = {}
        for e in r.entries:
            rrevs[e.rcs] = e.revision

        d = 0
        for e in l.entries:
            if rrevs.get(e.rcs, None) == e.parent:
                assert not d
                d = 1
                break

        for e in r.entries:
            if lrevs.get(e.rcs, None) == e.parent:
                assert not d
                d = -1
                break

        return d

    changesets.sort(cscmp)

    # Collect tags
    globaltags = {}
    for c in changesets:
        tags = {}
        for e in c.entries:
            for tag in e.tags:
                # remember which is the latest changeset to have this tag
                globaltags[tag] = c

    for c in changesets:
        tags = {}
        for e in c.entries:
            for tag in e.tags:
                tags[tag] = True
        # remember tags only if this is the latest changeset to have it
        c.tags = util.sort([tag for tag in tags if globaltags[tag] is c])

    # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
    # by inserting dummy changesets with two parents, and handle
    # {{mergefrombranch BRANCHNAME}} by setting two parents.

    if mergeto is None:
        mergeto = r'{{mergetobranch ([-\w]+)}}'
    if mergeto:
        mergeto = re.compile(mergeto)

    if mergefrom is None:
        mergefrom = r'{{mergefrombranch ([-\w]+)}}'
    if mergefrom:
        mergefrom = re.compile(mergefrom)

    versions = {}    # changeset index where we saw any particular file version
    branches = {}    # changeset index where we saw a branch
    n = len(changesets)
    i = 0
    while i<n:
        c = changesets[i]

        for f in c.entries:
            versions[(f.rcs, f.revision)] = i

        p = None
        if c.branch in branches:
            p = branches[c.branch]
        else:
            for f in c.entries:
                p = max(p, versions.get((f.rcs, f.parent), None))

        c.parents = []
        if p is not None:
            c.parents.append(changesets[p])

        if mergefrom:
            m = mergefrom.search(c.comment)
            if m:
                m = m.group(1)
                if m == 'HEAD':
                    m = None
                if m in branches and c.branch != m:
                    c.parents.append(changesets[branches[m]])

        if mergeto:
            m = mergeto.search(c.comment)
            if m:
                try:
                    m = m.group(1)
                    if m == 'HEAD':
                        m = None
                except:
                    m = None # if no group found then merge to HEAD
                if m in branches and c.branch != m:
                    # insert empty changeset for merge
                    cc = changeset(author=c.author, branch=m, date=c.date,
                            comment='convert-repo: CVS merge from branch %s' % c.branch,
                            entries=[], tags=[], parents=[changesets[branches[m]], c])
                    changesets.insert(i + 1, cc)
                    branches[m] = i + 1

                    # adjust our loop counters now we have inserted a new entry
                    n += 1
                    i += 2
                    continue

        branches[c.branch] = i
        i += 1

    # Number changesets

    for i, c in enumerate(changesets):
        c.id = i + 1

    ui.status(_('%d changeset entries\n') % len(changesets))

    return changesets
587 593
588 594
589 595 def debugcvsps(ui, *args, **opts):
590 596 '''Read CVS rlog for current directory or named path in repository, and
591 597 convert the log to changesets based on matching commit log entries and dates.'''
592 598
593 599 if opts["new_cache"]:
594 600 cache = "write"
595 601 elif opts["update_cache"]:
596 602 cache = "update"
597 603 else:
598 604 cache = None
599 605
600 606 revisions = opts["revisions"]
601 607
602 608 try:
603 609 if args:
604 610 log = []
605 611 for d in args:
606 612 log += createlog(ui, d, root=opts["root"], cache=cache)
607 613 else:
608 614 log = createlog(ui, root=opts["root"], cache=cache)
609 615 except logerror, e:
610 616 ui.write("%r\n"%e)
611 617 return
612 618
613 619 changesets = createchangeset(ui, log, opts["fuzz"])
614 620 del log
615 621
616 622 # Print changesets (optionally filtered)
617 623
618 624 off = len(revisions)
619 625 branches = {} # latest version number in each branch
620 626 ancestors = {} # parent branch
621 627 for cs in changesets:
622 628
623 629 if opts["ancestors"]:
624 630 if cs.branch not in branches and cs.parents and cs.parents[0].id:
625 631 ancestors[cs.branch] = changesets[cs.parents[0].id-1].branch, cs.parents[0].id
626 632 branches[cs.branch] = cs.id
627 633
628 634 # limit by branches
629 635 if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
630 636 continue
631 637
632 638 if not off:
633 639 # Note: trailing spaces on several lines here are needed to have
634 640 # bug-for-bug compatibility with cvsps.
635 641 ui.write('---------------------\n')
636 642 ui.write('PatchSet %d \n' % cs.id)
637 643 ui.write('Date: %s\n' % util.datestr(cs.date, '%Y/%m/%d %H:%M:%S %1%2'))
638 644 ui.write('Author: %s\n' % cs.author)
639 645 ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
640 646 ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags)>1],
641 647 ','.join(cs.tags) or '(none)'))
642 648 if opts["parents"] and cs.parents:
643 649 if len(cs.parents)>1:
644 650 ui.write('Parents: %s\n' % (','.join([str(p.id) for p in cs.parents])))
645 651 else:
646 652 ui.write('Parent: %d\n' % cs.parents[0].id)
647 653
648 654 if opts["ancestors"]:
649 655 b = cs.branch
650 656 r = []
651 657 while b:
652 658 b, c = ancestors[b]
653 659 r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
654 660 if r:
655 661 ui.write('Ancestors: %s\n' % (','.join(r)))
656 662
657 663 ui.write('Log:\n')
658 664 ui.write('%s\n\n' % cs.comment)
659 665 ui.write('Members: \n')
660 666 for f in cs.entries:
661 667 fn = f.file
662 668 if fn.startswith(opts["prefix"]):
663 669 fn = fn[len(opts["prefix"]):]
664 670 ui.write('\t%s:%s->%s%s \n' % (fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
665 671 '.'.join([str(x) for x in f.revision]), ['', '(DEAD)'][f.dead]))
666 672 ui.write('\n')
667 673
668 674 # have we seen the start tag?
669 675 if revisions and off:
670 676 if revisions[0] == str(cs.id) or \
671 677 revisions[0] in cs.tags:
672 678 off = False
673 679
674 680 # see if we reached the end tag
675 681 if len(revisions)>1 and not off:
676 682 if revisions[1] == str(cs.id) or \
677 683 revisions[1] in cs.tags:
678 684 break
General Comments 0
You need to be logged in to leave comments. Login now