##// END OF EJS Templates
issue1578: fix crash: do not use synthetic changesets as merge parents.
Greg Ward -
r7950:9bbcfa89 default
parent child Browse files
Show More
@@ -1,740 +1,741
1 1 #
2 2 # Mercurial built-in replacement for cvsps.
3 3 #
4 4 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
5 5 #
6 6 # This software may be used and distributed according to the terms
7 7 # of the GNU General Public License, incorporated herein by reference.
8 8
9 9 import os
10 10 import re
11 11 import cPickle as pickle
12 12 from mercurial import util
13 13 from mercurial.i18n import _
14 14
def listsort(list, key):
    "helper to sort by key in Python 2.3"
    try:
        list.sort(key=key)
    except TypeError:
        # Python 2.3's list.sort() does not accept a key= argument;
        # emulate it with an equivalent comparison function.
        def compare(left, right):
            return cmp(key(left), key(right))
        list.sort(compare)
21 21
class logentry(object):
    '''A single CVS file revision, as parsed from (r)log output.

    Attributes:
    .author    - author name as CVS knows it
    .branch    - name of branch this revision is on
    .branches  - revision tuple of branches starting at this revision
    .comment   - commit message
    .date      - the commit date as a (time, tz) tuple
    .dead      - true if file revision is dead
    .file      - Name of file
    .lines     - a tuple (+lines, -lines) or None
    .parent    - Previous revision of this entry
    .rcs       - name of file as returned from CVS
    .revision  - revision number as tuple
    .tags      - list of tags on the file
    .synthetic - is this a synthetic "file ... added on ..." revision?
    '''
    def __init__(self, **entries):
        # Accept arbitrary keyword arguments and expose them as attributes.
        for name, value in entries.items():
            setattr(self, name, value)
40 40
class logerror(Exception):
    '''Raised for fatal problems while collecting the CVS log, e.g. the
    working directory is not a CVS sandbox, or the log cache overlaps
    with newly fetched log entries.'''
    pass
43 43
def getrepopath(cvspath):
    """Return the repository path from a CVS path.

    >>> getrepopath('/foo/bar')
    '/foo/bar'
    >>> getrepopath('c:/foo/bar')
    'c:/foo/bar'
    >>> getrepopath(':pserver:10/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:10c:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:c:/foo/bar')
    'c:/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
    'c:/foo/bar'
    """
    # According to the CVS manual, CVS paths are expressed like:
    #   [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
    #
    # Windows absolute paths start with a one-character drive letter
    # ('c:'), which makes the colon-split ambiguous.  We assume drive
    # letters are exactly one character long while any other CVS
    # component preceding the repository path is at least two characters
    # long, and use that to disambiguate.
    pieces = cvspath.split(':')
    if len(pieces) == 1:
        # No colon at all: the whole string already is the repository path.
        return pieces[0]
    penultimate, last = pieces[-2], pieces[-1]
    if len(penultimate) > 1:
        # The component before the path is a host/method/port component,
        # not a drive letter.  A port number may have been glued onto the
        # front of the path by the split; strip it off.
        return last.lstrip('0123456789')
    # Single-character component: treat it as a Windows drive letter that
    # belongs to the repository path itself.
    return '%s:%s' % (penultimate, last)
82 82
def createlog(ui, directory=None, root="", rlog=True, cache=None):
    '''Collect the CVS rlog and return it as a list of logentry objects.

    ui        - Mercurial ui object, used for status/note/debug output
    directory - directory (module) to log; None means the current CVS
                sandbox (its CVS/Repository and CVS/Root files are read)
    root      - CVSROOT to use; falls back to the sandbox Root file and
                then the CVSROOT environment variable
    rlog      - run 'cvs rlog' (remote) instead of 'cvs log'
    cache     - None, 'write' or 'update'; enables the pickle log cache
                under ~/.hg.cvsps

    Raises logerror if not in a CVS sandbox or if the cache is unusable.
    '''

    # Because we store many duplicate commit log messages, reusing strings
    # saves a lot of memory and pickle storage space.
    _scache = {}
    def scache(s):
        "return a shared version of a string"
        return _scache.setdefault(s, s)

    ui.status(_('collecting CVS rlog\n'))

    log = []      # list of logentry objects containing the CVS state

    # patterns to match in CVS (r)log output, by state of use
    re_00 = re.compile('RCS file: (.+)$')
    re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
    re_02 = re.compile('cvs (r?log|server): (.+)\n$')
    re_03 = re.compile("(Cannot access.+CVSROOT)|(can't create temporary directory.+)$")
    re_10 = re.compile('Working file: (.+)$')
    re_20 = re.compile('symbolic names:')
    re_30 = re.compile('\t(.+): ([\\d.]+)$')
    re_31 = re.compile('----------------------------$')
    re_32 = re.compile('=============================================================================$')
    re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
    re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?')
    re_70 = re.compile('branches: (.+);$')

    # Matches the log message CVS writes for synthetic dead revisions
    # created when a file is first added on a branch.
    file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')

    prefix = ''   # leading path to strip of what we get from CVS

    if directory is None:
        # Current working directory

        # Get the real directory in the repository
        try:
            prefix = file(os.path.join('CVS','Repository')).read().strip()
            if prefix == ".":
                prefix = ""
            directory = prefix
        except IOError:
            raise logerror('Not a CVS sandbox')

        if prefix and not prefix.endswith(os.sep):
            prefix += os.sep

        # Use the Root file in the sandbox, if it exists
        try:
            root = file(os.path.join('CVS','Root')).read().strip()
        except IOError:
            pass

    if not root:
        root = os.environ.get('CVSROOT', '')

    # read log cache if one exists
    oldlog = []
    date = None

    if cache:
        cachedir = os.path.expanduser('~/.hg.cvsps')
        if not os.path.exists(cachedir):
            os.mkdir(cachedir)

        # The cvsps cache pickle needs a uniquified name, based on the
        # repository location. The address may have all sort of nasties
        # in it, slashes, colons and such. So here we take just the
        # alphanumerics, concatenated in a way that does not mix up the
        # various components, so that
        #    :pserver:user@server:/path
        # and
        #    /pserver/user/server/path
        # are mapped to different cache file names.
        cachefile = root.split(":") + [directory, "cache"]
        cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
        cachefile = os.path.join(cachedir,
                                 '.'.join([s for s in cachefile if s]))

        if cache == 'update':
            try:
                ui.note(_('reading cvs log cache %s\n') % cachefile)
                oldlog = pickle.load(file(cachefile))
                ui.note(_('cache has %d log entries\n') % len(oldlog))
            except Exception, e:
                # Best effort: a missing/corrupt cache just means we fetch
                # the full log again.
                ui.note(_('error reading cache: %r\n') % e)

            if oldlog:
                date = oldlog[-1].date    # last commit date as a (time,tz) tuple
                date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')

    # build the CVS commandline
    cmd = ['cvs', '-q']
    if root:
        cmd.append('-d%s' % root)
        p = util.normpath(getrepopath(root))
        if not p.endswith('/'):
            p += '/'
        prefix = p + util.normpath(prefix)
    cmd.append(['log', 'rlog'][rlog])
    if date:
        # no space between option and date string
        cmd.append('-d>%s' % date)
    cmd.append(directory)

    # state machine begins here
    tags = {}     # dictionary of revisions on current file with their tags
    state = 0
    store = False # set when a new record can be appended

    cmd = [util.shellquote(arg) for arg in cmd]
    ui.note(_("running %s\n") % (' '.join(cmd)))
    ui.debug(_("prefix=%r directory=%r root=%r\n") % (prefix, directory, root))

    pfp = util.popen(' '.join(cmd))
    # Keep one line of lookahead ('peek') so state 7 can tell a revision
    # separator followed by a new revision apart from log-message text.
    peek = pfp.readline()
    while True:
        line = peek
        if line == '':
            break
        peek = pfp.readline()
        if line.endswith('\n'):
            line = line[:-1]
        #ui.debug('state=%d line=%r\n' % (state, line))

        if state == 0:
            # initial state, consume input until we see 'RCS file'
            match = re_00.match(line)
            if match:
                rcs = match.group(1)
                tags = {}
                if rlog:
                    # rlog prints repository paths; strip the ',v' suffix,
                    # the repository prefix and any Attic component to
                    # recover the working file name.
                    filename = util.normpath(rcs[:-2])
                    if filename.startswith(prefix):
                        filename = filename[len(prefix):]
                    if filename.startswith('/'):
                        filename = filename[1:]
                    if filename.startswith('Attic/'):
                        filename = filename[6:]
                    else:
                        filename = filename.replace('/Attic/', '/')
                    state = 2
                    continue
                state = 1
                continue
            match = re_01.match(line)
            if match:
                raise Exception(match.group(1))
            match = re_02.match(line)
            if match:
                raise Exception(match.group(2))
            if re_03.match(line):
                raise Exception(line)

        elif state == 1:
            # expect 'Working file' (only when using log instead of rlog)
            match = re_10.match(line)
            assert match, _('RCS file must be followed by working file')
            filename = util.normpath(match.group(1))
            state = 2

        elif state == 2:
            # expect 'symbolic names'
            if re_20.match(line):
                state = 3

        elif state == 3:
            # read the symbolic names and store as tags
            match = re_30.match(line)
            if match:
                rev = [int(x) for x in match.group(2).split('.')]

                # Convert magic branch number to an odd-numbered one
                revn = len(rev)
                if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
                    rev = rev[:-2] + rev[-1:]
                rev = tuple(rev)

                if rev not in tags:
                    tags[rev] = []
                tags[rev].append(match.group(1))

            elif re_31.match(line):
                state = 5
            elif re_32.match(line):
                state = 0

        elif state == 4:
            # expecting '------' separator before first revision
            if re_31.match(line):
                state = 5
            else:
                assert not re_32.match(line), _('must have at least some revisions')

        elif state == 5:
            # expecting revision number and possibly (ignored) lock indication
            # we create the logentry here from values stored in states 0 to 4,
            # as this state is re-entered for subsequent revisions of a file.
            match = re_50.match(line)
            assert match, _('expected revision number')
            e = logentry(rcs=scache(rcs), file=scache(filename),
                    revision=tuple([int(x) for x in match.group(1).split('.')]),
                    branches=[], parent=None,
                    synthetic=False)
            state = 6

        elif state == 6:
            # expecting date, author, state, lines changed
            match = re_60.match(line)
            assert match, _('revision must be followed by date line')
            d = match.group(1)
            if d[2] == '/':
                # Y2K
                d = '19' + d

            if len(d.split()) != 3:
                # cvs log dates always in GMT
                d = d + ' UTC'
            e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S'])
            e.author = scache(match.group(2))
            e.dead = match.group(3).lower() == 'dead'

            # groups 5/6 are the optional '+n'/'-n' of the 'lines:' field
            if match.group(5):
                if match.group(6):
                    e.lines = (int(match.group(5)), int(match.group(6)))
                else:
                    e.lines = (int(match.group(5)), 0)
            elif match.group(6):
                e.lines = (0, int(match.group(6)))
            else:
                e.lines = None
            e.comment = []
            state = 7

        elif state == 7:
            # read the revision numbers of branches that start at this revision
            # or store the commit log message otherwise
            m = re_70.match(line)
            if m:
                e.branches = [tuple([int(y) for y in x.strip().split('.')])
                              for x in m.group(1).split(';')]
                state = 8
            elif re_31.match(line) and re_50.match(peek):
                state = 5
                store = True
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        elif state == 8:
            # store commit log message
            if re_31.match(line):
                state = 5
                store = True
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        # When a file is added on a branch B1, CVS creates a synthetic
        # dead trunk revision 1.1 so that the branch has a root.
        # Likewise, if you merge such a file to a later branch B2 (one
        # that already existed when the file was added on B1), CVS
        # creates a synthetic dead revision 1.1.x.1 on B2.  Don't drop
        # these revisions now, but mark them synthetic so
        # createchangeset() can take care of them.
        if (store and
            e.dead and
            e.revision[-1] == 1 and      # 1.1 or 1.1.x.1
            len(e.comment) == 1 and
            file_added_re.match(e.comment[0])):
            ui.debug(_('found synthetic rev in %s: %r\n')
                     % (e.rcs, e.comment[0]))
            e.synthetic = True

        if store:
            # clean up the results and save in the log.
            store = False
            e.tags = util.sort([scache(x) for x in tags.get(e.revision, [])])
            e.comment = scache('\n'.join(e.comment))

            # Even-length revision tuples longer than (x, y) denote branch
            # revisions; look up the branch name from the magic branch tag.
            revn = len(e.revision)
            if revn > 3 and (revn % 2) == 0:
                e.branch = tags.get(e.revision[:-1], [None])[0]
            else:
                e.branch = None

            log.append(e)

            if len(log) % 100 == 0:
                ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')

    listsort(log, key=lambda x:(x.rcs, x.revision))

    # find parent revisions of individual files
    versions = {}
    for e in log:
        branch = e.revision[:-1]
        p = versions.get((e.rcs, branch), None)
        if p is None:
            # first revision seen on this branch: parent is the branch point
            p = e.revision[:-2]
        e.parent = p
        versions[(e.rcs, branch)] = e.revision

    # update the log cache
    if cache:
        if log:
            # join up the old and new logs
            listsort(log, key=lambda x:x.date)

            if oldlog and oldlog[-1].date >= log[0].date:
                raise logerror('Log cache overlaps with new log entries,'
                               ' re-run without cache.')

            log = oldlog + log

            # write the new cachefile
            ui.note(_('writing cvs log cache %s\n') % cachefile)
            pickle.dump(log, file(cachefile, 'w'))
        else:
            log = oldlog

    ui.status(_('%d log entries\n') % len(log))

    return log
411 411
412 412
class changeset(object):
    '''A group of CVS file revisions committed together.

    Attributes:
    .author    - author name as CVS knows it
    .branch    - name of branch this changeset is on, or None
    .comment   - commit message
    .date      - the commit date as a (time,tz) tuple
    .entries   - list of logentry objects in this changeset
    .parents   - list of one or two parent changesets
    .tags      - list of tags on this changeset
    .synthetic - from synthetic revision "file ... added on branch ..."
    '''
    def __init__(self, **entries):
        # Accept arbitrary keyword arguments and expose them as attributes.
        for name, value in entries.items():
            setattr(self, name, value)
426 426
def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
    '''Convert log into changesets.

    ui        - Mercurial ui object, used for status output
    log       - list of logentry objects, as produced by createlog()
    fuzz      - maximum gap (in seconds) between commit dates of entries
                with identical comment/author/branch for them to be
                merged into one changeset
    mergefrom - regexp matched against commit messages to detect a merge
                from another branch; None selects the default pattern
                '{{mergefrombranch BRANCHNAME}}', a false value disables it
    mergeto   - regexp matched against commit messages to detect a merge
                to another branch; None selects the default pattern
                '{{mergetobranch BRANCHNAME}}', a false value disables it

    Returns the list of changeset objects, numbered consecutively via
    their .id attribute starting at 1.  Synthetic changesets (see
    createlog) are used only for parent computation and are dropped from
    the result.
    '''

    ui.status(_('creating changesets\n'))

    # Merge changesets: sort so that entries belonging to the same
    # changeset become adjacent.

    listsort(log, key=lambda x:(x.comment, x.author, x.branch, x.date))

    changesets = []
    files = {}
    c = None
    for i, e in enumerate(log):

        # Check if log entry belongs to the current changeset or not.
        if not (c and
                e.comment == c.comment and
                e.author == c.author and
                e.branch == c.branch and
                ((c.date[0] + c.date[1]) <=
                 (e.date[0] + e.date[1]) <=
                 (c.date[0] + c.date[1]) + fuzz) and
                e.file not in files):
            c = changeset(comment=e.comment, author=e.author,
                          branch=e.branch, date=e.date, entries=[])
            changesets.append(c)
            files = {}
            if len(changesets) % 100 == 0:
                t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
                ui.status(util.ellipsis(t, 80) + '\n')

        c.entries.append(e)
        files[e.file] = True
        c.date = e.date       # changeset date is date of latest commit in it

    # Mark synthetic changesets

    for c in changesets:
        # Synthetic revisions always get their own changeset, because
        # the log message includes the filename.  E.g. if you add file3
        # and file4 on a branch, you get four log entries and three
        # changesets:
        #   "File file3 was added on branch ..." (synthetic, 1 entry)
        #   "File file4 was added on branch ..." (synthetic, 1 entry)
        #   "Add file3 and file4 to fix ..."     (real, 2 entries)
        # Hence the check for 1 entry here.
        c.synthetic = (len(c.entries) == 1 and c.entries[0].synthetic)

    # Sort files in each changeset

    def pathcompare(l, r):
        'Mimic cvsps sorting order'
        l = l.split('/')
        r = r.split('/')
        nl = len(l)
        nr = len(r)
        n = min(nl, nr)
        for i in range(n):
            if i + 1 == nl and nl < nr:
                return -1
            elif i + 1 == nr and nl > nr:
                return +1
            elif l[i] < r[i]:
                return -1
            elif l[i] > r[i]:
                return +1
        return 0

    def entitycompare(l, r):
        'Compare two log entries by their file path, cvsps-style'
        return pathcompare(l.file, r.file)

    # The comparators are loop-invariant, so define them once instead of
    # once per changeset.
    for c in changesets:
        c.entries.sort(entitycompare)

    # Sort changesets by date

    def cscmp(l, r):
        d = sum(l.date) - sum(r.date)
        if d:
            return d

        # detect vendor branches and initial commits on a branch:
        # if one changeset contains the direct parent revision of a
        # file in the other, it must sort first even with equal dates.
        leftrevs = {}
        for e in l.entries:
            leftrevs[e.rcs] = e.revision
        rightrevs = {}
        for e in r.entries:
            rightrevs[e.rcs] = e.revision

        d = 0
        for e in l.entries:
            if rightrevs.get(e.rcs, None) == e.parent:
                assert not d
                d = 1
                break

        for e in r.entries:
            if leftrevs.get(e.rcs, None) == e.parent:
                assert not d
                d = -1
                break

        return d

    changesets.sort(cscmp)

    # Collect tags

    globaltags = {}
    for c in changesets:
        for e in c.entries:
            for tag in e.tags:
                # remember which is the latest changeset to have this tag
                globaltags[tag] = c

    for c in changesets:
        tags = {}
        for e in c.entries:
            for tag in e.tags:
                tags[tag] = True
        # remember tags only if this is the latest changeset to have it
        c.tags = util.sort([tag for tag in tags if globaltags[tag] is c])

    # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
    # by inserting dummy changesets with two parents, and handle
    # {{mergefrombranch BRANCHNAME}} by setting two parents.

    if mergeto is None:
        mergeto = r'{{mergetobranch ([-\w]+)}}'
    if mergeto:
        mergeto = re.compile(mergeto)

    if mergefrom is None:
        mergefrom = r'{{mergefrombranch ([-\w]+)}}'
    if mergefrom:
        mergefrom = re.compile(mergefrom)

    versions = {}    # changeset index where we saw any particular file version
    branches = {}    # changeset index where we saw a branch
    n = len(changesets)
    i = 0
    while i<n:
        c = changesets[i]

        for f in c.entries:
            versions[(f.rcs, f.revision)] = i

        p = None
        if c.branch in branches:
            p = branches[c.branch]
        else:
            # first changeset on this branch: parent is the latest
            # changeset containing a direct parent revision of any file
            for f in c.entries:
                p = max(p, versions.get((f.rcs, f.parent), None))

        c.parents = []
        if p is not None:
            p = changesets[p]

            # Ensure no changeset has a synthetic changeset as a parent.
            while p.synthetic:
                assert len(p.parents) <= 1, \
                    _('synthetic changeset cannot have multiple parents')
                if p.parents:
                    p = p.parents[0]
                else:
                    p = None
                    break

            if p is not None:
                c.parents.append(p)

        if mergefrom:
            m = mergefrom.search(c.comment)
            if m:
                m = m.group(1)
                if m == 'HEAD':
                    m = None
                if m in branches and c.branch != m:
                    # Only look up the merge source once we know the
                    # branch actually exists; doing the branches[m]
                    # lookup first crashed with KeyError when a commit
                    # message named an unknown branch.  Also refuse to
                    # use a synthetic changeset as a merge parent.
                    candidate = changesets[branches[m]]
                    if not candidate.synthetic:
                        c.parents.append(candidate)

        if mergeto:
            m = mergeto.search(c.comment)
            if m:
                try:
                    m = m.group(1)
                    if m == 'HEAD':
                        m = None
                except IndexError:
                    m = None # if no group found then merge to HEAD
                if m in branches and c.branch != m:
                    # insert empty changeset for merge
                    cc = changeset(author=c.author, branch=m, date=c.date,
                            comment='convert-repo: CVS merge from branch %s' % c.branch,
                            entries=[], tags=[], parents=[changesets[branches[m]], c])
                    changesets.insert(i + 1, cc)
                    branches[m] = i + 1

                    # adjust our loop counters now we have inserted a new entry
                    n += 1
                    i += 2
                    continue

        branches[c.branch] = i
        i += 1

    # Drop synthetic changesets (safe now that we have ensured no other
    # changesets can have them as parents).
    i = 0
    while i < len(changesets):
        if changesets[i].synthetic:
            del changesets[i]
        else:
            i += 1

    # Number changesets

    for i, c in enumerate(changesets):
        c.id = i + 1

    ui.status(_('%d changeset entries\n') % len(changesets))

    return changesets
649 650
650 651
def debugcvsps(ui, *args, **opts):
    '''Read CVS rlog for current directory or named path in repository, and
    convert the log to changesets based on matching commit log entries and dates.

    Recognized opts keys: "new_cache", "update_cache", "revisions",
    "root", "fuzz", "ancestors", "branches", "parents", "prefix".
    Output mimics the cvsps tool, including its formatting quirks.
    '''

    # translate the cache flags into the 'cache' mode of createlog()
    if opts["new_cache"]:
        cache = "write"
    elif opts["update_cache"]:
        cache = "update"
    else:
        cache = None

    revisions = opts["revisions"]

    try:
        if args:
            log = []
            for d in args:
                log += createlog(ui, d, root=opts["root"], cache=cache)
        else:
            log = createlog(ui, root=opts["root"], cache=cache)
    except logerror, e:
        ui.write("%r\n"%e)
        return

    changesets = createchangeset(ui, log, opts["fuzz"])
    del log

    # Print changesets (optionally filtered)

    # 'off' is truthy while we are still skipping changesets before the
    # first requested revision/tag; see the start/end tag checks below.
    off = len(revisions)
    branches = {}    # latest version number in each branch
    ancestors = {}   # parent branch
    for cs in changesets:

        if opts["ancestors"]:
            if cs.branch not in branches and cs.parents and cs.parents[0].id:
                ancestors[cs.branch] = changesets[cs.parents[0].id-1].branch, cs.parents[0].id
            branches[cs.branch] = cs.id

        # limit by branches
        if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
            continue

        if not off:
            # Note: trailing spaces on several lines here are needed to have
            #       bug-for-bug compatibility with cvsps.
            ui.write('---------------------\n')
            ui.write('PatchSet %d \n' % cs.id)
            ui.write('Date: %s\n' % util.datestr(cs.date, '%Y/%m/%d %H:%M:%S %1%2'))
            ui.write('Author: %s\n' % cs.author)
            ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
            ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags)>1],
                                  ','.join(cs.tags) or '(none)'))
            if opts["parents"] and cs.parents:
                if len(cs.parents)>1:
                    ui.write('Parents: %s\n' % (','.join([str(p.id) for p in cs.parents])))
                else:
                    ui.write('Parent: %d\n' % cs.parents[0].id)

            if opts["ancestors"]:
                # walk up the recorded parent-branch chain
                b = cs.branch
                r = []
                while b:
                    b, c = ancestors[b]
                    r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
                if r:
                    ui.write('Ancestors: %s\n' % (','.join(r)))

            ui.write('Log:\n')
            ui.write('%s\n\n' % cs.comment)
            ui.write('Members: \n')
            for f in cs.entries:
                fn = f.file
                if fn.startswith(opts["prefix"]):
                    fn = fn[len(opts["prefix"]):]
                ui.write('\t%s:%s->%s%s \n' % (fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
                          '.'.join([str(x) for x in f.revision]), ['', '(DEAD)'][f.dead]))
            ui.write('\n')

        # have we seen the start tag?
        if revisions and off:
            if revisions[0] == str(cs.id) or \
                revisions[0] in cs.tags:
                off = False

        # see if we reached the end tag
        if len(revisions)>1 and not off:
            if revisions[1] == str(cs.id) or \
                revisions[1] in cs.tags:
                break
General Comments 0
You need to be logged in to leave comments. Login now