##// END OF EJS Templates
cvsps: make debugging easier by adding __repr__() methods.
Greg Ward -
r8080:19229b0b default
parent child Browse files
Show More
@@ -1,766 +1,777 b''
1 1 #
2 2 # Mercurial built-in replacement for cvsps.
3 3 #
4 4 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
5 5 #
6 6 # This software may be used and distributed according to the terms
7 7 # of the GNU General Public License, incorporated herein by reference.
8 8
9 9 import os
10 10 import re
11 11 import cPickle as pickle
12 12 from mercurial import util
13 13 from mercurial.i18n import _
14 14
def listsort(lst, key):
    """Sort lst in place by key(item).

    Compatibility helper for Python 2.3, whose list.sort() does not
    accept a key= argument: when key= is rejected with TypeError, fall
    back to an equivalent comparison function built from key().

    (Renamed the first parameter from 'list', which shadowed the
    builtin; all callers pass it positionally.)
    """
    try:
        lst.sort(key=key)
    except TypeError:
        # Python 2.3 path: emulate key= via a cmp function.
        lst.sort(lambda l, r: cmp(key(l), key(r)))
21 21
class logentry(object):
    '''One CVS file revision, as parsed from (r)log output.

    Attributes:
    .author    - author name as CVS knows it
    .branch    - name of branch this revision is on
    .branches  - revision tuple of branches starting at this revision
    .comment   - commit message
    .date      - the commit date as a (time, tz) tuple
    .dead      - true if file revision is dead
    .file      - name of file
    .lines     - a tuple (+lines, -lines) or None
    .parent    - previous revision of this entry
    .rcs       - name of file as returned from CVS
    .revision  - revision number as tuple
    .tags      - list of tags on the file
    .synthetic - is this a synthetic "file ... added on ..." revision?
    .mergepoint- the branch that has been merged from (if present in rlog output)
    '''
    def __init__(self, **entries):
        self.__dict__.update(entries)

    def __repr__(self):
        # e.g. "<logentry at 0x7f...: foo.c 1.2.3>" -- handy in a debugger.
        rev = ".".join(str(part) for part in self.revision)
        return "<%s at 0x%x: %s %s>" % (self.__class__.__name__,
                                        id(self), self.file, rev)
47
class logerror(Exception):
    '''Raised for fatal problems while collecting the CVS log, e.g. not
    being run in a CVS sandbox, or a log cache that overlaps with newly
    collected entries.'''
    pass
44 50
def getrepopath(cvspath):
    """Return the repository path from a CVS path.

    >>> getrepopath('/foo/bar')
    '/foo/bar'
    >>> getrepopath('c:/foo/bar')
    'c:/foo/bar'
    >>> getrepopath(':pserver:10/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:10c:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:c:/foo/bar')
    'c:/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
    'c:/foo/bar'
    """
    # Per the CVS manual, CVS paths look like:
    #   [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
    #
    # Windows absolute paths start with a drive letter like 'c:', which
    # complicates parsing.  We assume drive letters are exactly one
    # character long while every other pre-path component is at least
    # two characters, and disambiguate on that length.
    atoms = cvspath.split(':')
    if len(atoms) == 1:
        # No method/host/port components at all: already a plain path.
        return atoms[0]
    if len(atoms[-2]) <= 1:
        # Single-character component before the final colon: treat it
        # as a Windows drive letter that belongs to the path.
        return atoms[-2] + ':' + atoms[-1]
    # Ambiguity: a port number could be immediately followed by a drive
    # letter.  We assume that never happens, so anything after the last
    # colon is the path, minus a possible leading port number.
    return atoms[-1].lstrip('0123456789')
83 89
def createlog(ui, directory=None, root="", rlog=True, cache=None):
    '''Collect the CVS (r)log and return a list of logentry objects.

    ui        - Mercurial ui object, used for status/note/debug output
    directory - directory to log; None means the current CVS sandbox
                (its CVS/Repository and CVS/Root files are consulted)
    root      - CVSROOT to use; falls back to the sandbox CVS/Root file
                or the CVSROOT environment variable
    rlog      - run 'cvs rlog' instead of 'cvs log'
    cache     - None, or 'update' to read and extend the on-disk pickle
                cache under ~/.hg.cvsps (any other true value only writes)

    Raises logerror when not run inside a CVS sandbox, or when the
    cached log overlaps with the newly collected entries.
    '''

    # Because we store many duplicate commit log messages, reusing strings
    # saves a lot of memory and pickle storage space.
    _scache = {}
    def scache(s):
        "return a shared version of a string"
        return _scache.setdefault(s, s)

    ui.status(_('collecting CVS rlog\n'))

    log = []      # list of logentry objects containing the CVS state

    # Patterns to match in CVS (r)log output, named re_<state><n> after
    # the parser state (see below) in which each is consumed.
    re_00 = re.compile('RCS file: (.+)$')
    re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
    re_02 = re.compile('cvs (r?log|server): (.+)\n$')
    re_03 = re.compile("(Cannot access.+CVSROOT)|(can't create temporary directory.+)$")
    re_10 = re.compile('Working file: (.+)$')
    re_20 = re.compile('symbolic names:')
    re_30 = re.compile('\t(.+): ([\\d.]+)$')
    re_31 = re.compile('----------------------------$')
    re_32 = re.compile('=============================================================================$')
    re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
    re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?(.*mergepoint:\s+([^;]+);)?')
    re_70 = re.compile('branches: (.+);$')

    # Synthetic revisions created by CVS when a file is added on a branch.
    file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')

    prefix = ''   # leading path to strip of what we get from CVS

    if directory is None:
        # Current working directory

        # Get the real directory in the repository
        try:
            # NOTE: py2 file() builtin; the file handle is not closed
            # explicitly (relies on refcounting).
            prefix = file(os.path.join('CVS','Repository')).read().strip()
            if prefix == ".":
                prefix = ""
            directory = prefix
        except IOError:
            raise logerror('Not a CVS sandbox')

        if prefix and not prefix.endswith(os.sep):
            prefix += os.sep

        # Use the Root file in the sandbox, if it exists
        try:
            root = file(os.path.join('CVS','Root')).read().strip()
        except IOError:
            pass

    if not root:
        root = os.environ.get('CVSROOT', '')

    # read log cache if one exists
    oldlog = []
    date = None

    if cache:
        cachedir = os.path.expanduser('~/.hg.cvsps')
        if not os.path.exists(cachedir):
            os.mkdir(cachedir)

        # The cvsps cache pickle needs a uniquified name, based on the
        # repository location. The address may have all sort of nasties
        # in it, slashes, colons and such. So here we take just the
        # alphanumerics, concatenated in a way that does not mix up the
        # various components, so that
        #    :pserver:user@server:/path
        # and
        #    /pserver/user/server/path
        # are mapped to different cache file names.
        cachefile = root.split(":") + [directory, "cache"]
        cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
        cachefile = os.path.join(cachedir,
                                 '.'.join([s for s in cachefile if s]))

        if cache == 'update':
            try:
                ui.note(_('reading cvs log cache %s\n') % cachefile)
                oldlog = pickle.load(file(cachefile))
                ui.note(_('cache has %d log entries\n') % len(oldlog))
            except Exception, e:
                # Best-effort: a missing/corrupt cache just means a full rlog.
                ui.note(_('error reading cache: %r\n') % e)

            if oldlog:
                # Only ask CVS for entries newer than the cached tail.
                date = oldlog[-1].date    # last commit date as a (time,tz) tuple
                date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')

    # build the CVS commandline
    cmd = ['cvs', '-q']
    if root:
        cmd.append('-d%s' % root)
        p = util.normpath(getrepopath(root))
        if not p.endswith('/'):
            p += '/'
        prefix = p + util.normpath(prefix)
    cmd.append(['log', 'rlog'][rlog])
    if date:
        # no space between option and date string
        cmd.append('-d>%s' % date)
    cmd.append(directory)

    # State machine begins here.  States:
    #   0: scanning for 'RCS file:'        5: expecting a revision number
    #   1: expecting 'Working file'        6: expecting date/author/state line
    #   2: expecting 'symbolic names:'     7: branches line or commit message
    #   3: reading symbolic names          8: reading commit message
    #   4: expecting '-----' separator
    tags = {}     # dictionary of revisions on current file with their tags
    branchmap = {} # mapping between branch names and revision numbers
    state = 0
    store = False # set when a new record can be appended

    cmd = [util.shellquote(arg) for arg in cmd]
    ui.note(_("running %s\n") % (' '.join(cmd)))
    ui.debug(_("prefix=%r directory=%r root=%r\n") % (prefix, directory, root))

    pfp = util.popen(' '.join(cmd))
    peek = pfp.readline()
    while True:
        line = peek
        if line == '':
            break
        peek = pfp.readline()
        if line.endswith('\n'):
            line = line[:-1]
        #ui.debug('state=%d line=%r\n' % (state, line))

        if state == 0:
            # initial state, consume input until we see 'RCS file'
            match = re_00.match(line)
            if match:
                rcs = match.group(1)
                tags = {}
                if rlog:
                    # Derive the working filename from the ,v path.
                    filename = util.normpath(rcs[:-2])
                    if filename.startswith(prefix):
                        filename = filename[len(prefix):]
                    if filename.startswith('/'):
                        filename = filename[1:]
                    if filename.startswith('Attic/'):
                        filename = filename[6:]
                    else:
                        filename = filename.replace('/Attic/', '/')
                    state = 2
                    continue
                state = 1
                continue
            match = re_01.match(line)
            if match:
                raise Exception(match.group(1))
            match = re_02.match(line)
            if match:
                raise Exception(match.group(2))
            if re_03.match(line):
                raise Exception(line)

        elif state == 1:
            # expect 'Working file' (only when using log instead of rlog)
            match = re_10.match(line)
            assert match, _('RCS file must be followed by working file')
            filename = util.normpath(match.group(1))
            state = 2

        elif state == 2:
            # expect 'symbolic names'
            if re_20.match(line):
                branchmap = {}
                state = 3

        elif state == 3:
            # read the symbolic names and store as tags
            match = re_30.match(line)
            if match:
                rev = [int(x) for x in match.group(2).split('.')]

                # Convert magic branch number to an odd-numbered one
                revn = len(rev)
                if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
                    rev = rev[:-2] + rev[-1:]
                rev = tuple(rev)

                if rev not in tags:
                    tags[rev] = []
                tags[rev].append(match.group(1))
                branchmap[match.group(1)] = match.group(2)

            elif re_31.match(line):
                state = 5
            elif re_32.match(line):
                state = 0

        elif state == 4:
            # expecting '------' separator before first revision
            if re_31.match(line):
                state = 5
            else:
                assert not re_32.match(line), _('must have at least some revisions')

        elif state == 5:
            # expecting revision number and possibly (ignored) lock indication
            # we create the logentry here from values stored in states 0 to 4,
            # as this state is re-entered for subsequent revisions of a file.
            match = re_50.match(line)
            assert match, _('expected revision number')
            e = logentry(rcs=scache(rcs), file=scache(filename),
                    revision=tuple([int(x) for x in match.group(1).split('.')]),
                    branches=[], parent=None,
                    synthetic=False)
            state = 6

        elif state == 6:
            # expecting date, author, state, lines changed
            match = re_60.match(line)
            assert match, _('revision must be followed by date line')
            d = match.group(1)
            if d[2] == '/':
                # Y2K: two-digit years in old cvs output are 19xx.
                d = '19' + d

            if len(d.split()) != 3:
                # cvs log dates always in GMT
                d = d + ' UTC'
            e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S'])
            e.author = scache(match.group(2))
            e.dead = match.group(3).lower() == 'dead'

            # lines: groups 5/6 are the +N/-N counts (either may be absent).
            if match.group(5):
                if match.group(6):
                    e.lines = (int(match.group(5)), int(match.group(6)))
                else:
                    e.lines = (int(match.group(5)), 0)
            elif match.group(6):
                e.lines = (0, int(match.group(6)))
            else:
                e.lines = None

            if match.group(7): # cvsnt mergepoint
                myrev = match.group(8).split('.')
                if len(myrev) == 2: # head
                    e.mergepoint = 'HEAD'
                else:
                    # Rebuild the magic branch number to look it up in branchmap.
                    myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
                    branches = [b for b in branchmap if branchmap[b] == myrev]
                    # NOTE(review): e.mergepoint is referenced in this assert
                    # message before it is assigned -- verify intent.
                    assert len(branches) == 1, 'unknown branch: %s' % e.mergepoint
                    e.mergepoint = branches[0]
            else:
                e.mergepoint = None
            e.comment = []
            state = 7

        elif state == 7:
            # read the revision numbers of branches that start at this revision
            # or store the commit log message otherwise
            m = re_70.match(line)
            if m:
                e.branches = [tuple([int(y) for y in x.strip().split('.')])
                              for x in m.group(1).split(';')]
                state = 8
            elif re_31.match(line) and re_50.match(peek):
                # The one-line lookahead (peek) disambiguates a '-----'
                # separator from the same string inside a commit message.
                state = 5
                store = True
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        elif state == 8:
            # store commit log message
            if re_31.match(line):
                state = 5
                store = True
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        # When a file is added on a branch B1, CVS creates a synthetic
        # dead trunk revision 1.1 so that the branch has a root.
        # Likewise, if you merge such a file to a later branch B2 (one
        # that already existed when the file was added on B1), CVS
        # creates a synthetic dead revision 1.1.x.1 on B2.  Don't drop
        # these revisions now, but mark them synthetic so
        # createchangeset() can take care of them.
        if (store and
              e.dead and
              e.revision[-1] == 1 and      # 1.1 or 1.1.x.1
              len(e.comment) == 1 and
              file_added_re.match(e.comment[0])):
            ui.debug(_('found synthetic revision in %s: %r\n')
                     % (e.rcs, e.comment[0]))
            e.synthetic = True

        if store:
            # clean up the results and save in the log.
            store = False
            e.tags = util.sort([scache(x) for x in tags.get(e.revision, [])])
            e.comment = scache('\n'.join(e.comment))

            # Even-length revision tuples longer than 3 denote branch
            # revisions; the branch name is the tag on the parent tuple.
            revn = len(e.revision)
            if revn > 3 and (revn % 2) == 0:
                e.branch = tags.get(e.revision[:-1], [None])[0]
            else:
                e.branch = None

            log.append(e)

            if len(log) % 100 == 0:
                ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')

    listsort(log, key=lambda x:(x.rcs, x.revision))

    # find parent revisions of individual files
    versions = {}
    for e in log:
        branch = e.revision[:-1]
        p = versions.get((e.rcs, branch), None)
        if p is None:
            # First revision seen on this branch: parent is the branch root.
            p = e.revision[:-2]
        e.parent = p
        versions[(e.rcs, branch)] = e.revision

    # update the log cache
    if cache:
        if log:
            # join up the old and new logs
            listsort(log, key=lambda x:x.date)

            if oldlog and oldlog[-1].date >= log[0].date:
                raise logerror('Log cache overlaps with new log entries,'
                               ' re-run without cache.')

            log = oldlog + log

            # write the new cachefile
            ui.note(_('writing cvs log cache %s\n') % cachefile)
            pickle.dump(log, file(cachefile, 'w'))
        else:
            log = oldlog

    ui.status(_('%d log entries\n') % len(log))

    return log
427 433
428 434
class changeset(object):
    '''A group of logentry objects committed together.

    Attributes:
    .id        - integer identifying this changeset (list index)
    .author    - author name as CVS knows it
    .branch    - name of branch this changeset is on, or None
    .comment   - commit message
    .date      - the commit date as a (time,tz) tuple
    .entries   - list of logentry objects in this changeset
    .parents   - list of one or two parent changesets
    .tags      - list of tags on this changeset
    .synthetic - from synthetic revision "file ... added on branch ..."
    .mergepoint- the branch that has been merged from (if present in rlog output)
    '''
    def __init__(self, **entries):
        self.__dict__.update(entries)

    def __repr__(self):
        # e.g. "<changeset at 0x7f...: 42>"; the id may not be assigned yet.
        ident = getattr(self, 'id', "(no id)")
        return "<%s at 0x%x: %s>" % (self.__class__.__name__,
                                     id(self), ident)
455
def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
    '''Convert a list of logentry objects into a list of changesets.

    ui        - Mercurial ui object for progress output
    log       - list of logentry objects from createlog()
    fuzz      - max seconds between commits merged into one changeset
    mergefrom - regex (or None for the default {{mergefrombranch X}})
                matched against comments to add a second parent
    mergeto   - regex (or None for the default {{mergetobranch X}})
                matched against comments to insert a dummy merge changeset

    Entries with identical comment/author/branch whose dates fall within
    fuzz seconds are grouped, parents are wired up, synthetic changesets
    are dropped, and the result is numbered starting at id 1.
    '''

    ui.status(_('creating changesets\n'))

    # Merge changesets

    listsort(log, key=lambda x:(x.comment, x.author, x.branch, x.date))

    changesets = []
    files = {}
    c = None
    for i, e in enumerate(log):

        # Check if log entry belongs to the current changeset or not.
        # A changeset never contains the same file twice (e.file check).
        if not (c and
                  e.comment == c.comment and
                  e.author == c.author and
                  e.branch == c.branch and
                  ((c.date[0] + c.date[1]) <=
                   (e.date[0] + e.date[1]) <=
                   (c.date[0] + c.date[1]) + fuzz) and
                  e.file not in files):
            c = changeset(comment=e.comment, author=e.author,
                          branch=e.branch, date=e.date, entries=[],
                          mergepoint=getattr(e, 'mergepoint', None))
            changesets.append(c)
            files = {}
            if len(changesets) % 100 == 0:
                t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
                ui.status(util.ellipsis(t, 80) + '\n')

        c.entries.append(e)
        files[e.file] = True
        c.date = e.date       # changeset date is date of latest commit in it

    # Mark synthetic changesets

    for c in changesets:
        # Synthetic revisions always get their own changeset, because
        # the log message includes the filename.  E.g. if you add file3
        # and file4 on a branch, you get four log entries and three
        # changesets:
        #   "File file3 was added on branch ..." (synthetic, 1 entry)
        #   "File file4 was added on branch ..." (synthetic, 1 entry)
        #   "Add file3 and file4 to fix ..."     (real, 2 entries)
        # Hence the check for 1 entry here.
        synth = getattr(c.entries[0], 'synthetic', None)
        c.synthetic = (len(c.entries) == 1 and synth)

    # Sort files in each changeset

    for c in changesets:
        def pathcompare(l, r):
            'Mimic cvsps sorting order'
            l = l.split('/')
            r = r.split('/')
            nl = len(l)
            nr = len(r)
            n = min(nl, nr)
            for i in range(n):
                # A path sorts before any path it is a prefix of.
                if i + 1 == nl and nl < nr:
                    return -1
                elif i + 1 == nr and nl > nr:
                    return +1
                elif l[i] < r[i]:
                    return -1
                elif l[i] > r[i]:
                    return +1
            return 0
        def entitycompare(l, r):
            'Compare two logentry objects by their file path.'
            return pathcompare(l.file, r.file)

        c.entries.sort(entitycompare)

    # Sort changesets by date

    def cscmp(l, r):
        # Primary key: total (time + tz) seconds.
        d = sum(l.date) - sum(r.date)
        if d:
            return d

        # detect vendor branches and initial commits on a branch:
        # order a changeset after one that contains its parent revisions.
        le = {}
        for e in l.entries:
            le[e.rcs] = e.revision
        re = {}
        for e in r.entries:
            re[e.rcs] = e.revision

        d = 0
        for e in l.entries:
            if re.get(e.rcs, None) == e.parent:
                assert not d
                d = 1
                break

        for e in r.entries:
            if le.get(e.rcs, None) == e.parent:
                assert not d
                d = -1
                break

        return d

    changesets.sort(cscmp)

    # Collect tags

    globaltags = {}
    for c in changesets:
        tags = {}
        for e in c.entries:
            for tag in e.tags:
                # remember which is the latest changeset to have this tag
                globaltags[tag] = c

    for c in changesets:
        tags = {}
        for e in c.entries:
            for tag in e.tags:
                tags[tag] = True
        # remember tags only if this is the latest changeset to have it
        c.tags = util.sort([tag for tag in tags if globaltags[tag] is c])

    # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
    # by inserting dummy changesets with two parents, and handle
    # {{mergefrombranch BRANCHNAME}} by setting two parents.

    if mergeto is None:
        mergeto = r'{{mergetobranch ([-\w]+)}}'
    if mergeto:
        mergeto = re.compile(mergeto)

    if mergefrom is None:
        mergefrom = r'{{mergefrombranch ([-\w]+)}}'
    if mergefrom:
        mergefrom = re.compile(mergefrom)

    versions = {}    # changeset index where we saw any particular file version
    branches = {}    # changeset index where we saw a branch
    n = len(changesets)
    i = 0
    while i<n:
        c = changesets[i]

        for f in c.entries:
            versions[(f.rcs, f.revision)] = i

        p = None
        if c.branch in branches:
            p = branches[c.branch]
        else:
            # First changeset on this branch: parent is the latest
            # changeset containing any of our files' parent revisions.
            for f in c.entries:
                p = max(p, versions.get((f.rcs, f.parent), None))

        c.parents = []
        if p is not None:
            p = changesets[p]

            # Ensure no changeset has a synthetic changeset as a parent.
            while p.synthetic:
                assert len(p.parents) <= 1, \
                       _('synthetic changeset cannot have multiple parents')
                if p.parents:
                    p = p.parents[0]
                else:
                    p = None
                    break

            if p is not None:
                c.parents.append(p)

        if c.mergepoint:
            if c.mergepoint == 'HEAD':
                # Trunk changesets store branch None, so branches[None]
                # is the latest HEAD changeset index.
                c.mergepoint = None
            c.parents.append(changesets[branches[c.mergepoint]])

        if mergefrom:
            m = mergefrom.search(c.comment)
            if m:
                m = m.group(1)
                if m == 'HEAD':
                    m = None
                # NOTE(review): this lookup happens before the
                # 'm in branches' guard below -- KeyError if the named
                # branch was never seen; verify ordering is intended.
                candidate = changesets[branches[m]]
                if m in branches and c.branch != m and not candidate.synthetic:
                    c.parents.append(candidate)

        if mergeto:
            m = mergeto.search(c.comment)
            if m:
                try:
                    m = m.group(1)
                    if m == 'HEAD':
                        m = None
                except:
                    m = None   # if no group found then merge to HEAD
                if m in branches and c.branch != m:
                    # insert empty changeset for merge
                    cc = changeset(author=c.author, branch=m, date=c.date,
                            comment='convert-repo: CVS merge from branch %s' % c.branch,
                            entries=[], tags=[], parents=[changesets[branches[m]], c])
                    changesets.insert(i + 1, cc)
                    branches[m] = i + 1

                    # adjust our loop counters now we have inserted a new entry
                    n += 1
                    i += 2
                    continue

        branches[c.branch] = i
        i += 1

    # Drop synthetic changesets (safe now that we have ensured no other
    # changesets can have them as parents).
    i = 0
    while i < len(changesets):
        if changesets[i].synthetic:
            del changesets[i]
        else:
            i += 1

    # Number changesets

    for i, c in enumerate(changesets):
        c.id = i + 1

    ui.status(_('%d changeset entries\n') % len(changesets))

    return changesets
675 686
676 687
def debugcvsps(ui, *args, **opts):
    '''Read CVS rlog for current directory or named path in repository, and
    convert the log to changesets based on matching commit log entries and dates.

    Command implementation: args are optional repository paths; opts keys
    used here are new_cache, update_cache, revisions, root, fuzz,
    ancestors, branches, parents and prefix.  Output mimics the cvsps
    tool, including its trailing-whitespace quirks.
    '''

    if opts["new_cache"]:
        cache = "write"
    elif opts["update_cache"]:
        cache = "update"
    else:
        cache = None

    revisions = opts["revisions"]

    try:
        if args:
            # Concatenate the logs of all named paths.
            log = []
            for d in args:
                log += createlog(ui, d, root=opts["root"], cache=cache)
        else:
            log = createlog(ui, root=opts["root"], cache=cache)
    except logerror, e:
        ui.write("%r\n"%e)
        return

    changesets = createchangeset(ui, log, opts["fuzz"])
    del log

    # Print changesets (optionally filtered)

    off = len(revisions)  # true while we have not yet seen the start tag
    branches = {}    # latest version number in each branch
    ancestors = {}   # parent branch
    for cs in changesets:

        if opts["ancestors"]:
            if cs.branch not in branches and cs.parents and cs.parents[0].id:
                # Record (parent branch, parent changeset id) the first
                # time each branch is seen.
                ancestors[cs.branch] = changesets[cs.parents[0].id-1].branch, cs.parents[0].id
            branches[cs.branch] = cs.id

        # limit by branches
        if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
            continue

        if not off:
            # Note: trailing spaces on several lines here are needed to have
            #       bug-for-bug compatibility with cvsps.
            ui.write('---------------------\n')
            ui.write('PatchSet %d \n' % cs.id)
            ui.write('Date: %s\n' % util.datestr(cs.date, '%Y/%m/%d %H:%M:%S %1%2'))
            ui.write('Author: %s\n' % cs.author)
            ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
            ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags)>1],
                                  ','.join(cs.tags) or '(none)'))
            if opts["parents"] and cs.parents:
                if len(cs.parents)>1:
                    ui.write('Parents: %s\n' % (','.join([str(p.id) for p in cs.parents])))
                else:
                    ui.write('Parent: %d\n' % cs.parents[0].id)

            if opts["ancestors"]:
                # Walk up the parent-branch chain collected above.
                b = cs.branch
                r = []
                while b:
                    b, c = ancestors[b]
                    r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
                if r:
                    ui.write('Ancestors: %s\n' % (','.join(r)))

            ui.write('Log:\n')
            ui.write('%s\n\n' % cs.comment)
            ui.write('Members: \n')
            for f in cs.entries:
                fn = f.file
                if fn.startswith(opts["prefix"]):
                    fn = fn[len(opts["prefix"]):]
                ui.write('\t%s:%s->%s%s \n' % (fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
                                      '.'.join([str(x) for x in f.revision]), ['', '(DEAD)'][f.dead]))
            ui.write('\n')

        # have we seen the start tag?
        if revisions and off:
            if revisions[0] == str(cs.id) or \
                revisions[0] in cs.tags:
                off = False

        # see if we reached the end tag
        if len(revisions)>1 and not off:
            if revisions[1] == str(cs.id) or \
                revisions[1] in cs.tags:
                break
General Comments 0
You need to be logged in to leave comments. Login now