##// END OF EJS Templates
convert/cvsps: wrap long lines
Martin Geisler -
r8661:883f14fc default
parent child Browse files
Show More
@@ -1,780 +1,792 b''
1 1 #
2 2 # Mercurial built-in replacement for cvsps.
3 3 #
4 4 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2, incorporated herein by reference.
8 8
9 9 import os
10 10 import re
11 11 import cPickle as pickle
12 12 from mercurial import util
13 13 from mercurial.i18n import _
14 14
def listsort(list, key):
    """Sort *list* in place by *key*, working on Python 2.3.

    Python 2.4 grew the ``key=`` argument to ``list.sort()``; on 2.3
    that call raises TypeError, so fall back to an equivalent (slower)
    comparison function built from ``key``.
    """
    try:
        list.sort(key=key)
    except TypeError:
        # Python 2.3 path: emulate key-based sorting with cmp.
        list.sort(lambda l, r: cmp(key(l), key(r)))
21 21
class logentry(object):
    '''Class logentry has the following attributes:
        .author    - author name as CVS knows it
        .branch    - name of branch this revision is on
        .branches  - revision tuple of branches starting at this revision
        .comment   - commit message
        .date      - the commit date as a (time, tz) tuple
        .dead      - true if file revision is dead
        .file      - Name of file
        .lines     - a tuple (+lines, -lines) or None
        .parent    - Previous revision of this entry
        .rcs       - name of file as returned from CVS
        .revision  - revision number as tuple
        .tags      - list of tags on the file
        .synthetic - is this a synthetic "file ... added on ..." revision?
        .mergepoint- the branch that has been merged from
                     (if present in rlog output)
    '''
    def __init__(self, **entries):
        # Attributes are set dynamically from keyword arguments; see the
        # class docstring for the names createlog() assigns.
        self.__dict__.update(entries)

    def __repr__(self):
        return "<%s at 0x%x: %s %s>" % (self.__class__.__name__,
                                        id(self),
                                        self.file,
                                        ".".join(map(str, self.revision)))
47 48
class logerror(Exception):
    # Raised for fatal problems while reading the CVS log, e.g. when the
    # working directory is not a CVS sandbox or the log cache overlaps
    # with new entries.
    pass
50 51
def getrepopath(cvspath):
    """Return the repository path from a CVS path.

    >>> getrepopath('/foo/bar')
    '/foo/bar'
    >>> getrepopath('c:/foo/bar')
    'c:/foo/bar'
    >>> getrepopath(':pserver:10/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:10c:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:c:/foo/bar')
    'c:/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
    'c:/foo/bar'
    """
    # According to CVS manual, CVS paths are expressed like:
    #    [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
    #
    # Unfortunately, Windows absolute paths start with a drive letter
    # like 'c:' making it harder to parse. Here we assume that drive
    # letters are only one character long and any CVS component before
    # the repository path is at least 2 characters long, and use this
    # to disambiguate.
    parts = cvspath.split(':')
    if len(parts) == 1:
        return parts[0]
    # Here there is an ambiguous case if we have a port number
    # immediately followed by a Windows driver letter. We assume this
    # never happens and decide it must be CVS path component,
    # therefore ignoring it.
    if len(parts[-2]) > 1:
        return parts[-1].lstrip('0123456789')
    return parts[-2] + ':' + parts[-1]
89 90
90 91 def createlog(ui, directory=None, root="", rlog=True, cache=None):
91 92 '''Collect the CVS rlog'''
92 93
93 94 # Because we store many duplicate commit log messages, reusing strings
94 95 # saves a lot of memory and pickle storage space.
95 96 _scache = {}
96 97 def scache(s):
97 98 "return a shared version of a string"
98 99 return _scache.setdefault(s, s)
99 100
100 101 ui.status(_('collecting CVS rlog\n'))
101 102
102 103 log = [] # list of logentry objects containing the CVS state
103 104
104 105 # patterns to match in CVS (r)log output, by state of use
105 106 re_00 = re.compile('RCS file: (.+)$')
106 107 re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
107 108 re_02 = re.compile('cvs (r?log|server): (.+)\n$')
108 re_03 = re.compile("(Cannot access.+CVSROOT)|(can't create temporary directory.+)$")
109 re_03 = re.compile("(Cannot access.+CVSROOT)|"
110 "(can't create temporary directory.+)$")
109 111 re_10 = re.compile('Working file: (.+)$')
110 112 re_20 = re.compile('symbolic names:')
111 113 re_30 = re.compile('\t(.+): ([\\d.]+)$')
112 114 re_31 = re.compile('----------------------------$')
113 re_32 = re.compile('=============================================================================$')
115 re_32 = re.compile('======================================='
116 '======================================$')
114 117 re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
115 re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?(.*mergepoint:\s+([^;]+);)?')
118 re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
119 r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
120 r'(.*mergepoint:\s+([^;]+);)?')
116 121 re_70 = re.compile('branches: (.+);$')
117 122
118 123 file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')
119 124
120 125 prefix = '' # leading path to strip of what we get from CVS
121 126
122 127 if directory is None:
123 128 # Current working directory
124 129
125 130 # Get the real directory in the repository
126 131 try:
127 132 prefix = file(os.path.join('CVS','Repository')).read().strip()
128 133 if prefix == ".":
129 134 prefix = ""
130 135 directory = prefix
131 136 except IOError:
132 137 raise logerror('Not a CVS sandbox')
133 138
134 139 if prefix and not prefix.endswith(os.sep):
135 140 prefix += os.sep
136 141
137 142 # Use the Root file in the sandbox, if it exists
138 143 try:
139 144 root = file(os.path.join('CVS','Root')).read().strip()
140 145 except IOError:
141 146 pass
142 147
143 148 if not root:
144 149 root = os.environ.get('CVSROOT', '')
145 150
146 151 # read log cache if one exists
147 152 oldlog = []
148 153 date = None
149 154
150 155 if cache:
151 156 cachedir = os.path.expanduser('~/.hg.cvsps')
152 157 if not os.path.exists(cachedir):
153 158 os.mkdir(cachedir)
154 159
155 160 # The cvsps cache pickle needs a uniquified name, based on the
156 161 # repository location. The address may have all sort of nasties
157 162 # in it, slashes, colons and such. So here we take just the
158 163 # alphanumerics, concatenated in a way that does not mix up the
159 164 # various components, so that
160 165 # :pserver:user@server:/path
161 166 # and
162 167 # /pserver/user/server/path
163 168 # are mapped to different cache file names.
164 169 cachefile = root.split(":") + [directory, "cache"]
165 170 cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
166 171 cachefile = os.path.join(cachedir,
167 172 '.'.join([s for s in cachefile if s]))
168 173
169 174 if cache == 'update':
170 175 try:
171 176 ui.note(_('reading cvs log cache %s\n') % cachefile)
172 177 oldlog = pickle.load(file(cachefile))
173 178 ui.note(_('cache has %d log entries\n') % len(oldlog))
174 179 except Exception, e:
175 180 ui.note(_('error reading cache: %r\n') % e)
176 181
177 182 if oldlog:
178 183 date = oldlog[-1].date # last commit date as a (time,tz) tuple
179 184 date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
180 185
181 186 # build the CVS commandline
182 187 cmd = ['cvs', '-q']
183 188 if root:
184 189 cmd.append('-d%s' % root)
185 190 p = util.normpath(getrepopath(root))
186 191 if not p.endswith('/'):
187 192 p += '/'
188 193 prefix = p + util.normpath(prefix)
189 194 cmd.append(['log', 'rlog'][rlog])
190 195 if date:
191 196 # no space between option and date string
192 197 cmd.append('-d>%s' % date)
193 198 cmd.append(directory)
194 199
195 200 # state machine begins here
196 201 tags = {} # dictionary of revisions on current file with their tags
197 202 branchmap = {} # mapping between branch names and revision numbers
198 203 state = 0
199 204 store = False # set when a new record can be appended
200 205
201 206 cmd = [util.shellquote(arg) for arg in cmd]
202 207 ui.note(_("running %s\n") % (' '.join(cmd)))
203 208 ui.debug(_("prefix=%r directory=%r root=%r\n") % (prefix, directory, root))
204 209
205 210 pfp = util.popen(' '.join(cmd))
206 211 peek = pfp.readline()
207 212 while True:
208 213 line = peek
209 214 if line == '':
210 215 break
211 216 peek = pfp.readline()
212 217 if line.endswith('\n'):
213 218 line = line[:-1]
214 219 #ui.debug('state=%d line=%r\n' % (state, line))
215 220
216 221 if state == 0:
217 222 # initial state, consume input until we see 'RCS file'
218 223 match = re_00.match(line)
219 224 if match:
220 225 rcs = match.group(1)
221 226 tags = {}
222 227 if rlog:
223 228 filename = util.normpath(rcs[:-2])
224 229 if filename.startswith(prefix):
225 230 filename = filename[len(prefix):]
226 231 if filename.startswith('/'):
227 232 filename = filename[1:]
228 233 if filename.startswith('Attic/'):
229 234 filename = filename[6:]
230 235 else:
231 236 filename = filename.replace('/Attic/', '/')
232 237 state = 2
233 238 continue
234 239 state = 1
235 240 continue
236 241 match = re_01.match(line)
237 242 if match:
238 243 raise Exception(match.group(1))
239 244 match = re_02.match(line)
240 245 if match:
241 246 raise Exception(match.group(2))
242 247 if re_03.match(line):
243 248 raise Exception(line)
244 249
245 250 elif state == 1:
246 251 # expect 'Working file' (only when using log instead of rlog)
247 252 match = re_10.match(line)
248 253 assert match, _('RCS file must be followed by working file')
249 254 filename = util.normpath(match.group(1))
250 255 state = 2
251 256
252 257 elif state == 2:
253 258 # expect 'symbolic names'
254 259 if re_20.match(line):
255 260 branchmap = {}
256 261 state = 3
257 262
258 263 elif state == 3:
259 264 # read the symbolic names and store as tags
260 265 match = re_30.match(line)
261 266 if match:
262 267 rev = [int(x) for x in match.group(2).split('.')]
263 268
264 269 # Convert magic branch number to an odd-numbered one
265 270 revn = len(rev)
266 271 if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
267 272 rev = rev[:-2] + rev[-1:]
268 273 rev = tuple(rev)
269 274
270 275 if rev not in tags:
271 276 tags[rev] = []
272 277 tags[rev].append(match.group(1))
273 278 branchmap[match.group(1)] = match.group(2)
274 279
275 280 elif re_31.match(line):
276 281 state = 5
277 282 elif re_32.match(line):
278 283 state = 0
279 284
280 285 elif state == 4:
281 286 # expecting '------' separator before first revision
282 287 if re_31.match(line):
283 288 state = 5
284 289 else:
285 assert not re_32.match(line), _('must have at least some revisions')
290 assert not re_32.match(line), _('must have at least '
291 'some revisions')
286 292
287 293 elif state == 5:
288 294 # expecting revision number and possibly (ignored) lock indication
289 295 # we create the logentry here from values stored in states 0 to 4,
290 296 # as this state is re-entered for subsequent revisions of a file.
291 297 match = re_50.match(line)
292 298 assert match, _('expected revision number')
293 299 e = logentry(rcs=scache(rcs), file=scache(filename),
294 300 revision=tuple([int(x) for x in match.group(1).split('.')]),
295 301 branches=[], parent=None,
296 302 synthetic=False)
297 303 state = 6
298 304
299 305 elif state == 6:
300 306 # expecting date, author, state, lines changed
301 307 match = re_60.match(line)
302 308 assert match, _('revision must be followed by date line')
303 309 d = match.group(1)
304 310 if d[2] == '/':
305 311 # Y2K
306 312 d = '19' + d
307 313
308 314 if len(d.split()) != 3:
309 315 # cvs log dates always in GMT
310 316 d = d + ' UTC'
311 e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S'])
317 e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
318 '%Y/%m/%d %H:%M:%S',
319 '%Y-%m-%d %H:%M:%S'])
312 320 e.author = scache(match.group(2))
313 321 e.dead = match.group(3).lower() == 'dead'
314 322
315 323 if match.group(5):
316 324 if match.group(6):
317 325 e.lines = (int(match.group(5)), int(match.group(6)))
318 326 else:
319 327 e.lines = (int(match.group(5)), 0)
320 328 elif match.group(6):
321 329 e.lines = (0, int(match.group(6)))
322 330 else:
323 331 e.lines = None
324 332
325 333 if match.group(7): # cvsnt mergepoint
326 334 myrev = match.group(8).split('.')
327 335 if len(myrev) == 2: # head
328 336 e.mergepoint = 'HEAD'
329 337 else:
330 338 myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
331 339 branches = [b for b in branchmap if branchmap[b] == myrev]
332 340 assert len(branches) == 1, 'unknown branch: %s' % e.mergepoint
333 341 e.mergepoint = branches[0]
334 342 else:
335 343 e.mergepoint = None
336 344 e.comment = []
337 345 state = 7
338 346
339 347 elif state == 7:
340 348 # read the revision numbers of branches that start at this revision
341 349 # or store the commit log message otherwise
342 350 m = re_70.match(line)
343 351 if m:
344 352 e.branches = [tuple([int(y) for y in x.strip().split('.')])
345 353 for x in m.group(1).split(';')]
346 354 state = 8
347 355 elif re_31.match(line) and re_50.match(peek):
348 356 state = 5
349 357 store = True
350 358 elif re_32.match(line):
351 359 state = 0
352 360 store = True
353 361 else:
354 362 e.comment.append(line)
355 363
356 364 elif state == 8:
357 365 # store commit log message
358 366 if re_31.match(line):
359 367 state = 5
360 368 store = True
361 369 elif re_32.match(line):
362 370 state = 0
363 371 store = True
364 372 else:
365 373 e.comment.append(line)
366 374
367 375 # When a file is added on a branch B1, CVS creates a synthetic
368 376 # dead trunk revision 1.1 so that the branch has a root.
369 377 # Likewise, if you merge such a file to a later branch B2 (one
370 378 # that already existed when the file was added on B1), CVS
371 379 # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
372 380 # these revisions now, but mark them synthetic so
373 381 # createchangeset() can take care of them.
374 382 if (store and
375 383 e.dead and
376 384 e.revision[-1] == 1 and # 1.1 or 1.1.x.1
377 385 len(e.comment) == 1 and
378 386 file_added_re.match(e.comment[0])):
379 387 ui.debug(_('found synthetic revision in %s: %r\n')
380 388 % (e.rcs, e.comment[0]))
381 389 e.synthetic = True
382 390
383 391 if store:
384 392 # clean up the results and save in the log.
385 393 store = False
386 394 e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
387 395 e.comment = scache('\n'.join(e.comment))
388 396
389 397 revn = len(e.revision)
390 398 if revn > 3 and (revn % 2) == 0:
391 399 e.branch = tags.get(e.revision[:-1], [None])[0]
392 400 else:
393 401 e.branch = None
394 402
395 403 log.append(e)
396 404
397 405 if len(log) % 100 == 0:
398 406 ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')
399 407
400 408 listsort(log, key=lambda x:(x.rcs, x.revision))
401 409
402 410 # find parent revisions of individual files
403 411 versions = {}
404 412 for e in log:
405 413 branch = e.revision[:-1]
406 414 p = versions.get((e.rcs, branch), None)
407 415 if p is None:
408 416 p = e.revision[:-2]
409 417 e.parent = p
410 418 versions[(e.rcs, branch)] = e.revision
411 419
412 420 # update the log cache
413 421 if cache:
414 422 if log:
415 423 # join up the old and new logs
416 424 listsort(log, key=lambda x:x.date)
417 425
418 426 if oldlog and oldlog[-1].date >= log[0].date:
419 427 raise logerror('Log cache overlaps with new log entries,'
420 428 ' re-run without cache.')
421 429
422 430 log = oldlog + log
423 431
424 432 # write the new cachefile
425 433 ui.note(_('writing cvs log cache %s\n') % cachefile)
426 434 pickle.dump(log, file(cachefile, 'w'))
427 435 else:
428 436 log = oldlog
429 437
430 438 ui.status(_('%d log entries\n') % len(log))
431 439
432 440 return log
433 441
434 442
class changeset(object):
    '''Class changeset has the following attributes:
        .id        - integer identifying this changeset (list index)
        .author    - author name as CVS knows it
        .branch    - name of branch this changeset is on, or None
        .comment   - commit message
        .date      - the commit date as a (time,tz) tuple
        .entries   - list of logentry objects in this changeset
        .parents   - list of one or two parent changesets
        .tags      - list of tags on this changeset
        .synthetic - from synthetic revision "file ... added on branch ..."
        .mergepoint- the branch that has been merged from
                     (if present in rlog output)
    '''
    def __init__(self, **entries):
        # Attributes are set dynamically from keyword arguments; see the
        # class docstring for the names createchangeset() assigns.
        self.__dict__.update(entries)

    def __repr__(self):
        return "<%s at 0x%x: %s>" % (self.__class__.__name__,
                                     id(self),
                                     getattr(self, 'id', "(no id)"))
455 464
def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
    '''Convert log into changesets.

    Groups the logentry objects in *log* into changeset objects by
    matching comment, author and branch within a *fuzz*-second window,
    marks and later drops synthetic changesets, collects tags, links
    parents, and handles {{mergefrombranch ...}} / {{mergetobranch ...}}
    markers in commit messages.  Returns the list of changesets, each
    numbered via its ``.id`` attribute.
    '''

    ui.status(_('creating changesets\n'))

    # Merge changesets

    listsort(log, key=lambda x:(x.comment, x.author, x.branch, x.date))

    changesets = []
    files = set()
    c = None
    for i, e in enumerate(log):

        # Check if log entry belongs to the current changeset or not.
        if not (c and
                  e.comment == c.comment and
                  e.author == c.author and
                  e.branch == c.branch and
                  ((c.date[0] + c.date[1]) <=
                   (e.date[0] + e.date[1]) <=
                   (c.date[0] + c.date[1]) + fuzz) and
                  e.file not in files):
            c = changeset(comment=e.comment, author=e.author,
                          branch=e.branch, date=e.date, entries=[],
                          mergepoint=getattr(e, 'mergepoint', None))
            changesets.append(c)
            files = set()
            if len(changesets) % 100 == 0:
                t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
                ui.status(util.ellipsis(t, 80) + '\n')

        c.entries.append(e)
        files.add(e.file)
        c.date = e.date       # changeset date is date of latest commit in it

    # Mark synthetic changesets

    for c in changesets:
        # Synthetic revisions always get their own changeset, because
        # the log message includes the filename.  E.g. if you add file3
        # and file4 on a branch, you get four log entries and three
        # changesets:
        #   "File file3 was added on branch ..." (synthetic, 1 entry)
        #   "File file4 was added on branch ..." (synthetic, 1 entry)
        #   "Add file3 and file4 to fix ..."     (real, 2 entries)
        # Hence the check for 1 entry here.
        synth = getattr(c.entries[0], 'synthetic', None)
        c.synthetic = (len(c.entries) == 1 and synth)

    # Sort files in each changeset

    for c in changesets:
        def pathcompare(l, r):
            'Mimic cvsps sorting order'
            l = l.split('/')
            r = r.split('/')
            nl = len(l)
            nr = len(r)
            n = min(nl, nr)
            for i in range(n):
                if i + 1 == nl and nl < nr:
                    return -1
                elif i + 1 == nr and nl > nr:
                    return +1
                elif l[i] < r[i]:
                    return -1
                elif l[i] > r[i]:
                    return +1
            return 0
        def entitycompare(l, r):
            return pathcompare(l.file, r.file)

        c.entries.sort(entitycompare)

    # Sort changesets by date

    def cscmp(l, r):
        d = sum(l.date) - sum(r.date)
        if d:
            return d

        # detect vendor branches and initial commits on a branch
        le = {}
        for e in l.entries:
            le[e.rcs] = e.revision
        re = {}
        for e in r.entries:
            re[e.rcs] = e.revision

        d = 0
        for e in l.entries:
            if re.get(e.rcs, None) == e.parent:
                assert not d
                d = 1
                break

        for e in r.entries:
            if le.get(e.rcs, None) == e.parent:
                assert not d
                d = -1
                break

        return d

    changesets.sort(cscmp)

    # Collect tags

    globaltags = {}
    for c in changesets:
        for e in c.entries:
            for tag in e.tags:
                # remember which is the latest changeset to have this tag
                globaltags[tag] = c

    for c in changesets:
        tags = set()
        for e in c.entries:
            tags.update(e.tags)
        # remember tags only if this is the latest changeset to have it
        c.tags = sorted(tag for tag in tags if globaltags[tag] is c)

    # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
    # by inserting dummy changesets with two parents, and handle
    # {{mergefrombranch BRANCHNAME}} by setting two parents.

    if mergeto is None:
        mergeto = r'{{mergetobranch ([-\w]+)}}'
    if mergeto:
        mergeto = re.compile(mergeto)

    if mergefrom is None:
        mergefrom = r'{{mergefrombranch ([-\w]+)}}'
    if mergefrom:
        mergefrom = re.compile(mergefrom)

    versions = {}    # changeset index where we saw any particular file version
    branches = {}    # changeset index where we saw a branch
    n = len(changesets)
    i = 0
    while i<n:
        c = changesets[i]

        for f in c.entries:
            versions[(f.rcs, f.revision)] = i

        p = None
        if c.branch in branches:
            p = branches[c.branch]
        else:
            for f in c.entries:
                p = max(p, versions.get((f.rcs, f.parent), None))

        c.parents = []
        if p is not None:
            p = changesets[p]

            # Ensure no changeset has a synthetic changeset as a parent.
            while p.synthetic:
                assert len(p.parents) <= 1, \
                       _('synthetic changeset cannot have multiple parents')
                if p.parents:
                    p = p.parents[0]
                else:
                    p = None
                    break

            if p is not None:
                c.parents.append(p)

        if c.mergepoint:
            if c.mergepoint == 'HEAD':
                c.mergepoint = None
            c.parents.append(changesets[branches[c.mergepoint]])

        if mergefrom:
            m = mergefrom.search(c.comment)
            if m:
                m = m.group(1)
                if m == 'HEAD':
                    m = None
                try:
                    candidate = changesets[branches[m]]
                except KeyError:
                    ui.warn(_("warning: CVS commit message references "
                              "non-existent branch %r:\n%s\n")
                            % (m, c.comment))
                if m in branches and c.branch != m and not candidate.synthetic:
                    c.parents.append(candidate)

        if mergeto:
            m = mergeto.search(c.comment)
            if m:
                try:
                    m = m.group(1)
                    if m == 'HEAD':
                        m = None
                except:
                    m = None # if no group found then merge to HEAD
                if m in branches and c.branch != m:
                    # insert empty changeset for merge
                    cc = changeset(author=c.author, branch=m, date=c.date,
                            comment='convert-repo: CVS merge from branch %s' % c.branch,
                            entries=[], tags=[], parents=[changesets[branches[m]], c])
                    changesets.insert(i + 1, cc)
                    branches[m] = i + 1

                    # adjust our loop counters now we have inserted a new entry
                    n += 1
                    i += 2
                    continue

        branches[c.branch] = i
        i += 1

    # Drop synthetic changesets (safe now that we have ensured no other
    # changesets can have them as parents).
    i = 0
    while i < len(changesets):
        if changesets[i].synthetic:
            del changesets[i]
        else:
            i += 1

    # Number changesets

    for i, c in enumerate(changesets):
        c.id = i + 1

    ui.status(_('%d changeset entries\n') % len(changesets))

    return changesets
689 698
690 699
691 700 def debugcvsps(ui, *args, **opts):
692 '''Read CVS rlog for current directory or named path in repository, and
693 convert the log to changesets based on matching commit log entries and dates.'''
694
701 '''Read CVS rlog for current directory or named path in
702 repository, and convert the log to changesets based on matching
703 commit log entries and dates.
704 '''
695 705 if opts["new_cache"]:
696 706 cache = "write"
697 707 elif opts["update_cache"]:
698 708 cache = "update"
699 709 else:
700 710 cache = None
701 711
702 712 revisions = opts["revisions"]
703 713
704 714 try:
705 715 if args:
706 716 log = []
707 717 for d in args:
708 718 log += createlog(ui, d, root=opts["root"], cache=cache)
709 719 else:
710 720 log = createlog(ui, root=opts["root"], cache=cache)
711 721 except logerror, e:
712 722 ui.write("%r\n"%e)
713 723 return
714 724
715 725 changesets = createchangeset(ui, log, opts["fuzz"])
716 726 del log
717 727
718 728 # Print changesets (optionally filtered)
719 729
720 730 off = len(revisions)
721 731 branches = {} # latest version number in each branch
722 732 ancestors = {} # parent branch
723 733 for cs in changesets:
724 734
725 735 if opts["ancestors"]:
726 736 if cs.branch not in branches and cs.parents and cs.parents[0].id:
727 ancestors[cs.branch] = changesets[cs.parents[0].id-1].branch, cs.parents[0].id
737 ancestors[cs.branch] = (changesets[cs.parents[0].id-1].branch,
738 cs.parents[0].id)
728 739 branches[cs.branch] = cs.id
729 740
730 741 # limit by branches
731 742 if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
732 743 continue
733 744
734 745 if not off:
735 746 # Note: trailing spaces on several lines here are needed to have
736 747 # bug-for-bug compatibility with cvsps.
737 748 ui.write('---------------------\n')
738 749 ui.write('PatchSet %d \n' % cs.id)
739 ui.write('Date: %s\n' % util.datestr(cs.date, '%Y/%m/%d %H:%M:%S %1%2'))
750 ui.write('Date: %s\n' % util.datestr(cs.date,
751 '%Y/%m/%d %H:%M:%S %1%2'))
740 752 ui.write('Author: %s\n' % cs.author)
741 753 ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
742 754 ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags)>1],
743 755 ','.join(cs.tags) or '(none)'))
744 756 if opts["parents"] and cs.parents:
745 757 if len(cs.parents)>1:
746 758 ui.write('Parents: %s\n' % (','.join([str(p.id) for p in cs.parents])))
747 759 else:
748 760 ui.write('Parent: %d\n' % cs.parents[0].id)
749 761
750 762 if opts["ancestors"]:
751 763 b = cs.branch
752 764 r = []
753 765 while b:
754 766 b, c = ancestors[b]
755 767 r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
756 768 if r:
757 769 ui.write('Ancestors: %s\n' % (','.join(r)))
758 770
759 771 ui.write('Log:\n')
760 772 ui.write('%s\n\n' % cs.comment)
761 773 ui.write('Members: \n')
762 774 for f in cs.entries:
763 775 fn = f.file
764 776 if fn.startswith(opts["prefix"]):
765 777 fn = fn[len(opts["prefix"]):]
766 778 ui.write('\t%s:%s->%s%s \n' % (fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
767 779 '.'.join([str(x) for x in f.revision]), ['', '(DEAD)'][f.dead]))
768 780 ui.write('\n')
769 781
770 782 # have we seen the start tag?
771 783 if revisions and off:
772 784 if revisions[0] == str(cs.id) or \
773 785 revisions[0] in cs.tags:
774 786 off = False
775 787
776 788 # see if we reached the end tag
777 789 if len(revisions)>1 and not off:
778 790 if revisions[1] == str(cs.id) or \
779 791 revisions[1] in cs.tags:
780 792 break
General Comments 0
You need to be logged in to leave comments. Login now