##// END OF EJS Templates
convert/cvsps: use set.update for bulk update
Martin Geisler -
r8483:221786b9 default
parent child Browse files
Show More
@@ -1,781 +1,780
1 1 #
2 2 # Mercurial built-in replacement for cvsps.
3 3 #
4 4 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2, incorporated herein by reference.
8 8
9 9 import os
10 10 import re
11 11 import cPickle as pickle
12 12 from mercurial import util
13 13 from mercurial.i18n import _
14 14
def listsort(list, key):
    "helper to sort by key in Python 2.3"
    # NOTE(review): the parameter deliberately shadows the builtin 'list';
    # kept as-is since callers may pass it by keyword.
    # list.sort(key=...) only exists from Python 2.4 on; on 2.3 the keyword
    # raises TypeError, so fall back to an equivalent comparison function.
    try:
        list.sort(key=key)
    except TypeError:
        list.sort(lambda l, r: cmp(key(l), key(r)))
21 21
class logentry(object):
    '''A single file revision parsed from CVS (r)log output.

    Attributes:
    .author    - author name as CVS knows it
    .branch    - name of branch this revision is on
    .branches  - revision tuple of branches starting at this revision
    .comment   - commit message
    .date      - the commit date as a (time, tz) tuple
    .dead      - true if file revision is dead
    .file      - name of file
    .lines     - a tuple (+lines, -lines) or None
    .parent    - previous revision of this entry
    .rcs       - name of file as returned from CVS
    .revision  - revision number as tuple
    .tags      - list of tags on the file
    .synthetic - is this a synthetic "file ... added on ..." revision?
    .mergepoint- the branch that has been merged from (if present in rlog
                 output)
    '''
    def __init__(self, **entries):
        # Attributes are supplied freely as keyword arguments; there is no
        # fixed constructor signature.
        self.__dict__.update(entries)

    def __repr__(self):
        dotted = ".".join(str(part) for part in self.revision)
        return "<%s at 0x%x: %s %s>" % (self.__class__.__name__,
                                        id(self),
                                        self.file,
                                        dotted)
47 47
class logerror(Exception):
    """Raised when the CVS log cannot be read or used (e.g. no sandbox,
    or the log cache overlaps with newly collected entries)."""
50 50
def getrepopath(cvspath):
    """Return the repository path from a CVS path.

    >>> getrepopath('/foo/bar')
    '/foo/bar'
    >>> getrepopath('c:/foo/bar')
    'c:/foo/bar'
    >>> getrepopath(':pserver:10/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:10c:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:c:/foo/bar')
    'c:/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
    'c:/foo/bar'
    """
    # The CVS manual describes repository addresses as:
    #   [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
    # Windows absolute paths ('c:/...') complicate parsing because the
    # drive letter also precedes a colon.  We assume a drive letter is
    # exactly one character, while every other CVS component in front of
    # the repository path is at least two characters, and disambiguate on
    # that basis.
    parts = cvspath.split(':')
    if len(parts) == 1:
        # No colon at all: already a plain repository path.
        return parts[0]
    before, path = parts[-2], parts[-1]
    if len(before) > 1:
        # A port number immediately followed by a Windows drive letter
        # would be ambiguous here; we assume that never happens, so any
        # leading digits must be a port number and are stripped.
        return path.lstrip('0123456789')
    # Single-character component: treat it as a Windows drive letter.
    return before + ':' + path
89 89
def createlog(ui, directory=None, root="", rlog=True, cache=None):
    '''Collect the CVS rlog.

    Runs "cvs rlog" (or "cvs log" when rlog is False) for directory,
    parses the output with a state machine and returns a list of
    logentry objects.  When cache is 'write' or 'update', the parsed
    log is pickled under ~/.hg.cvsps so later runs can be incremental;
    'update' first loads the old cache and only asks CVS for entries
    newer than its last commit date.

    Raises logerror when directory is None and no CVS sandbox is found,
    or when the cache overlaps with newly collected entries.
    '''

    # Because we store many duplicate commit log messages, reusing strings
    # saves a lot of memory and pickle storage space.
    _scache = {}
    def scache(s):
        "return a shared version of a string"
        return _scache.setdefault(s, s)

    ui.status(_('collecting CVS rlog\n'))

    log = []      # list of logentry objects containing the CVS state

    # patterns to match in CVS (r)log output, by state of use
    re_00 = re.compile('RCS file: (.+)$')
    re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
    re_02 = re.compile('cvs (r?log|server): (.+)\n$')
    re_03 = re.compile("(Cannot access.+CVSROOT)|(can't create temporary directory.+)$")
    re_10 = re.compile('Working file: (.+)$')
    re_20 = re.compile('symbolic names:')
    re_30 = re.compile('\t(.+): ([\\d.]+)$')
    re_31 = re.compile('----------------------------$')
    re_32 = re.compile('=============================================================================$')
    re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
    re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?(.*mergepoint:\s+([^;]+);)?')
    re_70 = re.compile('branches: (.+);$')

    file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')

    prefix = ''   # leading path to strip of what we get from CVS

    if directory is None:
        # Current working directory

        # Get the real directory in the repository
        try:
            prefix = file(os.path.join('CVS','Repository')).read().strip()
            if prefix == ".":
                prefix = ""
            directory = prefix
        except IOError:
            raise logerror('Not a CVS sandbox')

        if prefix and not prefix.endswith(os.sep):
            prefix += os.sep

        # Use the Root file in the sandbox, if it exists
        try:
            root = file(os.path.join('CVS','Root')).read().strip()
        except IOError:
            pass

    if not root:
        root = os.environ.get('CVSROOT', '')

    # read log cache if one exists
    oldlog = []
    date = None

    if cache:
        cachedir = os.path.expanduser('~/.hg.cvsps')
        if not os.path.exists(cachedir):
            os.mkdir(cachedir)

        # The cvsps cache pickle needs a uniquified name, based on the
        # repository location. The address may have all sort of nasties
        # in it, slashes, colons and such. So here we take just the
        # alphanumerics, concatenated in a way that does not mix up the
        # various components, so that
        #    :pserver:user@server:/path
        # and
        #    /pserver/user/server/path
        # are mapped to different cache file names.
        cachefile = root.split(":") + [directory, "cache"]
        cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
        cachefile = os.path.join(cachedir,
                                 '.'.join([s for s in cachefile if s]))

        if cache == 'update':
            try:
                ui.note(_('reading cvs log cache %s\n') % cachefile)
                oldlog = pickle.load(file(cachefile))
                ui.note(_('cache has %d log entries\n') % len(oldlog))
            except Exception, e:
                # best effort: a missing/corrupt cache just means a full run
                ui.note(_('error reading cache: %r\n') % e)

            if oldlog:
                date = oldlog[-1].date    # last commit date as a (time,tz) tuple
                date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')

    # build the CVS commandline
    cmd = ['cvs', '-q']
    if root:
        cmd.append('-d%s' % root)
        p = util.normpath(getrepopath(root))
        if not p.endswith('/'):
            p += '/'
        prefix = p + util.normpath(prefix)
    cmd.append(['log', 'rlog'][rlog])
    if date:
        # no space between option and date string
        cmd.append('-d>%s' % date)
    cmd.append(directory)

    # state machine begins here; states 0-8 track where we are inside the
    # per-file, per-revision structure of the (r)log output
    tags = {}     # dictionary of revisions on current file with their tags
    branchmap = {} # mapping between branch names and revision numbers
    state = 0
    store = False # set when a new record can be appended

    cmd = [util.shellquote(arg) for arg in cmd]
    ui.note(_("running %s\n") % (' '.join(cmd)))
    ui.debug(_("prefix=%r directory=%r root=%r\n") % (prefix, directory, root))

    pfp = util.popen(' '.join(cmd))
    peek = pfp.readline()
    while True:
        line = peek
        if line == '':
            break
        peek = pfp.readline()
        if line.endswith('\n'):
            line = line[:-1]
        #ui.debug('state=%d line=%r\n' % (state, line))

        if state == 0:
            # initial state, consume input until we see 'RCS file'
            match = re_00.match(line)
            if match:
                rcs = match.group(1)
                tags = {}
                if rlog:
                    # strip the ',v' suffix and map the repository path
                    # (including any Attic/ component for dead files) back
                    # to a working-directory filename
                    filename = util.normpath(rcs[:-2])
                    if filename.startswith(prefix):
                        filename = filename[len(prefix):]
                    if filename.startswith('/'):
                        filename = filename[1:]
                    if filename.startswith('Attic/'):
                        filename = filename[6:]
                    else:
                        filename = filename.replace('/Attic/', '/')
                    state = 2
                    continue
                state = 1
                continue
            match = re_01.match(line)
            if match:
                raise Exception(match.group(1))
            match = re_02.match(line)
            if match:
                raise Exception(match.group(2))
            if re_03.match(line):
                raise Exception(line)

        elif state == 1:
            # expect 'Working file' (only when using log instead of rlog)
            match = re_10.match(line)
            assert match, _('RCS file must be followed by working file')
            filename = util.normpath(match.group(1))
            state = 2

        elif state == 2:
            # expect 'symbolic names'
            if re_20.match(line):
                branchmap = {}
                state = 3

        elif state == 3:
            # read the symbolic names and store as tags
            match = re_30.match(line)
            if match:
                rev = [int(x) for x in match.group(2).split('.')]

                # Convert magic branch number to an odd-numbered one
                revn = len(rev)
                if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
                    rev = rev[:-2] + rev[-1:]
                rev = tuple(rev)

                if rev not in tags:
                    tags[rev] = []
                tags[rev].append(match.group(1))
                branchmap[match.group(1)] = match.group(2)

            elif re_31.match(line):
                state = 5
            elif re_32.match(line):
                state = 0

        elif state == 4:
            # expecting '------' separator before first revision
            if re_31.match(line):
                state = 5
            else:
                assert not re_32.match(line), _('must have at least some revisions')

        elif state == 5:
            # expecting revision number and possibly (ignored) lock indication
            # we create the logentry here from values stored in states 0 to 4,
            # as this state is re-entered for subsequent revisions of a file.
            match = re_50.match(line)
            assert match, _('expected revision number')
            e = logentry(rcs=scache(rcs), file=scache(filename),
                    revision=tuple([int(x) for x in match.group(1).split('.')]),
                    branches=[], parent=None,
                    synthetic=False)
            state = 6

        elif state == 6:
            # expecting date, author, state, lines changed
            match = re_60.match(line)
            assert match, _('revision must be followed by date line')
            d = match.group(1)
            if d[2] == '/':
                # Y2K
                d = '19' + d

            if len(d.split()) != 3:
                # cvs log dates always in GMT
                d = d + ' UTC'
            e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S'])
            e.author = scache(match.group(2))
            e.dead = match.group(3).lower() == 'dead'

            # group 5/6 are the '+n'/'-n' halves of the optional lines field
            if match.group(5):
                if match.group(6):
                    e.lines = (int(match.group(5)), int(match.group(6)))
                else:
                    e.lines = (int(match.group(5)), 0)
            elif match.group(6):
                e.lines = (0, int(match.group(6)))
            else:
                e.lines = None

            if match.group(7): # cvsnt mergepoint
                myrev = match.group(8).split('.')
                if len(myrev) == 2: # head
                    e.mergepoint = 'HEAD'
                else:
                    # rebuild the magic branch number and look the branch
                    # name up in branchmap
                    myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
                    branches = [b for b in branchmap if branchmap[b] == myrev]
                    assert len(branches) == 1, 'unknown branch: %s' % e.mergepoint
                    e.mergepoint = branches[0]
            else:
                e.mergepoint = None
            e.comment = []
            state = 7

        elif state == 7:
            # read the revision numbers of branches that start at this revision
            # or store the commit log message otherwise
            m = re_70.match(line)
            if m:
                e.branches = [tuple([int(y) for y in x.strip().split('.')])
                              for x in m.group(1).split(';')]
                state = 8
            elif re_31.match(line) and re_50.match(peek):
                # the '----' separator is only a record boundary when the
                # next line is a revision line; hence the one-line lookahead
                state = 5
                store = True
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        elif state == 8:
            # store commit log message
            if re_31.match(line):
                state = 5
                store = True
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        # When a file is added on a branch B1, CVS creates a synthetic
        # dead trunk revision 1.1 so that the branch has a root.
        # Likewise, if you merge such a file to a later branch B2 (one
        # that already existed when the file was added on B1), CVS
        # creates a synthetic dead revision 1.1.x.1 on B2.  Don't drop
        # these revisions now, but mark them synthetic so
        # createchangeset() can take care of them.
        if (store and
              e.dead and
              e.revision[-1] == 1 and      # 1.1 or 1.1.x.1
              len(e.comment) == 1 and
              file_added_re.match(e.comment[0])):
            ui.debug(_('found synthetic revision in %s: %r\n')
                     % (e.rcs, e.comment[0]))
            e.synthetic = True

        if store:
            # clean up the results and save in the log.
            store = False
            e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
            e.comment = scache('\n'.join(e.comment))

            revn = len(e.revision)
            if revn > 3 and (revn % 2) == 0:
                # branch revision: the branch name is the tag on the
                # (even-length) revision prefix, if any
                e.branch = tags.get(e.revision[:-1], [None])[0]
            else:
                e.branch = None

            log.append(e)

            if len(log) % 100 == 0:
                ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')

    listsort(log, key=lambda x:(x.rcs, x.revision))

    # find parent revisions of individual files
    versions = {}
    for e in log:
        branch = e.revision[:-1]
        p = versions.get((e.rcs, branch), None)
        if p is None:
            # first revision we see on this branch: parent is the branch
            # point (revision with the last two components dropped)
            p = e.revision[:-2]
        e.parent = p
        versions[(e.rcs, branch)] = e.revision

    # update the log cache
    if cache:
        if log:
            # join up the old and new logs
            listsort(log, key=lambda x:x.date)

            if oldlog and oldlog[-1].date >= log[0].date:
                raise logerror('Log cache overlaps with new log entries,'
                               ' re-run without cache.')

            log = oldlog + log

            # write the new cachefile
            ui.note(_('writing cvs log cache %s\n') % cachefile)
            pickle.dump(log, file(cachefile, 'w'))
        else:
            log = oldlog

    ui.status(_('%d log entries\n') % len(log))

    return log
433 433
434 434
class changeset(object):
    '''A group of logentry objects that form one converted commit.

    Attributes:
    .id        - integer identifying this changeset (list index)
    .author    - author name as CVS knows it
    .branch    - name of branch this changeset is on, or None
    .comment   - commit message
    .date      - the commit date as a (time,tz) tuple
    .entries   - list of logentry objects in this changeset
    .parents   - list of one or two parent changesets
    .tags      - list of tags on this changeset
    .synthetic - from synthetic revision "file ... added on branch ..."
    .mergepoint- the branch that has been merged from (if present in rlog
                 output)
    '''
    def __init__(self, **entries):
        # Attributes are supplied freely as keyword arguments; there is no
        # fixed constructor signature.
        self.__dict__.update(entries)

    def __repr__(self):
        ident = getattr(self, 'id', "(no id)")
        return "<%s at 0x%x: %s>" % (self.__class__.__name__,
                                     id(self),
                                     ident)
455 455
def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
    '''Convert log into changesets.

    Groups logentry objects into changeset objects: entries with the same
    comment, author and branch whose dates lie within fuzz seconds of the
    changeset's date (and that do not touch the same file twice) are
    merged.  mergefrom/mergeto are regex patterns (defaulting to the
    {{mergefrombranch ...}} / {{mergetobranch ...}} markers) used to wire
    up extra merge parents from commit messages.  Returns the ordered,
    numbered list of changesets with synthetic ones removed.
    '''

    ui.status(_('creating changesets\n'))

    # Merge changesets

    listsort(log, key=lambda x:(x.comment, x.author, x.branch, x.date))

    changesets = []
    files = set()
    c = None
    for i, e in enumerate(log):

        # Check if log entry belongs to the current changeset or not.
        if not (c and
                  e.comment == c.comment and
                  e.author == c.author and
                  e.branch == c.branch and
                  ((c.date[0] + c.date[1]) <=
                   (e.date[0] + e.date[1]) <=
                   (c.date[0] + c.date[1]) + fuzz) and
                  e.file not in files):
            c = changeset(comment=e.comment, author=e.author,
                          branch=e.branch, date=e.date, entries=[],
                          mergepoint=getattr(e, 'mergepoint', None))
            changesets.append(c)
            files = set()
            if len(changesets) % 100 == 0:
                t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
                ui.status(util.ellipsis(t, 80) + '\n')

        c.entries.append(e)
        files.add(e.file)
        c.date = e.date       # changeset date is date of latest commit in it

    # Mark synthetic changesets

    for c in changesets:
        # Synthetic revisions always get their own changeset, because
        # the log message includes the filename.  E.g. if you add file3
        # and file4 on a branch, you get four log entries and three
        # changesets:
        #   "File file3 was added on branch ..." (synthetic, 1 entry)
        #   "File file4 was added on branch ..." (synthetic, 1 entry)
        #   "Add file3 and file4 to fix ..."     (real, 2 entries)
        # Hence the check for 1 entry here.
        synth = getattr(c.entries[0], 'synthetic', None)
        c.synthetic = (len(c.entries) == 1 and synth)

    # Sort files in each changeset

    for c in changesets:
        def pathcompare(l, r):
            'Mimic cvsps sorting order'
            l = l.split('/')
            r = r.split('/')
            nl = len(l)
            nr = len(r)
            n = min(nl, nr)
            for i in range(n):
                if i + 1 == nl and nl < nr:
                    return -1
                elif i + 1 == nr and nl > nr:
                    return +1
                elif l[i] < r[i]:
                    return -1
                elif l[i] > r[i]:
                    return +1
            return 0
        def entitycompare(l, r):
            return pathcompare(l.file, r.file)

        c.entries.sort(entitycompare)

    # Sort changesets by date

    def cscmp(l, r):
        # primary key: total commit time; ties are broken by parent links
        d = sum(l.date) - sum(r.date)
        if d:
            return d

        # detect vendor branches and initial commits on a branch
        le = {}
        for e in l.entries:
            le[e.rcs] = e.revision
        # NOTE(review): this local dict shadows the module-level 're'
        # (regex) import within cscmp's scope.
        re = {}
        for e in r.entries:
            re[e.rcs] = e.revision

        d = 0
        for e in l.entries:
            if re.get(e.rcs, None) == e.parent:
                assert not d
                d = 1
                break

        for e in r.entries:
            if le.get(e.rcs, None) == e.parent:
                assert not d
                d = -1
                break

        return d

    changesets.sort(cscmp)

    # Collect tags

    globaltags = {}
    for c in changesets:
        for e in c.entries:
            for tag in e.tags:
                # remember which is the latest changeset to have this tag
                globaltags[tag] = c

    for c in changesets:
        tags = set()
        for e in c.entries:
            tags.update(e.tags)
        # remember tags only if this is the latest changeset to have it
        c.tags = sorted(tag for tag in tags if globaltags[tag] is c)

    # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
    # by inserting dummy changesets with two parents, and handle
    # {{mergefrombranch BRANCHNAME}} by setting two parents.

    if mergeto is None:
        mergeto = r'{{mergetobranch ([-\w]+)}}'
    if mergeto:
        mergeto = re.compile(mergeto)

    if mergefrom is None:
        mergefrom = r'{{mergefrombranch ([-\w]+)}}'
    if mergefrom:
        mergefrom = re.compile(mergefrom)

    versions = {}    # changeset index where we saw any particular file version
    branches = {}    # changeset index where we saw a branch
    n = len(changesets)
    i = 0
    while i<n:
        c = changesets[i]

        for f in c.entries:
            versions[(f.rcs, f.revision)] = i

        p = None
        if c.branch in branches:
            p = branches[c.branch]
        else:
            # no earlier changeset on this branch: the parent is the
            # latest changeset containing any parent file revision
            for f in c.entries:
                p = max(p, versions.get((f.rcs, f.parent), None))

        c.parents = []
        if p is not None:
            p = changesets[p]

            # Ensure no changeset has a synthetic changeset as a parent.
            while p.synthetic:
                assert len(p.parents) <= 1, \
                       _('synthetic changeset cannot have multiple parents')
                if p.parents:
                    p = p.parents[0]
                else:
                    p = None
                    break

            if p is not None:
                c.parents.append(p)

        if c.mergepoint:
            if c.mergepoint == 'HEAD':
                c.mergepoint = None
            c.parents.append(changesets[branches[c.mergepoint]])

        if mergefrom:
            m = mergefrom.search(c.comment)
            if m:
                m = m.group(1)
                if m == 'HEAD':
                    m = None
                try:
                    candidate = changesets[branches[m]]
                except KeyError:
                    # the 'm in branches' test below short-circuits, so the
                    # possibly-unbound 'candidate' is never read in this case
                    ui.warn(_("warning: CVS commit message references "
                              "non-existent branch %r:\n%s\n")
                            % (m, c.comment))
                if m in branches and c.branch != m and not candidate.synthetic:
                    c.parents.append(candidate)

        if mergeto:
            m = mergeto.search(c.comment)
            if m:
                try:
                    m = m.group(1)
                    if m == 'HEAD':
                        m = None
                except:
                    m = None   # if no group found then merge to HEAD
                if m in branches and c.branch != m:
                    # insert empty changeset for merge
                    cc = changeset(author=c.author, branch=m, date=c.date,
                            comment='convert-repo: CVS merge from branch %s' % c.branch,
                            entries=[], tags=[], parents=[changesets[branches[m]], c])
                    changesets.insert(i + 1, cc)
                    branches[m] = i + 1

                    # adjust our loop counters now we have inserted a new entry
                    n += 1
                    i += 2
                    continue

        branches[c.branch] = i
        i += 1

    # Drop synthetic changesets (safe now that we have ensured no other
    # changesets can have them as parents).
    i = 0
    while i < len(changesets):
        if changesets[i].synthetic:
            del changesets[i]
        else:
            i += 1

    # Number changesets

    for i, c in enumerate(changesets):
        c.id = i + 1

    ui.status(_('%d changeset entries\n') % len(changesets))

    return changesets
690 689
691 690
def debugcvsps(ui, *args, **opts):
    '''Read CVS rlog for current directory or named path in repository, and
    convert the log to changesets based on matching commit log entries and dates.

    Output format mimics the cvsps tool, including its trailing-space
    quirks (see the bug-for-bug note below).  Recognized opts: new_cache,
    update_cache, revisions, root, fuzz, ancestors, branches, parents,
    prefix.
    '''

    if opts["new_cache"]:
        cache = "write"
    elif opts["update_cache"]:
        cache = "update"
    else:
        cache = None

    revisions = opts["revisions"]

    try:
        if args:
            log = []
            for d in args:
                log += createlog(ui, d, root=opts["root"], cache=cache)
        else:
            log = createlog(ui, root=opts["root"], cache=cache)
    except logerror, e:
        ui.write("%r\n"%e)
        return

    changesets = createchangeset(ui, log, opts["fuzz"])
    del log

    # Print changesets (optionally filtered)

    # off is truthy until the start revision (if any) has been seen
    off = len(revisions)
    branches = {}    # latest version number in each branch
    ancestors = {}   # parent branch
    for cs in changesets:

        if opts["ancestors"]:
            if cs.branch not in branches and cs.parents and cs.parents[0].id:
                ancestors[cs.branch] = changesets[cs.parents[0].id-1].branch, cs.parents[0].id
            branches[cs.branch] = cs.id

        # limit by branches
        if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
            continue

        if not off:
            # Note: trailing spaces on several lines here are needed to have
            #       bug-for-bug compatibility with cvsps.
            ui.write('---------------------\n')
            ui.write('PatchSet %d \n' % cs.id)
            ui.write('Date: %s\n' % util.datestr(cs.date, '%Y/%m/%d %H:%M:%S %1%2'))
            ui.write('Author: %s\n' % cs.author)
            ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
            ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags)>1],
                                  ','.join(cs.tags) or '(none)'))
            if opts["parents"] and cs.parents:
                if len(cs.parents)>1:
                    ui.write('Parents: %s\n' % (','.join([str(p.id) for p in cs.parents])))
                else:
                    ui.write('Parent: %d\n' % cs.parents[0].id)

            if opts["ancestors"]:
                # walk the ancestors mapping up to the root branch
                # NOTE(review): assumes every branch reached here was
                # recorded in 'ancestors' above -- confirm for logs whose
                # first changeset on a branch has no parents
                b = cs.branch
                r = []
                while b:
                    b, c = ancestors[b]
                    r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
                if r:
                    ui.write('Ancestors: %s\n' % (','.join(r)))

            ui.write('Log:\n')
            ui.write('%s\n\n' % cs.comment)
            ui.write('Members: \n')
            for f in cs.entries:
                fn = f.file
                if fn.startswith(opts["prefix"]):
                    fn = fn[len(opts["prefix"]):]
                ui.write('\t%s:%s->%s%s \n' % (fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
                          '.'.join([str(x) for x in f.revision]), ['', '(DEAD)'][f.dead]))
            ui.write('\n')

        # have we seen the start tag?
        if revisions and off:
            if revisions[0] == str(cs.id) or \
                revisions[0] in cs.tags:
                off = False

        # see if we reached the end tag
        if len(revisions)>1 and not off:
            if revisions[1] == str(cs.id) or \
                revisions[1] in cs.tags:
                break
General Comments 0
You need to be logged in to leave comments. Login now