##// END OF EJS Templates
convert: cvsps use absolute_import
timeless -
r28369:71176606 default
parent child Browse files
Show More
@@ -1,914 +1,918 b''
1 1 # Mercurial built-in replacement for cvsps.
2 2 #
3 3 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 from __future__ import absolute_import
7 8
9 import cPickle as pickle
8 10 import os
9 11 import re
10 import cPickle as pickle
12
13 from mercurial import (
14 hook,
15 util,
16 )
11 17 from mercurial.i18n import _
12 from mercurial import hook
13 from mercurial import util
14 18
class logentry(object):
    '''A single CVS file revision, as parsed from (r)log output.

    All attributes are supplied as keyword arguments or filled in later
    by the parser:
    .author    - author name as CVS knows it
    .branch    - name of branch this revision is on
    .branches  - revision tuple of branches starting at this revision
    .comment   - commit message
    .commitid  - CVS commitid or None
    .date      - the commit date as a (time, tz) tuple
    .dead      - true if file revision is dead
    .file      - name of file
    .lines     - a tuple (+lines, -lines) or None
    .parent    - previous revision of this entry
    .rcs       - name of file as returned from CVS
    .revision  - revision number as tuple
    .tags      - list of tags on the file
    .synthetic - is this a synthetic "file ... added on ..." revision?
    .mergepoint - the branch that has been merged from (if present in
                  rlog output) or None
    .branchpoints - the branches that start at the current entry or empty
    '''
    def __init__(self, **entries):
        # synthetic defaults to False; any keyword argument may override it
        self.synthetic = False
        for name, value in entries.items():
            setattr(self, name, value)

    def __repr__(self):
        # render attributes in deterministic (sorted) order for stable output
        fields = ["%s=%r" % (name, value)
                  for name, value in sorted(self.__dict__.items())]
        return "%s(%s)" % (type(self).__name__, ", ".join(fields))
42 46
class logerror(Exception):
    '''Raised when CVS (r)log output cannot be obtained or parsed.'''
45 49
def getrepopath(cvspath):
    """Return the repository path from a CVS path.

    >>> getrepopath('/foo/bar')
    '/foo/bar'
    >>> getrepopath('c:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:10/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:10c:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:c:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
    '/foo/bar'
    >>> getrepopath('user@server/path/to/repository')
    '/path/to/repository'
    """
    # According to the CVS manual, CVS paths are expressed like:
    #   [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
    #
    # We look only at the last colon-separated component and take
    # everything from the first '/' that appears after the optional '@'
    # (so a '/' inside the user name cannot confuse the search).
    tail = cvspath.split(':')[-1]
    atposition = tail.find('@')
    # find() returns -1 when there is no '@'; degrade that to "search
    # from the beginning of the string"
    searchfrom = atposition if atposition != -1 else 0
    return tail[tail.find('/', searchfrom):]
84 88
def createlog(ui, directory=None, root="", rlog=True, cache=None):
    '''Collect the CVS rlog and return a list of logentry objects.

    ui        - mercurial ui object, used for status/debug output and hooks
    directory - path to log; when None, the current CVS sandbox is used
                (CVS/Repository and CVS/Root are consulted)
    root      - CVSROOT to use; falls back to the sandbox Root file and
                then the CVSROOT environment variable
    rlog      - run "cvs rlog" instead of "cvs log"
    cache     - None for no caching; 'update' reads and extends the
                on-disk cache in ~/.hg.cvsps; any other true value
                rewrites the cache without reading the old one

    Raises logerror when the sandbox is missing or CVS reports an error.
    '''

    # Because we store many duplicate commit log messages, reusing strings
    # saves a lot of memory and pickle storage space.
    _scache = {}
    def scache(s):
        "return a shared version of a string"
        return _scache.setdefault(s, s)

    ui.status(_('collecting CVS rlog\n'))

    log = []      # list of logentry objects containing the CVS state

    # patterns to match in CVS (r)log output, by state of use
    # (re_XY is the pattern consumed while the state machine below is in
    # state X; states are: 0=RCS file, 1=Working file, 2=symbolic names
    # header, 3=symbolic names body, 4=separator, 5=revision, 6=date line,
    # 7=branches/comment, 8=comment continuation)
    re_00 = re.compile('RCS file: (.+)$')
    re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
    re_02 = re.compile('cvs (r?log|server): (.+)\n$')
    re_03 = re.compile("(Cannot access.+CVSROOT)|"
                       "(can't create temporary directory.+)$")
    re_10 = re.compile('Working file: (.+)$')
    re_20 = re.compile('symbolic names:')
    re_30 = re.compile('\t(.+): ([\\d.]+)$')
    re_31 = re.compile('----------------------------$')
    re_32 = re.compile('======================================='
                       '======================================$')
    re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
    re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
                       r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
                       r'(\s+commitid:\s+([^;]+);)?'
                       r'(.*mergepoint:\s+([^;]+);)?')
    re_70 = re.compile('branches: (.+);$')

    file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')

    prefix = ''   # leading path to strip of what we get from CVS

    if directory is None:
        # Current working directory

        # Get the real directory in the repository
        try:
            prefix = open(os.path.join('CVS','Repository')).read().strip()
            directory = prefix
            if prefix == ".":
                prefix = ""
        except IOError:
            raise logerror(_('not a CVS sandbox'))

        if prefix and not prefix.endswith(os.sep):
            prefix += os.sep

        # Use the Root file in the sandbox, if it exists
        try:
            root = open(os.path.join('CVS','Root')).read().strip()
        except IOError:
            pass

    if not root:
        root = os.environ.get('CVSROOT', '')

    # read log cache if one exists
    oldlog = []
    date = None

    if cache:
        cachedir = os.path.expanduser('~/.hg.cvsps')
        if not os.path.exists(cachedir):
            os.mkdir(cachedir)

        # The cvsps cache pickle needs a uniquified name, based on the
        # repository location. The address may have all sort of nasties
        # in it, slashes, colons and such. So here we take just the
        # alphanumeric characters, concatenated in a way that does not
        # mix up the various components, so that
        #    :pserver:user@server:/path
        # and
        #    /pserver/user/server/path
        # are mapped to different cache file names.
        cachefile = root.split(":") + [directory, "cache"]
        cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
        cachefile = os.path.join(cachedir,
                                 '.'.join([s for s in cachefile if s]))

    if cache == 'update':
        try:
            ui.note(_('reading cvs log cache %s\n') % cachefile)
            oldlog = pickle.load(open(cachefile))
            # a cache written by an older hg may lack newer logentry
            # attributes; discard it rather than work with partial data
            for e in oldlog:
                if not (util.safehasattr(e, 'branchpoints') and
                        util.safehasattr(e, 'commitid') and
                        util.safehasattr(e, 'mergepoint')):
                    ui.status(_('ignoring old cache\n'))
                    oldlog = []
                    break

            ui.note(_('cache has %d log entries\n') % len(oldlog))
        except Exception as e:
            ui.note(_('error reading cache: %r\n') % e)

        if oldlog:
            date = oldlog[-1].date    # last commit date as a (time,tz) tuple
            date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')

    # build the CVS commandline
    cmd = ['cvs', '-q']
    if root:
        cmd.append('-d%s' % root)
        p = util.normpath(getrepopath(root))
        if not p.endswith('/'):
            p += '/'
        if prefix:
            # looks like normpath replaces "" by "."
            prefix = p + util.normpath(prefix)
        else:
            prefix = p
    cmd.append(['log', 'rlog'][rlog])
    if date:
        # no space between option and date string
        cmd.append('-d>%s' % date)
    cmd.append(directory)

    # state machine begins here
    tags = {}     # dictionary of revisions on current file with their tags
    branchmap = {} # mapping between branch names and revision numbers
    rcsmap = {}
    state = 0
    store = False # set when a new record can be appended

    cmd = [util.shellquote(arg) for arg in cmd]
    ui.note(_("running %s\n") % (' '.join(cmd)))
    ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root))

    pfp = util.popen(' '.join(cmd))
    # one line of lookahead: several states need to peek at the next line
    # to decide whether the current record is complete
    peek = pfp.readline()
    while True:
        line = peek
        if line == '':
            break
        peek = pfp.readline()
        if line.endswith('\n'):
            line = line[:-1]
        #ui.debug('state=%d line=%r\n' % (state, line))

        if state == 0:
            # initial state, consume input until we see 'RCS file'
            match = re_00.match(line)
            if match:
                rcs = match.group(1)
                tags = {}
                if rlog:
                    # strip the ,v suffix and the repository prefix to
                    # recover the working file name
                    filename = util.normpath(rcs[:-2])
                    if filename.startswith(prefix):
                        filename = filename[len(prefix):]
                    if filename.startswith('/'):
                        filename = filename[1:]
                    if filename.startswith('Attic/'):
                        filename = filename[6:]
                    else:
                        filename = filename.replace('/Attic/', '/')
                    state = 2
                    continue
                state = 1
                continue
            match = re_01.match(line)
            if match:
                raise logerror(match.group(1))
            match = re_02.match(line)
            if match:
                raise logerror(match.group(2))
            if re_03.match(line):
                raise logerror(line)

        elif state == 1:
            # expect 'Working file' (only when using log instead of rlog)
            match = re_10.match(line)
            assert match, _('RCS file must be followed by working file')
            filename = util.normpath(match.group(1))
            state = 2

        elif state == 2:
            # expect 'symbolic names'
            if re_20.match(line):
                branchmap = {}
                state = 3

        elif state == 3:
            # read the symbolic names and store as tags
            match = re_30.match(line)
            if match:
                rev = [int(x) for x in match.group(2).split('.')]

                # Convert magic branch number to an odd-numbered one
                revn = len(rev)
                if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
                    rev = rev[:-2] + rev[-1:]
                rev = tuple(rev)

                if rev not in tags:
                    tags[rev] = []
                tags[rev].append(match.group(1))
                branchmap[match.group(1)] = match.group(2)

            elif re_31.match(line):
                state = 5
            elif re_32.match(line):
                state = 0

        elif state == 4:
            # expecting '------' separator before first revision
            if re_31.match(line):
                state = 5
            else:
                assert not re_32.match(line), _('must have at least '
                                                'some revisions')

        elif state == 5:
            # expecting revision number and possibly (ignored) lock indication
            # we create the logentry here from values stored in states 0 to 4,
            # as this state is re-entered for subsequent revisions of a file.
            match = re_50.match(line)
            assert match, _('expected revision number')
            e = logentry(rcs=scache(rcs),
                         file=scache(filename),
                         revision=tuple([int(x) for x in
                                         match.group(1).split('.')]),
                         branches=[],
                         parent=None,
                         commitid=None,
                         mergepoint=None,
                         branchpoints=set())

            state = 6

        elif state == 6:
            # expecting date, author, state, lines changed
            match = re_60.match(line)
            assert match, _('revision must be followed by date line')
            d = match.group(1)
            if d[2] == '/':
                # Y2K
                d = '19' + d

            if len(d.split()) != 3:
                # cvs log dates always in GMT
                d = d + ' UTC'
            e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
                                        '%Y/%m/%d %H:%M:%S',
                                        '%Y-%m-%d %H:%M:%S'])
            e.author = scache(match.group(2))
            e.dead = match.group(3).lower() == 'dead'

            if match.group(5):
                if match.group(6):
                    e.lines = (int(match.group(5)), int(match.group(6)))
                else:
                    e.lines = (int(match.group(5)), 0)
            elif match.group(6):
                e.lines = (0, int(match.group(6)))
            else:
                e.lines = None

            if match.group(7): # cvs 1.12 commitid
                e.commitid = match.group(8)

            if match.group(9): # cvsnt mergepoint
                myrev = match.group(10).split('.')
                if len(myrev) == 2: # head
                    e.mergepoint = 'HEAD'
                else:
                    # translate the revision to its magic-branch form and
                    # look it up in branchmap to recover the branch name
                    myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
                    branches = [b for b in branchmap if branchmap[b] == myrev]
                    # note: e.mergepoint is still None at this point, so
                    # the assertion message is of limited use
                    assert len(branches) == 1, ('unknown branch: %s'
                                                % e.mergepoint)
                    e.mergepoint = branches[0]

            e.comment = []
            state = 7

        elif state == 7:
            # read the revision numbers of branches that start at this revision
            # or store the commit log message otherwise
            m = re_70.match(line)
            if m:
                e.branches = [tuple([int(y) for y in x.strip().split('.')])
                              for x in m.group(1).split(';')]
                state = 8
            elif re_31.match(line) and re_50.match(peek):
                state = 5
                store = True
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        elif state == 8:
            # store commit log message
            if re_31.match(line):
                # a '----' line ends the comment only if the next line
                # starts a new revision; otherwise it is part of the text
                cpeek = peek
                if cpeek.endswith('\n'):
                    cpeek = cpeek[:-1]
                if re_50.match(cpeek):
                    state = 5
                    store = True
                else:
                    e.comment.append(line)
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        # When a file is added on a branch B1, CVS creates a synthetic
        # dead trunk revision 1.1 so that the branch has a root.
        # Likewise, if you merge such a file to a later branch B2 (one
        # that already existed when the file was added on B1), CVS
        # creates a synthetic dead revision 1.1.x.1 on B2.  Don't drop
        # these revisions now, but mark them synthetic so
        # createchangeset() can take care of them.
        if (store and
              e.dead and
              e.revision[-1] == 1 and      # 1.1 or 1.1.x.1
              len(e.comment) == 1 and
              file_added_re.match(e.comment[0])):
            ui.debug('found synthetic revision in %s: %r\n'
                     % (e.rcs, e.comment[0]))
            e.synthetic = True

        if store:
            # clean up the results and save in the log.
            store = False
            e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
            e.comment = scache('\n'.join(e.comment))

            revn = len(e.revision)
            if revn > 3 and (revn % 2) == 0:
                e.branch = tags.get(e.revision[:-1], [None])[0]
            else:
                e.branch = None

            # find the branches starting from this revision
            branchpoints = set()
            for branch, revision in branchmap.iteritems():
                revparts = tuple([int(i) for i in revision.split('.')])
                if len(revparts) < 2: # bad tags
                    continue
                if revparts[-2] == 0 and revparts[-1] % 2 == 0:
                    # normal branch
                    if revparts[:-2] == e.revision:
                        branchpoints.add(branch)
                elif revparts == (1, 1, 1): # vendor branch
                    if revparts in e.branches:
                        branchpoints.add(branch)
            e.branchpoints = branchpoints

            log.append(e)

            rcsmap[e.rcs.replace('/Attic/', '/')] = e.rcs

            if len(log) % 100 == 0:
                ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')

    log.sort(key=lambda x: (x.rcs, x.revision))

    # find parent revisions of individual files
    versions = {}
    for e in sorted(oldlog, key=lambda x: (x.rcs, x.revision)):
        rcs = e.rcs.replace('/Attic/', '/')
        if rcs in rcsmap:
            e.rcs = rcsmap[rcs]
        branch = e.revision[:-1]
        versions[(e.rcs, branch)] = e.revision

    for e in log:
        branch = e.revision[:-1]
        p = versions.get((e.rcs, branch), None)
        if p is None:
            p = e.revision[:-2]
        e.parent = p
        versions[(e.rcs, branch)] = e.revision

    # update the log cache
    if cache:
        if log:
            # join up the old and new logs
            log.sort(key=lambda x: x.date)

            if oldlog and oldlog[-1].date >= log[0].date:
                raise logerror(_('log cache overlaps with new log entries,'
                                 ' re-run without cache.'))

            log = oldlog + log

            # write the new cachefile
            ui.note(_('writing cvs log cache %s\n') % cachefile)
            pickle.dump(log, open(cachefile, 'w'))
        else:
            log = oldlog

    ui.status(_('%d log entries\n') % len(log))

    hook.hook(ui, None, "cvslog", True, log=log)

    return log
490 494
491 495
class changeset(object):
    '''A group of CVS file revisions forming one logical commit.

    Attributes (supplied as keyword arguments or filled in later):
    .id        - integer identifying this changeset (list index)
    .author    - author name as CVS knows it
    .branch    - name of branch this changeset is on, or None
    .comment   - commit message
    .commitid  - CVS commitid or None
    .date      - the commit date as a (time,tz) tuple
    .entries   - list of logentry objects in this changeset
    .parents   - list of one or two parent changesets
    .tags      - list of tags on this changeset
    .synthetic - from synthetic revision "file ... added on branch ..."
    .mergepoint - the branch that has been merged from or None
    .branchpoints - the branches that start at the current entry or empty
    '''
    def __init__(self, **entries):
        # id is assigned only after all changesets have been ordered;
        # synthetic defaults to False until marked by createchangeset()
        self.id = None
        self.synthetic = False
        for name, value in entries.items():
            setattr(self, name, value)

    def __repr__(self):
        # render attributes in deterministic (sorted) order for stable output
        fields = ["%s=%r" % (name, value)
                  for name, value in sorted(self.__dict__.items())]
        return "%s(%s)" % (type(self).__name__, ", ".join(fields))
515 519
def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
    '''Convert log (a list of logentry objects) into changeset objects.

    File revisions are merged into one changeset when they share a
    commitid, or — lacking commitids — when comment, author and branch
    match and the dates are within `fuzz` seconds of each other.

    mergefrom / mergeto are regex pattern strings (None selects the
    default "{{mergefrombranch BRANCHNAME}}" / "{{mergetobranch
    BRANCHNAME}}" markers; any false value disables that handling).
    Returns the ordered list of changeset objects, numbered from 1.
    '''

    ui.status(_('creating changesets\n'))

    # try to order commitids by date
    mindate = {}
    for e in log:
        if e.commitid:
            mindate[e.commitid] = min(e.date, mindate.get(e.commitid))

    # Merge changesets
    log.sort(key=lambda x: (mindate.get(x.commitid), x.commitid, x.comment,
                            x.author, x.branch, x.date, x.branchpoints))

    changesets = []
    files = set()
    c = None
    for i, e in enumerate(log):

        # Check if log entry belongs to the current changeset or not.

        # Since CVS is file-centric, two different file revisions with
        # different branchpoints should be treated as belonging to two
        # different changesets (and the ordering is important and not
        # honoured by cvsps at this point).
        #
        # Consider the following case:
        # foo 1.1 branchpoints: [MYBRANCH]
        # bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2]
        #
        # Here foo is part only of MYBRANCH, but not MYBRANCH2, e.g. a
        # later version of foo may be in MYBRANCH2, so foo should be the
        # first changeset and bar the next and MYBRANCH and MYBRANCH2
        # should both start off of the bar changeset. No provisions are
        # made to ensure that this is, in fact, what happens.
        if not (c and e.branchpoints == c.branchpoints and
                (# cvs commitids
                 (e.commitid is not None and e.commitid == c.commitid) or
                 (# no commitids, use fuzzy commit detection
                  (e.commitid is None or c.commitid is None) and
                   e.comment == c.comment and
                   e.author == c.author and
                   e.branch == c.branch and
                   ((c.date[0] + c.date[1]) <=
                    (e.date[0] + e.date[1]) <=
                    (c.date[0] + c.date[1]) + fuzz) and
                   e.file not in files))):
            c = changeset(comment=e.comment, author=e.author,
                          branch=e.branch, date=e.date,
                          entries=[], mergepoint=e.mergepoint,
                          branchpoints=e.branchpoints, commitid=e.commitid)
            changesets.append(c)

            files = set()
            if len(changesets) % 100 == 0:
                t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
                ui.status(util.ellipsis(t, 80) + '\n')

        c.entries.append(e)
        files.add(e.file)
        c.date = e.date       # changeset date is date of latest commit in it

    # Mark synthetic changesets

    for c in changesets:
        # Synthetic revisions always get their own changeset, because
        # the log message includes the filename.  E.g. if you add file3
        # and file4 on a branch, you get four log entries and three
        # changesets:
        #   "File file3 was added on branch ..." (synthetic, 1 entry)
        #   "File file4 was added on branch ..." (synthetic, 1 entry)
        #   "Add file3 and file4 to fix ..."     (real, 2 entries)
        # Hence the check for 1 entry here.
        c.synthetic = len(c.entries) == 1 and c.entries[0].synthetic

    # Sort files in each changeset

    def entitycompare(l, r):
        'Mimic cvsps sorting order'
        l = l.file.split('/')
        r = r.file.split('/')
        nl = len(l)
        nr = len(r)
        n = min(nl, nr)
        for i in range(n):
            if i + 1 == nl and nl < nr:
                return -1
            elif i + 1 == nr and nl > nr:
                return +1
            elif l[i] < r[i]:
                return -1
            elif l[i] > r[i]:
                return +1
        return 0

    for c in changesets:
        c.entries.sort(entitycompare)

    # Sort changesets by date

    # pairs of changesets that compare as both before and after each
    # other; reported to the user at the end
    odd = set()
    def cscmp(l, r, odd=odd):
        d = sum(l.date) - sum(r.date)
        if d:
            return d

        # detect vendor branches and initial commits on a branch
        le = {}
        for e in l.entries:
            le[e.rcs] = e.revision
        re = {}
        for e in r.entries:
            re[e.rcs] = e.revision

        d = 0
        for e in l.entries:
            if re.get(e.rcs, None) == e.parent:
                assert not d
                d = 1
                break

        for e in r.entries:
            if le.get(e.rcs, None) == e.parent:
                if d:
                    odd.add((l, r))
                d = -1
                break
        # By this point, the changesets are sufficiently compared that
        # we don't really care about ordering. However, this leaves
        # some race conditions in the tests, so we compare on the
        # number of files modified, the files contained in each
        # changeset, and the branchpoints in the change to ensure test
        # output remains stable.

        # recommended replacement for cmp from
        # https://docs.python.org/3.0/whatsnew/3.0.html
        c = lambda x, y: (x > y) - (x < y)
        # Sort bigger changes first.
        if not d:
            d = c(len(l.entries), len(r.entries))
        # Try sorting by filename in the change.
        if not d:
            d = c([e.file for e in l.entries], [e.file for e in r.entries])
        # Try and put changes without a branch point before ones with
        # a branch point.
        if not d:
            d = c(len(l.branchpoints), len(r.branchpoints))
        return d

    changesets.sort(cscmp)

    # Collect tags

    globaltags = {}
    for c in changesets:
        for e in c.entries:
            for tag in e.tags:
                # remember which is the latest changeset to have this tag
                globaltags[tag] = c

    for c in changesets:
        tags = set()
        for e in c.entries:
            tags.update(e.tags)
        # remember tags only if this is the latest changeset to have it
        c.tags = sorted(tag for tag in tags if globaltags[tag] is c)

    # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
    # by inserting dummy changesets with two parents, and handle
    # {{mergefrombranch BRANCHNAME}} by setting two parents.

    if mergeto is None:
        mergeto = r'{{mergetobranch ([-\w]+)}}'
    if mergeto:
        mergeto = re.compile(mergeto)

    if mergefrom is None:
        mergefrom = r'{{mergefrombranch ([-\w]+)}}'
    if mergefrom:
        mergefrom = re.compile(mergefrom)

    versions = {}    # changeset index where we saw any particular file version
    branches = {}    # changeset index where we saw a branch
    n = len(changesets)
    i = 0
    while i < n:
        c = changesets[i]

        for f in c.entries:
            versions[(f.rcs, f.revision)] = i

        p = None
        if c.branch in branches:
            p = branches[c.branch]
        else:
            # first changeset on a new branch
            # the parent is a changeset with the branch in its
            # branchpoints such that it is the latest possible
            # commit without any intervening, unrelated commits.

            for candidate in xrange(i):
                if c.branch not in changesets[candidate].branchpoints:
                    if p is not None:
                        break
                    continue
                p = candidate

        c.parents = []
        if p is not None:
            p = changesets[p]

            # Ensure no changeset has a synthetic changeset as a parent.
            while p.synthetic:
                assert len(p.parents) <= 1, \
                       _('synthetic changeset cannot have multiple parents')
                if p.parents:
                    p = p.parents[0]
                else:
                    p = None
                    break

            if p is not None:
                c.parents.append(p)

        if c.mergepoint:
            if c.mergepoint == 'HEAD':
                c.mergepoint = None
            c.parents.append(changesets[branches[c.mergepoint]])

        if mergefrom:
            m = mergefrom.search(c.comment)
            if m:
                m = m.group(1)
                if m == 'HEAD':
                    m = None
                try:
                    candidate = changesets[branches[m]]
                except KeyError:
                    ui.warn(_("warning: CVS commit message references "
                              "non-existent branch %r:\n%s\n")
                            % (m, c.comment))
                if m in branches and c.branch != m and not candidate.synthetic:
                    c.parents.append(candidate)

        if mergeto:
            m = mergeto.search(c.comment)
            if m:
                if m.groups():
                    m = m.group(1)
                    if m == 'HEAD':
                        m = None
                else:
                    m = None   # if no group found then merge to HEAD
                if m in branches and c.branch != m:
                    # insert empty changeset for merge
                    cc = changeset(
                        author=c.author, branch=m, date=c.date,
                        comment='convert-repo: CVS merge from branch %s'
                        % c.branch,
                        entries=[], tags=[],
                        parents=[changesets[branches[m]], c])
                    changesets.insert(i + 1, cc)
                    branches[m] = i + 1

                    # adjust our loop counters now we have inserted a new entry
                    n += 1
                    i += 2
                    continue

        branches[c.branch] = i
        i += 1

    # Drop synthetic changesets (safe now that we have ensured no other
    # changesets can have them as parents).
    i = 0
    while i < len(changesets):
        if changesets[i].synthetic:
            del changesets[i]
        else:
            i += 1

    # Number changesets

    for i, c in enumerate(changesets):
        c.id = i + 1

    if odd:
        for l, r in odd:
            if l.id is not None and r.id is not None:
                ui.warn(_('changeset %d is both before and after %d\n')
                        % (l.id, r.id))

    ui.status(_('%d changeset entries\n') % len(changesets))

    hook.hook(ui, None, "cvschangesets", True, changesets=changesets)

    return changesets
814 818
815 819
def debugcvsps(ui, *args, **opts):
    '''Read CVS rlog for current directory or named path in
    repository, and convert the log to changesets based on matching
    commit log entries and dates.

    args holds optional directories to log; opts carries the command
    flags ("new_cache", "update_cache", "root", "revisions", "fuzz",
    "ancestors", "branches", "parents", "prefix") — presumably provided
    by the convert extension's command table; verify against the caller.
    '''
    # translate cache flags into the mode string createlog() expects
    if opts["new_cache"]:
        cache = "write"
    elif opts["update_cache"]:
        cache = "update"
    else:
        cache = None

    revisions = opts["revisions"]

    try:
        if args:
            log = []
            for d in args:
                log += createlog(ui, d, root=opts["root"], cache=cache)
        else:
            log = createlog(ui, root=opts["root"], cache=cache)
    except logerror as e:
        ui.write("%r\n"%e)
        return

    changesets = createchangeset(ui, log, opts["fuzz"])
    del log

    # Print changesets (optionally filtered)

    # off is true while we have not yet reached the first requested
    # revision (falsy when no revision filter is given)
    off = len(revisions)
    branches = {}    # latest version number in each branch
    ancestors = {}   # parent branch
    for cs in changesets:

        if opts["ancestors"]:
            if cs.branch not in branches and cs.parents and cs.parents[0].id:
                ancestors[cs.branch] = (changesets[cs.parents[0].id - 1].branch,
                                        cs.parents[0].id)
            branches[cs.branch] = cs.id

        # limit by branches
        if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
            continue

        if not off:
            # Note: trailing spaces on several lines here are needed to have
            #       bug-for-bug compatibility with cvsps.
            ui.write('---------------------\n')
            ui.write(('PatchSet %d \n' % cs.id))
            ui.write(('Date: %s\n' % util.datestr(cs.date,
                                                  '%Y/%m/%d %H:%M:%S %1%2')))
            ui.write(('Author: %s\n' % cs.author))
            ui.write(('Branch: %s\n' % (cs.branch or 'HEAD')))
            ui.write(('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
                                        ','.join(cs.tags) or '(none)')))
            if cs.branchpoints:
                ui.write(('Branchpoints: %s \n') %
                         ', '.join(sorted(cs.branchpoints)))
            if opts["parents"] and cs.parents:
                if len(cs.parents) > 1:
                    ui.write(('Parents: %s\n' %
                              (','.join([str(p.id) for p in cs.parents]))))
                else:
                    ui.write(('Parent: %d\n' % cs.parents[0].id))

            if opts["ancestors"]:
                # walk the parent-branch chain collected above up to HEAD
                b = cs.branch
                r = []
                while b:
                    b, c = ancestors[b]
                    r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
                if r:
                    ui.write(('Ancestors: %s\n' % (','.join(r))))

            ui.write(('Log:\n'))
            ui.write('%s\n\n' % cs.comment)
            ui.write(('Members: \n'))
            for f in cs.entries:
                fn = f.file
                if fn.startswith(opts["prefix"]):
                    fn = fn[len(opts["prefix"]):]
                ui.write('\t%s:%s->%s%s \n' % (
                    fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
                    '.'.join([str(x) for x in f.revision]),
                    ['', '(DEAD)'][f.dead]))
            ui.write('\n')

        # have we seen the start tag?
        if revisions and off:
            if revisions[0] == str(cs.id) or \
                revisions[0] in cs.tags:
                off = False

        # see if we reached the end tag
        if len(revisions) > 1 and not off:
            if revisions[1] == str(cs.id) or \
                revisions[1] in cs.tags:
                break
@@ -1,154 +1,153 b''
1 1 #require test-repo
2 2
3 3 $ cd "$TESTDIR"/..
4 4
5 5 $ hg files 'set:(**.py)' | sed 's|\\|/|g' | xargs python contrib/check-py3-compat.py
6 6 contrib/check-code.py not using absolute_import
7 7 contrib/check-code.py requires print_function
8 8 contrib/debugshell.py not using absolute_import
9 9 contrib/hgfixes/fix_bytes.py not using absolute_import
10 10 contrib/hgfixes/fix_bytesmod.py not using absolute_import
11 11 contrib/hgfixes/fix_leftover_imports.py not using absolute_import
12 12 contrib/import-checker.py not using absolute_import
13 13 contrib/import-checker.py requires print_function
14 14 contrib/memory.py not using absolute_import
15 15 contrib/perf.py not using absolute_import
16 16 contrib/python-hook-examples.py not using absolute_import
17 17 contrib/revsetbenchmarks.py not using absolute_import
18 18 contrib/revsetbenchmarks.py requires print_function
19 19 contrib/showstack.py not using absolute_import
20 20 contrib/synthrepo.py not using absolute_import
21 21 contrib/win32/hgwebdir_wsgi.py not using absolute_import
22 22 doc/check-seclevel.py not using absolute_import
23 23 doc/gendoc.py not using absolute_import
24 24 doc/hgmanpage.py not using absolute_import
25 25 hgext/__init__.py not using absolute_import
26 26 hgext/color.py not using absolute_import
27 27 hgext/convert/__init__.py not using absolute_import
28 28 hgext/convert/bzr.py not using absolute_import
29 29 hgext/convert/common.py not using absolute_import
30 30 hgext/convert/convcmd.py not using absolute_import
31 31 hgext/convert/cvs.py not using absolute_import
32 hgext/convert/cvsps.py not using absolute_import
33 32 hgext/convert/hg.py not using absolute_import
34 33 hgext/convert/monotone.py not using absolute_import
35 34 hgext/convert/p4.py not using absolute_import
36 35 hgext/convert/subversion.py not using absolute_import
37 36 hgext/convert/transport.py not using absolute_import
38 37 hgext/eol.py not using absolute_import
39 38 hgext/extdiff.py not using absolute_import
40 39 hgext/factotum.py not using absolute_import
41 40 hgext/fetch.py not using absolute_import
42 41 hgext/gpg.py not using absolute_import
43 42 hgext/graphlog.py not using absolute_import
44 43 hgext/hgcia.py not using absolute_import
45 44 hgext/hgk.py not using absolute_import
46 45 hgext/highlight/__init__.py not using absolute_import
47 46 hgext/highlight/highlight.py not using absolute_import
48 47 hgext/histedit.py not using absolute_import
49 48 hgext/largefiles/__init__.py not using absolute_import
50 49 hgext/largefiles/basestore.py not using absolute_import
51 50 hgext/largefiles/lfcommands.py not using absolute_import
52 51 hgext/largefiles/lfutil.py not using absolute_import
53 52 hgext/largefiles/localstore.py not using absolute_import
54 53 hgext/largefiles/overrides.py not using absolute_import
55 54 hgext/largefiles/proto.py not using absolute_import
56 55 hgext/largefiles/remotestore.py not using absolute_import
57 56 hgext/largefiles/reposetup.py not using absolute_import
58 57 hgext/largefiles/uisetup.py not using absolute_import
59 58 hgext/largefiles/wirestore.py not using absolute_import
60 59 hgext/mq.py not using absolute_import
61 60 hgext/notify.py not using absolute_import
62 61 hgext/patchbomb.py not using absolute_import
63 62 hgext/purge.py not using absolute_import
64 63 hgext/rebase.py not using absolute_import
65 64 hgext/record.py not using absolute_import
66 65 hgext/relink.py not using absolute_import
67 66 hgext/schemes.py not using absolute_import
68 67 hgext/share.py not using absolute_import
69 68 hgext/shelve.py not using absolute_import
70 69 hgext/strip.py not using absolute_import
71 70 hgext/transplant.py not using absolute_import
72 71 hgext/win32mbcs.py not using absolute_import
73 72 hgext/win32text.py not using absolute_import
74 73 i18n/check-translation.py not using absolute_import
75 74 i18n/polib.py not using absolute_import
76 75 setup.py not using absolute_import
77 76 tests/filterpyflakes.py requires print_function
78 77 tests/generate-working-copy-states.py requires print_function
79 78 tests/get-with-headers.py requires print_function
80 79 tests/heredoctest.py requires print_function
81 80 tests/hypothesishelpers.py not using absolute_import
82 81 tests/hypothesishelpers.py requires print_function
83 82 tests/killdaemons.py not using absolute_import
84 83 tests/md5sum.py not using absolute_import
85 84 tests/mockblackbox.py not using absolute_import
86 85 tests/printenv.py not using absolute_import
87 86 tests/readlink.py not using absolute_import
88 87 tests/readlink.py requires print_function
89 88 tests/revlog-formatv0.py not using absolute_import
90 89 tests/run-tests.py not using absolute_import
91 90 tests/seq.py not using absolute_import
92 91 tests/seq.py requires print_function
93 92 tests/silenttestrunner.py not using absolute_import
94 93 tests/silenttestrunner.py requires print_function
95 94 tests/sitecustomize.py not using absolute_import
96 95 tests/svn-safe-append.py not using absolute_import
97 96 tests/svnxml.py not using absolute_import
98 97 tests/test-ancestor.py requires print_function
99 98 tests/test-atomictempfile.py not using absolute_import
100 99 tests/test-batching.py not using absolute_import
101 100 tests/test-batching.py requires print_function
102 101 tests/test-bdiff.py not using absolute_import
103 102 tests/test-bdiff.py requires print_function
104 103 tests/test-context.py not using absolute_import
105 104 tests/test-context.py requires print_function
106 105 tests/test-demandimport.py not using absolute_import
107 106 tests/test-demandimport.py requires print_function
108 107 tests/test-dispatch.py not using absolute_import
109 108 tests/test-dispatch.py requires print_function
110 109 tests/test-doctest.py not using absolute_import
111 110 tests/test-duplicateoptions.py not using absolute_import
112 111 tests/test-duplicateoptions.py requires print_function
113 112 tests/test-filecache.py not using absolute_import
114 113 tests/test-filecache.py requires print_function
115 114 tests/test-filelog.py not using absolute_import
116 115 tests/test-filelog.py requires print_function
117 116 tests/test-hg-parseurl.py not using absolute_import
118 117 tests/test-hg-parseurl.py requires print_function
119 118 tests/test-hgweb-auth.py not using absolute_import
120 119 tests/test-hgweb-auth.py requires print_function
121 120 tests/test-hgwebdir-paths.py not using absolute_import
122 121 tests/test-hybridencode.py not using absolute_import
123 122 tests/test-hybridencode.py requires print_function
124 123 tests/test-lrucachedict.py not using absolute_import
125 124 tests/test-lrucachedict.py requires print_function
126 125 tests/test-manifest.py not using absolute_import
127 126 tests/test-minirst.py not using absolute_import
128 127 tests/test-minirst.py requires print_function
129 128 tests/test-parseindex2.py not using absolute_import
130 129 tests/test-parseindex2.py requires print_function
131 130 tests/test-pathencode.py not using absolute_import
132 131 tests/test-pathencode.py requires print_function
133 132 tests/test-propertycache.py not using absolute_import
134 133 tests/test-propertycache.py requires print_function
135 134 tests/test-revlog-ancestry.py not using absolute_import
136 135 tests/test-revlog-ancestry.py requires print_function
137 136 tests/test-run-tests.py not using absolute_import
138 137 tests/test-simplemerge.py not using absolute_import
139 138 tests/test-status-inprocess.py not using absolute_import
140 139 tests/test-status-inprocess.py requires print_function
141 140 tests/test-symlink-os-yes-fs-no.py not using absolute_import
142 141 tests/test-trusted.py not using absolute_import
143 142 tests/test-trusted.py requires print_function
144 143 tests/test-ui-color.py not using absolute_import
145 144 tests/test-ui-color.py requires print_function
146 145 tests/test-ui-config.py not using absolute_import
147 146 tests/test-ui-config.py requires print_function
148 147 tests/test-ui-verbosity.py not using absolute_import
149 148 tests/test-ui-verbosity.py requires print_function
150 149 tests/test-url.py not using absolute_import
151 150 tests/test-url.py requires print_function
152 151 tests/test-walkrepo.py requires print_function
153 152 tests/test-wireproto.py requires print_function
154 153 tests/tinyproxy.py requires print_function
General Comments 0
You need to be logged in to leave comments. Login now