##// END OF EJS Templates
convert: handle changeset sorting errors without traceback (issue3961)
Frank Kingswood <frank@kingswood-consulting.co.uk>
r19505:7b815e38 stable
parent child Browse files
Show More
@@ -1,877 +1,886 b''
1 # Mercurial built-in replacement for cvsps.
1 # Mercurial built-in replacement for cvsps.
2 #
2 #
3 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
3 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import os
8 import os
9 import re
9 import re
10 import cPickle as pickle
10 import cPickle as pickle
11 from mercurial import util
11 from mercurial import util
12 from mercurial.i18n import _
12 from mercurial.i18n import _
13 from mercurial import hook
13 from mercurial import hook
14 from mercurial import util
14 from mercurial import util
15
15
class logentry(object):
    '''A single CVS file revision as parsed from (r)log output.

    Attributes (supplied as keyword arguments and filled in by the parser):
      .author       - author name as CVS knows it
      .branch       - name of branch this revision is on
      .branches     - revision tuple of branches starting at this revision
      .comment      - commit message
      .commitid     - CVS commitid or None
      .date         - the commit date as a (time, tz) tuple
      .dead         - true if file revision is dead
      .file         - name of file
      .lines        - a tuple (+lines, -lines) or None
      .parent       - previous revision of this entry
      .rcs          - name of file as returned from CVS
      .revision     - revision number as tuple
      .tags         - list of tags on the file
      .synthetic    - is this a synthetic "file ... added on ..." revision?
      .mergepoint   - the branch that has been merged from (if present in
                      rlog output) or None
      .branchpoints - the branches that start at the current entry or empty
    '''
    def __init__(self, **entries):
        # not synthetic unless the log parser explicitly marks it so
        self.synthetic = False
        self.__dict__.update(entries)

    def __repr__(self):
        fields = ["%s=%r" % (name, self.__dict__[name])
                  for name in sorted(self.__dict__)]
        return "%s(%s)" % (type(self).__name__, ", ".join(fields))
43
43
class logerror(Exception):
    """Raised when CVS (r)log output reports an error or cannot be used
    (e.g. not a CVS sandbox, aborted rlog, overlapping log cache)."""
    pass
46
46
def getrepopath(cvspath):
    """Return the repository path from a CVS path.

    >>> getrepopath('/foo/bar')
    '/foo/bar'
    >>> getrepopath('c:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:10/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:10c:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:c:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
    '/foo/bar'
    >>> getrepopath('user@server/path/to/repository')
    '/path/to/repository'
    """
    # Per the CVS manual, CVS paths have the form:
    #   [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
    # Only the part after the last ':' can hold the repository path, and
    # within it the path begins at the first '/' that follows the '@'
    # (or at the first '/' at all when there is no '@').
    tail = cvspath.split(':')[-1]
    at = tail.find('@')
    searchfrom = at if at != -1 else 0
    return tail[tail.find('/', searchfrom):]
85
85
def createlog(ui, directory=None, root="", rlog=True, cache=None):
    '''Collect the CVS rlog.

    Runs ``cvs rlog`` (or ``cvs log`` when rlog is False) for *directory*
    against the repository *root* and parses the output with a state
    machine into a list of logentry objects, one per file revision.

    directory - module/path to log; None means the current CVS sandbox
    root      - CVSROOT; falls back to CVS/Root, then $CVSROOT
    rlog      - use 'cvs rlog' (server-side) instead of 'cvs log'
    cache     - if 'update', reuse the pickled cache in ~/.hg.cvsps and
                only fetch entries newer than the last cached commit

    Raises logerror when CVS reports a problem, the tree is not a CVS
    sandbox, or the cache overlaps the newly read log.
    '''

    # Because we store many duplicate commit log messages, reusing strings
    # saves a lot of memory and pickle storage space.
    _scache = {}
    def scache(s):
        "return a shared version of a string"
        return _scache.setdefault(s, s)

    ui.status(_('collecting CVS rlog\n'))

    log = []      # list of logentry objects containing the CVS state

    # patterns to match in CVS (r)log output, by state of use
    re_00 = re.compile('RCS file: (.+)$')
    re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
    re_02 = re.compile('cvs (r?log|server): (.+)\n$')
    re_03 = re.compile("(Cannot access.+CVSROOT)|"
                       "(can't create temporary directory.+)$")
    re_10 = re.compile('Working file: (.+)$')
    re_20 = re.compile('symbolic names:')
    re_30 = re.compile('\t(.+): ([\\d.]+)$')
    re_31 = re.compile('----------------------------$')
    re_32 = re.compile('======================================='
                       '======================================$')
    re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
    re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
                       r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
                       r'(\s+commitid:\s+([^;]+);)?'
                       r'(.*mergepoint:\s+([^;]+);)?')
    re_70 = re.compile('branches: (.+);$')

    file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')

    prefix = ''   # leading path to strip of what we get from CVS

    if directory is None:
        # Current working directory

        # Get the real directory in the repository
        try:
            prefix = open(os.path.join('CVS','Repository')).read().strip()
            directory = prefix
            if prefix == ".":
                prefix = ""
        except IOError:
            raise logerror(_('not a CVS sandbox'))

        if prefix and not prefix.endswith(os.sep):
            prefix += os.sep

        # Use the Root file in the sandbox, if it exists
        try:
            root = open(os.path.join('CVS','Root')).read().strip()
        except IOError:
            pass

    if not root:
        root = os.environ.get('CVSROOT', '')

    # read log cache if one exists
    oldlog = []
    date = None

    if cache:
        cachedir = os.path.expanduser('~/.hg.cvsps')
        if not os.path.exists(cachedir):
            os.mkdir(cachedir)

        # The cvsps cache pickle needs a uniquified name, based on the
        # repository location. The address may have all sort of nasties
        # in it, slashes, colons and such. So here we take just the
        # alphanumeric characters, concatenated in a way that does not
        # mix up the various components, so that
        #    :pserver:user@server:/path
        # and
        #    /pserver/user/server/path
        # are mapped to different cache file names.
        cachefile = root.split(":") + [directory, "cache"]
        cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
        cachefile = os.path.join(cachedir,
                                 '.'.join([s for s in cachefile if s]))

        if cache == 'update':
            try:
                ui.note(_('reading cvs log cache %s\n') % cachefile)
                oldlog = pickle.load(open(cachefile))
                for e in oldlog:
                    if not (util.safehasattr(e, 'branchpoints') and
                            util.safehasattr(e, 'commitid') and
                            util.safehasattr(e, 'mergepoint')):
                        ui.status(_('ignoring old cache\n'))
                        oldlog = []
                        break

                ui.note(_('cache has %d log entries\n') % len(oldlog))
            except Exception, e:
                ui.note(_('error reading cache: %r\n') % e)

            if oldlog:
                date = oldlog[-1].date    # last commit date as a (time,tz) tuple
                date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')

    # build the CVS commandline
    cmd = ['cvs', '-q']
    if root:
        cmd.append('-d%s' % root)
        p = util.normpath(getrepopath(root))
        if not p.endswith('/'):
            p += '/'
        if prefix:
            # looks like normpath replaces "" by "."
            prefix = p + util.normpath(prefix)
        else:
            prefix = p
    cmd.append(['log', 'rlog'][rlog])
    if date:
        # no space between option and date string
        cmd.append('-d>%s' % date)
    cmd.append(directory)

    # state machine begins here
    # States of the parser:
    #   0: looking for 'RCS file' (or fatal cvs error messages)
    #   1: expect 'Working file' (log mode only)
    #   2: expect 'symbolic names:'
    #   3: reading symbolic names (tags/branches)
    #   4: expect '-----' separator before first revision
    #   5: expect 'revision x.y' (creates the logentry)
    #   6: expect date/author/state line
    #   7: branches line or start of commit message
    #   8: reading the commit message
    tags = {}      # dictionary of revisions on current file with their tags
    branchmap = {} # mapping between branch names and revision numbers
    state = 0
    store = False  # set when a new record can be appended

    cmd = [util.shellquote(arg) for arg in cmd]
    ui.note(_("running %s\n") % (' '.join(cmd)))
    ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root))

    pfp = util.popen(' '.join(cmd))
    peek = pfp.readline()
    while True:
        line = peek
        if line == '':
            break
        peek = pfp.readline()
        if line.endswith('\n'):
            line = line[:-1]
        #ui.debug('state=%d line=%r\n' % (state, line))

        if state == 0:
            # initial state, consume input until we see 'RCS file'
            match = re_00.match(line)
            if match:
                rcs = match.group(1)
                tags = {}
                if rlog:
                    # rlog prints repository paths; strip the ',v' suffix,
                    # the repository prefix and any Attic/ component
                    filename = util.normpath(rcs[:-2])
                    if filename.startswith(prefix):
                        filename = filename[len(prefix):]
                    if filename.startswith('/'):
                        filename = filename[1:]
                    if filename.startswith('Attic/'):
                        filename = filename[6:]
                    else:
                        filename = filename.replace('/Attic/', '/')
                    state = 2
                    continue
                state = 1
                continue
            match = re_01.match(line)
            if match:
                raise logerror(match.group(1))
            match = re_02.match(line)
            if match:
                raise logerror(match.group(2))
            if re_03.match(line):
                raise logerror(line)

        elif state == 1:
            # expect 'Working file' (only when using log instead of rlog)
            match = re_10.match(line)
            assert match, _('RCS file must be followed by working file')
            filename = util.normpath(match.group(1))
            state = 2

        elif state == 2:
            # expect 'symbolic names'
            if re_20.match(line):
                branchmap = {}
                state = 3

        elif state == 3:
            # read the symbolic names and store as tags
            match = re_30.match(line)
            if match:
                rev = [int(x) for x in match.group(2).split('.')]

                # Convert magic branch number to an odd-numbered one
                revn = len(rev)
                if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
                    rev = rev[:-2] + rev[-1:]
                rev = tuple(rev)

                if rev not in tags:
                    tags[rev] = []
                tags[rev].append(match.group(1))
                branchmap[match.group(1)] = match.group(2)

            elif re_31.match(line):
                state = 5
            elif re_32.match(line):
                state = 0

        elif state == 4:
            # expecting '------' separator before first revision
            if re_31.match(line):
                state = 5
            else:
                assert not re_32.match(line), _('must have at least '
                                                'some revisions')

        elif state == 5:
            # expecting revision number and possibly (ignored) lock indication
            # we create the logentry here from values stored in states 0 to 4,
            # as this state is re-entered for subsequent revisions of a file.
            match = re_50.match(line)
            assert match, _('expected revision number')
            e = logentry(rcs=scache(rcs),
                         file=scache(filename),
                         revision=tuple([int(x) for x in
                                         match.group(1).split('.')]),
                         branches=[],
                         parent=None,
                         commitid=None,
                         mergepoint=None,
                         branchpoints=set())

            state = 6

        elif state == 6:
            # expecting date, author, state, lines changed
            match = re_60.match(line)
            assert match, _('revision must be followed by date line')
            d = match.group(1)
            if d[2] == '/':
                # Y2K
                d = '19' + d

            if len(d.split()) != 3:
                # cvs log dates always in GMT
                d = d + ' UTC'
            e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
                                        '%Y/%m/%d %H:%M:%S',
                                        '%Y-%m-%d %H:%M:%S'])
            e.author = scache(match.group(2))
            e.dead = match.group(3).lower() == 'dead'

            # optional 'lines: +a -b' -> (added, removed) or None
            if match.group(5):
                if match.group(6):
                    e.lines = (int(match.group(5)), int(match.group(6)))
                else:
                    e.lines = (int(match.group(5)), 0)
            elif match.group(6):
                e.lines = (0, int(match.group(6)))
            else:
                e.lines = None

            if match.group(7): # cvs 1.12 commitid
                e.commitid = match.group(8)

            if match.group(9): # cvsnt mergepoint
                myrev = match.group(10).split('.')
                if len(myrev) == 2: # head
                    e.mergepoint = 'HEAD'
                else:
                    myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
                    branches = [b for b in branchmap if branchmap[b] == myrev]
                    assert len(branches) == 1, ('unknown branch: %s'
                                                % e.mergepoint)
                    e.mergepoint = branches[0]

            e.comment = []
            state = 7

        elif state == 7:
            # read the revision numbers of branches that start at this revision
            # or store the commit log message otherwise
            m = re_70.match(line)
            if m:
                e.branches = [tuple([int(y) for y in x.strip().split('.')])
                              for x in m.group(1).split(';')]
                state = 8
            elif re_31.match(line) and re_50.match(peek):
                state = 5
                store = True
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        elif state == 8:
            # store commit log message
            if re_31.match(line):
                cpeek = peek
                if cpeek.endswith('\n'):
                    cpeek = cpeek[:-1]
                if re_50.match(cpeek):
                    state = 5
                    store = True
                else:
                    e.comment.append(line)
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        # When a file is added on a branch B1, CVS creates a synthetic
        # dead trunk revision 1.1 so that the branch has a root.
        # Likewise, if you merge such a file to a later branch B2 (one
        # that already existed when the file was added on B1), CVS
        # creates a synthetic dead revision 1.1.x.1 on B2.  Don't drop
        # these revisions now, but mark them synthetic so
        # createchangeset() can take care of them.
        if (store and
              e.dead and
              e.revision[-1] == 1 and      # 1.1 or 1.1.x.1
              len(e.comment) == 1 and
              file_added_re.match(e.comment[0])):
            ui.debug('found synthetic revision in %s: %r\n'
                     % (e.rcs, e.comment[0]))
            e.synthetic = True

        if store:
            # clean up the results and save in the log.
            store = False
            e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
            e.comment = scache('\n'.join(e.comment))

            revn = len(e.revision)
            if revn > 3 and (revn % 2) == 0:
                e.branch = tags.get(e.revision[:-1], [None])[0]
            else:
                e.branch = None

            # find the branches starting from this revision
            branchpoints = set()
            for branch, revision in branchmap.iteritems():
                revparts = tuple([int(i) for i in revision.split('.')])
                if len(revparts) < 2: # bad tags
                    continue
                if revparts[-2] == 0 and revparts[-1] % 2 == 0:
                    # normal branch
                    if revparts[:-2] == e.revision:
                        branchpoints.add(branch)
                elif revparts == (1, 1, 1): # vendor branch
                    if revparts in e.branches:
                        branchpoints.add(branch)
            e.branchpoints = branchpoints

            log.append(e)

            if len(log) % 100 == 0:
                ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')

    log.sort(key=lambda x: (x.rcs, x.revision))

    # find parent revisions of individual files
    versions = {}
    for e in log:
        branch = e.revision[:-1]
        p = versions.get((e.rcs, branch), None)
        if p is None:
            p = e.revision[:-2]
        e.parent = p
        versions[(e.rcs, branch)] = e.revision

    # update the log cache
    if cache:
        if log:
            # join up the old and new logs
            log.sort(key=lambda x: x.date)

            if oldlog and oldlog[-1].date >= log[0].date:
                raise logerror(_('log cache overlaps with new log entries,'
                                 ' re-run without cache.'))

            log = oldlog + log

            # write the new cachefile
            ui.note(_('writing cvs log cache %s\n') % cachefile)
            pickle.dump(log, open(cachefile, 'w'))
        else:
            log = oldlog

    ui.status(_('%d log entries\n') % len(log))

    hook.hook(ui, None, "cvslog", True, log=log)

    return log
481
481
482
482
class changeset(object):
    '''Class changeset has the following attributes:
        .id        - integer identifying this changeset (list index)
        .author    - author name as CVS knows it
        .branch    - name of branch this changeset is on, or None
        .comment   - commit message
        .commitid  - CVS commitid or None
        .date      - the commit date as a (time,tz) tuple
        .entries   - list of logentry objects in this changeset
        .parents   - list of one or two parent changesets
        .tags      - list of tags on this changeset
        .synthetic - from synthetic revision "file ... added on branch ..."
        .mergepoint- the branch that has been merged from or None
        .branchpoints- the branches that start at the current entry or empty
    '''
    def __init__(self, **entries):
        # default id to None so changesets created before ids are assigned
        # can still be compared/sorted without raising; presumably the
        # issue3961 fix named in the commit message -- TODO confirm
        self.id = None
        self.synthetic = False
        self.__dict__.update(entries)

    def __repr__(self):
        items = ("%s=%r"%(k, self.__dict__[k]) for k in sorted(self.__dict__))
        return "%s(%s)"%(type(self).__name__, ", ".join(items))
505
506
def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
    '''Convert log (a list of logentry objects) into changesets.

    Entries are merged into a single changeset when they carry the same
    CVS commitid or - lacking commitids - when comment, author, branch
    and branchpoints match and the dates lie within "fuzz" seconds of
    each other.  mergefrom/mergeto are regex patterns (None selects the
    default {{mergefrombranch ...}} / {{mergetobranch ...}} markers)
    used to synthesize merge parents from commit messages.

    Returns the list of changeset objects, numbered from 1 via .id.
    '''

    ui.status(_('creating changesets\n'))

    # try to order commitids by date
    mindate = {}
    for e in log:
        if e.commitid:
            # NOTE: min(tuple, None) relies on Python 2 mixed-type ordering
            # to seed the first date for a commitid.
            mindate[e.commitid] = min(e.date, mindate.get(e.commitid))

    # Merge changesets
    log.sort(key=lambda x: (mindate.get(x.commitid), x.commitid, x.comment,
                            x.author, x.branch, x.date, x.branchpoints))

    changesets = []
    files = set()
    c = None
    for i, e in enumerate(log):

        # Check if log entry belongs to the current changeset or not.

        # Since CVS is file-centric, two different file revisions with
        # different branchpoints should be treated as belonging to two
        # different changesets (and the ordering is important and not
        # honoured by cvsps at this point).
        #
        # Consider the following case:
        # foo 1.1 branchpoints: [MYBRANCH]
        # bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2]
        #
        # Here foo is part only of MYBRANCH, but not MYBRANCH2, e.g. a
        # later version of foo may be in MYBRANCH2, so foo should be the
        # first changeset and bar the next and MYBRANCH and MYBRANCH2
        # should both start off of the bar changeset. No provisions are
        # made to ensure that this is, in fact, what happens.
        if not (c and e.branchpoints == c.branchpoints and
                (# cvs commitids
                 (e.commitid is not None and e.commitid == c.commitid) or
                 (# no commitids, use fuzzy commit detection
                  (e.commitid is None or c.commitid is None) and
                   e.comment == c.comment and
                   e.author == c.author and
                   e.branch == c.branch and
                   ((c.date[0] + c.date[1]) <=
                    (e.date[0] + e.date[1]) <=
                    (c.date[0] + c.date[1]) + fuzz) and
                   e.file not in files))):
            c = changeset(comment=e.comment, author=e.author,
                          branch=e.branch, date=e.date,
                          entries=[], mergepoint=e.mergepoint,
                          branchpoints=e.branchpoints, commitid=e.commitid)
            changesets.append(c)

            files = set()
            if len(changesets) % 100 == 0:
                t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
                ui.status(util.ellipsis(t, 80) + '\n')

        c.entries.append(e)
        files.add(e.file)
        c.date = e.date       # changeset date is date of latest commit in it

    # Mark synthetic changesets

    for c in changesets:
        # Synthetic revisions always get their own changeset, because
        # the log message includes the filename.  E.g. if you add file3
        # and file4 on a branch, you get four log entries and three
        # changesets:
        #   "File file3 was added on branch ..." (synthetic, 1 entry)
        #   "File file4 was added on branch ..." (synthetic, 1 entry)
        #   "Add file3 and file4 to fix ..."     (real, 2 entries)
        # Hence the check for 1 entry here.
        c.synthetic = len(c.entries) == 1 and c.entries[0].synthetic

    # Sort files in each changeset

    def entitycompare(l, r):
        'Mimic cvsps sorting order'
        l = l.file.split('/')
        r = r.file.split('/')
        nl = len(l)
        nr = len(r)
        n = min(nl, nr)
        for i in range(n):
            if i + 1 == nl and nl < nr:
                return -1
            elif i + 1 == nr and nl > nr:
                return +1
            elif l[i] < r[i]:
                return -1
            elif l[i] > r[i]:
                return +1
        return 0

    for c in changesets:
        c.entries.sort(entitycompare)

    # Sort changesets by date

    # "odd" collects changeset pairs for which the comparison below is
    # inconsistent (each claims to precede the other); they are reported
    # as a warning after numbering instead of aborting with a traceback
    # (issue3961).
    odd = set()
    def cscmp(l, r, odd=odd):
        d = sum(l.date) - sum(r.date)
        if d:
            return d

        # detect vendor branches and initial commits on a branch
        le = {}
        for e in l.entries:
            le[e.rcs] = e.revision
        re = {}
        for e in r.entries:
            re[e.rcs] = e.revision

        d = 0
        for e in l.entries:
            if re.get(e.rcs, None) == e.parent:
                assert not d
                d = 1
                break

        for e in r.entries:
            if le.get(e.rcs, None) == e.parent:
                if d:
                    odd.add((l, r))
                d = -1
                break

        return d

    changesets.sort(cscmp)

    # Collect tags

    globaltags = {}
    for c in changesets:
        for e in c.entries:
            for tag in e.tags:
                # remember which is the latest changeset to have this tag
                globaltags[tag] = c

    for c in changesets:
        tags = set()
        for e in c.entries:
            tags.update(e.tags)
        # remember tags only if this is the latest changeset to have it
        c.tags = sorted(tag for tag in tags if globaltags[tag] is c)

    # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
    # by inserting dummy changesets with two parents, and handle
    # {{mergefrombranch BRANCHNAME}} by setting two parents.

    if mergeto is None:
        mergeto = r'{{mergetobranch ([-\w]+)}}'
    if mergeto:
        mergeto = re.compile(mergeto)

    if mergefrom is None:
        mergefrom = r'{{mergefrombranch ([-\w]+)}}'
    if mergefrom:
        mergefrom = re.compile(mergefrom)

    versions = {}    # changeset index where we saw any particular file version
    branches = {}    # changeset index where we saw a branch
    n = len(changesets)
    i = 0
    while i < n:
        c = changesets[i]

        for f in c.entries:
            versions[(f.rcs, f.revision)] = i

        p = None
        if c.branch in branches:
            p = branches[c.branch]
        else:
            # first changeset on a new branch
            # the parent is a changeset with the branch in its
            # branchpoints such that it is the latest possible
            # commit without any intervening, unrelated commits.

            for candidate in xrange(i):
                if c.branch not in changesets[candidate].branchpoints:
                    if p is not None:
                        break
                    continue
                p = candidate

        c.parents = []
        if p is not None:
            p = changesets[p]

            # Ensure no changeset has a synthetic changeset as a parent.
            while p.synthetic:
                assert len(p.parents) <= 1, \
                       _('synthetic changeset cannot have multiple parents')
                if p.parents:
                    p = p.parents[0]
                else:
                    p = None
                    break

            if p is not None:
                c.parents.append(p)

        if c.mergepoint:
            if c.mergepoint == 'HEAD':
                c.mergepoint = None
            c.parents.append(changesets[branches[c.mergepoint]])

        if mergefrom:
            m = mergefrom.search(c.comment)
            if m:
                m = m.group(1)
                if m == 'HEAD':
                    m = None
                try:
                    candidate = changesets[branches[m]]
                except KeyError:
                    ui.warn(_("warning: CVS commit message references "
                              "non-existent branch %r:\n%s\n")
                            % (m, c.comment))
                if m in branches and c.branch != m and not candidate.synthetic:
                    c.parents.append(candidate)

        if mergeto:
            m = mergeto.search(c.comment)
            if m:
                if m.groups():
                    m = m.group(1)
                    if m == 'HEAD':
                        m = None
                else:
                    m = None   # if no group found then merge to HEAD
                if m in branches and c.branch != m:
                    # insert empty changeset for merge
                    cc = changeset(
                        author=c.author, branch=m, date=c.date,
                        comment='convert-repo: CVS merge from branch %s'
                        % c.branch,
                        entries=[], tags=[],
                        parents=[changesets[branches[m]], c])
                    changesets.insert(i + 1, cc)
                    branches[m] = i + 1

                    # adjust our loop counters now we have inserted a new entry
                    n += 1
                    i += 2
                    continue

        branches[c.branch] = i
        i += 1

    # Drop synthetic changesets (safe now that we have ensured no other
    # changesets can have them as parents).
    i = 0
    while i < len(changesets):
        if changesets[i].synthetic:
            del changesets[i]
        else:
            i += 1

    # Number changesets

    for i, c in enumerate(changesets):
        c.id = i + 1

    if odd:
        for l, r in odd:
            if l.id is not None and r.id is not None:
                ui.warn(_('changeset %d is both before and after %d\n')
                        % (l.id, r.id))

    ui.status(_('%d changeset entries\n') % len(changesets))

    hook.hook(ui, None, "cvschangesets", True, changesets=changesets)

    return changesets
777
786
778
787
def debugcvsps(ui, *args, **opts):
    '''Read CVS rlog for current directory or named path in
    repository, and convert the log to changesets based on matching
    commit log entries and dates.

    Output mimics the cvsps tool bug-for-bug (including trailing
    whitespace) so existing consumers keep working.  opts control
    caching, revision filtering, branch filtering and how much parent
    and ancestor information is printed.
    '''
    if opts["new_cache"]:
        cache = "write"
    elif opts["update_cache"]:
        cache = "update"
    else:
        cache = None

    revisions = opts["revisions"]

    try:
        if args:
            log = []
            for d in args:
                log += createlog(ui, d, root=opts["root"], cache=cache)
        else:
            log = createlog(ui, root=opts["root"], cache=cache)
    except logerror as e:
        ui.write("%r\n"%e)
        return

    changesets = createchangeset(ui, log, opts["fuzz"])
    del log

    # Print changesets (optionally filtered)

    off = len(revisions)
    branches = {}    # latest version number in each branch
    ancestors = {}   # parent branch
    for cs in changesets:

        if opts["ancestors"]:
            if cs.branch not in branches and cs.parents and cs.parents[0].id:
                ancestors[cs.branch] = (changesets[cs.parents[0].id - 1].branch,
                                        cs.parents[0].id)
            branches[cs.branch] = cs.id

        # limit by branches
        if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
            continue

        if not off:
            # Note: trailing spaces on several lines here are needed to have
            #       bug-for-bug compatibility with cvsps.
            ui.write('---------------------\n')
            ui.write(('PatchSet %d \n' % cs.id))
            ui.write(('Date: %s\n' % util.datestr(cs.date,
                                                  '%Y/%m/%d %H:%M:%S %1%2')))
            ui.write(('Author: %s\n' % cs.author))
            ui.write(('Branch: %s\n' % (cs.branch or 'HEAD')))
            ui.write(('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
                                  ','.join(cs.tags) or '(none)')))
            if cs.branchpoints:
                ui.write(('Branchpoints: %s \n') %
                         ', '.join(sorted(cs.branchpoints)))
            if opts["parents"] and cs.parents:
                if len(cs.parents) > 1:
                    ui.write(('Parents: %s\n' %
                             (','.join([str(p.id) for p in cs.parents]))))
                else:
                    ui.write(('Parent: %d\n' % cs.parents[0].id))

            if opts["ancestors"]:
                b = cs.branch
                r = []
                while b:
                    b, c = ancestors[b]
                    r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
                if r:
                    ui.write(('Ancestors: %s\n' % (','.join(r))))

            ui.write(('Log:\n'))
            ui.write('%s\n\n' % cs.comment)
            ui.write(('Members: \n'))
            for f in cs.entries:
                fn = f.file
                if fn.startswith(opts["prefix"]):
                    fn = fn[len(opts["prefix"]):]
                ui.write('\t%s:%s->%s%s \n' % (
                        fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
                        '.'.join([str(x) for x in f.revision]),
                        ['', '(DEAD)'][f.dead]))
            ui.write('\n')

        # have we seen the start tag?
        if revisions and off:
            if revisions[0] == str(cs.id) or \
                revisions[0] in cs.tags:
                off = False

        # see if we reached the end tag
        if len(revisions) > 1 and not off:
            if revisions[1] == str(cs.id) or \
                revisions[1] in cs.tags:
                break
General Comments 0
You need to be logged in to leave comments. Login now