##// END OF EJS Templates
convert: fix bug of wrong CVS path parsing without port number (issue3678)...
Blesso -
r19145:0a12e5f3 stable
parent child Browse files
Show More
@@ -1,877 +1,877 b''
1 # Mercurial built-in replacement for cvsps.
1 # Mercurial built-in replacement for cvsps.
2 #
2 #
3 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
3 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import os
8 import os
9 import re
9 import re
10 import cPickle as pickle
10 import cPickle as pickle
11 from mercurial import util
11 from mercurial import util
12 from mercurial.i18n import _
12 from mercurial.i18n import _
13 from mercurial import hook
13 from mercurial import hook
14 from mercurial import util
14 from mercurial import util
15
15
class logentry(object):
    '''Class logentry has the following attributes:
        .author    - author name as CVS knows it
        .branch    - name of branch this revision is on
        .branches  - revision tuple of branches starting at this revision
        .comment   - commit message
        .commitid  - CVS commitid or None
        .date      - the commit date as a (time, tz) tuple
        .dead      - true if file revision is dead
        .file      - Name of file
        .lines     - a tuple (+lines, -lines) or None
        .parent    - Previous revision of this entry
        .rcs       - name of file as returned from CVS
        .revision  - revision number as tuple
        .tags      - list of tags on the file
        .synthetic - is this a synthetic "file ... added on ..." revision?
        .mergepoint - the branch that has been merged from (if present in
                      rlog output) or None
        .branchpoints - the branches that start at the current entry or empty
    '''
    def __init__(self, **entries):
        # default; overridden by the caller via **entries when known
        self.synthetic = False
        self.__dict__.update(entries)

    def __repr__(self):
        # stable, sorted-attribute repr for debugging and cache diagnostics
        items = ("%s=%r"%(k, self.__dict__[k]) for k in sorted(self.__dict__))
        return "%s(%s)"%(type(self).__name__, ", ".join(items))
43
43
44 class logerror(Exception):
44 class logerror(Exception):
45 pass
45 pass
46
46
def getrepopath(cvspath):
    """Return the repository path from a CVS path.

    >>> getrepopath('/foo/bar')
    '/foo/bar'
    >>> getrepopath('c:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:10/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:10c:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:c:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
    '/foo/bar'
    >>> getrepopath('user@server/path/to/repository')
    '/path/to/repository'
    """
    # According to CVS manual, CVS paths are expressed like:
    # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
    #
    # The cvspath is split on ':' and the position of the first '/' after
    # the '@' sign (if any) in the last part is located.  The repository
    # path is the rest of the string from that '/' on, inclusive.  This
    # handles 'host:port/path' forms (issue3678) that the earlier
    # drive-letter heuristic misparsed.
    parts = cvspath.split(':')
    atposition = parts[-1].find('@')
    start = 0

    if atposition != -1:
        # skip past 'user@host' so a '/' inside the user name cannot match
        start = atposition

    repopath = parts[-1][parts[-1].find('/', start):]
    return repopath
85
85
86 def createlog(ui, directory=None, root="", rlog=True, cache=None):
86 def createlog(ui, directory=None, root="", rlog=True, cache=None):
87 '''Collect the CVS rlog'''
87 '''Collect the CVS rlog'''
88
88
89 # Because we store many duplicate commit log messages, reusing strings
89 # Because we store many duplicate commit log messages, reusing strings
90 # saves a lot of memory and pickle storage space.
90 # saves a lot of memory and pickle storage space.
91 _scache = {}
91 _scache = {}
92 def scache(s):
92 def scache(s):
93 "return a shared version of a string"
93 "return a shared version of a string"
94 return _scache.setdefault(s, s)
94 return _scache.setdefault(s, s)
95
95
96 ui.status(_('collecting CVS rlog\n'))
96 ui.status(_('collecting CVS rlog\n'))
97
97
98 log = [] # list of logentry objects containing the CVS state
98 log = [] # list of logentry objects containing the CVS state
99
99
100 # patterns to match in CVS (r)log output, by state of use
100 # patterns to match in CVS (r)log output, by state of use
101 re_00 = re.compile('RCS file: (.+)$')
101 re_00 = re.compile('RCS file: (.+)$')
102 re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
102 re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
103 re_02 = re.compile('cvs (r?log|server): (.+)\n$')
103 re_02 = re.compile('cvs (r?log|server): (.+)\n$')
104 re_03 = re.compile("(Cannot access.+CVSROOT)|"
104 re_03 = re.compile("(Cannot access.+CVSROOT)|"
105 "(can't create temporary directory.+)$")
105 "(can't create temporary directory.+)$")
106 re_10 = re.compile('Working file: (.+)$')
106 re_10 = re.compile('Working file: (.+)$')
107 re_20 = re.compile('symbolic names:')
107 re_20 = re.compile('symbolic names:')
108 re_30 = re.compile('\t(.+): ([\\d.]+)$')
108 re_30 = re.compile('\t(.+): ([\\d.]+)$')
109 re_31 = re.compile('----------------------------$')
109 re_31 = re.compile('----------------------------$')
110 re_32 = re.compile('======================================='
110 re_32 = re.compile('======================================='
111 '======================================$')
111 '======================================$')
112 re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
112 re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
113 re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
113 re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
114 r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
114 r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
115 r'(\s+commitid:\s+([^;]+);)?'
115 r'(\s+commitid:\s+([^;]+);)?'
116 r'(.*mergepoint:\s+([^;]+);)?')
116 r'(.*mergepoint:\s+([^;]+);)?')
117 re_70 = re.compile('branches: (.+);$')
117 re_70 = re.compile('branches: (.+);$')
118
118
119 file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')
119 file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')
120
120
121 prefix = '' # leading path to strip of what we get from CVS
121 prefix = '' # leading path to strip of what we get from CVS
122
122
123 if directory is None:
123 if directory is None:
124 # Current working directory
124 # Current working directory
125
125
126 # Get the real directory in the repository
126 # Get the real directory in the repository
127 try:
127 try:
128 prefix = open(os.path.join('CVS','Repository')).read().strip()
128 prefix = open(os.path.join('CVS','Repository')).read().strip()
129 directory = prefix
129 directory = prefix
130 if prefix == ".":
130 if prefix == ".":
131 prefix = ""
131 prefix = ""
132 except IOError:
132 except IOError:
133 raise logerror(_('not a CVS sandbox'))
133 raise logerror(_('not a CVS sandbox'))
134
134
135 if prefix and not prefix.endswith(os.sep):
135 if prefix and not prefix.endswith(os.sep):
136 prefix += os.sep
136 prefix += os.sep
137
137
138 # Use the Root file in the sandbox, if it exists
138 # Use the Root file in the sandbox, if it exists
139 try:
139 try:
140 root = open(os.path.join('CVS','Root')).read().strip()
140 root = open(os.path.join('CVS','Root')).read().strip()
141 except IOError:
141 except IOError:
142 pass
142 pass
143
143
144 if not root:
144 if not root:
145 root = os.environ.get('CVSROOT', '')
145 root = os.environ.get('CVSROOT', '')
146
146
147 # read log cache if one exists
147 # read log cache if one exists
148 oldlog = []
148 oldlog = []
149 date = None
149 date = None
150
150
151 if cache:
151 if cache:
152 cachedir = os.path.expanduser('~/.hg.cvsps')
152 cachedir = os.path.expanduser('~/.hg.cvsps')
153 if not os.path.exists(cachedir):
153 if not os.path.exists(cachedir):
154 os.mkdir(cachedir)
154 os.mkdir(cachedir)
155
155
156 # The cvsps cache pickle needs a uniquified name, based on the
156 # The cvsps cache pickle needs a uniquified name, based on the
157 # repository location. The address may have all sort of nasties
157 # repository location. The address may have all sort of nasties
158 # in it, slashes, colons and such. So here we take just the
158 # in it, slashes, colons and such. So here we take just the
159 # alphanumeric characters, concatenated in a way that does not
159 # alphanumeric characters, concatenated in a way that does not
160 # mix up the various components, so that
160 # mix up the various components, so that
161 # :pserver:user@server:/path
161 # :pserver:user@server:/path
162 # and
162 # and
163 # /pserver/user/server/path
163 # /pserver/user/server/path
164 # are mapped to different cache file names.
164 # are mapped to different cache file names.
165 cachefile = root.split(":") + [directory, "cache"]
165 cachefile = root.split(":") + [directory, "cache"]
166 cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
166 cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
167 cachefile = os.path.join(cachedir,
167 cachefile = os.path.join(cachedir,
168 '.'.join([s for s in cachefile if s]))
168 '.'.join([s for s in cachefile if s]))
169
169
170 if cache == 'update':
170 if cache == 'update':
171 try:
171 try:
172 ui.note(_('reading cvs log cache %s\n') % cachefile)
172 ui.note(_('reading cvs log cache %s\n') % cachefile)
173 oldlog = pickle.load(open(cachefile))
173 oldlog = pickle.load(open(cachefile))
174 for e in oldlog:
174 for e in oldlog:
175 if not (util.safehasattr(e, 'branchpoints') and
175 if not (util.safehasattr(e, 'branchpoints') and
176 util.safehasattr(e, 'commitid') and
176 util.safehasattr(e, 'commitid') and
177 util.safehasattr(e, 'mergepoint')):
177 util.safehasattr(e, 'mergepoint')):
178 ui.status(_('ignoring old cache\n'))
178 ui.status(_('ignoring old cache\n'))
179 oldlog = []
179 oldlog = []
180 break
180 break
181
181
182 ui.note(_('cache has %d log entries\n') % len(oldlog))
182 ui.note(_('cache has %d log entries\n') % len(oldlog))
183 except Exception, e:
183 except Exception, e:
184 ui.note(_('error reading cache: %r\n') % e)
184 ui.note(_('error reading cache: %r\n') % e)
185
185
186 if oldlog:
186 if oldlog:
187 date = oldlog[-1].date # last commit date as a (time,tz) tuple
187 date = oldlog[-1].date # last commit date as a (time,tz) tuple
188 date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
188 date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
189
189
190 # build the CVS commandline
190 # build the CVS commandline
191 cmd = ['cvs', '-q']
191 cmd = ['cvs', '-q']
192 if root:
192 if root:
193 cmd.append('-d%s' % root)
193 cmd.append('-d%s' % root)
194 p = util.normpath(getrepopath(root))
194 p = util.normpath(getrepopath(root))
195 if not p.endswith('/'):
195 if not p.endswith('/'):
196 p += '/'
196 p += '/'
197 if prefix:
197 if prefix:
198 # looks like normpath replaces "" by "."
198 # looks like normpath replaces "" by "."
199 prefix = p + util.normpath(prefix)
199 prefix = p + util.normpath(prefix)
200 else:
200 else:
201 prefix = p
201 prefix = p
202 cmd.append(['log', 'rlog'][rlog])
202 cmd.append(['log', 'rlog'][rlog])
203 if date:
203 if date:
204 # no space between option and date string
204 # no space between option and date string
205 cmd.append('-d>%s' % date)
205 cmd.append('-d>%s' % date)
206 cmd.append(directory)
206 cmd.append(directory)
207
207
208 # state machine begins here
208 # state machine begins here
209 tags = {} # dictionary of revisions on current file with their tags
209 tags = {} # dictionary of revisions on current file with their tags
210 branchmap = {} # mapping between branch names and revision numbers
210 branchmap = {} # mapping between branch names and revision numbers
211 state = 0
211 state = 0
212 store = False # set when a new record can be appended
212 store = False # set when a new record can be appended
213
213
214 cmd = [util.shellquote(arg) for arg in cmd]
214 cmd = [util.shellquote(arg) for arg in cmd]
215 ui.note(_("running %s\n") % (' '.join(cmd)))
215 ui.note(_("running %s\n") % (' '.join(cmd)))
216 ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root))
216 ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root))
217
217
218 pfp = util.popen(' '.join(cmd))
218 pfp = util.popen(' '.join(cmd))
219 peek = pfp.readline()
219 peek = pfp.readline()
220 while True:
220 while True:
221 line = peek
221 line = peek
222 if line == '':
222 if line == '':
223 break
223 break
224 peek = pfp.readline()
224 peek = pfp.readline()
225 if line.endswith('\n'):
225 if line.endswith('\n'):
226 line = line[:-1]
226 line = line[:-1]
227 #ui.debug('state=%d line=%r\n' % (state, line))
227 #ui.debug('state=%d line=%r\n' % (state, line))
228
228
229 if state == 0:
229 if state == 0:
230 # initial state, consume input until we see 'RCS file'
230 # initial state, consume input until we see 'RCS file'
231 match = re_00.match(line)
231 match = re_00.match(line)
232 if match:
232 if match:
233 rcs = match.group(1)
233 rcs = match.group(1)
234 tags = {}
234 tags = {}
235 if rlog:
235 if rlog:
236 filename = util.normpath(rcs[:-2])
236 filename = util.normpath(rcs[:-2])
237 if filename.startswith(prefix):
237 if filename.startswith(prefix):
238 filename = filename[len(prefix):]
238 filename = filename[len(prefix):]
239 if filename.startswith('/'):
239 if filename.startswith('/'):
240 filename = filename[1:]
240 filename = filename[1:]
241 if filename.startswith('Attic/'):
241 if filename.startswith('Attic/'):
242 filename = filename[6:]
242 filename = filename[6:]
243 else:
243 else:
244 filename = filename.replace('/Attic/', '/')
244 filename = filename.replace('/Attic/', '/')
245 state = 2
245 state = 2
246 continue
246 continue
247 state = 1
247 state = 1
248 continue
248 continue
249 match = re_01.match(line)
249 match = re_01.match(line)
250 if match:
250 if match:
251 raise logerror(match.group(1))
251 raise logerror(match.group(1))
252 match = re_02.match(line)
252 match = re_02.match(line)
253 if match:
253 if match:
254 raise logerror(match.group(2))
254 raise logerror(match.group(2))
255 if re_03.match(line):
255 if re_03.match(line):
256 raise logerror(line)
256 raise logerror(line)
257
257
258 elif state == 1:
258 elif state == 1:
259 # expect 'Working file' (only when using log instead of rlog)
259 # expect 'Working file' (only when using log instead of rlog)
260 match = re_10.match(line)
260 match = re_10.match(line)
261 assert match, _('RCS file must be followed by working file')
261 assert match, _('RCS file must be followed by working file')
262 filename = util.normpath(match.group(1))
262 filename = util.normpath(match.group(1))
263 state = 2
263 state = 2
264
264
265 elif state == 2:
265 elif state == 2:
266 # expect 'symbolic names'
266 # expect 'symbolic names'
267 if re_20.match(line):
267 if re_20.match(line):
268 branchmap = {}
268 branchmap = {}
269 state = 3
269 state = 3
270
270
271 elif state == 3:
271 elif state == 3:
272 # read the symbolic names and store as tags
272 # read the symbolic names and store as tags
273 match = re_30.match(line)
273 match = re_30.match(line)
274 if match:
274 if match:
275 rev = [int(x) for x in match.group(2).split('.')]
275 rev = [int(x) for x in match.group(2).split('.')]
276
276
277 # Convert magic branch number to an odd-numbered one
277 # Convert magic branch number to an odd-numbered one
278 revn = len(rev)
278 revn = len(rev)
279 if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
279 if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
280 rev = rev[:-2] + rev[-1:]
280 rev = rev[:-2] + rev[-1:]
281 rev = tuple(rev)
281 rev = tuple(rev)
282
282
283 if rev not in tags:
283 if rev not in tags:
284 tags[rev] = []
284 tags[rev] = []
285 tags[rev].append(match.group(1))
285 tags[rev].append(match.group(1))
286 branchmap[match.group(1)] = match.group(2)
286 branchmap[match.group(1)] = match.group(2)
287
287
288 elif re_31.match(line):
288 elif re_31.match(line):
289 state = 5
289 state = 5
290 elif re_32.match(line):
290 elif re_32.match(line):
291 state = 0
291 state = 0
292
292
293 elif state == 4:
293 elif state == 4:
294 # expecting '------' separator before first revision
294 # expecting '------' separator before first revision
295 if re_31.match(line):
295 if re_31.match(line):
296 state = 5
296 state = 5
297 else:
297 else:
298 assert not re_32.match(line), _('must have at least '
298 assert not re_32.match(line), _('must have at least '
299 'some revisions')
299 'some revisions')
300
300
301 elif state == 5:
301 elif state == 5:
302 # expecting revision number and possibly (ignored) lock indication
302 # expecting revision number and possibly (ignored) lock indication
303 # we create the logentry here from values stored in states 0 to 4,
303 # we create the logentry here from values stored in states 0 to 4,
304 # as this state is re-entered for subsequent revisions of a file.
304 # as this state is re-entered for subsequent revisions of a file.
305 match = re_50.match(line)
305 match = re_50.match(line)
306 assert match, _('expected revision number')
306 assert match, _('expected revision number')
307 e = logentry(rcs=scache(rcs),
307 e = logentry(rcs=scache(rcs),
308 file=scache(filename),
308 file=scache(filename),
309 revision=tuple([int(x) for x in
309 revision=tuple([int(x) for x in
310 match.group(1).split('.')]),
310 match.group(1).split('.')]),
311 branches=[],
311 branches=[],
312 parent=None,
312 parent=None,
313 commitid=None,
313 commitid=None,
314 mergepoint=None,
314 mergepoint=None,
315 branchpoints=set())
315 branchpoints=set())
316
316
317 state = 6
317 state = 6
318
318
319 elif state == 6:
319 elif state == 6:
320 # expecting date, author, state, lines changed
320 # expecting date, author, state, lines changed
321 match = re_60.match(line)
321 match = re_60.match(line)
322 assert match, _('revision must be followed by date line')
322 assert match, _('revision must be followed by date line')
323 d = match.group(1)
323 d = match.group(1)
324 if d[2] == '/':
324 if d[2] == '/':
325 # Y2K
325 # Y2K
326 d = '19' + d
326 d = '19' + d
327
327
328 if len(d.split()) != 3:
328 if len(d.split()) != 3:
329 # cvs log dates always in GMT
329 # cvs log dates always in GMT
330 d = d + ' UTC'
330 d = d + ' UTC'
331 e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
331 e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
332 '%Y/%m/%d %H:%M:%S',
332 '%Y/%m/%d %H:%M:%S',
333 '%Y-%m-%d %H:%M:%S'])
333 '%Y-%m-%d %H:%M:%S'])
334 e.author = scache(match.group(2))
334 e.author = scache(match.group(2))
335 e.dead = match.group(3).lower() == 'dead'
335 e.dead = match.group(3).lower() == 'dead'
336
336
337 if match.group(5):
337 if match.group(5):
338 if match.group(6):
338 if match.group(6):
339 e.lines = (int(match.group(5)), int(match.group(6)))
339 e.lines = (int(match.group(5)), int(match.group(6)))
340 else:
340 else:
341 e.lines = (int(match.group(5)), 0)
341 e.lines = (int(match.group(5)), 0)
342 elif match.group(6):
342 elif match.group(6):
343 e.lines = (0, int(match.group(6)))
343 e.lines = (0, int(match.group(6)))
344 else:
344 else:
345 e.lines = None
345 e.lines = None
346
346
347 if match.group(7): # cvs 1.12 commitid
347 if match.group(7): # cvs 1.12 commitid
348 e.commitid = match.group(8)
348 e.commitid = match.group(8)
349
349
350 if match.group(9): # cvsnt mergepoint
350 if match.group(9): # cvsnt mergepoint
351 myrev = match.group(10).split('.')
351 myrev = match.group(10).split('.')
352 if len(myrev) == 2: # head
352 if len(myrev) == 2: # head
353 e.mergepoint = 'HEAD'
353 e.mergepoint = 'HEAD'
354 else:
354 else:
355 myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
355 myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
356 branches = [b for b in branchmap if branchmap[b] == myrev]
356 branches = [b for b in branchmap if branchmap[b] == myrev]
357 assert len(branches) == 1, ('unknown branch: %s'
357 assert len(branches) == 1, ('unknown branch: %s'
358 % e.mergepoint)
358 % e.mergepoint)
359 e.mergepoint = branches[0]
359 e.mergepoint = branches[0]
360
360
361 e.comment = []
361 e.comment = []
362 state = 7
362 state = 7
363
363
364 elif state == 7:
364 elif state == 7:
365 # read the revision numbers of branches that start at this revision
365 # read the revision numbers of branches that start at this revision
366 # or store the commit log message otherwise
366 # or store the commit log message otherwise
367 m = re_70.match(line)
367 m = re_70.match(line)
368 if m:
368 if m:
369 e.branches = [tuple([int(y) for y in x.strip().split('.')])
369 e.branches = [tuple([int(y) for y in x.strip().split('.')])
370 for x in m.group(1).split(';')]
370 for x in m.group(1).split(';')]
371 state = 8
371 state = 8
372 elif re_31.match(line) and re_50.match(peek):
372 elif re_31.match(line) and re_50.match(peek):
373 state = 5
373 state = 5
374 store = True
374 store = True
375 elif re_32.match(line):
375 elif re_32.match(line):
376 state = 0
376 state = 0
377 store = True
377 store = True
378 else:
378 else:
379 e.comment.append(line)
379 e.comment.append(line)
380
380
381 elif state == 8:
381 elif state == 8:
382 # store commit log message
382 # store commit log message
383 if re_31.match(line):
383 if re_31.match(line):
384 cpeek = peek
384 cpeek = peek
385 if cpeek.endswith('\n'):
385 if cpeek.endswith('\n'):
386 cpeek = cpeek[:-1]
386 cpeek = cpeek[:-1]
387 if re_50.match(cpeek):
387 if re_50.match(cpeek):
388 state = 5
388 state = 5
389 store = True
389 store = True
390 else:
390 else:
391 e.comment.append(line)
391 e.comment.append(line)
392 elif re_32.match(line):
392 elif re_32.match(line):
393 state = 0
393 state = 0
394 store = True
394 store = True
395 else:
395 else:
396 e.comment.append(line)
396 e.comment.append(line)
397
397
398 # When a file is added on a branch B1, CVS creates a synthetic
398 # When a file is added on a branch B1, CVS creates a synthetic
399 # dead trunk revision 1.1 so that the branch has a root.
399 # dead trunk revision 1.1 so that the branch has a root.
400 # Likewise, if you merge such a file to a later branch B2 (one
400 # Likewise, if you merge such a file to a later branch B2 (one
401 # that already existed when the file was added on B1), CVS
401 # that already existed when the file was added on B1), CVS
402 # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
402 # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
403 # these revisions now, but mark them synthetic so
403 # these revisions now, but mark them synthetic so
404 # createchangeset() can take care of them.
404 # createchangeset() can take care of them.
405 if (store and
405 if (store and
406 e.dead and
406 e.dead and
407 e.revision[-1] == 1 and # 1.1 or 1.1.x.1
407 e.revision[-1] == 1 and # 1.1 or 1.1.x.1
408 len(e.comment) == 1 and
408 len(e.comment) == 1 and
409 file_added_re.match(e.comment[0])):
409 file_added_re.match(e.comment[0])):
410 ui.debug('found synthetic revision in %s: %r\n'
410 ui.debug('found synthetic revision in %s: %r\n'
411 % (e.rcs, e.comment[0]))
411 % (e.rcs, e.comment[0]))
412 e.synthetic = True
412 e.synthetic = True
413
413
414 if store:
414 if store:
415 # clean up the results and save in the log.
415 # clean up the results and save in the log.
416 store = False
416 store = False
417 e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
417 e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
418 e.comment = scache('\n'.join(e.comment))
418 e.comment = scache('\n'.join(e.comment))
419
419
420 revn = len(e.revision)
420 revn = len(e.revision)
421 if revn > 3 and (revn % 2) == 0:
421 if revn > 3 and (revn % 2) == 0:
422 e.branch = tags.get(e.revision[:-1], [None])[0]
422 e.branch = tags.get(e.revision[:-1], [None])[0]
423 else:
423 else:
424 e.branch = None
424 e.branch = None
425
425
426 # find the branches starting from this revision
426 # find the branches starting from this revision
427 branchpoints = set()
427 branchpoints = set()
428 for branch, revision in branchmap.iteritems():
428 for branch, revision in branchmap.iteritems():
429 revparts = tuple([int(i) for i in revision.split('.')])
429 revparts = tuple([int(i) for i in revision.split('.')])
430 if len(revparts) < 2: # bad tags
430 if len(revparts) < 2: # bad tags
431 continue
431 continue
432 if revparts[-2] == 0 and revparts[-1] % 2 == 0:
432 if revparts[-2] == 0 and revparts[-1] % 2 == 0:
433 # normal branch
433 # normal branch
434 if revparts[:-2] == e.revision:
434 if revparts[:-2] == e.revision:
435 branchpoints.add(branch)
435 branchpoints.add(branch)
436 elif revparts == (1, 1, 1): # vendor branch
436 elif revparts == (1, 1, 1): # vendor branch
437 if revparts in e.branches:
437 if revparts in e.branches:
438 branchpoints.add(branch)
438 branchpoints.add(branch)
439 e.branchpoints = branchpoints
439 e.branchpoints = branchpoints
440
440
441 log.append(e)
441 log.append(e)
442
442
443 if len(log) % 100 == 0:
443 if len(log) % 100 == 0:
444 ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')
444 ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')
445
445
446 log.sort(key=lambda x: (x.rcs, x.revision))
446 log.sort(key=lambda x: (x.rcs, x.revision))
447
447
448 # find parent revisions of individual files
448 # find parent revisions of individual files
449 versions = {}
449 versions = {}
450 for e in log:
450 for e in log:
451 branch = e.revision[:-1]
451 branch = e.revision[:-1]
452 p = versions.get((e.rcs, branch), None)
452 p = versions.get((e.rcs, branch), None)
453 if p is None:
453 if p is None:
454 p = e.revision[:-2]
454 p = e.revision[:-2]
455 e.parent = p
455 e.parent = p
456 versions[(e.rcs, branch)] = e.revision
456 versions[(e.rcs, branch)] = e.revision
457
457
458 # update the log cache
458 # update the log cache
459 if cache:
459 if cache:
460 if log:
460 if log:
461 # join up the old and new logs
461 # join up the old and new logs
462 log.sort(key=lambda x: x.date)
462 log.sort(key=lambda x: x.date)
463
463
464 if oldlog and oldlog[-1].date >= log[0].date:
464 if oldlog and oldlog[-1].date >= log[0].date:
465 raise logerror(_('log cache overlaps with new log entries,'
465 raise logerror(_('log cache overlaps with new log entries,'
466 ' re-run without cache.'))
466 ' re-run without cache.'))
467
467
468 log = oldlog + log
468 log = oldlog + log
469
469
470 # write the new cachefile
470 # write the new cachefile
471 ui.note(_('writing cvs log cache %s\n') % cachefile)
471 ui.note(_('writing cvs log cache %s\n') % cachefile)
472 pickle.dump(log, open(cachefile, 'w'))
472 pickle.dump(log, open(cachefile, 'w'))
473 else:
473 else:
474 log = oldlog
474 log = oldlog
475
475
476 ui.status(_('%d log entries\n') % len(log))
476 ui.status(_('%d log entries\n') % len(log))
477
477
478 hook.hook(ui, None, "cvslog", True, log=log)
478 hook.hook(ui, None, "cvslog", True, log=log)
479
479
480 return log
480 return log
481
481
482
482
class changeset(object):
    '''Class changeset has the following attributes:
        .id        - integer identifying this changeset (list index)
        .author    - author name as CVS knows it
        .branch    - name of branch this changeset is on, or None
        .comment   - commit message
        .commitid  - CVS commitid or None
        .date      - the commit date as a (time,tz) tuple
        .entries   - list of logentry objects in this changeset
        .parents   - list of one or two parent changesets
        .tags      - list of tags on this changeset
        .synthetic - from synthetic revision "file ... added on branch ..."
        .mergepoint- the branch that has been merged from or None
        .branchpoints- the branches that start at the current entry or empty
    '''
    def __init__(self, **entries):
        # default; overridden by the caller via **entries when known
        self.synthetic = False
        self.__dict__.update(entries)

    def __repr__(self):
        # stable, sorted-attribute repr for debugging and cache diagnostics
        items = ("%s=%r"%(k, self.__dict__[k]) for k in sorted(self.__dict__))
        return "%s(%s)"%(type(self).__name__, ", ".join(items))
505
505
def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
    '''Convert log into changesets.

    ui        - ui object used for status and warning output
    log       - list of logentry objects (see createlog)
    fuzz      - maximum drift in seconds between two file commits that
                may still be folded into the same changeset
    mergefrom - regex run over commit messages to detect merges from
                another branch (default: {{mergefrombranch NAME}})
    mergeto   - regex run over commit messages to detect merges to
                another branch (default: {{mergetobranch NAME}})

    Returns the list of changeset objects, numbered from 1 via .id.
    '''

    ui.status(_('creating changesets\n'))

    # try to order commitids by date
    mindate = {}
    for e in log:
        if e.commitid:
            if e.commitid in mindate:
                mindate[e.commitid] = min(e.date, mindate[e.commitid])
            else:
                # Do not fold the missing-key default into min(): in
                # Python 2, min(e.date, None) is None (None sorts before
                # any tuple), which would permanently record None for
                # every commitid and defeat the date ordering below.
                mindate[e.commitid] = e.date

    # Merge changesets
    log.sort(key=lambda x: (mindate.get(x.commitid), x.commitid, x.comment,
                            x.author, x.branch, x.date, x.branchpoints))

    changesets = []
    files = set()
    c = None
    for i, e in enumerate(log):

        # Check if log entry belongs to the current changeset or not.

        # Since CVS is file-centric, two different file revisions with
        # different branchpoints should be treated as belonging to two
        # different changesets (and the ordering is important and not
        # honoured by cvsps at this point).
        #
        # Consider the following case:
        # foo 1.1 branchpoints: [MYBRANCH]
        # bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2]
        #
        # Here foo is part only of MYBRANCH, but not MYBRANCH2, e.g. a
        # later version of foo may be in MYBRANCH2, so foo should be the
        # first changeset and bar the next and MYBRANCH and MYBRANCH2
        # should both start off of the bar changeset. No provisions are
        # made to ensure that this is, in fact, what happens.
        if not (c and e.branchpoints == c.branchpoints and
                (# cvs commitids
                 (e.commitid is not None and e.commitid == c.commitid) or
                 (# no commitids, use fuzzy commit detection
                  (e.commitid is None or c.commitid is None) and
                  e.comment == c.comment and
                  e.author == c.author and
                  e.branch == c.branch and
                  ((c.date[0] + c.date[1]) <=
                   (e.date[0] + e.date[1]) <=
                   (c.date[0] + c.date[1]) + fuzz) and
                  e.file not in files))):
            c = changeset(comment=e.comment, author=e.author,
                          branch=e.branch, date=e.date,
                          entries=[], mergepoint=e.mergepoint,
                          branchpoints=e.branchpoints, commitid=e.commitid)
            changesets.append(c)

            files = set()
            if len(changesets) % 100 == 0:
                t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
                ui.status(util.ellipsis(t, 80) + '\n')

        c.entries.append(e)
        files.add(e.file)
        c.date = e.date       # changeset date is date of latest commit in it

    # Mark synthetic changesets

    for c in changesets:
        # Synthetic revisions always get their own changeset, because
        # the log message includes the filename. E.g. if you add file3
        # and file4 on a branch, you get four log entries and three
        # changesets:
        #   "File file3 was added on branch ..." (synthetic, 1 entry)
        #   "File file4 was added on branch ..." (synthetic, 1 entry)
        #   "Add file3 and file4 to fix ..."     (real, 2 entries)
        # Hence the check for 1 entry here.
        c.synthetic = len(c.entries) == 1 and c.entries[0].synthetic

    # Sort files in each changeset

    def entitycompare(l, r):
        'Mimic cvsps sorting order'
        l = l.file.split('/')
        r = r.file.split('/')
        nl = len(l)
        nr = len(r)
        n = min(nl, nr)
        for i in range(n):
            if i + 1 == nl and nl < nr:
                return -1
            elif i + 1 == nr and nl > nr:
                return +1
            elif l[i] < r[i]:
                return -1
            elif l[i] > r[i]:
                return +1
        return 0

    for c in changesets:
        c.entries.sort(entitycompare)

    # Sort changesets by date

    def cscmp(l, r):
        # Primary key: total commit time (time + tz offset).
        d = sum(l.date) - sum(r.date)
        if d:
            return d

        # detect vendor branches and initial commits on a branch
        le = {}
        for e in l.entries:
            le[e.rcs] = e.revision
        re = {}
        for e in r.entries:
            re[e.rcs] = e.revision

        # If one changeset contains the parent revision of a file in the
        # other, it must sort before it, regardless of timestamps.
        d = 0
        for e in l.entries:
            if re.get(e.rcs, None) == e.parent:
                assert not d
                d = 1
                break

        for e in r.entries:
            if le.get(e.rcs, None) == e.parent:
                assert not d
                d = -1
                break

        return d

    changesets.sort(cscmp)

    # Collect tags

    globaltags = {}
    for c in changesets:
        for e in c.entries:
            for tag in e.tags:
                # remember which is the latest changeset to have this tag
                globaltags[tag] = c

    for c in changesets:
        tags = set()
        for e in c.entries:
            tags.update(e.tags)
        # remember tags only if this is the latest changeset to have it
        c.tags = sorted(tag for tag in tags if globaltags[tag] is c)

    # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
    # by inserting dummy changesets with two parents, and handle
    # {{mergefrombranch BRANCHNAME}} by setting two parents.

    if mergeto is None:
        mergeto = r'{{mergetobranch ([-\w]+)}}'
    if mergeto:
        mergeto = re.compile(mergeto)

    if mergefrom is None:
        mergefrom = r'{{mergefrombranch ([-\w]+)}}'
    if mergefrom:
        mergefrom = re.compile(mergefrom)

    versions = {} # changeset index where we saw any particular file version
    branches = {} # changeset index where we saw a branch
    n = len(changesets)
    i = 0
    while i < n:
        c = changesets[i]

        for f in c.entries:
            versions[(f.rcs, f.revision)] = i

        p = None
        if c.branch in branches:
            p = branches[c.branch]
        else:
            # first changeset on a new branch
            # the parent is a changeset with the branch in its
            # branchpoints such that it is the latest possible
            # commit without any intervening, unrelated commits.

            for candidate in xrange(i):
                if c.branch not in changesets[candidate].branchpoints:
                    if p is not None:
                        break
                    continue
                p = candidate

        c.parents = []
        if p is not None:
            p = changesets[p]

            # Ensure no changeset has a synthetic changeset as a parent.
            while p.synthetic:
                assert len(p.parents) <= 1, \
                       _('synthetic changeset cannot have multiple parents')
                if p.parents:
                    p = p.parents[0]
                else:
                    p = None
                    break

            if p is not None:
                c.parents.append(p)

        if c.mergepoint:
            if c.mergepoint == 'HEAD':
                c.mergepoint = None
            c.parents.append(changesets[branches[c.mergepoint]])

        if mergefrom:
            m = mergefrom.search(c.comment)
            if m:
                m = m.group(1)
                if m == 'HEAD':
                    m = None
                try:
                    candidate = changesets[branches[m]]
                except KeyError:
                    ui.warn(_("warning: CVS commit message references "
                              "non-existent branch %r:\n%s\n")
                            % (m, c.comment))
                if m in branches and c.branch != m and not candidate.synthetic:
                    c.parents.append(candidate)

        if mergeto:
            m = mergeto.search(c.comment)
            if m:
                if m.groups():
                    m = m.group(1)
                    if m == 'HEAD':
                        m = None
                else:
                    m = None # if no group found then merge to HEAD
                if m in branches and c.branch != m:
                    # insert empty changeset for merge
                    cc = changeset(
                        author=c.author, branch=m, date=c.date,
                        comment='convert-repo: CVS merge from branch %s'
                        % c.branch,
                        entries=[], tags=[],
                        parents=[changesets[branches[m]], c])
                    changesets.insert(i + 1, cc)
                    branches[m] = i + 1

                    # adjust our loop counters now we have inserted a new entry
                    n += 1
                    i += 2
                    continue

        branches[c.branch] = i
        i += 1

    # Drop synthetic changesets (safe now that we have ensured no other
    # changesets can have them as parents).
    i = 0
    while i < len(changesets):
        if changesets[i].synthetic:
            del changesets[i]
        else:
            i += 1

    # Number changesets

    for i, c in enumerate(changesets):
        c.id = i + 1

    ui.status(_('%d changeset entries\n') % len(changesets))

    hook.hook(ui, None, "cvschangesets", True, changesets=changesets)

    return changesets
777
777
778
778
def debugcvsps(ui, *args, **opts):
    '''Read CVS rlog for current directory or named path in
    repository, and convert the log to changesets based on matching
    commit log entries and dates.
    '''
    # Translate the cache-related command-line flags into the single
    # cache mode understood by createlog().
    if opts["new_cache"]:
        cache = "write"
    elif opts["update_cache"]:
        cache = "update"
    else:
        cache = None

    # Optional start (and end) revision/tag used to filter the output below.
    revisions = opts["revisions"]

    try:
        if args:
            # Concatenate the logs of all named paths into one list.
            log = []
            for d in args:
                log += createlog(ui, d, root=opts["root"], cache=cache)
        else:
            log = createlog(ui, root=opts["root"], cache=cache)
    except logerror, e:
        ui.write("%r\n"%e)
        return

    changesets = createchangeset(ui, log, opts["fuzz"])
    del log  # free the raw log entries; only changesets are needed now

    # Print changesets (optionally filtered)

    # 'off' is truthy while we have not yet reached the requested start
    # revision; nothing is printed while it is set.
    off = len(revisions)
    branches = {} # latest version number in each branch
    ancestors = {} # parent branch
    for cs in changesets:

        if opts["ancestors"]:
            # Record, once per branch, which (branch, changeset id) it
            # was forked from; cs.id is 1-based, hence the - 1 index.
            if cs.branch not in branches and cs.parents and cs.parents[0].id:
                ancestors[cs.branch] = (changesets[cs.parents[0].id - 1].branch,
                                        cs.parents[0].id)
            branches[cs.branch] = cs.id

        # limit by branches
        if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
            continue

        if not off:
            # Note: trailing spaces on several lines here are needed to have
            # bug-for-bug compatibility with cvsps.
            ui.write('---------------------\n')
            ui.write(('PatchSet %d \n' % cs.id))
            ui.write(('Date: %s\n' % util.datestr(cs.date,
                                                  '%Y/%m/%d %H:%M:%S %1%2')))
            ui.write(('Author: %s\n' % cs.author))
            ui.write(('Branch: %s\n' % (cs.branch or 'HEAD')))
            # ['', 's'][...] pluralizes "Tag" when there is more than one.
            ui.write(('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
                                        ','.join(cs.tags) or '(none)')))
            if cs.branchpoints:
                ui.write(('Branchpoints: %s \n') %
                         ', '.join(sorted(cs.branchpoints)))
            if opts["parents"] and cs.parents:
                if len(cs.parents) > 1:
                    ui.write(('Parents: %s\n' %
                              (','.join([str(p.id) for p in cs.parents]))))
                else:
                    ui.write(('Parent: %d\n' % cs.parents[0].id))

            if opts["ancestors"]:
                # Walk the ancestors map up to the root (HEAD), collecting
                # one branch:forkid:latestid triple per step.
                b = cs.branch
                r = []
                while b:
                    b, c = ancestors[b]
                    r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
                if r:
                    ui.write(('Ancestors: %s\n' % (','.join(r))))

            ui.write(('Log:\n'))
            ui.write('%s\n\n' % cs.comment)
            ui.write(('Members: \n'))
            for f in cs.entries:
                fn = f.file
                # Strip the user-supplied path prefix from file names.
                if fn.startswith(opts["prefix"]):
                    fn = fn[len(opts["prefix"]):]
                ui.write('\t%s:%s->%s%s \n' % (
                        fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
                        '.'.join([str(x) for x in f.revision]),
                        ['', '(DEAD)'][f.dead]))
            ui.write('\n')

        # have we seen the start tag?
        if revisions and off:
            if revisions[0] == str(cs.id) or \
                revisions[0] in cs.tags:
                off = False

        # see if we reached the end tag
        if len(revisions) > 1 and not off:
            if revisions[1] == str(cs.id) or \
                revisions[1] in cs.tags:
                break
General Comments 0
You need to be logged in to leave comments. Login now