##// END OF EJS Templates
cvs: skip bad tags...
Brandon Parsons -
r10950:278d4570 stable
parent child Browse files
Show More
@@ -1,845 +1,847 b''
1 # Mercurial built-in replacement for cvsps.
1 # Mercurial built-in replacement for cvsps.
2 #
2 #
3 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
3 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import os
8 import os
9 import re
9 import re
10 import cPickle as pickle
10 import cPickle as pickle
11 from mercurial import util
11 from mercurial import util
12 from mercurial.i18n import _
12 from mercurial.i18n import _
13 from mercurial import hook
13 from mercurial import hook
14
14
15 class logentry(object):
15 class logentry(object):
16 '''Class logentry has the following attributes:
16 '''Class logentry has the following attributes:
17 .author - author name as CVS knows it
17 .author - author name as CVS knows it
18 .branch - name of branch this revision is on
18 .branch - name of branch this revision is on
19 .branches - revision tuple of branches starting at this revision
19 .branches - revision tuple of branches starting at this revision
20 .comment - commit message
20 .comment - commit message
21 .date - the commit date as a (time, tz) tuple
21 .date - the commit date as a (time, tz) tuple
22 .dead - true if file revision is dead
22 .dead - true if file revision is dead
23 .file - Name of file
23 .file - Name of file
24 .lines - a tuple (+lines, -lines) or None
24 .lines - a tuple (+lines, -lines) or None
25 .parent - Previous revision of this entry
25 .parent - Previous revision of this entry
26 .rcs - name of file as returned from CVS
26 .rcs - name of file as returned from CVS
27 .revision - revision number as tuple
27 .revision - revision number as tuple
28 .tags - list of tags on the file
28 .tags - list of tags on the file
29 .synthetic - is this a synthetic "file ... added on ..." revision?
29 .synthetic - is this a synthetic "file ... added on ..." revision?
30 .mergepoint- the branch that has been merged from
30 .mergepoint- the branch that has been merged from
31 (if present in rlog output)
31 (if present in rlog output)
32 .branchpoints- the branches that start at the current entry
32 .branchpoints- the branches that start at the current entry
33 '''
33 '''
34 def __init__(self, **entries):
34 def __init__(self, **entries):
35 self.synthetic = False
35 self.synthetic = False
36 self.__dict__.update(entries)
36 self.__dict__.update(entries)
37
37
38 def __repr__(self):
38 def __repr__(self):
39 return "<%s at 0x%x: %s %s>" % (self.__class__.__name__,
39 return "<%s at 0x%x: %s %s>" % (self.__class__.__name__,
40 id(self),
40 id(self),
41 self.file,
41 self.file,
42 ".".join(map(str, self.revision)))
42 ".".join(map(str, self.revision)))
43
43
44 class logerror(Exception):
44 class logerror(Exception):
45 pass
45 pass
46
46
47 def getrepopath(cvspath):
47 def getrepopath(cvspath):
48 """Return the repository path from a CVS path.
48 """Return the repository path from a CVS path.
49
49
50 >>> getrepopath('/foo/bar')
50 >>> getrepopath('/foo/bar')
51 '/foo/bar'
51 '/foo/bar'
52 >>> getrepopath('c:/foo/bar')
52 >>> getrepopath('c:/foo/bar')
53 'c:/foo/bar'
53 'c:/foo/bar'
54 >>> getrepopath(':pserver:10/foo/bar')
54 >>> getrepopath(':pserver:10/foo/bar')
55 '/foo/bar'
55 '/foo/bar'
56 >>> getrepopath(':pserver:10c:/foo/bar')
56 >>> getrepopath(':pserver:10c:/foo/bar')
57 '/foo/bar'
57 '/foo/bar'
58 >>> getrepopath(':pserver:/foo/bar')
58 >>> getrepopath(':pserver:/foo/bar')
59 '/foo/bar'
59 '/foo/bar'
60 >>> getrepopath(':pserver:c:/foo/bar')
60 >>> getrepopath(':pserver:c:/foo/bar')
61 'c:/foo/bar'
61 'c:/foo/bar'
62 >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
62 >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
63 '/foo/bar'
63 '/foo/bar'
64 >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
64 >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
65 'c:/foo/bar'
65 'c:/foo/bar'
66 """
66 """
67 # According to CVS manual, CVS paths are expressed like:
67 # According to CVS manual, CVS paths are expressed like:
68 # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
68 # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
69 #
69 #
70 # Unfortunately, Windows absolute paths start with a drive letter
70 # Unfortunately, Windows absolute paths start with a drive letter
71 # like 'c:' making it harder to parse. Here we assume that drive
71 # like 'c:' making it harder to parse. Here we assume that drive
72 # letters are only one character long and any CVS component before
72 # letters are only one character long and any CVS component before
73 # the repository path is at least 2 characters long, and use this
73 # the repository path is at least 2 characters long, and use this
74 # to disambiguate.
74 # to disambiguate.
75 parts = cvspath.split(':')
75 parts = cvspath.split(':')
76 if len(parts) == 1:
76 if len(parts) == 1:
77 return parts[0]
77 return parts[0]
78 # Here there is an ambiguous case if we have a port number
78 # Here there is an ambiguous case if we have a port number
79 # immediately followed by a Windows driver letter. We assume this
79 # immediately followed by a Windows driver letter. We assume this
80 # never happens and decide it must be CVS path component,
80 # never happens and decide it must be CVS path component,
81 # therefore ignoring it.
81 # therefore ignoring it.
82 if len(parts[-2]) > 1:
82 if len(parts[-2]) > 1:
83 return parts[-1].lstrip('0123456789')
83 return parts[-1].lstrip('0123456789')
84 return parts[-2] + ':' + parts[-1]
84 return parts[-2] + ':' + parts[-1]
85
85
86 def createlog(ui, directory=None, root="", rlog=True, cache=None):
86 def createlog(ui, directory=None, root="", rlog=True, cache=None):
87 '''Collect the CVS rlog'''
87 '''Collect the CVS rlog'''
88
88
89 # Because we store many duplicate commit log messages, reusing strings
89 # Because we store many duplicate commit log messages, reusing strings
90 # saves a lot of memory and pickle storage space.
90 # saves a lot of memory and pickle storage space.
91 _scache = {}
91 _scache = {}
92 def scache(s):
92 def scache(s):
93 "return a shared version of a string"
93 "return a shared version of a string"
94 return _scache.setdefault(s, s)
94 return _scache.setdefault(s, s)
95
95
96 ui.status(_('collecting CVS rlog\n'))
96 ui.status(_('collecting CVS rlog\n'))
97
97
98 log = [] # list of logentry objects containing the CVS state
98 log = [] # list of logentry objects containing the CVS state
99
99
100 # patterns to match in CVS (r)log output, by state of use
100 # patterns to match in CVS (r)log output, by state of use
101 re_00 = re.compile('RCS file: (.+)$')
101 re_00 = re.compile('RCS file: (.+)$')
102 re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
102 re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
103 re_02 = re.compile('cvs (r?log|server): (.+)\n$')
103 re_02 = re.compile('cvs (r?log|server): (.+)\n$')
104 re_03 = re.compile("(Cannot access.+CVSROOT)|"
104 re_03 = re.compile("(Cannot access.+CVSROOT)|"
105 "(can't create temporary directory.+)$")
105 "(can't create temporary directory.+)$")
106 re_10 = re.compile('Working file: (.+)$')
106 re_10 = re.compile('Working file: (.+)$')
107 re_20 = re.compile('symbolic names:')
107 re_20 = re.compile('symbolic names:')
108 re_30 = re.compile('\t(.+): ([\\d.]+)$')
108 re_30 = re.compile('\t(.+): ([\\d.]+)$')
109 re_31 = re.compile('----------------------------$')
109 re_31 = re.compile('----------------------------$')
110 re_32 = re.compile('======================================='
110 re_32 = re.compile('======================================='
111 '======================================$')
111 '======================================$')
112 re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
112 re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
113 re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
113 re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
114 r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
114 r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
115 r'(.*mergepoint:\s+([^;]+);)?')
115 r'(.*mergepoint:\s+([^;]+);)?')
116 re_70 = re.compile('branches: (.+);$')
116 re_70 = re.compile('branches: (.+);$')
117
117
118 file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')
118 file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')
119
119
120 prefix = '' # leading path to strip of what we get from CVS
120 prefix = '' # leading path to strip of what we get from CVS
121
121
122 if directory is None:
122 if directory is None:
123 # Current working directory
123 # Current working directory
124
124
125 # Get the real directory in the repository
125 # Get the real directory in the repository
126 try:
126 try:
127 prefix = open(os.path.join('CVS','Repository')).read().strip()
127 prefix = open(os.path.join('CVS','Repository')).read().strip()
128 directory = prefix
128 directory = prefix
129 if prefix == ".":
129 if prefix == ".":
130 prefix = ""
130 prefix = ""
131 except IOError:
131 except IOError:
132 raise logerror(_('not a CVS sandbox'))
132 raise logerror(_('not a CVS sandbox'))
133
133
134 if prefix and not prefix.endswith(os.sep):
134 if prefix and not prefix.endswith(os.sep):
135 prefix += os.sep
135 prefix += os.sep
136
136
137 # Use the Root file in the sandbox, if it exists
137 # Use the Root file in the sandbox, if it exists
138 try:
138 try:
139 root = open(os.path.join('CVS','Root')).read().strip()
139 root = open(os.path.join('CVS','Root')).read().strip()
140 except IOError:
140 except IOError:
141 pass
141 pass
142
142
143 if not root:
143 if not root:
144 root = os.environ.get('CVSROOT', '')
144 root = os.environ.get('CVSROOT', '')
145
145
146 # read log cache if one exists
146 # read log cache if one exists
147 oldlog = []
147 oldlog = []
148 date = None
148 date = None
149
149
150 if cache:
150 if cache:
151 cachedir = os.path.expanduser('~/.hg.cvsps')
151 cachedir = os.path.expanduser('~/.hg.cvsps')
152 if not os.path.exists(cachedir):
152 if not os.path.exists(cachedir):
153 os.mkdir(cachedir)
153 os.mkdir(cachedir)
154
154
155 # The cvsps cache pickle needs a uniquified name, based on the
155 # The cvsps cache pickle needs a uniquified name, based on the
156 # repository location. The address may have all sort of nasties
156 # repository location. The address may have all sort of nasties
157 # in it, slashes, colons and such. So here we take just the
157 # in it, slashes, colons and such. So here we take just the
158 # alphanumerics, concatenated in a way that does not mix up the
158 # alphanumerics, concatenated in a way that does not mix up the
159 # various components, so that
159 # various components, so that
160 # :pserver:user@server:/path
160 # :pserver:user@server:/path
161 # and
161 # and
162 # /pserver/user/server/path
162 # /pserver/user/server/path
163 # are mapped to different cache file names.
163 # are mapped to different cache file names.
164 cachefile = root.split(":") + [directory, "cache"]
164 cachefile = root.split(":") + [directory, "cache"]
165 cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
165 cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
166 cachefile = os.path.join(cachedir,
166 cachefile = os.path.join(cachedir,
167 '.'.join([s for s in cachefile if s]))
167 '.'.join([s for s in cachefile if s]))
168
168
169 if cache == 'update':
169 if cache == 'update':
170 try:
170 try:
171 ui.note(_('reading cvs log cache %s\n') % cachefile)
171 ui.note(_('reading cvs log cache %s\n') % cachefile)
172 oldlog = pickle.load(open(cachefile))
172 oldlog = pickle.load(open(cachefile))
173 ui.note(_('cache has %d log entries\n') % len(oldlog))
173 ui.note(_('cache has %d log entries\n') % len(oldlog))
174 except Exception, e:
174 except Exception, e:
175 ui.note(_('error reading cache: %r\n') % e)
175 ui.note(_('error reading cache: %r\n') % e)
176
176
177 if oldlog:
177 if oldlog:
178 date = oldlog[-1].date # last commit date as a (time,tz) tuple
178 date = oldlog[-1].date # last commit date as a (time,tz) tuple
179 date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
179 date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
180
180
181 # build the CVS commandline
181 # build the CVS commandline
182 cmd = ['cvs', '-q']
182 cmd = ['cvs', '-q']
183 if root:
183 if root:
184 cmd.append('-d%s' % root)
184 cmd.append('-d%s' % root)
185 p = util.normpath(getrepopath(root))
185 p = util.normpath(getrepopath(root))
186 if not p.endswith('/'):
186 if not p.endswith('/'):
187 p += '/'
187 p += '/'
188 if prefix:
188 if prefix:
189 # looks like normpath replaces "" by "."
189 # looks like normpath replaces "" by "."
190 prefix = p + util.normpath(prefix)
190 prefix = p + util.normpath(prefix)
191 else:
191 else:
192 prefix = p
192 prefix = p
193 cmd.append(['log', 'rlog'][rlog])
193 cmd.append(['log', 'rlog'][rlog])
194 if date:
194 if date:
195 # no space between option and date string
195 # no space between option and date string
196 cmd.append('-d>%s' % date)
196 cmd.append('-d>%s' % date)
197 cmd.append(directory)
197 cmd.append(directory)
198
198
199 # state machine begins here
199 # state machine begins here
200 tags = {} # dictionary of revisions on current file with their tags
200 tags = {} # dictionary of revisions on current file with their tags
201 branchmap = {} # mapping between branch names and revision numbers
201 branchmap = {} # mapping between branch names and revision numbers
202 state = 0
202 state = 0
203 store = False # set when a new record can be appended
203 store = False # set when a new record can be appended
204
204
205 cmd = [util.shellquote(arg) for arg in cmd]
205 cmd = [util.shellquote(arg) for arg in cmd]
206 ui.note(_("running %s\n") % (' '.join(cmd)))
206 ui.note(_("running %s\n") % (' '.join(cmd)))
207 ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root))
207 ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root))
208
208
209 pfp = util.popen(' '.join(cmd))
209 pfp = util.popen(' '.join(cmd))
210 peek = pfp.readline()
210 peek = pfp.readline()
211 while True:
211 while True:
212 line = peek
212 line = peek
213 if line == '':
213 if line == '':
214 break
214 break
215 peek = pfp.readline()
215 peek = pfp.readline()
216 if line.endswith('\n'):
216 if line.endswith('\n'):
217 line = line[:-1]
217 line = line[:-1]
218 #ui.debug('state=%d line=%r\n' % (state, line))
218 #ui.debug('state=%d line=%r\n' % (state, line))
219
219
220 if state == 0:
220 if state == 0:
221 # initial state, consume input until we see 'RCS file'
221 # initial state, consume input until we see 'RCS file'
222 match = re_00.match(line)
222 match = re_00.match(line)
223 if match:
223 if match:
224 rcs = match.group(1)
224 rcs = match.group(1)
225 tags = {}
225 tags = {}
226 if rlog:
226 if rlog:
227 filename = util.normpath(rcs[:-2])
227 filename = util.normpath(rcs[:-2])
228 if filename.startswith(prefix):
228 if filename.startswith(prefix):
229 filename = filename[len(prefix):]
229 filename = filename[len(prefix):]
230 if filename.startswith('/'):
230 if filename.startswith('/'):
231 filename = filename[1:]
231 filename = filename[1:]
232 if filename.startswith('Attic/'):
232 if filename.startswith('Attic/'):
233 filename = filename[6:]
233 filename = filename[6:]
234 else:
234 else:
235 filename = filename.replace('/Attic/', '/')
235 filename = filename.replace('/Attic/', '/')
236 state = 2
236 state = 2
237 continue
237 continue
238 state = 1
238 state = 1
239 continue
239 continue
240 match = re_01.match(line)
240 match = re_01.match(line)
241 if match:
241 if match:
242 raise Exception(match.group(1))
242 raise Exception(match.group(1))
243 match = re_02.match(line)
243 match = re_02.match(line)
244 if match:
244 if match:
245 raise Exception(match.group(2))
245 raise Exception(match.group(2))
246 if re_03.match(line):
246 if re_03.match(line):
247 raise Exception(line)
247 raise Exception(line)
248
248
249 elif state == 1:
249 elif state == 1:
250 # expect 'Working file' (only when using log instead of rlog)
250 # expect 'Working file' (only when using log instead of rlog)
251 match = re_10.match(line)
251 match = re_10.match(line)
252 assert match, _('RCS file must be followed by working file')
252 assert match, _('RCS file must be followed by working file')
253 filename = util.normpath(match.group(1))
253 filename = util.normpath(match.group(1))
254 state = 2
254 state = 2
255
255
256 elif state == 2:
256 elif state == 2:
257 # expect 'symbolic names'
257 # expect 'symbolic names'
258 if re_20.match(line):
258 if re_20.match(line):
259 branchmap = {}
259 branchmap = {}
260 state = 3
260 state = 3
261
261
262 elif state == 3:
262 elif state == 3:
263 # read the symbolic names and store as tags
263 # read the symbolic names and store as tags
264 match = re_30.match(line)
264 match = re_30.match(line)
265 if match:
265 if match:
266 rev = [int(x) for x in match.group(2).split('.')]
266 rev = [int(x) for x in match.group(2).split('.')]
267
267
268 # Convert magic branch number to an odd-numbered one
268 # Convert magic branch number to an odd-numbered one
269 revn = len(rev)
269 revn = len(rev)
270 if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
270 if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
271 rev = rev[:-2] + rev[-1:]
271 rev = rev[:-2] + rev[-1:]
272 rev = tuple(rev)
272 rev = tuple(rev)
273
273
274 if rev not in tags:
274 if rev not in tags:
275 tags[rev] = []
275 tags[rev] = []
276 tags[rev].append(match.group(1))
276 tags[rev].append(match.group(1))
277 branchmap[match.group(1)] = match.group(2)
277 branchmap[match.group(1)] = match.group(2)
278
278
279 elif re_31.match(line):
279 elif re_31.match(line):
280 state = 5
280 state = 5
281 elif re_32.match(line):
281 elif re_32.match(line):
282 state = 0
282 state = 0
283
283
284 elif state == 4:
284 elif state == 4:
285 # expecting '------' separator before first revision
285 # expecting '------' separator before first revision
286 if re_31.match(line):
286 if re_31.match(line):
287 state = 5
287 state = 5
288 else:
288 else:
289 assert not re_32.match(line), _('must have at least '
289 assert not re_32.match(line), _('must have at least '
290 'some revisions')
290 'some revisions')
291
291
292 elif state == 5:
292 elif state == 5:
293 # expecting revision number and possibly (ignored) lock indication
293 # expecting revision number and possibly (ignored) lock indication
294 # we create the logentry here from values stored in states 0 to 4,
294 # we create the logentry here from values stored in states 0 to 4,
295 # as this state is re-entered for subsequent revisions of a file.
295 # as this state is re-entered for subsequent revisions of a file.
296 match = re_50.match(line)
296 match = re_50.match(line)
297 assert match, _('expected revision number')
297 assert match, _('expected revision number')
298 e = logentry(rcs=scache(rcs), file=scache(filename),
298 e = logentry(rcs=scache(rcs), file=scache(filename),
299 revision=tuple([int(x) for x in match.group(1).split('.')]),
299 revision=tuple([int(x) for x in match.group(1).split('.')]),
300 branches=[], parent=None)
300 branches=[], parent=None)
301 state = 6
301 state = 6
302
302
303 elif state == 6:
303 elif state == 6:
304 # expecting date, author, state, lines changed
304 # expecting date, author, state, lines changed
305 match = re_60.match(line)
305 match = re_60.match(line)
306 assert match, _('revision must be followed by date line')
306 assert match, _('revision must be followed by date line')
307 d = match.group(1)
307 d = match.group(1)
308 if d[2] == '/':
308 if d[2] == '/':
309 # Y2K
309 # Y2K
310 d = '19' + d
310 d = '19' + d
311
311
312 if len(d.split()) != 3:
312 if len(d.split()) != 3:
313 # cvs log dates always in GMT
313 # cvs log dates always in GMT
314 d = d + ' UTC'
314 d = d + ' UTC'
315 e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
315 e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
316 '%Y/%m/%d %H:%M:%S',
316 '%Y/%m/%d %H:%M:%S',
317 '%Y-%m-%d %H:%M:%S'])
317 '%Y-%m-%d %H:%M:%S'])
318 e.author = scache(match.group(2))
318 e.author = scache(match.group(2))
319 e.dead = match.group(3).lower() == 'dead'
319 e.dead = match.group(3).lower() == 'dead'
320
320
321 if match.group(5):
321 if match.group(5):
322 if match.group(6):
322 if match.group(6):
323 e.lines = (int(match.group(5)), int(match.group(6)))
323 e.lines = (int(match.group(5)), int(match.group(6)))
324 else:
324 else:
325 e.lines = (int(match.group(5)), 0)
325 e.lines = (int(match.group(5)), 0)
326 elif match.group(6):
326 elif match.group(6):
327 e.lines = (0, int(match.group(6)))
327 e.lines = (0, int(match.group(6)))
328 else:
328 else:
329 e.lines = None
329 e.lines = None
330
330
331 if match.group(7): # cvsnt mergepoint
331 if match.group(7): # cvsnt mergepoint
332 myrev = match.group(8).split('.')
332 myrev = match.group(8).split('.')
333 if len(myrev) == 2: # head
333 if len(myrev) == 2: # head
334 e.mergepoint = 'HEAD'
334 e.mergepoint = 'HEAD'
335 else:
335 else:
336 myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
336 myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
337 branches = [b for b in branchmap if branchmap[b] == myrev]
337 branches = [b for b in branchmap if branchmap[b] == myrev]
338 assert len(branches) == 1, 'unknown branch: %s' % e.mergepoint
338 assert len(branches) == 1, 'unknown branch: %s' % e.mergepoint
339 e.mergepoint = branches[0]
339 e.mergepoint = branches[0]
340 else:
340 else:
341 e.mergepoint = None
341 e.mergepoint = None
342 e.comment = []
342 e.comment = []
343 state = 7
343 state = 7
344
344
345 elif state == 7:
345 elif state == 7:
346 # read the revision numbers of branches that start at this revision
346 # read the revision numbers of branches that start at this revision
347 # or store the commit log message otherwise
347 # or store the commit log message otherwise
348 m = re_70.match(line)
348 m = re_70.match(line)
349 if m:
349 if m:
350 e.branches = [tuple([int(y) for y in x.strip().split('.')])
350 e.branches = [tuple([int(y) for y in x.strip().split('.')])
351 for x in m.group(1).split(';')]
351 for x in m.group(1).split(';')]
352 state = 8
352 state = 8
353 elif re_31.match(line) and re_50.match(peek):
353 elif re_31.match(line) and re_50.match(peek):
354 state = 5
354 state = 5
355 store = True
355 store = True
356 elif re_32.match(line):
356 elif re_32.match(line):
357 state = 0
357 state = 0
358 store = True
358 store = True
359 else:
359 else:
360 e.comment.append(line)
360 e.comment.append(line)
361
361
362 elif state == 8:
362 elif state == 8:
363 # store commit log message
363 # store commit log message
364 if re_31.match(line):
364 if re_31.match(line):
365 state = 5
365 state = 5
366 store = True
366 store = True
367 elif re_32.match(line):
367 elif re_32.match(line):
368 state = 0
368 state = 0
369 store = True
369 store = True
370 else:
370 else:
371 e.comment.append(line)
371 e.comment.append(line)
372
372
373 # When a file is added on a branch B1, CVS creates a synthetic
373 # When a file is added on a branch B1, CVS creates a synthetic
374 # dead trunk revision 1.1 so that the branch has a root.
374 # dead trunk revision 1.1 so that the branch has a root.
375 # Likewise, if you merge such a file to a later branch B2 (one
375 # Likewise, if you merge such a file to a later branch B2 (one
376 # that already existed when the file was added on B1), CVS
376 # that already existed when the file was added on B1), CVS
377 # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
377 # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
378 # these revisions now, but mark them synthetic so
378 # these revisions now, but mark them synthetic so
379 # createchangeset() can take care of them.
379 # createchangeset() can take care of them.
380 if (store and
380 if (store and
381 e.dead and
381 e.dead and
382 e.revision[-1] == 1 and # 1.1 or 1.1.x.1
382 e.revision[-1] == 1 and # 1.1 or 1.1.x.1
383 len(e.comment) == 1 and
383 len(e.comment) == 1 and
384 file_added_re.match(e.comment[0])):
384 file_added_re.match(e.comment[0])):
385 ui.debug('found synthetic revision in %s: %r\n'
385 ui.debug('found synthetic revision in %s: %r\n'
386 % (e.rcs, e.comment[0]))
386 % (e.rcs, e.comment[0]))
387 e.synthetic = True
387 e.synthetic = True
388
388
389 if store:
389 if store:
390 # clean up the results and save in the log.
390 # clean up the results and save in the log.
391 store = False
391 store = False
392 e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
392 e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
393 e.comment = scache('\n'.join(e.comment))
393 e.comment = scache('\n'.join(e.comment))
394
394
395 revn = len(e.revision)
395 revn = len(e.revision)
396 if revn > 3 and (revn % 2) == 0:
396 if revn > 3 and (revn % 2) == 0:
397 e.branch = tags.get(e.revision[:-1], [None])[0]
397 e.branch = tags.get(e.revision[:-1], [None])[0]
398 else:
398 else:
399 e.branch = None
399 e.branch = None
400
400
401 # find the branches starting from this revision
401 # find the branches starting from this revision
402 branchpoints = set()
402 branchpoints = set()
403 for branch, revision in branchmap.iteritems():
403 for branch, revision in branchmap.iteritems():
404 revparts = tuple([int(i) for i in revision.split('.')])
404 revparts = tuple([int(i) for i in revision.split('.')])
405 if len(revparts) < 2: # bad tags
406 continue
405 if revparts[-2] == 0 and revparts[-1] % 2 == 0:
407 if revparts[-2] == 0 and revparts[-1] % 2 == 0:
406 # normal branch
408 # normal branch
407 if revparts[:-2] == e.revision:
409 if revparts[:-2] == e.revision:
408 branchpoints.add(branch)
410 branchpoints.add(branch)
409 elif revparts == (1, 1, 1): # vendor branch
411 elif revparts == (1, 1, 1): # vendor branch
410 if revparts in e.branches:
412 if revparts in e.branches:
411 branchpoints.add(branch)
413 branchpoints.add(branch)
412 e.branchpoints = branchpoints
414 e.branchpoints = branchpoints
413
415
414 log.append(e)
416 log.append(e)
415
417
416 if len(log) % 100 == 0:
418 if len(log) % 100 == 0:
417 ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')
419 ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')
418
420
419 log.sort(key=lambda x: (x.rcs, x.revision))
421 log.sort(key=lambda x: (x.rcs, x.revision))
420
422
421 # find parent revisions of individual files
423 # find parent revisions of individual files
422 versions = {}
424 versions = {}
423 for e in log:
425 for e in log:
424 branch = e.revision[:-1]
426 branch = e.revision[:-1]
425 p = versions.get((e.rcs, branch), None)
427 p = versions.get((e.rcs, branch), None)
426 if p is None:
428 if p is None:
427 p = e.revision[:-2]
429 p = e.revision[:-2]
428 e.parent = p
430 e.parent = p
429 versions[(e.rcs, branch)] = e.revision
431 versions[(e.rcs, branch)] = e.revision
430
432
431 # update the log cache
433 # update the log cache
432 if cache:
434 if cache:
433 if log:
435 if log:
434 # join up the old and new logs
436 # join up the old and new logs
435 log.sort(key=lambda x: x.date)
437 log.sort(key=lambda x: x.date)
436
438
437 if oldlog and oldlog[-1].date >= log[0].date:
439 if oldlog and oldlog[-1].date >= log[0].date:
438 raise logerror(_('log cache overlaps with new log entries,'
440 raise logerror(_('log cache overlaps with new log entries,'
439 ' re-run without cache.'))
441 ' re-run without cache.'))
440
442
441 log = oldlog + log
443 log = oldlog + log
442
444
443 # write the new cachefile
445 # write the new cachefile
444 ui.note(_('writing cvs log cache %s\n') % cachefile)
446 ui.note(_('writing cvs log cache %s\n') % cachefile)
445 pickle.dump(log, open(cachefile, 'w'))
447 pickle.dump(log, open(cachefile, 'w'))
446 else:
448 else:
447 log = oldlog
449 log = oldlog
448
450
449 ui.status(_('%d log entries\n') % len(log))
451 ui.status(_('%d log entries\n') % len(log))
450
452
451 hook.hook(ui, None, "cvslog", True, log=log)
453 hook.hook(ui, None, "cvslog", True, log=log)
452
454
453 return log
455 return log
454
456
455
457
456 class changeset(object):
458 class changeset(object):
457 '''Class changeset has the following attributes:
459 '''Class changeset has the following attributes:
458 .id - integer identifying this changeset (list index)
460 .id - integer identifying this changeset (list index)
459 .author - author name as CVS knows it
461 .author - author name as CVS knows it
460 .branch - name of branch this changeset is on, or None
462 .branch - name of branch this changeset is on, or None
461 .comment - commit message
463 .comment - commit message
462 .date - the commit date as a (time,tz) tuple
464 .date - the commit date as a (time,tz) tuple
463 .entries - list of logentry objects in this changeset
465 .entries - list of logentry objects in this changeset
464 .parents - list of one or two parent changesets
466 .parents - list of one or two parent changesets
465 .tags - list of tags on this changeset
467 .tags - list of tags on this changeset
466 .synthetic - from synthetic revision "file ... added on branch ..."
468 .synthetic - from synthetic revision "file ... added on branch ..."
467 .mergepoint- the branch that has been merged from
469 .mergepoint- the branch that has been merged from
468 (if present in rlog output)
470 (if present in rlog output)
469 .branchpoints- the branches that start at the current entry
471 .branchpoints- the branches that start at the current entry
470 '''
472 '''
471 def __init__(self, **entries):
473 def __init__(self, **entries):
472 self.synthetic = False
474 self.synthetic = False
473 self.__dict__.update(entries)
475 self.__dict__.update(entries)
474
476
475 def __repr__(self):
477 def __repr__(self):
476 return "<%s at 0x%x: %s>" % (self.__class__.__name__,
478 return "<%s at 0x%x: %s>" % (self.__class__.__name__,
477 id(self),
479 id(self),
478 getattr(self, 'id', "(no id)"))
480 getattr(self, 'id', "(no id)"))
479
481
def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
    '''Convert log into changesets.

    log is a list of logentry objects (one per CVS file revision); it is
    sorted and grouped into changeset objects which are returned as a
    list, each numbered from 1 via its .id attribute.

    fuzz is the maximum difference between two entries' summed
    (time + tz) date values for them to be merged into one changeset.

    mergefrom / mergeto are regex pattern strings searched for in commit
    messages to synthesize merge parents; None selects the built-in
    {{mergefrombranch ...}} / {{mergetobranch ...}} defaults, and a
    false value (e.g. '') disables the feature.
    '''

    ui.status(_('creating changesets\n'))

    # Merge changesets

    # Sorting by (comment, author, branch, date) brings candidate
    # members of the same changeset next to each other, so the grouping
    # loop below only has to compare each entry with the current group.
    log.sort(key=lambda x: (x.comment, x.author, x.branch, x.date))

    changesets = []
    files = set()
    c = None
    for i, e in enumerate(log):

        # Check if log entry belongs to the current changeset or not.

        # Since CVS is file centric, two different file revisions with
        # different branchpoints should be treated as belonging to two
        # different changesets (and the ordering is important and not
        # honoured by cvsps at this point).
        #
        # Consider the following case:
        # foo 1.1 branchpoints: [MYBRANCH]
        # bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2]
        #
        # Here foo is part only of MYBRANCH, but not MYBRANCH2, e.g. a
        # later version of foo may be in MYBRANCH2, so foo should be the
        # first changeset and bar the next and MYBRANCH and MYBRANCH2
        # should both start off of the bar changeset. No provisions are
        # made to ensure that this is, in fact, what happens.
        if not (c and
                  e.comment == c.comment and
                  e.author == c.author and
                  e.branch == c.branch and
                  (not hasattr(e, 'branchpoints') or
                   not hasattr (c, 'branchpoints') or
                   e.branchpoints == c.branchpoints) and
                  ((c.date[0] + c.date[1]) <=
                   (e.date[0] + e.date[1]) <=
                   (c.date[0] + c.date[1]) + fuzz) and
                  e.file not in files):
            # Entry does not fit the current group: start a new changeset.
            c = changeset(comment=e.comment, author=e.author,
                          branch=e.branch, date=e.date, entries=[],
                          mergepoint=getattr(e, 'mergepoint', None),
                          branchpoints=getattr(e, 'branchpoints', set()))
            changesets.append(c)
            files = set()
            # progress feedback every 100 changesets
            if len(changesets) % 100 == 0:
                t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
                ui.status(util.ellipsis(t, 80) + '\n')

        c.entries.append(e)
        files.add(e.file)
        c.date = e.date # changeset date is date of latest commit in it

    # Mark synthetic changesets

    for c in changesets:
        # Synthetic revisions always get their own changeset, because
        # the log message includes the filename. E.g. if you add file3
        # and file4 on a branch, you get four log entries and three
        # changesets:
        # "File file3 was added on branch ..." (synthetic, 1 entry)
        # "File file4 was added on branch ..." (synthetic, 1 entry)
        # "Add file3 and file4 to fix ..." (real, 2 entries)
        # Hence the check for 1 entry here.
        c.synthetic = len(c.entries) == 1 and c.entries[0].synthetic

    # Sort files in each changeset

    for c in changesets:
        def pathcompare(l, r):
            'Mimic cvsps sorting order'
            l = l.split('/')
            r = r.split('/')
            nl = len(l)
            nr = len(r)
            n = min(nl, nr)
            for i in range(n):
                # shorter path (fewer components) sorts first when the
                # shared prefix is equal
                if i + 1 == nl and nl < nr:
                    return -1
                elif i + 1 == nr and nl > nr:
                    return +1
                elif l[i] < r[i]:
                    return -1
                elif l[i] > r[i]:
                    return +1
            return 0
        def entitycompare(l, r):
            return pathcompare(l.file, r.file)

        # Python 2 cmp-style sort (comparison function, not a key)
        c.entries.sort(entitycompare)

    # Sort changesets by date

    def cscmp(l, r):
        d = sum(l.date) - sum(r.date)
        if d:
            return d

        # detect vendor branches and initial commits on a branch
        # Map rcs filename -> revision for both changesets so we can
        # spot a direct parent/child relationship between them.
        le = {}
        for e in l.entries:
            le[e.rcs] = e.revision
        re = {}
        for e in r.entries:
            re[e.rcs] = e.revision

        d = 0
        for e in l.entries:
            if re.get(e.rcs, None) == e.parent:
                # r contains a parent revision of an entry in l:
                # l must come after r
                assert not d
                d = 1
                break

        for e in r.entries:
            if le.get(e.rcs, None) == e.parent:
                assert not d
                d = -1
                break

        return d

    changesets.sort(cscmp)

    # Collect tags

    globaltags = {}
    for c in changesets:
        for e in c.entries:
            for tag in e.tags:
                # remember which is the latest changeset to have this tag
                globaltags[tag] = c

    for c in changesets:
        tags = set()
        for e in c.entries:
            tags.update(e.tags)
        # remember tags only if this is the latest changeset to have it
        c.tags = sorted(tag for tag in tags if globaltags[tag] is c)

    # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
    # by inserting dummy changesets with two parents, and handle
    # {{mergefrombranch BRANCHNAME}} by setting two parents.

    if mergeto is None:
        mergeto = r'{{mergetobranch ([-\w]+)}}'
    if mergeto:
        mergeto = re.compile(mergeto)

    if mergefrom is None:
        mergefrom = r'{{mergefrombranch ([-\w]+)}}'
    if mergefrom:
        mergefrom = re.compile(mergefrom)

    versions = {} # changeset index where we saw any particular file version
    branches = {} # changeset index where we saw a branch
    n = len(changesets)
    i = 0
    # Explicit index loop: the mergeto handling below can insert a dummy
    # merge changeset into the list being walked, so both i and n are
    # adjusted in place.
    while i < n:
        c = changesets[i]

        for f in c.entries:
            versions[(f.rcs, f.revision)] = i

        p = None
        if c.branch in branches:
            p = branches[c.branch]
        else:
            # first changeset on a new branch
            # the parent is a changeset with the branch in its
            # branchpoints such that it is the latest possible
            # commit without any intervening, unrelated commits.

            for candidate in xrange(i):
                if c.branch not in changesets[candidate].branchpoints:
                    if p is not None:
                        break
                    continue
                p = candidate

        c.parents = []
        if p is not None:
            p = changesets[p]

            # Ensure no changeset has a synthetic changeset as a parent.
            while p.synthetic:
                assert len(p.parents) <= 1, \
                    _('synthetic changeset cannot have multiple parents')
                if p.parents:
                    p = p.parents[0]
                else:
                    p = None
                    break

            if p is not None:
                c.parents.append(p)

        if c.mergepoint:
            # a mergepoint of 'HEAD' means the trunk, which is keyed as
            # None in the branches map
            if c.mergepoint == 'HEAD':
                c.mergepoint = None
            c.parents.append(changesets[branches[c.mergepoint]])

        if mergefrom:
            m = mergefrom.search(c.comment)
            if m:
                m = m.group(1)
                if m == 'HEAD':
                    m = None
                try:
                    candidate = changesets[branches[m]]
                except KeyError:
                    # unknown branch name: warn; the `m in branches`
                    # guard below keeps us from using the stale candidate
                    ui.warn(_("warning: CVS commit message references "
                              "non-existent branch %r:\n%s\n")
                            % (m, c.comment))
                if m in branches and c.branch != m and not candidate.synthetic:
                    c.parents.append(candidate)

        if mergeto:
            m = mergeto.search(c.comment)
            if m:
                try:
                    m = m.group(1)
                    if m == 'HEAD':
                        m = None
                except:
                    m = None # if no group found then merge to HEAD
                if m in branches and c.branch != m:
                    # insert empty changeset for merge
                    cc = changeset(
                        author=c.author, branch=m, date=c.date,
                        comment='convert-repo: CVS merge from branch %s'
                        % c.branch,
                        entries=[], tags=[],
                        parents=[changesets[branches[m]], c])
                    changesets.insert(i + 1, cc)
                    branches[m] = i + 1

                    # adjust our loop counters now we have inserted a new entry
                    n += 1
                    i += 2
                    continue

        branches[c.branch] = i
        i += 1

    # Drop synthetic changesets (safe now that we have ensured no other
    # changesets can have them as parents).
    i = 0
    while i < len(changesets):
        if changesets[i].synthetic:
            del changesets[i]
        else:
            i += 1

    # Number changesets

    for i, c in enumerate(changesets):
        c.id = i + 1

    ui.status(_('%d changeset entries\n') % len(changesets))

    hook.hook(ui, None, "cvschangesets", True, changesets=changesets)

    return changesets
745
747
746
748
def debugcvsps(ui, *args, **opts):
    '''Read CVS rlog for current directory or named path in
    repository, and convert the log to changesets based on matching
    commit log entries and dates.

    Options read from opts: new_cache / update_cache (rlog cache mode),
    root (CVS root), fuzz (changeset grouping window), revisions
    (start/end tag or id filter), branches (branch filter), parents,
    ancestors (extra output sections) and prefix (path prefix stripped
    from member filenames). Output format deliberately mimics cvsps,
    including its quirks (see the trailing-space note below).
    '''
    # Select rlog cache behaviour; None disables the cache.
    if opts["new_cache"]:
        cache = "write"
    elif opts["update_cache"]:
        cache = "update"
    else:
        cache = None

    revisions = opts["revisions"]

    try:
        if args:
            # concatenate logs of all named paths
            log = []
            for d in args:
                log += createlog(ui, d, root=opts["root"], cache=cache)
        else:
            log = createlog(ui, root=opts["root"], cache=cache)
    except logerror, e:
        # report the log error and bail out quietly (no traceback)
        ui.write("%r\n"%e)
        return

    changesets = createchangeset(ui, log, opts["fuzz"])
    del log

    # Print changesets (optionally filtered)

    # off is true while we have not yet reached the start revision/tag;
    # a non-empty revisions list suppresses output until then.
    off = len(revisions)
    branches = {} # latest version number in each branch
    ancestors = {} # parent branch
    for cs in changesets:

        if opts["ancestors"]:
            if cs.branch not in branches and cs.parents and cs.parents[0].id:
                ancestors[cs.branch] = (changesets[cs.parents[0].id - 1].branch,
                                        cs.parents[0].id)
            branches[cs.branch] = cs.id

        # limit by branches
        if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
            continue

        if not off:
            # Note: trailing spaces on several lines here are needed to have
            # bug-for-bug compatibility with cvsps.
            ui.write('---------------------\n')
            ui.write('PatchSet %d \n' % cs.id)
            ui.write('Date: %s\n' % util.datestr(cs.date,
                                                 '%Y/%m/%d %H:%M:%S %1%2'))
            ui.write('Author: %s\n' % cs.author)
            ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
            # pluralize 'Tag' only when there is more than one tag
            ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
                                  ','.join(cs.tags) or '(none)'))
            branchpoints = getattr(cs, 'branchpoints', None)
            if branchpoints:
                ui.write('Branchpoints: %s \n' % ', '.join(branchpoints))
            if opts["parents"] and cs.parents:
                if len(cs.parents) > 1:
                    ui.write('Parents: %s\n' %
                             (','.join([str(p.id) for p in cs.parents])))
                else:
                    ui.write('Parent: %d\n' % cs.parents[0].id)

            if opts["ancestors"]:
                # walk up the parent-branch chain collected above
                b = cs.branch
                r = []
                while b:
                    b, c = ancestors[b]
                    r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
                if r:
                    ui.write('Ancestors: %s\n' % (','.join(r)))

            ui.write('Log:\n')
            ui.write('%s\n\n' % cs.comment)
            ui.write('Members: \n')
            for f in cs.entries:
                fn = f.file
                # strip the configured path prefix from member names
                if fn.startswith(opts["prefix"]):
                    fn = fn[len(opts["prefix"]):]
                ui.write('\t%s:%s->%s%s \n' % (
                        fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
                        '.'.join([str(x) for x in f.revision]),
                        ['', '(DEAD)'][f.dead]))
            ui.write('\n')

        # have we seen the start tag?
        if revisions and off:
            if revisions[0] == str(cs.id) or \
                revisions[0] in cs.tags:
                off = False

        # see if we reached the end tag
        if len(revisions) > 1 and not off:
            if revisions[1] == str(cs.id) or \
                revisions[1] in cs.tags:
                break
General Comments 0
You need to be logged in to leave comments. Login now