##// END OF EJS Templates
cvsps: pull function definition out of loop
Martin Geisler -
r15790:52f816b4 default
parent child Browse files
Show More
@@ -1,854 +1,852 b''
1 # Mercurial built-in replacement for cvsps.
1 # Mercurial built-in replacement for cvsps.
2 #
2 #
3 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
3 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import os
8 import os
9 import re
9 import re
10 import cPickle as pickle
10 import cPickle as pickle
11 from mercurial import util
11 from mercurial import util
12 from mercurial.i18n import _
12 from mercurial.i18n import _
13 from mercurial import hook
13 from mercurial import hook
14 from mercurial import util
14 from mercurial import util
15
15
class logentry(object):
    '''A single CVS file revision, as parsed from (r)log output.

    Attributes:
    .author - author name as CVS knows it
    .branch - name of branch this revision is on
    .branches - revision tuple of branches starting at this revision
    .comment - commit message
    .date - the commit date as a (time, tz) tuple
    .dead - true if file revision is dead
    .file - Name of file
    .lines - a tuple (+lines, -lines) or None
    .parent - Previous revision of this entry
    .rcs - name of file as returned from CVS
    .revision - revision number as tuple
    .tags - list of tags on the file
    .synthetic - is this a synthetic "file ... added on ..." revision?
    .mergepoint - the branch that has been merged from
                  (if present in rlog output)
    .branchpoints - the branches that start at the current entry
    '''
    def __init__(self, **entries):
        # most revisions are real; callers flip this flag later for the
        # synthetic "file ... added on branch ..." placeholders
        self.synthetic = False
        self.__dict__.update(entries)

    def __repr__(self):
        revstr = ".".join(map(str, self.revision))
        return "<%s at 0x%x: %s %s>" % (self.__class__.__name__,
                                        id(self), self.file, revstr)
44
44
class logerror(Exception):
    '''Raised when CVS (r)log collection fails or its output cannot be
    parsed (e.g. not a CVS sandbox, an aborted rlog run, or a stale
    log cache overlapping with new entries).'''
    pass
47
47
def getrepopath(cvspath):
    """Return the repository path from a CVS path.

    >>> getrepopath('/foo/bar')
    '/foo/bar'
    >>> getrepopath('c:/foo/bar')
    'c:/foo/bar'
    >>> getrepopath(':pserver:10/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:10c:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:c:/foo/bar')
    'c:/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
    'c:/foo/bar'
    """
    # Per the CVS manual, a CVS path looks like:
    #   [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
    # Windows absolute paths complicate parsing because a drive letter
    # ('c:') also contains a colon.  We assume drive letters are exactly
    # one character and any other CVS component before the repository
    # path is at least two characters, and disambiguate on that basis.
    parts = cvspath.split(':')

    # No colon at all: the argument is already a plain repository path.
    if len(parts) == 1:
        return parts[0]

    # If the next-to-last component is longer than one character it
    # cannot be a drive letter.  A port number may have been glued onto
    # the front of the final path component by the split; we assume a
    # port is never immediately followed by a drive letter, so any
    # leading digits are stripped as the port.
    if len(parts[-2]) > 1:
        return parts[-1].lstrip('0123456789')

    # Single-character component: a Windows drive letter that the split
    # separated from its path -- stitch the two halves back together.
    return ':'.join(parts[-2:])
86
86
def createlog(ui, directory=None, root="", rlog=True, cache=None):
    '''Collect the CVS rlog.

    Runs ``cvs rlog`` (or ``cvs log`` when *rlog* is false) for
    *directory* under the repository *root*, parses the output with a
    line-oriented state machine and returns a list of logentry objects.
    When *cache* is set, entries are cached as a pickle under
    ~/.hg.cvsps; with cache == 'update' a previous cache is reloaded
    and only newer log entries are fetched.  Raises logerror on CVS
    errors or when the cache overlaps the new log.
    '''

    # Because we store many duplicate commit log messages, reusing strings
    # saves a lot of memory and pickle storage space.
    _scache = {}
    def scache(s):
        "return a shared version of a string"
        return _scache.setdefault(s, s)

    ui.status(_('collecting CVS rlog\n'))

    log = []      # list of logentry objects containing the CVS state

    # patterns to match in CVS (r)log output, by state of use
    re_00 = re.compile('RCS file: (.+)$')
    re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
    re_02 = re.compile('cvs (r?log|server): (.+)\n$')
    re_03 = re.compile("(Cannot access.+CVSROOT)|"
                       "(can't create temporary directory.+)$")
    re_10 = re.compile('Working file: (.+)$')
    re_20 = re.compile('symbolic names:')
    re_30 = re.compile('\t(.+): ([\\d.]+)$')
    re_31 = re.compile('----------------------------$')
    re_32 = re.compile('======================================='
                       '======================================$')
    re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
    re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
                       r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
                       r'(.*mergepoint:\s+([^;]+);)?')
    re_70 = re.compile('branches: (.+);$')

    file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')

    prefix = ''   # leading path to strip of what we get from CVS

    if directory is None:
        # Current working directory

        # Get the real directory in the repository
        try:
            prefix = open(os.path.join('CVS','Repository')).read().strip()
            directory = prefix
            if prefix == ".":
                prefix = ""
        except IOError:
            raise logerror(_('not a CVS sandbox'))

        if prefix and not prefix.endswith(os.sep):
            prefix += os.sep

        # Use the Root file in the sandbox, if it exists
        try:
            root = open(os.path.join('CVS','Root')).read().strip()
        except IOError:
            pass

    if not root:
        root = os.environ.get('CVSROOT', '')

    # read log cache if one exists
    oldlog = []
    date = None

    if cache:
        cachedir = os.path.expanduser('~/.hg.cvsps')
        if not os.path.exists(cachedir):
            os.mkdir(cachedir)

        # The cvsps cache pickle needs a uniquified name, based on the
        # repository location. The address may have all sort of nasties
        # in it, slashes, colons and such. So here we take just the
        # alphanumerics, concatenated in a way that does not mix up the
        # various components, so that
        #    :pserver:user@server:/path
        # and
        #    /pserver/user/server/path
        # are mapped to different cache file names.
        cachefile = root.split(":") + [directory, "cache"]
        cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
        cachefile = os.path.join(cachedir,
                                 '.'.join([s for s in cachefile if s]))

    if cache == 'update':
        try:
            ui.note(_('reading cvs log cache %s\n') % cachefile)
            oldlog = pickle.load(open(cachefile))
            ui.note(_('cache has %d log entries\n') % len(oldlog))
        except Exception, e:
            # best effort: a missing/corrupt cache only means a full refetch
            ui.note(_('error reading cache: %r\n') % e)

        if oldlog:
            date = oldlog[-1].date    # last commit date as a (time,tz) tuple
            date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')

    # build the CVS commandline
    cmd = ['cvs', '-q']
    if root:
        cmd.append('-d%s' % root)
        p = util.normpath(getrepopath(root))
        if not p.endswith('/'):
            p += '/'
        if prefix:
            # looks like normpath replaces "" by "."
            prefix = p + util.normpath(prefix)
        else:
            prefix = p
    cmd.append(['log', 'rlog'][rlog])
    if date:
        # no space between option and date string
        cmd.append('-d>%s' % date)
    cmd.append(directory)

    # state machine begins here; the states are:
    #   0: consuming input until an 'RCS file' header
    #   1: expecting 'Working file' (cvs log only)
    #   2: expecting 'symbolic names:'
    #   3: reading symbolic names as tags/branch map
    #   4: expecting the '------' separator before the first revision
    #   5: expecting a revision number line
    #   6: expecting the date/author/state line
    #   7: reading a 'branches:' line or the commit message
    #   8: reading the commit message
    tags = {}     # dictionary of revisions on current file with their tags
    branchmap = {} # mapping between branch names and revision numbers
    state = 0
    store = False # set when a new record can be appended

    cmd = [util.shellquote(arg) for arg in cmd]
    ui.note(_("running %s\n") % (' '.join(cmd)))
    ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root))

    pfp = util.popen(' '.join(cmd))
    peek = pfp.readline()
    while True:
        line = peek
        if line == '':
            break
        peek = pfp.readline()
        if line.endswith('\n'):
            line = line[:-1]
        #ui.debug('state=%d line=%r\n' % (state, line))

        if state == 0:
            # initial state, consume input until we see 'RCS file'
            match = re_00.match(line)
            if match:
                rcs = match.group(1)
                tags = {}
                if rlog:
                    # strip the repository prefix and ',v'/Attic noise to
                    # recover the working file name from the rcs path
                    filename = util.normpath(rcs[:-2])
                    if filename.startswith(prefix):
                        filename = filename[len(prefix):]
                    if filename.startswith('/'):
                        filename = filename[1:]
                    if filename.startswith('Attic/'):
                        filename = filename[6:]
                    else:
                        filename = filename.replace('/Attic/', '/')
                    state = 2
                    continue
                state = 1
                continue
            match = re_01.match(line)
            if match:
                raise logerror(match.group(1))
            match = re_02.match(line)
            if match:
                raise logerror(match.group(2))
            if re_03.match(line):
                raise logerror(line)

        elif state == 1:
            # expect 'Working file' (only when using log instead of rlog)
            match = re_10.match(line)
            assert match, _('RCS file must be followed by working file')
            filename = util.normpath(match.group(1))
            state = 2

        elif state == 2:
            # expect 'symbolic names'
            if re_20.match(line):
                branchmap = {}
                state = 3

        elif state == 3:
            # read the symbolic names and store as tags
            match = re_30.match(line)
            if match:
                rev = [int(x) for x in match.group(2).split('.')]

                # Convert magic branch number to an odd-numbered one
                revn = len(rev)
                if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
                    rev = rev[:-2] + rev[-1:]
                rev = tuple(rev)

                if rev not in tags:
                    tags[rev] = []
                tags[rev].append(match.group(1))
                branchmap[match.group(1)] = match.group(2)

            elif re_31.match(line):
                state = 5
            elif re_32.match(line):
                state = 0

        elif state == 4:
            # expecting '------' separator before first revision
            if re_31.match(line):
                state = 5
            else:
                assert not re_32.match(line), _('must have at least '
                                                'some revisions')

        elif state == 5:
            # expecting revision number and possibly (ignored) lock indication
            # we create the logentry here from values stored in states 0 to 4,
            # as this state is re-entered for subsequent revisions of a file.
            match = re_50.match(line)
            assert match, _('expected revision number')
            e = logentry(rcs=scache(rcs), file=scache(filename),
                         revision=tuple([int(x) for x in
                                         match.group(1).split('.')]),
                         branches=[], parent=None)
            state = 6

        elif state == 6:
            # expecting date, author, state, lines changed
            match = re_60.match(line)
            assert match, _('revision must be followed by date line')
            d = match.group(1)
            if d[2] == '/':
                # Y2K
                d = '19' + d

            if len(d.split()) != 3:
                # cvs log dates always in GMT
                d = d + ' UTC'
            e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
                                        '%Y/%m/%d %H:%M:%S',
                                        '%Y-%m-%d %H:%M:%S'])
            e.author = scache(match.group(2))
            e.dead = match.group(3).lower() == 'dead'

            # lines: group(5) is '+N', group(6) is '-N'; either may be
            # absent when the side contributed no lines
            if match.group(5):
                if match.group(6):
                    e.lines = (int(match.group(5)), int(match.group(6)))
                else:
                    e.lines = (int(match.group(5)), 0)
            elif match.group(6):
                e.lines = (0, int(match.group(6)))
            else:
                e.lines = None

            if match.group(7): # cvsnt mergepoint
                myrev = match.group(8).split('.')
                if len(myrev) == 2: # head
                    e.mergepoint = 'HEAD'
                else:
                    myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
                    branches = [b for b in branchmap if branchmap[b] == myrev]
                    assert len(branches) == 1, \
                        'unknown branch: %s' % e.mergepoint
                    e.mergepoint = branches[0]
            else:
                e.mergepoint = None
            e.comment = []
            state = 7

        elif state == 7:
            # read the revision numbers of branches that start at this revision
            # or store the commit log message otherwise
            m = re_70.match(line)
            if m:
                e.branches = [tuple([int(y) for y in x.strip().split('.')])
                              for x in m.group(1).split(';')]
                state = 8
            elif re_31.match(line) and re_50.match(peek):
                state = 5
                store = True
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        elif state == 8:
            # store commit log message
            if re_31.match(line):
                # a '------' separator is only a revision boundary when the
                # next line is a revision number; otherwise it is part of
                # the commit message itself
                cpeek = peek
                if cpeek.endswith('\n'):
                    cpeek = cpeek[:-1]
                if re_50.match(cpeek):
                    state = 5
                    store = True
                else:
                    e.comment.append(line)
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        # When a file is added on a branch B1, CVS creates a synthetic
        # dead trunk revision 1.1 so that the branch has a root.
        # Likewise, if you merge such a file to a later branch B2 (one
        # that already existed when the file was added on B1), CVS
        # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
        # these revisions now, but mark them synthetic so
        # createchangeset() can take care of them.
        if (store and
              e.dead and
              e.revision[-1] == 1 and      # 1.1 or 1.1.x.1
              len(e.comment) == 1 and
              file_added_re.match(e.comment[0])):
            ui.debug('found synthetic revision in %s: %r\n'
                     % (e.rcs, e.comment[0]))
            e.synthetic = True

        if store:
            # clean up the results and save in the log.
            store = False
            e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
            e.comment = scache('\n'.join(e.comment))

            revn = len(e.revision)
            if revn > 3 and (revn % 2) == 0:
                e.branch = tags.get(e.revision[:-1], [None])[0]
            else:
                e.branch = None

            # find the branches starting from this revision
            branchpoints = set()
            for branch, revision in branchmap.iteritems():
                revparts = tuple([int(i) for i in revision.split('.')])
                if len(revparts) < 2: # bad tags
                    continue
                if revparts[-2] == 0 and revparts[-1] % 2 == 0:
                    # normal branch
                    if revparts[:-2] == e.revision:
                        branchpoints.add(branch)
                elif revparts == (1, 1, 1): # vendor branch
                    if revparts in e.branches:
                        branchpoints.add(branch)
            e.branchpoints = branchpoints

            log.append(e)

            if len(log) % 100 == 0:
                ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')

    log.sort(key=lambda x: (x.rcs, x.revision))

    # find parent revisions of individual files
    versions = {}
    for e in log:
        branch = e.revision[:-1]
        p = versions.get((e.rcs, branch), None)
        if p is None:
            p = e.revision[:-2]
        e.parent = p
        versions[(e.rcs, branch)] = e.revision

    # update the log cache
    if cache:
        if log:
            # join up the old and new logs
            log.sort(key=lambda x: x.date)

            if oldlog and oldlog[-1].date >= log[0].date:
                raise logerror(_('log cache overlaps with new log entries,'
                                 ' re-run without cache.'))

            log = oldlog + log

            # write the new cachefile
            ui.note(_('writing cvs log cache %s\n') % cachefile)
            pickle.dump(log, open(cachefile, 'w'))
        else:
            log = oldlog

    ui.status(_('%d log entries\n') % len(log))

    hook.hook(ui, None, "cvslog", True, log=log)

    return log
463
463
464
464
class changeset(object):
    '''A group of logentry objects folded into one commit.

    Attributes:
    .id - integer identifying this changeset (list index)
    .author - author name as CVS knows it
    .branch - name of branch this changeset is on, or None
    .comment - commit message
    .date - the commit date as a (time,tz) tuple
    .entries - list of logentry objects in this changeset
    .parents - list of one or two parent changesets
    .tags - list of tags on this changeset
    .synthetic - from synthetic revision "file ... added on branch ..."
    .mergepoint - the branch that has been merged from
                  (if present in rlog output)
    .branchpoints - the branches that start at the current entry
    '''
    def __init__(self, **entries):
        # default until proven otherwise; keyword arguments may override
        self.synthetic = False
        self.__dict__.update(entries)

    def __repr__(self):
        ident = getattr(self, 'id', "(no id)")
        return "<%s at 0x%x: %s>" % (self.__class__.__name__,
                                     id(self), ident)
488
488
489 def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
489 def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
490 '''Convert log into changesets.'''
490 '''Convert log into changesets.'''
491
491
492 ui.status(_('creating changesets\n'))
492 ui.status(_('creating changesets\n'))
493
493
494 # Merge changesets
494 # Merge changesets
495
495
496 log.sort(key=lambda x: (x.comment, x.author, x.branch, x.date))
496 log.sort(key=lambda x: (x.comment, x.author, x.branch, x.date))
497
497
498 changesets = []
498 changesets = []
499 files = set()
499 files = set()
500 c = None
500 c = None
501 for i, e in enumerate(log):
501 for i, e in enumerate(log):
502
502
503 # Check if log entry belongs to the current changeset or not.
503 # Check if log entry belongs to the current changeset or not.
504
504
505 # Since CVS is file centric, two different file revisions with
505 # Since CVS is file centric, two different file revisions with
506 # different branchpoints should be treated as belonging to two
506 # different branchpoints should be treated as belonging to two
507 # different changesets (and the ordering is important and not
507 # different changesets (and the ordering is important and not
508 # honoured by cvsps at this point).
508 # honoured by cvsps at this point).
509 #
509 #
510 # Consider the following case:
510 # Consider the following case:
511 # foo 1.1 branchpoints: [MYBRANCH]
511 # foo 1.1 branchpoints: [MYBRANCH]
512 # bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2]
512 # bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2]
513 #
513 #
514 # Here foo is part only of MYBRANCH, but not MYBRANCH2, e.g. a
514 # Here foo is part only of MYBRANCH, but not MYBRANCH2, e.g. a
515 # later version of foo may be in MYBRANCH2, so foo should be the
515 # later version of foo may be in MYBRANCH2, so foo should be the
516 # first changeset and bar the next and MYBRANCH and MYBRANCH2
516 # first changeset and bar the next and MYBRANCH and MYBRANCH2
517 # should both start off of the bar changeset. No provisions are
517 # should both start off of the bar changeset. No provisions are
518 # made to ensure that this is, in fact, what happens.
518 # made to ensure that this is, in fact, what happens.
519 if not (c and
519 if not (c and
520 e.comment == c.comment and
520 e.comment == c.comment and
521 e.author == c.author and
521 e.author == c.author and
522 e.branch == c.branch and
522 e.branch == c.branch and
523 (not util.safehasattr(e, 'branchpoints') or
523 (not util.safehasattr(e, 'branchpoints') or
524 not util.safehasattr (c, 'branchpoints') or
524 not util.safehasattr (c, 'branchpoints') or
525 e.branchpoints == c.branchpoints) and
525 e.branchpoints == c.branchpoints) and
526 ((c.date[0] + c.date[1]) <=
526 ((c.date[0] + c.date[1]) <=
527 (e.date[0] + e.date[1]) <=
527 (e.date[0] + e.date[1]) <=
528 (c.date[0] + c.date[1]) + fuzz) and
528 (c.date[0] + c.date[1]) + fuzz) and
529 e.file not in files):
529 e.file not in files):
530 c = changeset(comment=e.comment, author=e.author,
530 c = changeset(comment=e.comment, author=e.author,
531 branch=e.branch, date=e.date, entries=[],
531 branch=e.branch, date=e.date, entries=[],
532 mergepoint=getattr(e, 'mergepoint', None),
532 mergepoint=getattr(e, 'mergepoint', None),
533 branchpoints=getattr(e, 'branchpoints', set()))
533 branchpoints=getattr(e, 'branchpoints', set()))
534 changesets.append(c)
534 changesets.append(c)
535 files = set()
535 files = set()
536 if len(changesets) % 100 == 0:
536 if len(changesets) % 100 == 0:
537 t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
537 t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
538 ui.status(util.ellipsis(t, 80) + '\n')
538 ui.status(util.ellipsis(t, 80) + '\n')
539
539
540 c.entries.append(e)
540 c.entries.append(e)
541 files.add(e.file)
541 files.add(e.file)
542 c.date = e.date # changeset date is date of latest commit in it
542 c.date = e.date # changeset date is date of latest commit in it
543
543
544 # Mark synthetic changesets
544 # Mark synthetic changesets
545
545
546 for c in changesets:
546 for c in changesets:
547 # Synthetic revisions always get their own changeset, because
547 # Synthetic revisions always get their own changeset, because
548 # the log message includes the filename. E.g. if you add file3
548 # the log message includes the filename. E.g. if you add file3
549 # and file4 on a branch, you get four log entries and three
549 # and file4 on a branch, you get four log entries and three
550 # changesets:
550 # changesets:
551 # "File file3 was added on branch ..." (synthetic, 1 entry)
551 # "File file3 was added on branch ..." (synthetic, 1 entry)
552 # "File file4 was added on branch ..." (synthetic, 1 entry)
552 # "File file4 was added on branch ..." (synthetic, 1 entry)
553 # "Add file3 and file4 to fix ..." (real, 2 entries)
553 # "Add file3 and file4 to fix ..." (real, 2 entries)
554 # Hence the check for 1 entry here.
554 # Hence the check for 1 entry here.
555 c.synthetic = len(c.entries) == 1 and c.entries[0].synthetic
555 c.synthetic = len(c.entries) == 1 and c.entries[0].synthetic
556
556
557 # Sort files in each changeset
557 # Sort files in each changeset
558
558
559 def entitycompare(l, r):
560 'Mimic cvsps sorting order'
561 l = l.file.split('/')
562 r = r.file.split('/')
563 nl = len(l)
564 nr = len(r)
565 n = min(nl, nr)
566 for i in range(n):
567 if i + 1 == nl and nl < nr:
568 return -1
569 elif i + 1 == nr and nl > nr:
570 return +1
571 elif l[i] < r[i]:
572 return -1
573 elif l[i] > r[i]:
574 return +1
575 return 0
576
559 for c in changesets:
577 for c in changesets:
560 def pathcompare(l, r):
561 'Mimic cvsps sorting order'
562 l = l.split('/')
563 r = r.split('/')
564 nl = len(l)
565 nr = len(r)
566 n = min(nl, nr)
567 for i in range(n):
568 if i + 1 == nl and nl < nr:
569 return -1
570 elif i + 1 == nr and nl > nr:
571 return +1
572 elif l[i] < r[i]:
573 return -1
574 elif l[i] > r[i]:
575 return +1
576 return 0
577 def entitycompare(l, r):
578 return pathcompare(l.file, r.file)
579
580 c.entries.sort(entitycompare)
578 c.entries.sort(entitycompare)
581
579
582 # Sort changesets by date
580 # Sort changesets by date
583
581
584 def cscmp(l, r):
582 def cscmp(l, r):
585 d = sum(l.date) - sum(r.date)
583 d = sum(l.date) - sum(r.date)
586 if d:
584 if d:
587 return d
585 return d
588
586
589 # detect vendor branches and initial commits on a branch
587 # detect vendor branches and initial commits on a branch
590 le = {}
588 le = {}
591 for e in l.entries:
589 for e in l.entries:
592 le[e.rcs] = e.revision
590 le[e.rcs] = e.revision
593 re = {}
591 re = {}
594 for e in r.entries:
592 for e in r.entries:
595 re[e.rcs] = e.revision
593 re[e.rcs] = e.revision
596
594
597 d = 0
595 d = 0
598 for e in l.entries:
596 for e in l.entries:
599 if re.get(e.rcs, None) == e.parent:
597 if re.get(e.rcs, None) == e.parent:
600 assert not d
598 assert not d
601 d = 1
599 d = 1
602 break
600 break
603
601
604 for e in r.entries:
602 for e in r.entries:
605 if le.get(e.rcs, None) == e.parent:
603 if le.get(e.rcs, None) == e.parent:
606 assert not d
604 assert not d
607 d = -1
605 d = -1
608 break
606 break
609
607
610 return d
608 return d
611
609
612 changesets.sort(cscmp)
610 changesets.sort(cscmp)
613
611
614 # Collect tags
612 # Collect tags
615
613
616 globaltags = {}
614 globaltags = {}
617 for c in changesets:
615 for c in changesets:
618 for e in c.entries:
616 for e in c.entries:
619 for tag in e.tags:
617 for tag in e.tags:
620 # remember which is the latest changeset to have this tag
618 # remember which is the latest changeset to have this tag
621 globaltags[tag] = c
619 globaltags[tag] = c
622
620
623 for c in changesets:
621 for c in changesets:
624 tags = set()
622 tags = set()
625 for e in c.entries:
623 for e in c.entries:
626 tags.update(e.tags)
624 tags.update(e.tags)
627 # remember tags only if this is the latest changeset to have it
625 # remember tags only if this is the latest changeset to have it
628 c.tags = sorted(tag for tag in tags if globaltags[tag] is c)
626 c.tags = sorted(tag for tag in tags if globaltags[tag] is c)
629
627
630 # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
628 # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
631 # by inserting dummy changesets with two parents, and handle
629 # by inserting dummy changesets with two parents, and handle
632 # {{mergefrombranch BRANCHNAME}} by setting two parents.
630 # {{mergefrombranch BRANCHNAME}} by setting two parents.
633
631
634 if mergeto is None:
632 if mergeto is None:
635 mergeto = r'{{mergetobranch ([-\w]+)}}'
633 mergeto = r'{{mergetobranch ([-\w]+)}}'
636 if mergeto:
634 if mergeto:
637 mergeto = re.compile(mergeto)
635 mergeto = re.compile(mergeto)
638
636
639 if mergefrom is None:
637 if mergefrom is None:
640 mergefrom = r'{{mergefrombranch ([-\w]+)}}'
638 mergefrom = r'{{mergefrombranch ([-\w]+)}}'
641 if mergefrom:
639 if mergefrom:
642 mergefrom = re.compile(mergefrom)
640 mergefrom = re.compile(mergefrom)
643
641
644 versions = {} # changeset index where we saw any particular file version
642 versions = {} # changeset index where we saw any particular file version
645 branches = {} # changeset index where we saw a branch
643 branches = {} # changeset index where we saw a branch
646 n = len(changesets)
644 n = len(changesets)
647 i = 0
645 i = 0
648 while i < n:
646 while i < n:
649 c = changesets[i]
647 c = changesets[i]
650
648
651 for f in c.entries:
649 for f in c.entries:
652 versions[(f.rcs, f.revision)] = i
650 versions[(f.rcs, f.revision)] = i
653
651
654 p = None
652 p = None
655 if c.branch in branches:
653 if c.branch in branches:
656 p = branches[c.branch]
654 p = branches[c.branch]
657 else:
655 else:
658 # first changeset on a new branch
656 # first changeset on a new branch
659 # the parent is a changeset with the branch in its
657 # the parent is a changeset with the branch in its
660 # branchpoints such that it is the latest possible
658 # branchpoints such that it is the latest possible
661 # commit without any intervening, unrelated commits.
659 # commit without any intervening, unrelated commits.
662
660
663 for candidate in xrange(i):
661 for candidate in xrange(i):
664 if c.branch not in changesets[candidate].branchpoints:
662 if c.branch not in changesets[candidate].branchpoints:
665 if p is not None:
663 if p is not None:
666 break
664 break
667 continue
665 continue
668 p = candidate
666 p = candidate
669
667
670 c.parents = []
668 c.parents = []
671 if p is not None:
669 if p is not None:
672 p = changesets[p]
670 p = changesets[p]
673
671
674 # Ensure no changeset has a synthetic changeset as a parent.
672 # Ensure no changeset has a synthetic changeset as a parent.
675 while p.synthetic:
673 while p.synthetic:
676 assert len(p.parents) <= 1, \
674 assert len(p.parents) <= 1, \
677 _('synthetic changeset cannot have multiple parents')
675 _('synthetic changeset cannot have multiple parents')
678 if p.parents:
676 if p.parents:
679 p = p.parents[0]
677 p = p.parents[0]
680 else:
678 else:
681 p = None
679 p = None
682 break
680 break
683
681
684 if p is not None:
682 if p is not None:
685 c.parents.append(p)
683 c.parents.append(p)
686
684
687 if c.mergepoint:
685 if c.mergepoint:
688 if c.mergepoint == 'HEAD':
686 if c.mergepoint == 'HEAD':
689 c.mergepoint = None
687 c.mergepoint = None
690 c.parents.append(changesets[branches[c.mergepoint]])
688 c.parents.append(changesets[branches[c.mergepoint]])
691
689
692 if mergefrom:
690 if mergefrom:
693 m = mergefrom.search(c.comment)
691 m = mergefrom.search(c.comment)
694 if m:
692 if m:
695 m = m.group(1)
693 m = m.group(1)
696 if m == 'HEAD':
694 if m == 'HEAD':
697 m = None
695 m = None
698 try:
696 try:
699 candidate = changesets[branches[m]]
697 candidate = changesets[branches[m]]
700 except KeyError:
698 except KeyError:
701 ui.warn(_("warning: CVS commit message references "
699 ui.warn(_("warning: CVS commit message references "
702 "non-existent branch %r:\n%s\n")
700 "non-existent branch %r:\n%s\n")
703 % (m, c.comment))
701 % (m, c.comment))
704 if m in branches and c.branch != m and not candidate.synthetic:
702 if m in branches and c.branch != m and not candidate.synthetic:
705 c.parents.append(candidate)
703 c.parents.append(candidate)
706
704
707 if mergeto:
705 if mergeto:
708 m = mergeto.search(c.comment)
706 m = mergeto.search(c.comment)
709 if m:
707 if m:
710 try:
708 try:
711 m = m.group(1)
709 m = m.group(1)
712 if m == 'HEAD':
710 if m == 'HEAD':
713 m = None
711 m = None
714 except:
712 except:
715 m = None # if no group found then merge to HEAD
713 m = None # if no group found then merge to HEAD
716 if m in branches and c.branch != m:
714 if m in branches and c.branch != m:
717 # insert empty changeset for merge
715 # insert empty changeset for merge
718 cc = changeset(
716 cc = changeset(
719 author=c.author, branch=m, date=c.date,
717 author=c.author, branch=m, date=c.date,
720 comment='convert-repo: CVS merge from branch %s'
718 comment='convert-repo: CVS merge from branch %s'
721 % c.branch,
719 % c.branch,
722 entries=[], tags=[],
720 entries=[], tags=[],
723 parents=[changesets[branches[m]], c])
721 parents=[changesets[branches[m]], c])
724 changesets.insert(i + 1, cc)
722 changesets.insert(i + 1, cc)
725 branches[m] = i + 1
723 branches[m] = i + 1
726
724
727 # adjust our loop counters now we have inserted a new entry
725 # adjust our loop counters now we have inserted a new entry
728 n += 1
726 n += 1
729 i += 2
727 i += 2
730 continue
728 continue
731
729
732 branches[c.branch] = i
730 branches[c.branch] = i
733 i += 1
731 i += 1
734
732
735 # Drop synthetic changesets (safe now that we have ensured no other
733 # Drop synthetic changesets (safe now that we have ensured no other
736 # changesets can have them as parents).
734 # changesets can have them as parents).
737 i = 0
735 i = 0
738 while i < len(changesets):
736 while i < len(changesets):
739 if changesets[i].synthetic:
737 if changesets[i].synthetic:
740 del changesets[i]
738 del changesets[i]
741 else:
739 else:
742 i += 1
740 i += 1
743
741
744 # Number changesets
742 # Number changesets
745
743
746 for i, c in enumerate(changesets):
744 for i, c in enumerate(changesets):
747 c.id = i + 1
745 c.id = i + 1
748
746
749 ui.status(_('%d changeset entries\n') % len(changesets))
747 ui.status(_('%d changeset entries\n') % len(changesets))
750
748
751 hook.hook(ui, None, "cvschangesets", True, changesets=changesets)
749 hook.hook(ui, None, "cvschangesets", True, changesets=changesets)
752
750
753 return changesets
751 return changesets
754
752
755
753
756 def debugcvsps(ui, *args, **opts):
754 def debugcvsps(ui, *args, **opts):
757 '''Read CVS rlog for current directory or named path in
755 '''Read CVS rlog for current directory or named path in
758 repository, and convert the log to changesets based on matching
756 repository, and convert the log to changesets based on matching
759 commit log entries and dates.
757 commit log entries and dates.
760 '''
758 '''
761 if opts["new_cache"]:
759 if opts["new_cache"]:
762 cache = "write"
760 cache = "write"
763 elif opts["update_cache"]:
761 elif opts["update_cache"]:
764 cache = "update"
762 cache = "update"
765 else:
763 else:
766 cache = None
764 cache = None
767
765
768 revisions = opts["revisions"]
766 revisions = opts["revisions"]
769
767
770 try:
768 try:
771 if args:
769 if args:
772 log = []
770 log = []
773 for d in args:
771 for d in args:
774 log += createlog(ui, d, root=opts["root"], cache=cache)
772 log += createlog(ui, d, root=opts["root"], cache=cache)
775 else:
773 else:
776 log = createlog(ui, root=opts["root"], cache=cache)
774 log = createlog(ui, root=opts["root"], cache=cache)
777 except logerror, e:
775 except logerror, e:
778 ui.write("%r\n"%e)
776 ui.write("%r\n"%e)
779 return
777 return
780
778
781 changesets = createchangeset(ui, log, opts["fuzz"])
779 changesets = createchangeset(ui, log, opts["fuzz"])
782 del log
780 del log
783
781
784 # Print changesets (optionally filtered)
782 # Print changesets (optionally filtered)
785
783
786 off = len(revisions)
784 off = len(revisions)
787 branches = {} # latest version number in each branch
785 branches = {} # latest version number in each branch
788 ancestors = {} # parent branch
786 ancestors = {} # parent branch
789 for cs in changesets:
787 for cs in changesets:
790
788
791 if opts["ancestors"]:
789 if opts["ancestors"]:
792 if cs.branch not in branches and cs.parents and cs.parents[0].id:
790 if cs.branch not in branches and cs.parents and cs.parents[0].id:
793 ancestors[cs.branch] = (changesets[cs.parents[0].id - 1].branch,
791 ancestors[cs.branch] = (changesets[cs.parents[0].id - 1].branch,
794 cs.parents[0].id)
792 cs.parents[0].id)
795 branches[cs.branch] = cs.id
793 branches[cs.branch] = cs.id
796
794
797 # limit by branches
795 # limit by branches
798 if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
796 if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
799 continue
797 continue
800
798
801 if not off:
799 if not off:
802 # Note: trailing spaces on several lines here are needed to have
800 # Note: trailing spaces on several lines here are needed to have
803 # bug-for-bug compatibility with cvsps.
801 # bug-for-bug compatibility with cvsps.
804 ui.write('---------------------\n')
802 ui.write('---------------------\n')
805 ui.write('PatchSet %d \n' % cs.id)
803 ui.write('PatchSet %d \n' % cs.id)
806 ui.write('Date: %s\n' % util.datestr(cs.date,
804 ui.write('Date: %s\n' % util.datestr(cs.date,
807 '%Y/%m/%d %H:%M:%S %1%2'))
805 '%Y/%m/%d %H:%M:%S %1%2'))
808 ui.write('Author: %s\n' % cs.author)
806 ui.write('Author: %s\n' % cs.author)
809 ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
807 ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
810 ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
808 ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
811 ','.join(cs.tags) or '(none)'))
809 ','.join(cs.tags) or '(none)'))
812 branchpoints = getattr(cs, 'branchpoints', None)
810 branchpoints = getattr(cs, 'branchpoints', None)
813 if branchpoints:
811 if branchpoints:
814 ui.write('Branchpoints: %s \n' % ', '.join(branchpoints))
812 ui.write('Branchpoints: %s \n' % ', '.join(branchpoints))
815 if opts["parents"] and cs.parents:
813 if opts["parents"] and cs.parents:
816 if len(cs.parents) > 1:
814 if len(cs.parents) > 1:
817 ui.write('Parents: %s\n' %
815 ui.write('Parents: %s\n' %
818 (','.join([str(p.id) for p in cs.parents])))
816 (','.join([str(p.id) for p in cs.parents])))
819 else:
817 else:
820 ui.write('Parent: %d\n' % cs.parents[0].id)
818 ui.write('Parent: %d\n' % cs.parents[0].id)
821
819
822 if opts["ancestors"]:
820 if opts["ancestors"]:
823 b = cs.branch
821 b = cs.branch
824 r = []
822 r = []
825 while b:
823 while b:
826 b, c = ancestors[b]
824 b, c = ancestors[b]
827 r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
825 r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
828 if r:
826 if r:
829 ui.write('Ancestors: %s\n' % (','.join(r)))
827 ui.write('Ancestors: %s\n' % (','.join(r)))
830
828
831 ui.write('Log:\n')
829 ui.write('Log:\n')
832 ui.write('%s\n\n' % cs.comment)
830 ui.write('%s\n\n' % cs.comment)
833 ui.write('Members: \n')
831 ui.write('Members: \n')
834 for f in cs.entries:
832 for f in cs.entries:
835 fn = f.file
833 fn = f.file
836 if fn.startswith(opts["prefix"]):
834 if fn.startswith(opts["prefix"]):
837 fn = fn[len(opts["prefix"]):]
835 fn = fn[len(opts["prefix"]):]
838 ui.write('\t%s:%s->%s%s \n' % (
836 ui.write('\t%s:%s->%s%s \n' % (
839 fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
837 fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
840 '.'.join([str(x) for x in f.revision]),
838 '.'.join([str(x) for x in f.revision]),
841 ['', '(DEAD)'][f.dead]))
839 ['', '(DEAD)'][f.dead]))
842 ui.write('\n')
840 ui.write('\n')
843
841
844 # have we seen the start tag?
842 # have we seen the start tag?
845 if revisions and off:
843 if revisions and off:
846 if revisions[0] == str(cs.id) or \
844 if revisions[0] == str(cs.id) or \
847 revisions[0] in cs.tags:
845 revisions[0] in cs.tags:
848 off = False
846 off = False
849
847
850 # see if we reached the end tag
848 # see if we reached the end tag
851 if len(revisions) > 1 and not off:
849 if len(revisions) > 1 and not off:
852 if revisions[1] == str(cs.id) or \
850 if revisions[1] == str(cs.id) or \
853 revisions[1] in cs.tags:
851 revisions[1] in cs.tags:
854 break
852 break
General Comments 0
You need to be logged in to leave comments. Login now