##// END OF EJS Templates
convert: detect false cset boundaries in cvsps descriptions
jakob krainz -
r15205:4e5b7d13 default
parent child Browse files
Show More
@@ -1,848 +1,854 b''
1 # Mercurial built-in replacement for cvsps.
1 # Mercurial built-in replacement for cvsps.
2 #
2 #
3 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
3 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import os
8 import os
9 import re
9 import re
10 import cPickle as pickle
10 import cPickle as pickle
11 from mercurial import util
11 from mercurial import util
12 from mercurial.i18n import _
12 from mercurial.i18n import _
13 from mercurial import hook
13 from mercurial import hook
14 from mercurial import util
14 from mercurial import util
15
15
16 class logentry(object):
16 class logentry(object):
17 '''Class logentry has the following attributes:
17 '''Class logentry has the following attributes:
18 .author - author name as CVS knows it
18 .author - author name as CVS knows it
19 .branch - name of branch this revision is on
19 .branch - name of branch this revision is on
20 .branches - revision tuple of branches starting at this revision
20 .branches - revision tuple of branches starting at this revision
21 .comment - commit message
21 .comment - commit message
22 .date - the commit date as a (time, tz) tuple
22 .date - the commit date as a (time, tz) tuple
23 .dead - true if file revision is dead
23 .dead - true if file revision is dead
24 .file - Name of file
24 .file - Name of file
25 .lines - a tuple (+lines, -lines) or None
25 .lines - a tuple (+lines, -lines) or None
26 .parent - Previous revision of this entry
26 .parent - Previous revision of this entry
27 .rcs - name of file as returned from CVS
27 .rcs - name of file as returned from CVS
28 .revision - revision number as tuple
28 .revision - revision number as tuple
29 .tags - list of tags on the file
29 .tags - list of tags on the file
30 .synthetic - is this a synthetic "file ... added on ..." revision?
30 .synthetic - is this a synthetic "file ... added on ..." revision?
31 .mergepoint- the branch that has been merged from
31 .mergepoint- the branch that has been merged from
32 (if present in rlog output)
32 (if present in rlog output)
33 .branchpoints- the branches that start at the current entry
33 .branchpoints- the branches that start at the current entry
34 '''
34 '''
35 def __init__(self, **entries):
35 def __init__(self, **entries):
36 self.synthetic = False
36 self.synthetic = False
37 self.__dict__.update(entries)
37 self.__dict__.update(entries)
38
38
39 def __repr__(self):
39 def __repr__(self):
40 return "<%s at 0x%x: %s %s>" % (self.__class__.__name__,
40 return "<%s at 0x%x: %s %s>" % (self.__class__.__name__,
41 id(self),
41 id(self),
42 self.file,
42 self.file,
43 ".".join(map(str, self.revision)))
43 ".".join(map(str, self.revision)))
44
44
45 class logerror(Exception):
45 class logerror(Exception):
46 pass
46 pass
47
47
48 def getrepopath(cvspath):
48 def getrepopath(cvspath):
49 """Return the repository path from a CVS path.
49 """Return the repository path from a CVS path.
50
50
51 >>> getrepopath('/foo/bar')
51 >>> getrepopath('/foo/bar')
52 '/foo/bar'
52 '/foo/bar'
53 >>> getrepopath('c:/foo/bar')
53 >>> getrepopath('c:/foo/bar')
54 'c:/foo/bar'
54 'c:/foo/bar'
55 >>> getrepopath(':pserver:10/foo/bar')
55 >>> getrepopath(':pserver:10/foo/bar')
56 '/foo/bar'
56 '/foo/bar'
57 >>> getrepopath(':pserver:10c:/foo/bar')
57 >>> getrepopath(':pserver:10c:/foo/bar')
58 '/foo/bar'
58 '/foo/bar'
59 >>> getrepopath(':pserver:/foo/bar')
59 >>> getrepopath(':pserver:/foo/bar')
60 '/foo/bar'
60 '/foo/bar'
61 >>> getrepopath(':pserver:c:/foo/bar')
61 >>> getrepopath(':pserver:c:/foo/bar')
62 'c:/foo/bar'
62 'c:/foo/bar'
63 >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
63 >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
64 '/foo/bar'
64 '/foo/bar'
65 >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
65 >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
66 'c:/foo/bar'
66 'c:/foo/bar'
67 """
67 """
68 # According to CVS manual, CVS paths are expressed like:
68 # According to CVS manual, CVS paths are expressed like:
69 # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
69 # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
70 #
70 #
71 # Unfortunately, Windows absolute paths start with a drive letter
71 # Unfortunately, Windows absolute paths start with a drive letter
72 # like 'c:' making it harder to parse. Here we assume that drive
72 # like 'c:' making it harder to parse. Here we assume that drive
73 # letters are only one character long and any CVS component before
73 # letters are only one character long and any CVS component before
74 # the repository path is at least 2 characters long, and use this
74 # the repository path is at least 2 characters long, and use this
75 # to disambiguate.
75 # to disambiguate.
76 parts = cvspath.split(':')
76 parts = cvspath.split(':')
77 if len(parts) == 1:
77 if len(parts) == 1:
78 return parts[0]
78 return parts[0]
79 # Here there is an ambiguous case if we have a port number
79 # Here there is an ambiguous case if we have a port number
80 # immediately followed by a Windows driver letter. We assume this
80 # immediately followed by a Windows driver letter. We assume this
81 # never happens and decide it must be CVS path component,
81 # never happens and decide it must be CVS path component,
82 # therefore ignoring it.
82 # therefore ignoring it.
83 if len(parts[-2]) > 1:
83 if len(parts[-2]) > 1:
84 return parts[-1].lstrip('0123456789')
84 return parts[-1].lstrip('0123456789')
85 return parts[-2] + ':' + parts[-1]
85 return parts[-2] + ':' + parts[-1]
86
86
87 def createlog(ui, directory=None, root="", rlog=True, cache=None):
87 def createlog(ui, directory=None, root="", rlog=True, cache=None):
88 '''Collect the CVS rlog'''
88 '''Collect the CVS rlog'''
89
89
90 # Because we store many duplicate commit log messages, reusing strings
90 # Because we store many duplicate commit log messages, reusing strings
91 # saves a lot of memory and pickle storage space.
91 # saves a lot of memory and pickle storage space.
92 _scache = {}
92 _scache = {}
93 def scache(s):
93 def scache(s):
94 "return a shared version of a string"
94 "return a shared version of a string"
95 return _scache.setdefault(s, s)
95 return _scache.setdefault(s, s)
96
96
97 ui.status(_('collecting CVS rlog\n'))
97 ui.status(_('collecting CVS rlog\n'))
98
98
99 log = [] # list of logentry objects containing the CVS state
99 log = [] # list of logentry objects containing the CVS state
100
100
101 # patterns to match in CVS (r)log output, by state of use
101 # patterns to match in CVS (r)log output, by state of use
102 re_00 = re.compile('RCS file: (.+)$')
102 re_00 = re.compile('RCS file: (.+)$')
103 re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
103 re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
104 re_02 = re.compile('cvs (r?log|server): (.+)\n$')
104 re_02 = re.compile('cvs (r?log|server): (.+)\n$')
105 re_03 = re.compile("(Cannot access.+CVSROOT)|"
105 re_03 = re.compile("(Cannot access.+CVSROOT)|"
106 "(can't create temporary directory.+)$")
106 "(can't create temporary directory.+)$")
107 re_10 = re.compile('Working file: (.+)$')
107 re_10 = re.compile('Working file: (.+)$')
108 re_20 = re.compile('symbolic names:')
108 re_20 = re.compile('symbolic names:')
109 re_30 = re.compile('\t(.+): ([\\d.]+)$')
109 re_30 = re.compile('\t(.+): ([\\d.]+)$')
110 re_31 = re.compile('----------------------------$')
110 re_31 = re.compile('----------------------------$')
111 re_32 = re.compile('======================================='
111 re_32 = re.compile('======================================='
112 '======================================$')
112 '======================================$')
113 re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
113 re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
114 re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
114 re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
115 r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
115 r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
116 r'(.*mergepoint:\s+([^;]+);)?')
116 r'(.*mergepoint:\s+([^;]+);)?')
117 re_70 = re.compile('branches: (.+);$')
117 re_70 = re.compile('branches: (.+);$')
118
118
119 file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')
119 file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')
120
120
121 prefix = '' # leading path to strip of what we get from CVS
121 prefix = '' # leading path to strip of what we get from CVS
122
122
123 if directory is None:
123 if directory is None:
124 # Current working directory
124 # Current working directory
125
125
126 # Get the real directory in the repository
126 # Get the real directory in the repository
127 try:
127 try:
128 prefix = open(os.path.join('CVS','Repository')).read().strip()
128 prefix = open(os.path.join('CVS','Repository')).read().strip()
129 directory = prefix
129 directory = prefix
130 if prefix == ".":
130 if prefix == ".":
131 prefix = ""
131 prefix = ""
132 except IOError:
132 except IOError:
133 raise logerror(_('not a CVS sandbox'))
133 raise logerror(_('not a CVS sandbox'))
134
134
135 if prefix and not prefix.endswith(os.sep):
135 if prefix and not prefix.endswith(os.sep):
136 prefix += os.sep
136 prefix += os.sep
137
137
138 # Use the Root file in the sandbox, if it exists
138 # Use the Root file in the sandbox, if it exists
139 try:
139 try:
140 root = open(os.path.join('CVS','Root')).read().strip()
140 root = open(os.path.join('CVS','Root')).read().strip()
141 except IOError:
141 except IOError:
142 pass
142 pass
143
143
144 if not root:
144 if not root:
145 root = os.environ.get('CVSROOT', '')
145 root = os.environ.get('CVSROOT', '')
146
146
147 # read log cache if one exists
147 # read log cache if one exists
148 oldlog = []
148 oldlog = []
149 date = None
149 date = None
150
150
151 if cache:
151 if cache:
152 cachedir = os.path.expanduser('~/.hg.cvsps')
152 cachedir = os.path.expanduser('~/.hg.cvsps')
153 if not os.path.exists(cachedir):
153 if not os.path.exists(cachedir):
154 os.mkdir(cachedir)
154 os.mkdir(cachedir)
155
155
156 # The cvsps cache pickle needs a uniquified name, based on the
156 # The cvsps cache pickle needs a uniquified name, based on the
157 # repository location. The address may have all sort of nasties
157 # repository location. The address may have all sort of nasties
158 # in it, slashes, colons and such. So here we take just the
158 # in it, slashes, colons and such. So here we take just the
159 # alphanumerics, concatenated in a way that does not mix up the
159 # alphanumerics, concatenated in a way that does not mix up the
160 # various components, so that
160 # various components, so that
161 # :pserver:user@server:/path
161 # :pserver:user@server:/path
162 # and
162 # and
163 # /pserver/user/server/path
163 # /pserver/user/server/path
164 # are mapped to different cache file names.
164 # are mapped to different cache file names.
165 cachefile = root.split(":") + [directory, "cache"]
165 cachefile = root.split(":") + [directory, "cache"]
166 cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
166 cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
167 cachefile = os.path.join(cachedir,
167 cachefile = os.path.join(cachedir,
168 '.'.join([s for s in cachefile if s]))
168 '.'.join([s for s in cachefile if s]))
169
169
170 if cache == 'update':
170 if cache == 'update':
171 try:
171 try:
172 ui.note(_('reading cvs log cache %s\n') % cachefile)
172 ui.note(_('reading cvs log cache %s\n') % cachefile)
173 oldlog = pickle.load(open(cachefile))
173 oldlog = pickle.load(open(cachefile))
174 ui.note(_('cache has %d log entries\n') % len(oldlog))
174 ui.note(_('cache has %d log entries\n') % len(oldlog))
175 except Exception, e:
175 except Exception, e:
176 ui.note(_('error reading cache: %r\n') % e)
176 ui.note(_('error reading cache: %r\n') % e)
177
177
178 if oldlog:
178 if oldlog:
179 date = oldlog[-1].date # last commit date as a (time,tz) tuple
179 date = oldlog[-1].date # last commit date as a (time,tz) tuple
180 date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
180 date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
181
181
182 # build the CVS commandline
182 # build the CVS commandline
183 cmd = ['cvs', '-q']
183 cmd = ['cvs', '-q']
184 if root:
184 if root:
185 cmd.append('-d%s' % root)
185 cmd.append('-d%s' % root)
186 p = util.normpath(getrepopath(root))
186 p = util.normpath(getrepopath(root))
187 if not p.endswith('/'):
187 if not p.endswith('/'):
188 p += '/'
188 p += '/'
189 if prefix:
189 if prefix:
190 # looks like normpath replaces "" by "."
190 # looks like normpath replaces "" by "."
191 prefix = p + util.normpath(prefix)
191 prefix = p + util.normpath(prefix)
192 else:
192 else:
193 prefix = p
193 prefix = p
194 cmd.append(['log', 'rlog'][rlog])
194 cmd.append(['log', 'rlog'][rlog])
195 if date:
195 if date:
196 # no space between option and date string
196 # no space between option and date string
197 cmd.append('-d>%s' % date)
197 cmd.append('-d>%s' % date)
198 cmd.append(directory)
198 cmd.append(directory)
199
199
200 # state machine begins here
200 # state machine begins here
201 tags = {} # dictionary of revisions on current file with their tags
201 tags = {} # dictionary of revisions on current file with their tags
202 branchmap = {} # mapping between branch names and revision numbers
202 branchmap = {} # mapping between branch names and revision numbers
203 state = 0
203 state = 0
204 store = False # set when a new record can be appended
204 store = False # set when a new record can be appended
205
205
206 cmd = [util.shellquote(arg) for arg in cmd]
206 cmd = [util.shellquote(arg) for arg in cmd]
207 ui.note(_("running %s\n") % (' '.join(cmd)))
207 ui.note(_("running %s\n") % (' '.join(cmd)))
208 ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root))
208 ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root))
209
209
210 pfp = util.popen(' '.join(cmd))
210 pfp = util.popen(' '.join(cmd))
211 peek = pfp.readline()
211 peek = pfp.readline()
212 while True:
212 while True:
213 line = peek
213 line = peek
214 if line == '':
214 if line == '':
215 break
215 break
216 peek = pfp.readline()
216 peek = pfp.readline()
217 if line.endswith('\n'):
217 if line.endswith('\n'):
218 line = line[:-1]
218 line = line[:-1]
219 #ui.debug('state=%d line=%r\n' % (state, line))
219 #ui.debug('state=%d line=%r\n' % (state, line))
220
220
221 if state == 0:
221 if state == 0:
222 # initial state, consume input until we see 'RCS file'
222 # initial state, consume input until we see 'RCS file'
223 match = re_00.match(line)
223 match = re_00.match(line)
224 if match:
224 if match:
225 rcs = match.group(1)
225 rcs = match.group(1)
226 tags = {}
226 tags = {}
227 if rlog:
227 if rlog:
228 filename = util.normpath(rcs[:-2])
228 filename = util.normpath(rcs[:-2])
229 if filename.startswith(prefix):
229 if filename.startswith(prefix):
230 filename = filename[len(prefix):]
230 filename = filename[len(prefix):]
231 if filename.startswith('/'):
231 if filename.startswith('/'):
232 filename = filename[1:]
232 filename = filename[1:]
233 if filename.startswith('Attic/'):
233 if filename.startswith('Attic/'):
234 filename = filename[6:]
234 filename = filename[6:]
235 else:
235 else:
236 filename = filename.replace('/Attic/', '/')
236 filename = filename.replace('/Attic/', '/')
237 state = 2
237 state = 2
238 continue
238 continue
239 state = 1
239 state = 1
240 continue
240 continue
241 match = re_01.match(line)
241 match = re_01.match(line)
242 if match:
242 if match:
243 raise logerror(match.group(1))
243 raise logerror(match.group(1))
244 match = re_02.match(line)
244 match = re_02.match(line)
245 if match:
245 if match:
246 raise logerror(match.group(2))
246 raise logerror(match.group(2))
247 if re_03.match(line):
247 if re_03.match(line):
248 raise logerror(line)
248 raise logerror(line)
249
249
250 elif state == 1:
250 elif state == 1:
251 # expect 'Working file' (only when using log instead of rlog)
251 # expect 'Working file' (only when using log instead of rlog)
252 match = re_10.match(line)
252 match = re_10.match(line)
253 assert match, _('RCS file must be followed by working file')
253 assert match, _('RCS file must be followed by working file')
254 filename = util.normpath(match.group(1))
254 filename = util.normpath(match.group(1))
255 state = 2
255 state = 2
256
256
257 elif state == 2:
257 elif state == 2:
258 # expect 'symbolic names'
258 # expect 'symbolic names'
259 if re_20.match(line):
259 if re_20.match(line):
260 branchmap = {}
260 branchmap = {}
261 state = 3
261 state = 3
262
262
263 elif state == 3:
263 elif state == 3:
264 # read the symbolic names and store as tags
264 # read the symbolic names and store as tags
265 match = re_30.match(line)
265 match = re_30.match(line)
266 if match:
266 if match:
267 rev = [int(x) for x in match.group(2).split('.')]
267 rev = [int(x) for x in match.group(2).split('.')]
268
268
269 # Convert magic branch number to an odd-numbered one
269 # Convert magic branch number to an odd-numbered one
270 revn = len(rev)
270 revn = len(rev)
271 if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
271 if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
272 rev = rev[:-2] + rev[-1:]
272 rev = rev[:-2] + rev[-1:]
273 rev = tuple(rev)
273 rev = tuple(rev)
274
274
275 if rev not in tags:
275 if rev not in tags:
276 tags[rev] = []
276 tags[rev] = []
277 tags[rev].append(match.group(1))
277 tags[rev].append(match.group(1))
278 branchmap[match.group(1)] = match.group(2)
278 branchmap[match.group(1)] = match.group(2)
279
279
280 elif re_31.match(line):
280 elif re_31.match(line):
281 state = 5
281 state = 5
282 elif re_32.match(line):
282 elif re_32.match(line):
283 state = 0
283 state = 0
284
284
285 elif state == 4:
285 elif state == 4:
286 # expecting '------' separator before first revision
286 # expecting '------' separator before first revision
287 if re_31.match(line):
287 if re_31.match(line):
288 state = 5
288 state = 5
289 else:
289 else:
290 assert not re_32.match(line), _('must have at least '
290 assert not re_32.match(line), _('must have at least '
291 'some revisions')
291 'some revisions')
292
292
293 elif state == 5:
293 elif state == 5:
294 # expecting revision number and possibly (ignored) lock indication
294 # expecting revision number and possibly (ignored) lock indication
295 # we create the logentry here from values stored in states 0 to 4,
295 # we create the logentry here from values stored in states 0 to 4,
296 # as this state is re-entered for subsequent revisions of a file.
296 # as this state is re-entered for subsequent revisions of a file.
297 match = re_50.match(line)
297 match = re_50.match(line)
298 assert match, _('expected revision number')
298 assert match, _('expected revision number')
299 e = logentry(rcs=scache(rcs), file=scache(filename),
299 e = logentry(rcs=scache(rcs), file=scache(filename),
300 revision=tuple([int(x) for x in match.group(1).split('.')]),
300 revision=tuple([int(x) for x in match.group(1).split('.')]),
301 branches=[], parent=None)
301 branches=[], parent=None)
302 state = 6
302 state = 6
303
303
304 elif state == 6:
304 elif state == 6:
305 # expecting date, author, state, lines changed
305 # expecting date, author, state, lines changed
306 match = re_60.match(line)
306 match = re_60.match(line)
307 assert match, _('revision must be followed by date line')
307 assert match, _('revision must be followed by date line')
308 d = match.group(1)
308 d = match.group(1)
309 if d[2] == '/':
309 if d[2] == '/':
310 # Y2K
310 # Y2K
311 d = '19' + d
311 d = '19' + d
312
312
313 if len(d.split()) != 3:
313 if len(d.split()) != 3:
314 # cvs log dates always in GMT
314 # cvs log dates always in GMT
315 d = d + ' UTC'
315 d = d + ' UTC'
316 e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
316 e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
317 '%Y/%m/%d %H:%M:%S',
317 '%Y/%m/%d %H:%M:%S',
318 '%Y-%m-%d %H:%M:%S'])
318 '%Y-%m-%d %H:%M:%S'])
319 e.author = scache(match.group(2))
319 e.author = scache(match.group(2))
320 e.dead = match.group(3).lower() == 'dead'
320 e.dead = match.group(3).lower() == 'dead'
321
321
322 if match.group(5):
322 if match.group(5):
323 if match.group(6):
323 if match.group(6):
324 e.lines = (int(match.group(5)), int(match.group(6)))
324 e.lines = (int(match.group(5)), int(match.group(6)))
325 else:
325 else:
326 e.lines = (int(match.group(5)), 0)
326 e.lines = (int(match.group(5)), 0)
327 elif match.group(6):
327 elif match.group(6):
328 e.lines = (0, int(match.group(6)))
328 e.lines = (0, int(match.group(6)))
329 else:
329 else:
330 e.lines = None
330 e.lines = None
331
331
332 if match.group(7): # cvsnt mergepoint
332 if match.group(7): # cvsnt mergepoint
333 myrev = match.group(8).split('.')
333 myrev = match.group(8).split('.')
334 if len(myrev) == 2: # head
334 if len(myrev) == 2: # head
335 e.mergepoint = 'HEAD'
335 e.mergepoint = 'HEAD'
336 else:
336 else:
337 myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
337 myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
338 branches = [b for b in branchmap if branchmap[b] == myrev]
338 branches = [b for b in branchmap if branchmap[b] == myrev]
339 assert len(branches) == 1, 'unknown branch: %s' % e.mergepoint
339 assert len(branches) == 1, 'unknown branch: %s' % e.mergepoint
340 e.mergepoint = branches[0]
340 e.mergepoint = branches[0]
341 else:
341 else:
342 e.mergepoint = None
342 e.mergepoint = None
343 e.comment = []
343 e.comment = []
344 state = 7
344 state = 7
345
345
346 elif state == 7:
346 elif state == 7:
347 # read the revision numbers of branches that start at this revision
347 # read the revision numbers of branches that start at this revision
348 # or store the commit log message otherwise
348 # or store the commit log message otherwise
349 m = re_70.match(line)
349 m = re_70.match(line)
350 if m:
350 if m:
351 e.branches = [tuple([int(y) for y in x.strip().split('.')])
351 e.branches = [tuple([int(y) for y in x.strip().split('.')])
352 for x in m.group(1).split(';')]
352 for x in m.group(1).split(';')]
353 state = 8
353 state = 8
354 elif re_31.match(line) and re_50.match(peek):
354 elif re_31.match(line) and re_50.match(peek):
355 state = 5
355 state = 5
356 store = True
356 store = True
357 elif re_32.match(line):
357 elif re_32.match(line):
358 state = 0
358 state = 0
359 store = True
359 store = True
360 else:
360 else:
361 e.comment.append(line)
361 e.comment.append(line)
362
362
363 elif state == 8:
363 elif state == 8:
364 # store commit log message
364 # store commit log message
365 if re_31.match(line):
365 if re_31.match(line):
366 state = 5
366 cpeek = peek
367 store = True
367 if cpeek.endswith('\n'):
368 cpeek = cpeek[:-1]
369 if re_50.match(cpeek):
370 state = 5
371 store = True
372 else:
373 e.comment.append(line)
368 elif re_32.match(line):
374 elif re_32.match(line):
369 state = 0
375 state = 0
370 store = True
376 store = True
371 else:
377 else:
372 e.comment.append(line)
378 e.comment.append(line)
373
379
374 # When a file is added on a branch B1, CVS creates a synthetic
380 # When a file is added on a branch B1, CVS creates a synthetic
375 # dead trunk revision 1.1 so that the branch has a root.
381 # dead trunk revision 1.1 so that the branch has a root.
376 # Likewise, if you merge such a file to a later branch B2 (one
382 # Likewise, if you merge such a file to a later branch B2 (one
377 # that already existed when the file was added on B1), CVS
383 # that already existed when the file was added on B1), CVS
378 # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
384 # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
379 # these revisions now, but mark them synthetic so
385 # these revisions now, but mark them synthetic so
380 # createchangeset() can take care of them.
386 # createchangeset() can take care of them.
381 if (store and
387 if (store and
382 e.dead and
388 e.dead and
383 e.revision[-1] == 1 and # 1.1 or 1.1.x.1
389 e.revision[-1] == 1 and # 1.1 or 1.1.x.1
384 len(e.comment) == 1 and
390 len(e.comment) == 1 and
385 file_added_re.match(e.comment[0])):
391 file_added_re.match(e.comment[0])):
386 ui.debug('found synthetic revision in %s: %r\n'
392 ui.debug('found synthetic revision in %s: %r\n'
387 % (e.rcs, e.comment[0]))
393 % (e.rcs, e.comment[0]))
388 e.synthetic = True
394 e.synthetic = True
389
395
390 if store:
396 if store:
391 # clean up the results and save in the log.
397 # clean up the results and save in the log.
392 store = False
398 store = False
393 e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
399 e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
394 e.comment = scache('\n'.join(e.comment))
400 e.comment = scache('\n'.join(e.comment))
395
401
396 revn = len(e.revision)
402 revn = len(e.revision)
397 if revn > 3 and (revn % 2) == 0:
403 if revn > 3 and (revn % 2) == 0:
398 e.branch = tags.get(e.revision[:-1], [None])[0]
404 e.branch = tags.get(e.revision[:-1], [None])[0]
399 else:
405 else:
400 e.branch = None
406 e.branch = None
401
407
402 # find the branches starting from this revision
408 # find the branches starting from this revision
403 branchpoints = set()
409 branchpoints = set()
404 for branch, revision in branchmap.iteritems():
410 for branch, revision in branchmap.iteritems():
405 revparts = tuple([int(i) for i in revision.split('.')])
411 revparts = tuple([int(i) for i in revision.split('.')])
406 if len(revparts) < 2: # bad tags
412 if len(revparts) < 2: # bad tags
407 continue
413 continue
408 if revparts[-2] == 0 and revparts[-1] % 2 == 0:
414 if revparts[-2] == 0 and revparts[-1] % 2 == 0:
409 # normal branch
415 # normal branch
410 if revparts[:-2] == e.revision:
416 if revparts[:-2] == e.revision:
411 branchpoints.add(branch)
417 branchpoints.add(branch)
412 elif revparts == (1, 1, 1): # vendor branch
418 elif revparts == (1, 1, 1): # vendor branch
413 if revparts in e.branches:
419 if revparts in e.branches:
414 branchpoints.add(branch)
420 branchpoints.add(branch)
415 e.branchpoints = branchpoints
421 e.branchpoints = branchpoints
416
422
417 log.append(e)
423 log.append(e)
418
424
419 if len(log) % 100 == 0:
425 if len(log) % 100 == 0:
420 ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')
426 ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')
421
427
422 log.sort(key=lambda x: (x.rcs, x.revision))
428 log.sort(key=lambda x: (x.rcs, x.revision))
423
429
424 # find parent revisions of individual files
430 # find parent revisions of individual files
425 versions = {}
431 versions = {}
426 for e in log:
432 for e in log:
427 branch = e.revision[:-1]
433 branch = e.revision[:-1]
428 p = versions.get((e.rcs, branch), None)
434 p = versions.get((e.rcs, branch), None)
429 if p is None:
435 if p is None:
430 p = e.revision[:-2]
436 p = e.revision[:-2]
431 e.parent = p
437 e.parent = p
432 versions[(e.rcs, branch)] = e.revision
438 versions[(e.rcs, branch)] = e.revision
433
439
434 # update the log cache
440 # update the log cache
435 if cache:
441 if cache:
436 if log:
442 if log:
437 # join up the old and new logs
443 # join up the old and new logs
438 log.sort(key=lambda x: x.date)
444 log.sort(key=lambda x: x.date)
439
445
440 if oldlog and oldlog[-1].date >= log[0].date:
446 if oldlog and oldlog[-1].date >= log[0].date:
441 raise logerror(_('log cache overlaps with new log entries,'
447 raise logerror(_('log cache overlaps with new log entries,'
442 ' re-run without cache.'))
448 ' re-run without cache.'))
443
449
444 log = oldlog + log
450 log = oldlog + log
445
451
446 # write the new cachefile
452 # write the new cachefile
447 ui.note(_('writing cvs log cache %s\n') % cachefile)
453 ui.note(_('writing cvs log cache %s\n') % cachefile)
448 pickle.dump(log, open(cachefile, 'w'))
454 pickle.dump(log, open(cachefile, 'w'))
449 else:
455 else:
450 log = oldlog
456 log = oldlog
451
457
452 ui.status(_('%d log entries\n') % len(log))
458 ui.status(_('%d log entries\n') % len(log))
453
459
454 hook.hook(ui, None, "cvslog", True, log=log)
460 hook.hook(ui, None, "cvslog", True, log=log)
455
461
456 return log
462 return log
457
463
458
464
459 class changeset(object):
465 class changeset(object):
460 '''Class changeset has the following attributes:
466 '''Class changeset has the following attributes:
461 .id - integer identifying this changeset (list index)
467 .id - integer identifying this changeset (list index)
462 .author - author name as CVS knows it
468 .author - author name as CVS knows it
463 .branch - name of branch this changeset is on, or None
469 .branch - name of branch this changeset is on, or None
464 .comment - commit message
470 .comment - commit message
465 .date - the commit date as a (time,tz) tuple
471 .date - the commit date as a (time,tz) tuple
466 .entries - list of logentry objects in this changeset
472 .entries - list of logentry objects in this changeset
467 .parents - list of one or two parent changesets
473 .parents - list of one or two parent changesets
468 .tags - list of tags on this changeset
474 .tags - list of tags on this changeset
469 .synthetic - from synthetic revision "file ... added on branch ..."
475 .synthetic - from synthetic revision "file ... added on branch ..."
470 .mergepoint- the branch that has been merged from
476 .mergepoint- the branch that has been merged from
471 (if present in rlog output)
477 (if present in rlog output)
472 .branchpoints- the branches that start at the current entry
478 .branchpoints- the branches that start at the current entry
473 '''
479 '''
474 def __init__(self, **entries):
480 def __init__(self, **entries):
475 self.synthetic = False
481 self.synthetic = False
476 self.__dict__.update(entries)
482 self.__dict__.update(entries)
477
483
478 def __repr__(self):
484 def __repr__(self):
479 return "<%s at 0x%x: %s>" % (self.__class__.__name__,
485 return "<%s at 0x%x: %s>" % (self.__class__.__name__,
480 id(self),
486 id(self),
481 getattr(self, 'id', "(no id)"))
487 getattr(self, 'id', "(no id)"))
482
488
def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
    '''Convert log into changesets.

    log is a list of logentry objects, one per CVS file revision.
    Consecutive entries with the same comment, author, branch and (if
    present) branchpoints, whose dates lie within fuzz seconds of each
    other, are folded into a single changeset, provided no file appears
    twice in it.

    mergefrom/mergeto are regex patterns (or None for the defaults)
    matched against commit messages to synthesize merge parents.

    Returns the list of changeset objects, numbered from 1, with
    synthetic single-file changesets dropped.
    '''

    ui.status(_('creating changesets\n'))

    # Merge changesets

    log.sort(key=lambda x: (x.comment, x.author, x.branch, x.date))

    changesets = []
    files = set()
    c = None
    for i, e in enumerate(log):

        # Check if log entry belongs to the current changeset or not.

        # Since CVS is file centric, two different file revisions with
        # different branchpoints should be treated as belonging to two
        # different changesets (and the ordering is important and not
        # honoured by cvsps at this point).
        #
        # Consider the following case:
        # foo 1.1 branchpoints: [MYBRANCH]
        # bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2]
        #
        # Here foo is part only of MYBRANCH, but not MYBRANCH2, e.g. a
        # later version of foo may be in MYBRANCH2, so foo should be the
        # first changeset and bar the next and MYBRANCH and MYBRANCH2
        # should both start off of the bar changeset. No provisions are
        # made to ensure that this is, in fact, what happens.
        if not (c and
                e.comment == c.comment and
                e.author == c.author and
                e.branch == c.branch and
                (not util.safehasattr(e, 'branchpoints') or
                 not util.safehasattr (c, 'branchpoints') or
                 e.branchpoints == c.branchpoints) and
                ((c.date[0] + c.date[1]) <=
                 (e.date[0] + e.date[1]) <=
                 (c.date[0] + c.date[1]) + fuzz) and
                e.file not in files):
            c = changeset(comment=e.comment, author=e.author,
                          branch=e.branch, date=e.date, entries=[],
                          mergepoint=getattr(e, 'mergepoint', None),
                          branchpoints=getattr(e, 'branchpoints', set()))
            changesets.append(c)
            files = set()
            if len(changesets) % 100 == 0:
                t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
                ui.status(util.ellipsis(t, 80) + '\n')

        c.entries.append(e)
        files.add(e.file)
        c.date = e.date # changeset date is date of latest commit in it

    # Mark synthetic changesets

    for c in changesets:
        # Synthetic revisions always get their own changeset, because
        # the log message includes the filename. E.g. if you add file3
        # and file4 on a branch, you get four log entries and three
        # changesets:
        #   "File file3 was added on branch ..." (synthetic, 1 entry)
        #   "File file4 was added on branch ..." (synthetic, 1 entry)
        #   "Add file3 and file4 to fix ..."     (real, 2 entries)
        # Hence the check for 1 entry here.
        c.synthetic = len(c.entries) == 1 and c.entries[0].synthetic

    # Sort files in each changeset

    for c in changesets:
        def pathcompare(l, r):
            'Mimic cvsps sorting order'
            l = l.split('/')
            r = r.split('/')
            nl = len(l)
            nr = len(r)
            n = min(nl, nr)
            for i in range(n):
                if i + 1 == nl and nl < nr:
                    return -1
                elif i + 1 == nr and nl > nr:
                    return +1
                elif l[i] < r[i]:
                    return -1
                elif l[i] > r[i]:
                    return +1
            return 0
        def entitycompare(l, r):
            return pathcompare(l.file, r.file)

        c.entries.sort(entitycompare)

    # Sort changesets by date

    def cscmp(l, r):
        d = sum(l.date) - sum(r.date)
        if d:
            return d

        # detect vendor branches and initial commits on a branch
        le = {}
        for e in l.entries:
            le[e.rcs] = e.revision
        re = {}
        for e in r.entries:
            re[e.rcs] = e.revision

        d = 0
        for e in l.entries:
            if re.get(e.rcs, None) == e.parent:
                assert not d
                d = 1
                break

        for e in r.entries:
            if le.get(e.rcs, None) == e.parent:
                assert not d
                d = -1
                break

        return d

    changesets.sort(cscmp)

    # Collect tags

    globaltags = {}
    for c in changesets:
        for e in c.entries:
            for tag in e.tags:
                # remember which is the latest changeset to have this tag
                globaltags[tag] = c

    for c in changesets:
        tags = set()
        for e in c.entries:
            tags.update(e.tags)
        # remember tags only if this is the latest changeset to have it
        c.tags = sorted(tag for tag in tags if globaltags[tag] is c)

    # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
    # by inserting dummy changesets with two parents, and handle
    # {{mergefrombranch BRANCHNAME}} by setting two parents.

    if mergeto is None:
        mergeto = r'{{mergetobranch ([-\w]+)}}'
    if mergeto:
        mergeto = re.compile(mergeto)

    if mergefrom is None:
        mergefrom = r'{{mergefrombranch ([-\w]+)}}'
    if mergefrom:
        mergefrom = re.compile(mergefrom)

    versions = {} # changeset index where we saw any particular file version
    branches = {} # changeset index where we saw a branch
    n = len(changesets)
    i = 0
    while i < n:
        c = changesets[i]

        for f in c.entries:
            versions[(f.rcs, f.revision)] = i

        p = None
        if c.branch in branches:
            p = branches[c.branch]
        else:
            # first changeset on a new branch
            # the parent is a changeset with the branch in its
            # branchpoints such that it is the latest possible
            # commit without any intervening, unrelated commits.

            for candidate in xrange(i):
                if c.branch not in changesets[candidate].branchpoints:
                    if p is not None:
                        break
                    continue
                p = candidate

        c.parents = []
        if p is not None:
            p = changesets[p]

            # Ensure no changeset has a synthetic changeset as a parent.
            while p.synthetic:
                assert len(p.parents) <= 1, \
                       _('synthetic changeset cannot have multiple parents')
                if p.parents:
                    p = p.parents[0]
                else:
                    p = None
                    break

            if p is not None:
                c.parents.append(p)

        if c.mergepoint:
            if c.mergepoint == 'HEAD':
                c.mergepoint = None
            c.parents.append(changesets[branches[c.mergepoint]])

        if mergefrom:
            m = mergefrom.search(c.comment)
            if m:
                m = m.group(1)
                if m == 'HEAD':
                    m = None
                try:
                    candidate = changesets[branches[m]]
                except KeyError:
                    ui.warn(_("warning: CVS commit message references "
                              "non-existent branch %r:\n%s\n")
                            % (m, c.comment))
                if m in branches and c.branch != m and not candidate.synthetic:
                    c.parents.append(candidate)

        if mergeto:
            m = mergeto.search(c.comment)
            if m:
                try:
                    m = m.group(1)
                    if m == 'HEAD':
                        m = None
                # A user-supplied mergeto pattern may have no capture
                # group; m.group(1) then raises IndexError. Catch only
                # that (the old bare "except:" also swallowed
                # KeyboardInterrupt/SystemExit).
                except IndexError:
                    m = None # if no group found then merge to HEAD
                if m in branches and c.branch != m:
                    # insert empty changeset for merge
                    cc = changeset(
                        author=c.author, branch=m, date=c.date,
                        comment='convert-repo: CVS merge from branch %s'
                        % c.branch,
                        entries=[], tags=[],
                        parents=[changesets[branches[m]], c])
                    changesets.insert(i + 1, cc)
                    branches[m] = i + 1

                    # adjust our loop counters now we have inserted a new entry
                    n += 1
                    i += 2
                    continue

        branches[c.branch] = i
        i += 1

    # Drop synthetic changesets (safe now that we have ensured no other
    # changesets can have them as parents).
    i = 0
    while i < len(changesets):
        if changesets[i].synthetic:
            del changesets[i]
        else:
            i += 1

    # Number changesets

    for i, c in enumerate(changesets):
        c.id = i + 1

    ui.status(_('%d changeset entries\n') % len(changesets))

    hook.hook(ui, None, "cvschangesets", True, changesets=changesets)

    return changesets
748
754
749
755
def debugcvsps(ui, *args, **opts):
    '''Read CVS rlog for current directory or named path in
    repository, and convert the log to changesets based on matching
    commit log entries and dates.
    '''
    # Translate the mutually exclusive cache options into the mode
    # string understood by createlog.
    if opts["new_cache"]:
        cache = "write"
    elif opts["update_cache"]:
        cache = "update"
    else:
        cache = None

    revisions = opts["revisions"]

    try:
        if args:
            # Concatenate the logs of each named path.
            log = []
            for d in args:
                log += createlog(ui, d, root=opts["root"], cache=cache)
        else:
            log = createlog(ui, root=opts["root"], cache=cache)
    except logerror, e:
        ui.write("%r\n"%e)
        return

    changesets = createchangeset(ui, log, opts["fuzz"])
    del log

    # Print changesets (optionally filtered)

    off = len(revisions)  # suppress output until the start tag is seen
    branches = {} # latest version number in each branch
    ancestors = {} # parent branch
    for cs in changesets:

        if opts["ancestors"]:
            # Record, once per branch, which changeset the branch's
            # first parent lives on.
            if cs.branch not in branches and cs.parents and cs.parents[0].id:
                ancestors[cs.branch] = (changesets[cs.parents[0].id - 1].branch,
                                        cs.parents[0].id)
            branches[cs.branch] = cs.id

        # limit by branches
        if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
            continue

        if not off:
            # Note: trailing spaces on several lines here are needed to have
            #       bug-for-bug compatibility with cvsps.
            ui.write('---------------------\n')
            ui.write('PatchSet %d \n' % cs.id)
            ui.write('Date: %s\n' % util.datestr(cs.date,
                                                 '%Y/%m/%d %H:%M:%S %1%2'))
            ui.write('Author: %s\n' % cs.author)
            ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
            # Pluralize the label when more than one tag is present.
            ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
                                       ','.join(cs.tags) or '(none)'))
            branchpoints = getattr(cs, 'branchpoints', None)
            if branchpoints:
                ui.write('Branchpoints: %s \n' % ', '.join(branchpoints))
            if opts["parents"] and cs.parents:
                if len(cs.parents) > 1:
                    ui.write('Parents: %s\n' %
                             (','.join([str(p.id) for p in cs.parents])))
                else:
                    ui.write('Parent: %d\n' % cs.parents[0].id)

            if opts["ancestors"]:
                # Walk the ancestors map back to the trunk, collecting
                # branch:parent-id:latest-id triples.
                b = cs.branch
                r = []
                while b:
                    b, c = ancestors[b]
                    r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
                if r:
                    ui.write('Ancestors: %s\n' % (','.join(r)))

            ui.write('Log:\n')
            ui.write('%s\n\n' % cs.comment)
            ui.write('Members: \n')
            for f in cs.entries:
                fn = f.file
                # Strip the user-supplied path prefix from each member.
                if fn.startswith(opts["prefix"]):
                    fn = fn[len(opts["prefix"]):]
                ui.write('\t%s:%s->%s%s \n' % (
                        fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
                        '.'.join([str(x) for x in f.revision]),
                        ['', '(DEAD)'][f.dead]))
            ui.write('\n')

        # have we seen the start tag?
        if revisions and off:
            if revisions[0] == str(cs.id) or \
                revisions[0] in cs.tags:
                off = False

        # see if we reached the end tag
        if len(revisions) > 1 and not off:
            if revisions[1] == str(cs.id) or \
                revisions[1] in cs.tags:
                break
General Comments 0
You need to be logged in to leave comments. Login now