##// END OF EJS Templates
cvsps: use commitids (when present) to detect changesets...
Frank Kingswood -
r18261:1b7b5975 default
parent child Browse files
Show More
@@ -1,853 +1,866 b''
1 # Mercurial built-in replacement for cvsps.
1 # Mercurial built-in replacement for cvsps.
2 #
2 #
3 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
3 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import os
8 import os
9 import re
9 import re
10 import cPickle as pickle
10 import cPickle as pickle
11 from mercurial import util
11 from mercurial import util
12 from mercurial.i18n import _
12 from mercurial.i18n import _
13 from mercurial import hook
13 from mercurial import hook
14 from mercurial import util
14 from mercurial import util
15
15
16 class logentry(object):
16 class logentry(object):
17 '''Class logentry has the following attributes:
17 '''Class logentry has the following attributes:
18 .author - author name as CVS knows it
18 .author - author name as CVS knows it
19 .branch - name of branch this revision is on
19 .branch - name of branch this revision is on
20 .branches - revision tuple of branches starting at this revision
20 .branches - revision tuple of branches starting at this revision
21 .comment - commit message
21 .comment - commit message
22 .commitid - CVS commitid or None
22 .date - the commit date as a (time, tz) tuple
23 .date - the commit date as a (time, tz) tuple
23 .dead - true if file revision is dead
24 .dead - true if file revision is dead
24 .file - Name of file
25 .file - Name of file
25 .lines - a tuple (+lines, -lines) or None
26 .lines - a tuple (+lines, -lines) or None
26 .parent - Previous revision of this entry
27 .parent - Previous revision of this entry
27 .rcs - name of file as returned from CVS
28 .rcs - name of file as returned from CVS
28 .revision - revision number as tuple
29 .revision - revision number as tuple
29 .tags - list of tags on the file
30 .tags - list of tags on the file
30 .synthetic - is this a synthetic "file ... added on ..." revision?
31 .synthetic - is this a synthetic "file ... added on ..." revision?
31 .mergepoint- the branch that has been merged from
32 .mergepoint - the branch that has been merged from (if present in
32 (if present in rlog output)
33 rlog output) or None
33 .branchpoints- the branches that start at the current entry
34 .branchpoints - the branches that start at the current entry or empty
34 '''
35 '''
35 def __init__(self, **entries):
36 def __init__(self, **entries):
36 self.synthetic = False
37 self.synthetic = False
37 self.__dict__.update(entries)
38 self.__dict__.update(entries)
38
39
39 def __repr__(self):
40 def __repr__(self):
40 return "<%s at 0x%x: %s %s>" % (self.__class__.__name__,
41 items = ("%s=%r"%(k, self.__dict__[k]) for k in sorted(self.__dict__))
41 id(self),
42 return "%s(%s)"%(type(self).__name__, ", ".join(items))
42 self.file,
43 ".".join(map(str, self.revision)))
44
43
45 class logerror(Exception):
44 class logerror(Exception):
46 pass
45 pass
47
46
48 def getrepopath(cvspath):
47 def getrepopath(cvspath):
49 """Return the repository path from a CVS path.
48 """Return the repository path from a CVS path.
50
49
51 >>> getrepopath('/foo/bar')
50 >>> getrepopath('/foo/bar')
52 '/foo/bar'
51 '/foo/bar'
53 >>> getrepopath('c:/foo/bar')
52 >>> getrepopath('c:/foo/bar')
54 'c:/foo/bar'
53 'c:/foo/bar'
55 >>> getrepopath(':pserver:10/foo/bar')
54 >>> getrepopath(':pserver:10/foo/bar')
56 '/foo/bar'
55 '/foo/bar'
57 >>> getrepopath(':pserver:10c:/foo/bar')
56 >>> getrepopath(':pserver:10c:/foo/bar')
58 '/foo/bar'
57 '/foo/bar'
59 >>> getrepopath(':pserver:/foo/bar')
58 >>> getrepopath(':pserver:/foo/bar')
60 '/foo/bar'
59 '/foo/bar'
61 >>> getrepopath(':pserver:c:/foo/bar')
60 >>> getrepopath(':pserver:c:/foo/bar')
62 'c:/foo/bar'
61 'c:/foo/bar'
63 >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
62 >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
64 '/foo/bar'
63 '/foo/bar'
65 >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
64 >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
66 'c:/foo/bar'
65 'c:/foo/bar'
67 """
66 """
68 # According to CVS manual, CVS paths are expressed like:
67 # According to CVS manual, CVS paths are expressed like:
69 # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
68 # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
70 #
69 #
71 # Unfortunately, Windows absolute paths start with a drive letter
70 # Unfortunately, Windows absolute paths start with a drive letter
72 # like 'c:' making it harder to parse. Here we assume that drive
71 # like 'c:' making it harder to parse. Here we assume that drive
73 # letters are only one character long and any CVS component before
72 # letters are only one character long and any CVS component before
74 # the repository path is at least 2 characters long, and use this
73 # the repository path is at least 2 characters long, and use this
75 # to disambiguate.
74 # to disambiguate.
76 parts = cvspath.split(':')
75 parts = cvspath.split(':')
77 if len(parts) == 1:
76 if len(parts) == 1:
78 return parts[0]
77 return parts[0]
79 # Here there is an ambiguous case if we have a port number
78 # Here there is an ambiguous case if we have a port number
80 # immediately followed by a Windows driver letter. We assume this
79 # immediately followed by a Windows driver letter. We assume this
81 # never happens and decide it must be CVS path component,
80 # never happens and decide it must be CVS path component,
82 # therefore ignoring it.
81 # therefore ignoring it.
83 if len(parts[-2]) > 1:
82 if len(parts[-2]) > 1:
84 return parts[-1].lstrip('0123456789')
83 return parts[-1].lstrip('0123456789')
85 return parts[-2] + ':' + parts[-1]
84 return parts[-2] + ':' + parts[-1]
86
85
87 def createlog(ui, directory=None, root="", rlog=True, cache=None):
86 def createlog(ui, directory=None, root="", rlog=True, cache=None):
88 '''Collect the CVS rlog'''
87 '''Collect the CVS rlog'''
89
88
90 # Because we store many duplicate commit log messages, reusing strings
89 # Because we store many duplicate commit log messages, reusing strings
91 # saves a lot of memory and pickle storage space.
90 # saves a lot of memory and pickle storage space.
92 _scache = {}
91 _scache = {}
93 def scache(s):
92 def scache(s):
94 "return a shared version of a string"
93 "return a shared version of a string"
95 return _scache.setdefault(s, s)
94 return _scache.setdefault(s, s)
96
95
97 ui.status(_('collecting CVS rlog\n'))
96 ui.status(_('collecting CVS rlog\n'))
98
97
99 log = [] # list of logentry objects containing the CVS state
98 log = [] # list of logentry objects containing the CVS state
100
99
101 # patterns to match in CVS (r)log output, by state of use
100 # patterns to match in CVS (r)log output, by state of use
102 re_00 = re.compile('RCS file: (.+)$')
101 re_00 = re.compile('RCS file: (.+)$')
103 re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
102 re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
104 re_02 = re.compile('cvs (r?log|server): (.+)\n$')
103 re_02 = re.compile('cvs (r?log|server): (.+)\n$')
105 re_03 = re.compile("(Cannot access.+CVSROOT)|"
104 re_03 = re.compile("(Cannot access.+CVSROOT)|"
106 "(can't create temporary directory.+)$")
105 "(can't create temporary directory.+)$")
107 re_10 = re.compile('Working file: (.+)$')
106 re_10 = re.compile('Working file: (.+)$')
108 re_20 = re.compile('symbolic names:')
107 re_20 = re.compile('symbolic names:')
109 re_30 = re.compile('\t(.+): ([\\d.]+)$')
108 re_30 = re.compile('\t(.+): ([\\d.]+)$')
110 re_31 = re.compile('----------------------------$')
109 re_31 = re.compile('----------------------------$')
111 re_32 = re.compile('======================================='
110 re_32 = re.compile('======================================='
112 '======================================$')
111 '======================================$')
113 re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
112 re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
114 re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
113 re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
115 r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
114 r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
115 r'(\s+commitid:\s+([^;]+);)?'
116 r'(.*mergepoint:\s+([^;]+);)?')
116 r'(.*mergepoint:\s+([^;]+);)?')
117 re_70 = re.compile('branches: (.+);$')
117 re_70 = re.compile('branches: (.+);$')
118
118
119 file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')
119 file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')
120
120
121 prefix = '' # leading path to strip of what we get from CVS
121 prefix = '' # leading path to strip of what we get from CVS
122
122
123 if directory is None:
123 if directory is None:
124 # Current working directory
124 # Current working directory
125
125
126 # Get the real directory in the repository
126 # Get the real directory in the repository
127 try:
127 try:
128 prefix = open(os.path.join('CVS','Repository')).read().strip()
128 prefix = open(os.path.join('CVS','Repository')).read().strip()
129 directory = prefix
129 directory = prefix
130 if prefix == ".":
130 if prefix == ".":
131 prefix = ""
131 prefix = ""
132 except IOError:
132 except IOError:
133 raise logerror(_('not a CVS sandbox'))
133 raise logerror(_('not a CVS sandbox'))
134
134
135 if prefix and not prefix.endswith(os.sep):
135 if prefix and not prefix.endswith(os.sep):
136 prefix += os.sep
136 prefix += os.sep
137
137
138 # Use the Root file in the sandbox, if it exists
138 # Use the Root file in the sandbox, if it exists
139 try:
139 try:
140 root = open(os.path.join('CVS','Root')).read().strip()
140 root = open(os.path.join('CVS','Root')).read().strip()
141 except IOError:
141 except IOError:
142 pass
142 pass
143
143
144 if not root:
144 if not root:
145 root = os.environ.get('CVSROOT', '')
145 root = os.environ.get('CVSROOT', '')
146
146
147 # read log cache if one exists
147 # read log cache if one exists
148 oldlog = []
148 oldlog = []
149 date = None
149 date = None
150
150
151 if cache:
151 if cache:
152 cachedir = os.path.expanduser('~/.hg.cvsps')
152 cachedir = os.path.expanduser('~/.hg.cvsps')
153 if not os.path.exists(cachedir):
153 if not os.path.exists(cachedir):
154 os.mkdir(cachedir)
154 os.mkdir(cachedir)
155
155
156 # The cvsps cache pickle needs a uniquified name, based on the
156 # The cvsps cache pickle needs a uniquified name, based on the
157 # repository location. The address may have all sort of nasties
157 # repository location. The address may have all sort of nasties
158 # in it, slashes, colons and such. So here we take just the
158 # in it, slashes, colons and such. So here we take just the
159 # alphanumeric characters, concatenated in a way that does not
159 # alphanumeric characters, concatenated in a way that does not
160 # mix up the various components, so that
160 # mix up the various components, so that
161 # :pserver:user@server:/path
161 # :pserver:user@server:/path
162 # and
162 # and
163 # /pserver/user/server/path
163 # /pserver/user/server/path
164 # are mapped to different cache file names.
164 # are mapped to different cache file names.
165 cachefile = root.split(":") + [directory, "cache"]
165 cachefile = root.split(":") + [directory, "cache"]
166 cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
166 cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
167 cachefile = os.path.join(cachedir,
167 cachefile = os.path.join(cachedir,
168 '.'.join([s for s in cachefile if s]))
168 '.'.join([s for s in cachefile if s]))
169
169
170 if cache == 'update':
170 if cache == 'update':
171 try:
171 try:
172 ui.note(_('reading cvs log cache %s\n') % cachefile)
172 ui.note(_('reading cvs log cache %s\n') % cachefile)
173 oldlog = pickle.load(open(cachefile))
173 oldlog = pickle.load(open(cachefile))
174 for e in oldlog:
175 if not (util.safehasattr(e, 'branchpoints') and
176 util.safehasattr(e, 'commitid') and
177 util.safehasattr(e, 'mergepoint')):
178 ui.status(_('ignoring old cache\n'))
179 oldlog = []
180 break
181
174 ui.note(_('cache has %d log entries\n') % len(oldlog))
182 ui.note(_('cache has %d log entries\n') % len(oldlog))
175 except Exception, e:
183 except Exception, e:
176 ui.note(_('error reading cache: %r\n') % e)
184 ui.note(_('error reading cache: %r\n') % e)
177
185
178 if oldlog:
186 if oldlog:
179 date = oldlog[-1].date # last commit date as a (time,tz) tuple
187 date = oldlog[-1].date # last commit date as a (time,tz) tuple
180 date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
188 date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
181
189
182 # build the CVS commandline
190 # build the CVS commandline
183 cmd = ['cvs', '-q']
191 cmd = ['cvs', '-q']
184 if root:
192 if root:
185 cmd.append('-d%s' % root)
193 cmd.append('-d%s' % root)
186 p = util.normpath(getrepopath(root))
194 p = util.normpath(getrepopath(root))
187 if not p.endswith('/'):
195 if not p.endswith('/'):
188 p += '/'
196 p += '/'
189 if prefix:
197 if prefix:
190 # looks like normpath replaces "" by "."
198 # looks like normpath replaces "" by "."
191 prefix = p + util.normpath(prefix)
199 prefix = p + util.normpath(prefix)
192 else:
200 else:
193 prefix = p
201 prefix = p
194 cmd.append(['log', 'rlog'][rlog])
202 cmd.append(['log', 'rlog'][rlog])
195 if date:
203 if date:
196 # no space between option and date string
204 # no space between option and date string
197 cmd.append('-d>%s' % date)
205 cmd.append('-d>%s' % date)
198 cmd.append(directory)
206 cmd.append(directory)
199
207
200 # state machine begins here
208 # state machine begins here
201 tags = {} # dictionary of revisions on current file with their tags
209 tags = {} # dictionary of revisions on current file with their tags
202 branchmap = {} # mapping between branch names and revision numbers
210 branchmap = {} # mapping between branch names and revision numbers
203 state = 0
211 state = 0
204 store = False # set when a new record can be appended
212 store = False # set when a new record can be appended
205
213
206 cmd = [util.shellquote(arg) for arg in cmd]
214 cmd = [util.shellquote(arg) for arg in cmd]
207 ui.note(_("running %s\n") % (' '.join(cmd)))
215 ui.note(_("running %s\n") % (' '.join(cmd)))
208 ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root))
216 ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root))
209
217
210 pfp = util.popen(' '.join(cmd))
218 pfp = util.popen(' '.join(cmd))
211 peek = pfp.readline()
219 peek = pfp.readline()
212 while True:
220 while True:
213 line = peek
221 line = peek
214 if line == '':
222 if line == '':
215 break
223 break
216 peek = pfp.readline()
224 peek = pfp.readline()
217 if line.endswith('\n'):
225 if line.endswith('\n'):
218 line = line[:-1]
226 line = line[:-1]
219 #ui.debug('state=%d line=%r\n' % (state, line))
227 #ui.debug('state=%d line=%r\n' % (state, line))
220
228
221 if state == 0:
229 if state == 0:
222 # initial state, consume input until we see 'RCS file'
230 # initial state, consume input until we see 'RCS file'
223 match = re_00.match(line)
231 match = re_00.match(line)
224 if match:
232 if match:
225 rcs = match.group(1)
233 rcs = match.group(1)
226 tags = {}
234 tags = {}
227 if rlog:
235 if rlog:
228 filename = util.normpath(rcs[:-2])
236 filename = util.normpath(rcs[:-2])
229 if filename.startswith(prefix):
237 if filename.startswith(prefix):
230 filename = filename[len(prefix):]
238 filename = filename[len(prefix):]
231 if filename.startswith('/'):
239 if filename.startswith('/'):
232 filename = filename[1:]
240 filename = filename[1:]
233 if filename.startswith('Attic/'):
241 if filename.startswith('Attic/'):
234 filename = filename[6:]
242 filename = filename[6:]
235 else:
243 else:
236 filename = filename.replace('/Attic/', '/')
244 filename = filename.replace('/Attic/', '/')
237 state = 2
245 state = 2
238 continue
246 continue
239 state = 1
247 state = 1
240 continue
248 continue
241 match = re_01.match(line)
249 match = re_01.match(line)
242 if match:
250 if match:
243 raise logerror(match.group(1))
251 raise logerror(match.group(1))
244 match = re_02.match(line)
252 match = re_02.match(line)
245 if match:
253 if match:
246 raise logerror(match.group(2))
254 raise logerror(match.group(2))
247 if re_03.match(line):
255 if re_03.match(line):
248 raise logerror(line)
256 raise logerror(line)
249
257
250 elif state == 1:
258 elif state == 1:
251 # expect 'Working file' (only when using log instead of rlog)
259 # expect 'Working file' (only when using log instead of rlog)
252 match = re_10.match(line)
260 match = re_10.match(line)
253 assert match, _('RCS file must be followed by working file')
261 assert match, _('RCS file must be followed by working file')
254 filename = util.normpath(match.group(1))
262 filename = util.normpath(match.group(1))
255 state = 2
263 state = 2
256
264
257 elif state == 2:
265 elif state == 2:
258 # expect 'symbolic names'
266 # expect 'symbolic names'
259 if re_20.match(line):
267 if re_20.match(line):
260 branchmap = {}
268 branchmap = {}
261 state = 3
269 state = 3
262
270
263 elif state == 3:
271 elif state == 3:
264 # read the symbolic names and store as tags
272 # read the symbolic names and store as tags
265 match = re_30.match(line)
273 match = re_30.match(line)
266 if match:
274 if match:
267 rev = [int(x) for x in match.group(2).split('.')]
275 rev = [int(x) for x in match.group(2).split('.')]
268
276
269 # Convert magic branch number to an odd-numbered one
277 # Convert magic branch number to an odd-numbered one
270 revn = len(rev)
278 revn = len(rev)
271 if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
279 if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
272 rev = rev[:-2] + rev[-1:]
280 rev = rev[:-2] + rev[-1:]
273 rev = tuple(rev)
281 rev = tuple(rev)
274
282
275 if rev not in tags:
283 if rev not in tags:
276 tags[rev] = []
284 tags[rev] = []
277 tags[rev].append(match.group(1))
285 tags[rev].append(match.group(1))
278 branchmap[match.group(1)] = match.group(2)
286 branchmap[match.group(1)] = match.group(2)
279
287
280 elif re_31.match(line):
288 elif re_31.match(line):
281 state = 5
289 state = 5
282 elif re_32.match(line):
290 elif re_32.match(line):
283 state = 0
291 state = 0
284
292
285 elif state == 4:
293 elif state == 4:
286 # expecting '------' separator before first revision
294 # expecting '------' separator before first revision
287 if re_31.match(line):
295 if re_31.match(line):
288 state = 5
296 state = 5
289 else:
297 else:
290 assert not re_32.match(line), _('must have at least '
298 assert not re_32.match(line), _('must have at least '
291 'some revisions')
299 'some revisions')
292
300
293 elif state == 5:
301 elif state == 5:
294 # expecting revision number and possibly (ignored) lock indication
302 # expecting revision number and possibly (ignored) lock indication
295 # we create the logentry here from values stored in states 0 to 4,
303 # we create the logentry here from values stored in states 0 to 4,
296 # as this state is re-entered for subsequent revisions of a file.
304 # as this state is re-entered for subsequent revisions of a file.
297 match = re_50.match(line)
305 match = re_50.match(line)
298 assert match, _('expected revision number')
306 assert match, _('expected revision number')
299 e = logentry(rcs=scache(rcs), file=scache(filename),
307 e = logentry(rcs=scache(rcs), file=scache(filename),
300 revision=tuple([int(x) for x in match.group(1).split('.')]),
308 revision=tuple([int(x) for x in match.group(1).split('.')]),
301 branches=[], parent=None)
309 branches=[], parent=None, commitid=None, mergepoint=None, branchpoints=set())
310
302 state = 6
311 state = 6
303
312
304 elif state == 6:
313 elif state == 6:
305 # expecting date, author, state, lines changed
314 # expecting date, author, state, lines changed
306 match = re_60.match(line)
315 match = re_60.match(line)
307 assert match, _('revision must be followed by date line')
316 assert match, _('revision must be followed by date line')
308 d = match.group(1)
317 d = match.group(1)
309 if d[2] == '/':
318 if d[2] == '/':
310 # Y2K
319 # Y2K
311 d = '19' + d
320 d = '19' + d
312
321
313 if len(d.split()) != 3:
322 if len(d.split()) != 3:
314 # cvs log dates always in GMT
323 # cvs log dates always in GMT
315 d = d + ' UTC'
324 d = d + ' UTC'
316 e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
325 e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
317 '%Y/%m/%d %H:%M:%S',
326 '%Y/%m/%d %H:%M:%S',
318 '%Y-%m-%d %H:%M:%S'])
327 '%Y-%m-%d %H:%M:%S'])
319 e.author = scache(match.group(2))
328 e.author = scache(match.group(2))
320 e.dead = match.group(3).lower() == 'dead'
329 e.dead = match.group(3).lower() == 'dead'
321
330
322 if match.group(5):
331 if match.group(5):
323 if match.group(6):
332 if match.group(6):
324 e.lines = (int(match.group(5)), int(match.group(6)))
333 e.lines = (int(match.group(5)), int(match.group(6)))
325 else:
334 else:
326 e.lines = (int(match.group(5)), 0)
335 e.lines = (int(match.group(5)), 0)
327 elif match.group(6):
336 elif match.group(6):
328 e.lines = (0, int(match.group(6)))
337 e.lines = (0, int(match.group(6)))
329 else:
338 else:
330 e.lines = None
339 e.lines = None
331
340
332 if match.group(7): # cvsnt mergepoint
341 if match.group(7): # cvs 1.12 commitid
333 myrev = match.group(8).split('.')
342 e.commitid = match.group(8)
343
344 if match.group(9): # cvsnt mergepoint
345 myrev = match.group(10).split('.')
334 if len(myrev) == 2: # head
346 if len(myrev) == 2: # head
335 e.mergepoint = 'HEAD'
347 e.mergepoint = 'HEAD'
336 else:
348 else:
337 myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
349 myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
338 branches = [b for b in branchmap if branchmap[b] == myrev]
350 branches = [b for b in branchmap if branchmap[b] == myrev]
339 assert len(branches) == 1, ('unknown branch: %s'
351 assert len(branches) == 1, ('unknown branch: %s'
340 % e.mergepoint)
352 % e.mergepoint)
341 e.mergepoint = branches[0]
353 e.mergepoint = branches[0]
342 else:
354
343 e.mergepoint = None
344 e.comment = []
355 e.comment = []
345 state = 7
356 state = 7
346
357
347 elif state == 7:
358 elif state == 7:
348 # read the revision numbers of branches that start at this revision
359 # read the revision numbers of branches that start at this revision
349 # or store the commit log message otherwise
360 # or store the commit log message otherwise
350 m = re_70.match(line)
361 m = re_70.match(line)
351 if m:
362 if m:
352 e.branches = [tuple([int(y) for y in x.strip().split('.')])
363 e.branches = [tuple([int(y) for y in x.strip().split('.')])
353 for x in m.group(1).split(';')]
364 for x in m.group(1).split(';')]
354 state = 8
365 state = 8
355 elif re_31.match(line) and re_50.match(peek):
366 elif re_31.match(line) and re_50.match(peek):
356 state = 5
367 state = 5
357 store = True
368 store = True
358 elif re_32.match(line):
369 elif re_32.match(line):
359 state = 0
370 state = 0
360 store = True
371 store = True
361 else:
372 else:
362 e.comment.append(line)
373 e.comment.append(line)
363
374
364 elif state == 8:
375 elif state == 8:
365 # store commit log message
376 # store commit log message
366 if re_31.match(line):
377 if re_31.match(line):
367 cpeek = peek
378 cpeek = peek
368 if cpeek.endswith('\n'):
379 if cpeek.endswith('\n'):
369 cpeek = cpeek[:-1]
380 cpeek = cpeek[:-1]
370 if re_50.match(cpeek):
381 if re_50.match(cpeek):
371 state = 5
382 state = 5
372 store = True
383 store = True
373 else:
384 else:
374 e.comment.append(line)
385 e.comment.append(line)
375 elif re_32.match(line):
386 elif re_32.match(line):
376 state = 0
387 state = 0
377 store = True
388 store = True
378 else:
389 else:
379 e.comment.append(line)
390 e.comment.append(line)
380
391
381 # When a file is added on a branch B1, CVS creates a synthetic
392 # When a file is added on a branch B1, CVS creates a synthetic
382 # dead trunk revision 1.1 so that the branch has a root.
393 # dead trunk revision 1.1 so that the branch has a root.
383 # Likewise, if you merge such a file to a later branch B2 (one
394 # Likewise, if you merge such a file to a later branch B2 (one
384 # that already existed when the file was added on B1), CVS
395 # that already existed when the file was added on B1), CVS
385 # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
396 # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
386 # these revisions now, but mark them synthetic so
397 # these revisions now, but mark them synthetic so
387 # createchangeset() can take care of them.
398 # createchangeset() can take care of them.
388 if (store and
399 if (store and
389 e.dead and
400 e.dead and
390 e.revision[-1] == 1 and # 1.1 or 1.1.x.1
401 e.revision[-1] == 1 and # 1.1 or 1.1.x.1
391 len(e.comment) == 1 and
402 len(e.comment) == 1 and
392 file_added_re.match(e.comment[0])):
403 file_added_re.match(e.comment[0])):
393 ui.debug('found synthetic revision in %s: %r\n'
404 ui.debug('found synthetic revision in %s: %r\n'
394 % (e.rcs, e.comment[0]))
405 % (e.rcs, e.comment[0]))
395 e.synthetic = True
406 e.synthetic = True
396
407
397 if store:
408 if store:
398 # clean up the results and save in the log.
409 # clean up the results and save in the log.
399 store = False
410 store = False
400 e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
411 e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
401 e.comment = scache('\n'.join(e.comment))
412 e.comment = scache('\n'.join(e.comment))
402
413
403 revn = len(e.revision)
414 revn = len(e.revision)
404 if revn > 3 and (revn % 2) == 0:
415 if revn > 3 and (revn % 2) == 0:
405 e.branch = tags.get(e.revision[:-1], [None])[0]
416 e.branch = tags.get(e.revision[:-1], [None])[0]
406 else:
417 else:
407 e.branch = None
418 e.branch = None
408
419
409 # find the branches starting from this revision
420 # find the branches starting from this revision
410 branchpoints = set()
421 branchpoints = set()
411 for branch, revision in branchmap.iteritems():
422 for branch, revision in branchmap.iteritems():
412 revparts = tuple([int(i) for i in revision.split('.')])
423 revparts = tuple([int(i) for i in revision.split('.')])
413 if len(revparts) < 2: # bad tags
424 if len(revparts) < 2: # bad tags
414 continue
425 continue
415 if revparts[-2] == 0 and revparts[-1] % 2 == 0:
426 if revparts[-2] == 0 and revparts[-1] % 2 == 0:
416 # normal branch
427 # normal branch
417 if revparts[:-2] == e.revision:
428 if revparts[:-2] == e.revision:
418 branchpoints.add(branch)
429 branchpoints.add(branch)
419 elif revparts == (1, 1, 1): # vendor branch
430 elif revparts == (1, 1, 1): # vendor branch
420 if revparts in e.branches:
431 if revparts in e.branches:
421 branchpoints.add(branch)
432 branchpoints.add(branch)
422 e.branchpoints = branchpoints
433 e.branchpoints = branchpoints
423
434
424 log.append(e)
435 log.append(e)
425
436
426 if len(log) % 100 == 0:
437 if len(log) % 100 == 0:
427 ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')
438 ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')
428
439
429 log.sort(key=lambda x: (x.rcs, x.revision))
440 log.sort(key=lambda x: (x.rcs, x.revision))
430
441
431 # find parent revisions of individual files
442 # find parent revisions of individual files
432 versions = {}
443 versions = {}
433 for e in log:
444 for e in log:
434 branch = e.revision[:-1]
445 branch = e.revision[:-1]
435 p = versions.get((e.rcs, branch), None)
446 p = versions.get((e.rcs, branch), None)
436 if p is None:
447 if p is None:
437 p = e.revision[:-2]
448 p = e.revision[:-2]
438 e.parent = p
449 e.parent = p
439 versions[(e.rcs, branch)] = e.revision
450 versions[(e.rcs, branch)] = e.revision
440
451
441 # update the log cache
452 # update the log cache
442 if cache:
453 if cache:
443 if log:
454 if log:
444 # join up the old and new logs
455 # join up the old and new logs
445 log.sort(key=lambda x: x.date)
456 log.sort(key=lambda x: x.date)
446
457
447 if oldlog and oldlog[-1].date >= log[0].date:
458 if oldlog and oldlog[-1].date >= log[0].date:
448 raise logerror(_('log cache overlaps with new log entries,'
459 raise logerror(_('log cache overlaps with new log entries,'
449 ' re-run without cache.'))
460 ' re-run without cache.'))
450
461
451 log = oldlog + log
462 log = oldlog + log
452
463
453 # write the new cachefile
464 # write the new cachefile
454 ui.note(_('writing cvs log cache %s\n') % cachefile)
465 ui.note(_('writing cvs log cache %s\n') % cachefile)
455 pickle.dump(log, open(cachefile, 'w'))
466 pickle.dump(log, open(cachefile, 'w'))
456 else:
467 else:
457 log = oldlog
468 log = oldlog
458
469
459 ui.status(_('%d log entries\n') % len(log))
470 ui.status(_('%d log entries\n') % len(log))
460
471
461 hook.hook(ui, None, "cvslog", True, log=log)
472 hook.hook(ui, None, "cvslog", True, log=log)
462
473
463 return log
474 return log
464
475
465
476
466 class changeset(object):
477 class changeset(object):
467 '''Class changeset has the following attributes:
478 '''Class changeset has the following attributes:
468 .id - integer identifying this changeset (list index)
479 .id - integer identifying this changeset (list index)
469 .author - author name as CVS knows it
480 .author - author name as CVS knows it
470 .branch - name of branch this changeset is on, or None
481 .branch - name of branch this changeset is on, or None
471 .comment - commit message
482 .comment - commit message
483 .commitid - CVS commitid or None
472 .date - the commit date as a (time,tz) tuple
484 .date - the commit date as a (time,tz) tuple
473 .entries - list of logentry objects in this changeset
485 .entries - list of logentry objects in this changeset
474 .parents - list of one or two parent changesets
486 .parents - list of one or two parent changesets
475 .tags - list of tags on this changeset
487 .tags - list of tags on this changeset
476 .synthetic - from synthetic revision "file ... added on branch ..."
488 .synthetic - from synthetic revision "file ... added on branch ..."
477 .mergepoint- the branch that has been merged from
489 .mergepoint- the branch that has been merged from or None
478 (if present in rlog output)
490 .branchpoints- the branches that start at the current entry or empty
479 .branchpoints- the branches that start at the current entry
480 '''
491 '''
481 def __init__(self, **entries):
492 def __init__(self, **entries):
482 self.synthetic = False
493 self.synthetic = False
483 self.__dict__.update(entries)
494 self.__dict__.update(entries)
484
495
485 def __repr__(self):
496 def __repr__(self):
486 return "<%s at 0x%x: %s>" % (self.__class__.__name__,
497 items = ("%s=%r"%(k, self.__dict__[k]) for k in sorted(self.__dict__))
487 id(self),
498 return "%s(%s)"%(type(self).__name__, ", ".join(items))
488 getattr(self, 'id', "(no id)"))
489
499
def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
    '''Convert log into changesets.

    ui        - Mercurial ui object, used for progress output and hooks
    log       - list of logentry objects, one per CVS file revision
    fuzz      - maximum spread in seconds between file commits that may
                still be merged into one changeset when commitids are
                not available (fuzzy detection)
    mergefrom - regex (string) matching comments that merge from a
                branch; default r'{{mergefrombranch ([-\w]+)}}'
    mergeto   - regex (string) matching comments that merge to a
                branch; default r'{{mergetobranch ([-\w]+)}}'

    Returns the list of changeset objects, sorted, numbered (c.id) and
    with parents/tags/synthetic flags filled in.
    '''

    ui.status(_('creating changesets\n'))

    # Merge changesets.  Sorting on commitid first means entries that
    # share a CVS commitid (when the server provides one) are adjacent
    # and can be grouped exactly; the remaining keys reproduce the old
    # fuzzy ordering for servers without commitids.
    log.sort(key=lambda x: (x.commitid, x.comment, x.author, x.branch, x.date,
                            x.branchpoints))

    changesets = []
    files = set()
    c = None
    for i, e in enumerate(log):

        # Check if log entry belongs to the current changeset or not.

        # Since CVS is file-centric, two different file revisions with
        # different branchpoints should be treated as belonging to two
        # different changesets (and the ordering is important and not
        # honoured by cvsps at this point).
        #
        # Consider the following case:
        # foo 1.1 branchpoints: [MYBRANCH]
        # bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2]
        #
        # Here foo is part only of MYBRANCH, but not MYBRANCH2, e.g. a
        # later version of foo may be in MYBRANCH2, so foo should be the
        # first changeset and bar the next and MYBRANCH and MYBRANCH2
        # should both start off of the bar changeset. No provisions are
        # made to ensure that this is, in fact, what happens.
        if not (c and e.branchpoints == c.branchpoints and
                ( # cvs commitids: exact grouping when both entries carry one
                  (e.commitid is not None and e.commitid == c.commitid)
                or
                  ( # no commitids, use fuzzy commit detection
                    (e.commitid is None or c.commitid is None) and
                     e.comment == c.comment and
                     e.author == c.author and
                     e.branch == c.branch and
                     ((c.date[0] + c.date[1]) <=
                      (e.date[0] + e.date[1]) <=
                      (c.date[0] + c.date[1]) + fuzz) and
                     e.file not in files
                  )
                )):
            c = changeset(comment=e.comment, author=e.author,
                          branch=e.branch, date=e.date,
                          entries=[], mergepoint=e.mergepoint,
                          branchpoints=e.branchpoints, commitid=e.commitid)
            changesets.append(c)

            files = set()
            if len(changesets) % 100 == 0:
                t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
                ui.status(util.ellipsis(t, 80) + '\n')

        c.entries.append(e)
        files.add(e.file)
        c.date = e.date       # changeset date is date of latest commit in it

    # Mark synthetic changesets

    for c in changesets:
        # Synthetic revisions always get their own changeset, because
        # the log message includes the filename.  E.g. if you add file3
        # and file4 on a branch, you get four log entries and three
        # changesets:
        #   "File file3 was added on branch ..." (synthetic, 1 entry)
        #   "File file4 was added on branch ..." (synthetic, 1 entry)
        #   "Add file3 and file4 to fix ..."     (real, 2 entries)
        # Hence the check for 1 entry here.
        c.synthetic = len(c.entries) == 1 and c.entries[0].synthetic

    # Sort files in each changeset

    def entitycompare(l, r):
        'Mimic cvsps sorting order'
        l = l.file.split('/')
        r = r.file.split('/')
        nl = len(l)
        nr = len(r)
        n = min(nl, nr)
        for i in range(n):
            if i + 1 == nl and nl < nr:
                return -1
            elif i + 1 == nr and nl > nr:
                return +1
            elif l[i] < r[i]:
                return -1
            elif l[i] > r[i]:
                return +1
        return 0

    for c in changesets:
        c.entries.sort(entitycompare)

    # Sort changesets by date

    def cscmp(l, r):
        # Primary order: total commit time (time + tz offset).
        d = sum(l.date) - sum(r.date)
        if d:
            return d

        # detect vendor branches and initial commits on a branch
        le = {}
        for e in l.entries:
            le[e.rcs] = e.revision
        re = {}
        for e in r.entries:
            re[e.rcs] = e.revision

        d = 0
        for e in l.entries:
            if re.get(e.rcs, None) == e.parent:
                assert not d
                d = 1
                break

        for e in r.entries:
            if le.get(e.rcs, None) == e.parent:
                assert not d
                d = -1
                break

        return d

    changesets.sort(cscmp)

    # Collect tags

    globaltags = {}
    for c in changesets:
        for e in c.entries:
            for tag in e.tags:
                # remember which is the latest changeset to have this tag
                globaltags[tag] = c

    for c in changesets:
        tags = set()
        for e in c.entries:
            tags.update(e.tags)
        # remember tags only if this is the latest changeset to have it
        c.tags = sorted(tag for tag in tags if globaltags[tag] is c)

    # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
    # by inserting dummy changesets with two parents, and handle
    # {{mergefrombranch BRANCHNAME}} by setting two parents.

    if mergeto is None:
        mergeto = r'{{mergetobranch ([-\w]+)}}'
    if mergeto:
        mergeto = re.compile(mergeto)

    if mergefrom is None:
        mergefrom = r'{{mergefrombranch ([-\w]+)}}'
    if mergefrom:
        mergefrom = re.compile(mergefrom)

    versions = {}    # changeset index where we saw any particular file version
    branches = {}    # changeset index where we saw a branch
    n = len(changesets)
    i = 0
    while i < n:
        c = changesets[i]

        for f in c.entries:
            versions[(f.rcs, f.revision)] = i

        p = None
        if c.branch in branches:
            p = branches[c.branch]
        else:
            # first changeset on a new branch
            # the parent is a changeset with the branch in its
            # branchpoints such that it is the latest possible
            # commit without any intervening, unrelated commits.

            for candidate in xrange(i):
                if c.branch not in changesets[candidate].branchpoints:
                    if p is not None:
                        break
                    continue
                p = candidate

        c.parents = []
        if p is not None:
            p = changesets[p]

            # Ensure no changeset has a synthetic changeset as a parent.
            while p.synthetic:
                assert len(p.parents) <= 1, \
                       _('synthetic changeset cannot have multiple parents')
                if p.parents:
                    p = p.parents[0]
                else:
                    p = None
                    break

            if p is not None:
                c.parents.append(p)

        if c.mergepoint:
            if c.mergepoint == 'HEAD':
                c.mergepoint = None
            c.parents.append(changesets[branches[c.mergepoint]])

        if mergefrom:
            m = mergefrom.search(c.comment)
            if m:
                m = m.group(1)
                if m == 'HEAD':
                    m = None
                try:
                    candidate = changesets[branches[m]]
                except KeyError:
                    ui.warn(_("warning: CVS commit message references "
                              "non-existent branch %r:\n%s\n")
                            % (m, c.comment))
                if m in branches and c.branch != m and not candidate.synthetic:
                    c.parents.append(candidate)

        if mergeto:
            m = mergeto.search(c.comment)
            if m:
                if m.groups():
                    m = m.group(1)
                    if m == 'HEAD':
                        m = None
                else:
                    m = None   # if no group found then merge to HEAD
                if m in branches and c.branch != m:
                    # insert empty changeset for merge
                    cc = changeset(
                        author=c.author, branch=m, date=c.date,
                        comment='convert-repo: CVS merge from branch %s'
                        % c.branch,
                        entries=[], tags=[],
                        parents=[changesets[branches[m]], c])
                    changesets.insert(i + 1, cc)
                    branches[m] = i + 1

                    # adjust our loop counters now we have inserted a new entry
                    n += 1
                    i += 2
                    continue

        branches[c.branch] = i
        i += 1

    # Drop synthetic changesets (safe now that we have ensured no other
    # changesets can have them as parents).
    i = 0
    while i < len(changesets):
        if changesets[i].synthetic:
            del changesets[i]
        else:
            i += 1

    # Number changesets

    for i, c in enumerate(changesets):
        c.id = i + 1

    ui.status(_('%d changeset entries\n') % len(changesets))

    hook.hook(ui, None, "cvschangesets", True, changesets=changesets)

    return changesets
753
767
754
768
755 def debugcvsps(ui, *args, **opts):
769 def debugcvsps(ui, *args, **opts):
756 '''Read CVS rlog for current directory or named path in
770 '''Read CVS rlog for current directory or named path in
757 repository, and convert the log to changesets based on matching
771 repository, and convert the log to changesets based on matching
758 commit log entries and dates.
772 commit log entries and dates.
759 '''
773 '''
760 if opts["new_cache"]:
774 if opts["new_cache"]:
761 cache = "write"
775 cache = "write"
762 elif opts["update_cache"]:
776 elif opts["update_cache"]:
763 cache = "update"
777 cache = "update"
764 else:
778 else:
765 cache = None
779 cache = None
766
780
767 revisions = opts["revisions"]
781 revisions = opts["revisions"]
768
782
769 try:
783 try:
770 if args:
784 if args:
771 log = []
785 log = []
772 for d in args:
786 for d in args:
773 log += createlog(ui, d, root=opts["root"], cache=cache)
787 log += createlog(ui, d, root=opts["root"], cache=cache)
774 else:
788 else:
775 log = createlog(ui, root=opts["root"], cache=cache)
789 log = createlog(ui, root=opts["root"], cache=cache)
776 except logerror, e:
790 except logerror, e:
777 ui.write("%r\n"%e)
791 ui.write("%r\n"%e)
778 return
792 return
779
793
780 changesets = createchangeset(ui, log, opts["fuzz"])
794 changesets = createchangeset(ui, log, opts["fuzz"])
781 del log
795 del log
782
796
783 # Print changesets (optionally filtered)
797 # Print changesets (optionally filtered)
784
798
785 off = len(revisions)
799 off = len(revisions)
786 branches = {} # latest version number in each branch
800 branches = {} # latest version number in each branch
787 ancestors = {} # parent branch
801 ancestors = {} # parent branch
788 for cs in changesets:
802 for cs in changesets:
789
803
790 if opts["ancestors"]:
804 if opts["ancestors"]:
791 if cs.branch not in branches and cs.parents and cs.parents[0].id:
805 if cs.branch not in branches and cs.parents and cs.parents[0].id:
792 ancestors[cs.branch] = (changesets[cs.parents[0].id - 1].branch,
806 ancestors[cs.branch] = (changesets[cs.parents[0].id - 1].branch,
793 cs.parents[0].id)
807 cs.parents[0].id)
794 branches[cs.branch] = cs.id
808 branches[cs.branch] = cs.id
795
809
796 # limit by branches
810 # limit by branches
797 if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
811 if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
798 continue
812 continue
799
813
800 if not off:
814 if not off:
801 # Note: trailing spaces on several lines here are needed to have
815 # Note: trailing spaces on several lines here are needed to have
802 # bug-for-bug compatibility with cvsps.
816 # bug-for-bug compatibility with cvsps.
803 ui.write('---------------------\n')
817 ui.write('---------------------\n')
804 ui.write(('PatchSet %d \n' % cs.id))
818 ui.write(('PatchSet %d \n' % cs.id))
805 ui.write(('Date: %s\n' % util.datestr(cs.date,
819 ui.write(('Date: %s\n' % util.datestr(cs.date,
806 '%Y/%m/%d %H:%M:%S %1%2')))
820 '%Y/%m/%d %H:%M:%S %1%2')))
807 ui.write(('Author: %s\n' % cs.author))
821 ui.write(('Author: %s\n' % cs.author))
808 ui.write(('Branch: %s\n' % (cs.branch or 'HEAD')))
822 ui.write(('Branch: %s\n' % (cs.branch or 'HEAD')))
809 ui.write(('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
823 ui.write(('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
810 ','.join(cs.tags) or '(none)')))
824 ','.join(cs.tags) or '(none)')))
811 branchpoints = getattr(cs, 'branchpoints', None)
825 if cs.branchpoints:
812 if branchpoints:
826 ui.write('Branchpoints: %s \n' % ', '.join(cs.branchpoints))
813 ui.write(('Branchpoints: %s \n' % ', '.join(branchpoints)))
814 if opts["parents"] and cs.parents:
827 if opts["parents"] and cs.parents:
815 if len(cs.parents) > 1:
828 if len(cs.parents) > 1:
816 ui.write(('Parents: %s\n' %
829 ui.write(('Parents: %s\n' %
817 (','.join([str(p.id) for p in cs.parents]))))
830 (','.join([str(p.id) for p in cs.parents]))))
818 else:
831 else:
819 ui.write(('Parent: %d\n' % cs.parents[0].id))
832 ui.write(('Parent: %d\n' % cs.parents[0].id))
820
833
821 if opts["ancestors"]:
834 if opts["ancestors"]:
822 b = cs.branch
835 b = cs.branch
823 r = []
836 r = []
824 while b:
837 while b:
825 b, c = ancestors[b]
838 b, c = ancestors[b]
826 r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
839 r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
827 if r:
840 if r:
828 ui.write(('Ancestors: %s\n' % (','.join(r))))
841 ui.write(('Ancestors: %s\n' % (','.join(r))))
829
842
830 ui.write(('Log:\n'))
843 ui.write(('Log:\n'))
831 ui.write('%s\n\n' % cs.comment)
844 ui.write('%s\n\n' % cs.comment)
832 ui.write(('Members: \n'))
845 ui.write(('Members: \n'))
833 for f in cs.entries:
846 for f in cs.entries:
834 fn = f.file
847 fn = f.file
835 if fn.startswith(opts["prefix"]):
848 if fn.startswith(opts["prefix"]):
836 fn = fn[len(opts["prefix"]):]
849 fn = fn[len(opts["prefix"]):]
837 ui.write('\t%s:%s->%s%s \n' % (
850 ui.write('\t%s:%s->%s%s \n' % (
838 fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
851 fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
839 '.'.join([str(x) for x in f.revision]),
852 '.'.join([str(x) for x in f.revision]),
840 ['', '(DEAD)'][f.dead]))
853 ['', '(DEAD)'][f.dead]))
841 ui.write('\n')
854 ui.write('\n')
842
855
843 # have we seen the start tag?
856 # have we seen the start tag?
844 if revisions and off:
857 if revisions and off:
845 if revisions[0] == str(cs.id) or \
858 if revisions[0] == str(cs.id) or \
846 revisions[0] in cs.tags:
859 revisions[0] in cs.tags:
847 off = False
860 off = False
848
861
849 # see if we reached the end tag
862 # see if we reached the end tag
850 if len(revisions) > 1 and not off:
863 if len(revisions) > 1 and not off:
851 if revisions[1] == str(cs.id) or \
864 if revisions[1] == str(cs.id) or \
852 revisions[1] in cs.tags:
865 revisions[1] in cs.tags:
853 break
866 break
General Comments 0
You need to be logged in to leave comments. Login now