# Provenance: Mercurial changeset r18286:762f12b8 (default branch),
# "cvsps: fix indentation" by Idan Kamara.
# Extracted from a web diff view (old/new columns were interleaved);
# the code below is the reconstructed, de-duplicated file content.
# @@ -1,870 +1,870 @@
1 # Mercurial built-in replacement for cvsps.
1 # Mercurial built-in replacement for cvsps.
2 #
2 #
3 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
3 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import os
8 import os
9 import re
9 import re
10 import cPickle as pickle
10 import cPickle as pickle
11 from mercurial import util
11 from mercurial import util
12 from mercurial.i18n import _
12 from mercurial.i18n import _
13 from mercurial import hook
13 from mercurial import hook
14 from mercurial import util
14 from mercurial import util
15
15
16 class logentry(object):
16 class logentry(object):
17 '''Class logentry has the following attributes:
17 '''Class logentry has the following attributes:
18 .author - author name as CVS knows it
18 .author - author name as CVS knows it
19 .branch - name of branch this revision is on
19 .branch - name of branch this revision is on
20 .branches - revision tuple of branches starting at this revision
20 .branches - revision tuple of branches starting at this revision
21 .comment - commit message
21 .comment - commit message
22 .commitid - CVS commitid or None
22 .commitid - CVS commitid or None
23 .date - the commit date as a (time, tz) tuple
23 .date - the commit date as a (time, tz) tuple
24 .dead - true if file revision is dead
24 .dead - true if file revision is dead
25 .file - Name of file
25 .file - Name of file
26 .lines - a tuple (+lines, -lines) or None
26 .lines - a tuple (+lines, -lines) or None
27 .parent - Previous revision of this entry
27 .parent - Previous revision of this entry
28 .rcs - name of file as returned from CVS
28 .rcs - name of file as returned from CVS
29 .revision - revision number as tuple
29 .revision - revision number as tuple
30 .tags - list of tags on the file
30 .tags - list of tags on the file
31 .synthetic - is this a synthetic "file ... added on ..." revision?
31 .synthetic - is this a synthetic "file ... added on ..." revision?
32 .mergepoint - the branch that has been merged from (if present in
32 .mergepoint - the branch that has been merged from (if present in
33 rlog output) or None
33 rlog output) or None
34 .branchpoints - the branches that start at the current entry or empty
34 .branchpoints - the branches that start at the current entry or empty
35 '''
35 '''
36 def __init__(self, **entries):
36 def __init__(self, **entries):
37 self.synthetic = False
37 self.synthetic = False
38 self.__dict__.update(entries)
38 self.__dict__.update(entries)
39
39
40 def __repr__(self):
40 def __repr__(self):
41 items = ("%s=%r"%(k, self.__dict__[k]) for k in sorted(self.__dict__))
41 items = ("%s=%r"%(k, self.__dict__[k]) for k in sorted(self.__dict__))
42 return "%s(%s)"%(type(self).__name__, ", ".join(items))
42 return "%s(%s)"%(type(self).__name__, ", ".join(items))
43
43
44 class logerror(Exception):
44 class logerror(Exception):
45 pass
45 pass
46
46
47 def getrepopath(cvspath):
47 def getrepopath(cvspath):
48 """Return the repository path from a CVS path.
48 """Return the repository path from a CVS path.
49
49
50 >>> getrepopath('/foo/bar')
50 >>> getrepopath('/foo/bar')
51 '/foo/bar'
51 '/foo/bar'
52 >>> getrepopath('c:/foo/bar')
52 >>> getrepopath('c:/foo/bar')
53 'c:/foo/bar'
53 'c:/foo/bar'
54 >>> getrepopath(':pserver:10/foo/bar')
54 >>> getrepopath(':pserver:10/foo/bar')
55 '/foo/bar'
55 '/foo/bar'
56 >>> getrepopath(':pserver:10c:/foo/bar')
56 >>> getrepopath(':pserver:10c:/foo/bar')
57 '/foo/bar'
57 '/foo/bar'
58 >>> getrepopath(':pserver:/foo/bar')
58 >>> getrepopath(':pserver:/foo/bar')
59 '/foo/bar'
59 '/foo/bar'
60 >>> getrepopath(':pserver:c:/foo/bar')
60 >>> getrepopath(':pserver:c:/foo/bar')
61 'c:/foo/bar'
61 'c:/foo/bar'
62 >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
62 >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
63 '/foo/bar'
63 '/foo/bar'
64 >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
64 >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
65 'c:/foo/bar'
65 'c:/foo/bar'
66 """
66 """
67 # According to CVS manual, CVS paths are expressed like:
67 # According to CVS manual, CVS paths are expressed like:
68 # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
68 # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
69 #
69 #
70 # Unfortunately, Windows absolute paths start with a drive letter
70 # Unfortunately, Windows absolute paths start with a drive letter
71 # like 'c:' making it harder to parse. Here we assume that drive
71 # like 'c:' making it harder to parse. Here we assume that drive
72 # letters are only one character long and any CVS component before
72 # letters are only one character long and any CVS component before
73 # the repository path is at least 2 characters long, and use this
73 # the repository path is at least 2 characters long, and use this
74 # to disambiguate.
74 # to disambiguate.
75 parts = cvspath.split(':')
75 parts = cvspath.split(':')
76 if len(parts) == 1:
76 if len(parts) == 1:
77 return parts[0]
77 return parts[0]
78 # Here there is an ambiguous case if we have a port number
78 # Here there is an ambiguous case if we have a port number
79 # immediately followed by a Windows driver letter. We assume this
79 # immediately followed by a Windows driver letter. We assume this
80 # never happens and decide it must be CVS path component,
80 # never happens and decide it must be CVS path component,
81 # therefore ignoring it.
81 # therefore ignoring it.
82 if len(parts[-2]) > 1:
82 if len(parts[-2]) > 1:
83 return parts[-1].lstrip('0123456789')
83 return parts[-1].lstrip('0123456789')
84 return parts[-2] + ':' + parts[-1]
84 return parts[-2] + ':' + parts[-1]
85
85
86 def createlog(ui, directory=None, root="", rlog=True, cache=None):
86 def createlog(ui, directory=None, root="", rlog=True, cache=None):
87 '''Collect the CVS rlog'''
87 '''Collect the CVS rlog'''
88
88
89 # Because we store many duplicate commit log messages, reusing strings
89 # Because we store many duplicate commit log messages, reusing strings
90 # saves a lot of memory and pickle storage space.
90 # saves a lot of memory and pickle storage space.
91 _scache = {}
91 _scache = {}
92 def scache(s):
92 def scache(s):
93 "return a shared version of a string"
93 "return a shared version of a string"
94 return _scache.setdefault(s, s)
94 return _scache.setdefault(s, s)
95
95
96 ui.status(_('collecting CVS rlog\n'))
96 ui.status(_('collecting CVS rlog\n'))
97
97
98 log = [] # list of logentry objects containing the CVS state
98 log = [] # list of logentry objects containing the CVS state
99
99
100 # patterns to match in CVS (r)log output, by state of use
100 # patterns to match in CVS (r)log output, by state of use
101 re_00 = re.compile('RCS file: (.+)$')
101 re_00 = re.compile('RCS file: (.+)$')
102 re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
102 re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
103 re_02 = re.compile('cvs (r?log|server): (.+)\n$')
103 re_02 = re.compile('cvs (r?log|server): (.+)\n$')
104 re_03 = re.compile("(Cannot access.+CVSROOT)|"
104 re_03 = re.compile("(Cannot access.+CVSROOT)|"
105 "(can't create temporary directory.+)$")
105 "(can't create temporary directory.+)$")
106 re_10 = re.compile('Working file: (.+)$')
106 re_10 = re.compile('Working file: (.+)$')
107 re_20 = re.compile('symbolic names:')
107 re_20 = re.compile('symbolic names:')
108 re_30 = re.compile('\t(.+): ([\\d.]+)$')
108 re_30 = re.compile('\t(.+): ([\\d.]+)$')
109 re_31 = re.compile('----------------------------$')
109 re_31 = re.compile('----------------------------$')
110 re_32 = re.compile('======================================='
110 re_32 = re.compile('======================================='
111 '======================================$')
111 '======================================$')
112 re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
112 re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
113 re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
113 re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
114 r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
114 r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
115 r'(\s+commitid:\s+([^;]+);)?'
115 r'(\s+commitid:\s+([^;]+);)?'
116 r'(.*mergepoint:\s+([^;]+);)?')
116 r'(.*mergepoint:\s+([^;]+);)?')
117 re_70 = re.compile('branches: (.+);$')
117 re_70 = re.compile('branches: (.+);$')
118
118
119 file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')
119 file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')
120
120
121 prefix = '' # leading path to strip of what we get from CVS
121 prefix = '' # leading path to strip of what we get from CVS
122
122
123 if directory is None:
123 if directory is None:
124 # Current working directory
124 # Current working directory
125
125
126 # Get the real directory in the repository
126 # Get the real directory in the repository
127 try:
127 try:
128 prefix = open(os.path.join('CVS','Repository')).read().strip()
128 prefix = open(os.path.join('CVS','Repository')).read().strip()
129 directory = prefix
129 directory = prefix
130 if prefix == ".":
130 if prefix == ".":
131 prefix = ""
131 prefix = ""
132 except IOError:
132 except IOError:
133 raise logerror(_('not a CVS sandbox'))
133 raise logerror(_('not a CVS sandbox'))
134
134
135 if prefix and not prefix.endswith(os.sep):
135 if prefix and not prefix.endswith(os.sep):
136 prefix += os.sep
136 prefix += os.sep
137
137
138 # Use the Root file in the sandbox, if it exists
138 # Use the Root file in the sandbox, if it exists
139 try:
139 try:
140 root = open(os.path.join('CVS','Root')).read().strip()
140 root = open(os.path.join('CVS','Root')).read().strip()
141 except IOError:
141 except IOError:
142 pass
142 pass
143
143
144 if not root:
144 if not root:
145 root = os.environ.get('CVSROOT', '')
145 root = os.environ.get('CVSROOT', '')
146
146
147 # read log cache if one exists
147 # read log cache if one exists
148 oldlog = []
148 oldlog = []
149 date = None
149 date = None
150
150
151 if cache:
151 if cache:
152 cachedir = os.path.expanduser('~/.hg.cvsps')
152 cachedir = os.path.expanduser('~/.hg.cvsps')
153 if not os.path.exists(cachedir):
153 if not os.path.exists(cachedir):
154 os.mkdir(cachedir)
154 os.mkdir(cachedir)
155
155
156 # The cvsps cache pickle needs a uniquified name, based on the
156 # The cvsps cache pickle needs a uniquified name, based on the
157 # repository location. The address may have all sort of nasties
157 # repository location. The address may have all sort of nasties
158 # in it, slashes, colons and such. So here we take just the
158 # in it, slashes, colons and such. So here we take just the
159 # alphanumeric characters, concatenated in a way that does not
159 # alphanumeric characters, concatenated in a way that does not
160 # mix up the various components, so that
160 # mix up the various components, so that
161 # :pserver:user@server:/path
161 # :pserver:user@server:/path
162 # and
162 # and
163 # /pserver/user/server/path
163 # /pserver/user/server/path
164 # are mapped to different cache file names.
164 # are mapped to different cache file names.
165 cachefile = root.split(":") + [directory, "cache"]
165 cachefile = root.split(":") + [directory, "cache"]
166 cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
166 cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
167 cachefile = os.path.join(cachedir,
167 cachefile = os.path.join(cachedir,
168 '.'.join([s for s in cachefile if s]))
168 '.'.join([s for s in cachefile if s]))
169
169
170 if cache == 'update':
170 if cache == 'update':
171 try:
171 try:
172 ui.note(_('reading cvs log cache %s\n') % cachefile)
172 ui.note(_('reading cvs log cache %s\n') % cachefile)
173 oldlog = pickle.load(open(cachefile))
173 oldlog = pickle.load(open(cachefile))
174 for e in oldlog:
174 for e in oldlog:
175 if not (util.safehasattr(e, 'branchpoints') and
175 if not (util.safehasattr(e, 'branchpoints') and
176 util.safehasattr(e, 'commitid') and
176 util.safehasattr(e, 'commitid') and
177 util.safehasattr(e, 'mergepoint')):
177 util.safehasattr(e, 'mergepoint')):
178 ui.status(_('ignoring old cache\n'))
178 ui.status(_('ignoring old cache\n'))
179 oldlog = []
179 oldlog = []
180 break
180 break
181
181
182 ui.note(_('cache has %d log entries\n') % len(oldlog))
182 ui.note(_('cache has %d log entries\n') % len(oldlog))
183 except Exception, e:
183 except Exception, e:
184 ui.note(_('error reading cache: %r\n') % e)
184 ui.note(_('error reading cache: %r\n') % e)
185
185
186 if oldlog:
186 if oldlog:
187 date = oldlog[-1].date # last commit date as a (time,tz) tuple
187 date = oldlog[-1].date # last commit date as a (time,tz) tuple
188 date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
188 date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
189
189
190 # build the CVS commandline
190 # build the CVS commandline
191 cmd = ['cvs', '-q']
191 cmd = ['cvs', '-q']
192 if root:
192 if root:
193 cmd.append('-d%s' % root)
193 cmd.append('-d%s' % root)
194 p = util.normpath(getrepopath(root))
194 p = util.normpath(getrepopath(root))
195 if not p.endswith('/'):
195 if not p.endswith('/'):
196 p += '/'
196 p += '/'
197 if prefix:
197 if prefix:
198 # looks like normpath replaces "" by "."
198 # looks like normpath replaces "" by "."
199 prefix = p + util.normpath(prefix)
199 prefix = p + util.normpath(prefix)
200 else:
200 else:
201 prefix = p
201 prefix = p
202 cmd.append(['log', 'rlog'][rlog])
202 cmd.append(['log', 'rlog'][rlog])
203 if date:
203 if date:
204 # no space between option and date string
204 # no space between option and date string
205 cmd.append('-d>%s' % date)
205 cmd.append('-d>%s' % date)
206 cmd.append(directory)
206 cmd.append(directory)
207
207
208 # state machine begins here
208 # state machine begins here
209 tags = {} # dictionary of revisions on current file with their tags
209 tags = {} # dictionary of revisions on current file with their tags
210 branchmap = {} # mapping between branch names and revision numbers
210 branchmap = {} # mapping between branch names and revision numbers
211 state = 0
211 state = 0
212 store = False # set when a new record can be appended
212 store = False # set when a new record can be appended
213
213
214 cmd = [util.shellquote(arg) for arg in cmd]
214 cmd = [util.shellquote(arg) for arg in cmd]
215 ui.note(_("running %s\n") % (' '.join(cmd)))
215 ui.note(_("running %s\n") % (' '.join(cmd)))
216 ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root))
216 ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root))
217
217
218 pfp = util.popen(' '.join(cmd))
218 pfp = util.popen(' '.join(cmd))
219 peek = pfp.readline()
219 peek = pfp.readline()
220 while True:
220 while True:
221 line = peek
221 line = peek
222 if line == '':
222 if line == '':
223 break
223 break
224 peek = pfp.readline()
224 peek = pfp.readline()
225 if line.endswith('\n'):
225 if line.endswith('\n'):
226 line = line[:-1]
226 line = line[:-1]
227 #ui.debug('state=%d line=%r\n' % (state, line))
227 #ui.debug('state=%d line=%r\n' % (state, line))
228
228
229 if state == 0:
229 if state == 0:
230 # initial state, consume input until we see 'RCS file'
230 # initial state, consume input until we see 'RCS file'
231 match = re_00.match(line)
231 match = re_00.match(line)
232 if match:
232 if match:
233 rcs = match.group(1)
233 rcs = match.group(1)
234 tags = {}
234 tags = {}
235 if rlog:
235 if rlog:
236 filename = util.normpath(rcs[:-2])
236 filename = util.normpath(rcs[:-2])
237 if filename.startswith(prefix):
237 if filename.startswith(prefix):
238 filename = filename[len(prefix):]
238 filename = filename[len(prefix):]
239 if filename.startswith('/'):
239 if filename.startswith('/'):
240 filename = filename[1:]
240 filename = filename[1:]
241 if filename.startswith('Attic/'):
241 if filename.startswith('Attic/'):
242 filename = filename[6:]
242 filename = filename[6:]
243 else:
243 else:
244 filename = filename.replace('/Attic/', '/')
244 filename = filename.replace('/Attic/', '/')
245 state = 2
245 state = 2
246 continue
246 continue
247 state = 1
247 state = 1
248 continue
248 continue
249 match = re_01.match(line)
249 match = re_01.match(line)
250 if match:
250 if match:
251 raise logerror(match.group(1))
251 raise logerror(match.group(1))
252 match = re_02.match(line)
252 match = re_02.match(line)
253 if match:
253 if match:
254 raise logerror(match.group(2))
254 raise logerror(match.group(2))
255 if re_03.match(line):
255 if re_03.match(line):
256 raise logerror(line)
256 raise logerror(line)
257
257
258 elif state == 1:
258 elif state == 1:
259 # expect 'Working file' (only when using log instead of rlog)
259 # expect 'Working file' (only when using log instead of rlog)
260 match = re_10.match(line)
260 match = re_10.match(line)
261 assert match, _('RCS file must be followed by working file')
261 assert match, _('RCS file must be followed by working file')
262 filename = util.normpath(match.group(1))
262 filename = util.normpath(match.group(1))
263 state = 2
263 state = 2
264
264
265 elif state == 2:
265 elif state == 2:
266 # expect 'symbolic names'
266 # expect 'symbolic names'
267 if re_20.match(line):
267 if re_20.match(line):
268 branchmap = {}
268 branchmap = {}
269 state = 3
269 state = 3
270
270
271 elif state == 3:
271 elif state == 3:
272 # read the symbolic names and store as tags
272 # read the symbolic names and store as tags
273 match = re_30.match(line)
273 match = re_30.match(line)
274 if match:
274 if match:
275 rev = [int(x) for x in match.group(2).split('.')]
275 rev = [int(x) for x in match.group(2).split('.')]
276
276
277 # Convert magic branch number to an odd-numbered one
277 # Convert magic branch number to an odd-numbered one
278 revn = len(rev)
278 revn = len(rev)
279 if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
279 if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
280 rev = rev[:-2] + rev[-1:]
280 rev = rev[:-2] + rev[-1:]
281 rev = tuple(rev)
281 rev = tuple(rev)
282
282
283 if rev not in tags:
283 if rev not in tags:
284 tags[rev] = []
284 tags[rev] = []
285 tags[rev].append(match.group(1))
285 tags[rev].append(match.group(1))
286 branchmap[match.group(1)] = match.group(2)
286 branchmap[match.group(1)] = match.group(2)
287
287
288 elif re_31.match(line):
288 elif re_31.match(line):
289 state = 5
289 state = 5
290 elif re_32.match(line):
290 elif re_32.match(line):
291 state = 0
291 state = 0
292
292
293 elif state == 4:
293 elif state == 4:
294 # expecting '------' separator before first revision
294 # expecting '------' separator before first revision
295 if re_31.match(line):
295 if re_31.match(line):
296 state = 5
296 state = 5
297 else:
297 else:
298 assert not re_32.match(line), _('must have at least '
298 assert not re_32.match(line), _('must have at least '
299 'some revisions')
299 'some revisions')
300
300
301 elif state == 5:
301 elif state == 5:
302 # expecting revision number and possibly (ignored) lock indication
302 # expecting revision number and possibly (ignored) lock indication
303 # we create the logentry here from values stored in states 0 to 4,
303 # we create the logentry here from values stored in states 0 to 4,
304 # as this state is re-entered for subsequent revisions of a file.
304 # as this state is re-entered for subsequent revisions of a file.
305 match = re_50.match(line)
305 match = re_50.match(line)
306 assert match, _('expected revision number')
306 assert match, _('expected revision number')
307 e = logentry(rcs=scache(rcs),
307 e = logentry(rcs=scache(rcs),
308 file=scache(filename),
308 file=scache(filename),
309 revision=tuple([int(x) for x in
309 revision=tuple([int(x) for x in
310 match.group(1).split('.')]),
310 match.group(1).split('.')]),
311 branches=[],
311 branches=[],
312 parent=None,
312 parent=None,
313 commitid=None,
313 commitid=None,
314 mergepoint=None,
314 mergepoint=None,
315 branchpoints=set())
315 branchpoints=set())
316
316
317 state = 6
317 state = 6
318
318
319 elif state == 6:
319 elif state == 6:
320 # expecting date, author, state, lines changed
320 # expecting date, author, state, lines changed
321 match = re_60.match(line)
321 match = re_60.match(line)
322 assert match, _('revision must be followed by date line')
322 assert match, _('revision must be followed by date line')
323 d = match.group(1)
323 d = match.group(1)
324 if d[2] == '/':
324 if d[2] == '/':
325 # Y2K
325 # Y2K
326 d = '19' + d
326 d = '19' + d
327
327
328 if len(d.split()) != 3:
328 if len(d.split()) != 3:
329 # cvs log dates always in GMT
329 # cvs log dates always in GMT
330 d = d + ' UTC'
330 d = d + ' UTC'
331 e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
331 e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
332 '%Y/%m/%d %H:%M:%S',
332 '%Y/%m/%d %H:%M:%S',
333 '%Y-%m-%d %H:%M:%S'])
333 '%Y-%m-%d %H:%M:%S'])
334 e.author = scache(match.group(2))
334 e.author = scache(match.group(2))
335 e.dead = match.group(3).lower() == 'dead'
335 e.dead = match.group(3).lower() == 'dead'
336
336
337 if match.group(5):
337 if match.group(5):
338 if match.group(6):
338 if match.group(6):
339 e.lines = (int(match.group(5)), int(match.group(6)))
339 e.lines = (int(match.group(5)), int(match.group(6)))
340 else:
340 else:
341 e.lines = (int(match.group(5)), 0)
341 e.lines = (int(match.group(5)), 0)
342 elif match.group(6):
342 elif match.group(6):
343 e.lines = (0, int(match.group(6)))
343 e.lines = (0, int(match.group(6)))
344 else:
344 else:
345 e.lines = None
345 e.lines = None
346
346
347 if match.group(7): # cvs 1.12 commitid
347 if match.group(7): # cvs 1.12 commitid
348 e.commitid = match.group(8)
348 e.commitid = match.group(8)
349
349
350 if match.group(9): # cvsnt mergepoint
350 if match.group(9): # cvsnt mergepoint
351 myrev = match.group(10).split('.')
351 myrev = match.group(10).split('.')
352 if len(myrev) == 2: # head
352 if len(myrev) == 2: # head
353 e.mergepoint = 'HEAD'
353 e.mergepoint = 'HEAD'
354 else:
354 else:
355 myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
355 myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
356 branches = [b for b in branchmap if branchmap[b] == myrev]
356 branches = [b for b in branchmap if branchmap[b] == myrev]
357 assert len(branches) == 1, ('unknown branch: %s'
357 assert len(branches) == 1, ('unknown branch: %s'
358 % e.mergepoint)
358 % e.mergepoint)
359 e.mergepoint = branches[0]
359 e.mergepoint = branches[0]
360
360
361 e.comment = []
361 e.comment = []
362 state = 7
362 state = 7
363
363
364 elif state == 7:
364 elif state == 7:
365 # read the revision numbers of branches that start at this revision
365 # read the revision numbers of branches that start at this revision
366 # or store the commit log message otherwise
366 # or store the commit log message otherwise
367 m = re_70.match(line)
367 m = re_70.match(line)
368 if m:
368 if m:
369 e.branches = [tuple([int(y) for y in x.strip().split('.')])
369 e.branches = [tuple([int(y) for y in x.strip().split('.')])
370 for x in m.group(1).split(';')]
370 for x in m.group(1).split(';')]
371 state = 8
371 state = 8
372 elif re_31.match(line) and re_50.match(peek):
372 elif re_31.match(line) and re_50.match(peek):
373 state = 5
373 state = 5
374 store = True
374 store = True
375 elif re_32.match(line):
375 elif re_32.match(line):
376 state = 0
376 state = 0
377 store = True
377 store = True
378 else:
378 else:
379 e.comment.append(line)
379 e.comment.append(line)
380
380
381 elif state == 8:
381 elif state == 8:
382 # store commit log message
382 # store commit log message
383 if re_31.match(line):
383 if re_31.match(line):
384 cpeek = peek
384 cpeek = peek
385 if cpeek.endswith('\n'):
385 if cpeek.endswith('\n'):
386 cpeek = cpeek[:-1]
386 cpeek = cpeek[:-1]
387 if re_50.match(cpeek):
387 if re_50.match(cpeek):
388 state = 5
388 state = 5
389 store = True
389 store = True
390 else:
390 else:
391 e.comment.append(line)
391 e.comment.append(line)
392 elif re_32.match(line):
392 elif re_32.match(line):
393 state = 0
393 state = 0
394 store = True
394 store = True
395 else:
395 else:
396 e.comment.append(line)
396 e.comment.append(line)
397
397
398 # When a file is added on a branch B1, CVS creates a synthetic
398 # When a file is added on a branch B1, CVS creates a synthetic
399 # dead trunk revision 1.1 so that the branch has a root.
399 # dead trunk revision 1.1 so that the branch has a root.
400 # Likewise, if you merge such a file to a later branch B2 (one
400 # Likewise, if you merge such a file to a later branch B2 (one
401 # that already existed when the file was added on B1), CVS
401 # that already existed when the file was added on B1), CVS
402 # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
402 # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
403 # these revisions now, but mark them synthetic so
403 # these revisions now, but mark them synthetic so
404 # createchangeset() can take care of them.
404 # createchangeset() can take care of them.
405 if (store and
405 if (store and
406 e.dead and
406 e.dead and
407 e.revision[-1] == 1 and # 1.1 or 1.1.x.1
407 e.revision[-1] == 1 and # 1.1 or 1.1.x.1
408 len(e.comment) == 1 and
408 len(e.comment) == 1 and
409 file_added_re.match(e.comment[0])):
409 file_added_re.match(e.comment[0])):
410 ui.debug('found synthetic revision in %s: %r\n'
410 ui.debug('found synthetic revision in %s: %r\n'
411 % (e.rcs, e.comment[0]))
411 % (e.rcs, e.comment[0]))
412 e.synthetic = True
412 e.synthetic = True
413
413
414 if store:
414 if store:
415 # clean up the results and save in the log.
415 # clean up the results and save in the log.
416 store = False
416 store = False
417 e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
417 e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
418 e.comment = scache('\n'.join(e.comment))
418 e.comment = scache('\n'.join(e.comment))
419
419
420 revn = len(e.revision)
420 revn = len(e.revision)
421 if revn > 3 and (revn % 2) == 0:
421 if revn > 3 and (revn % 2) == 0:
422 e.branch = tags.get(e.revision[:-1], [None])[0]
422 e.branch = tags.get(e.revision[:-1], [None])[0]
423 else:
423 else:
424 e.branch = None
424 e.branch = None
425
425
426 # find the branches starting from this revision
426 # find the branches starting from this revision
427 branchpoints = set()
427 branchpoints = set()
428 for branch, revision in branchmap.iteritems():
428 for branch, revision in branchmap.iteritems():
429 revparts = tuple([int(i) for i in revision.split('.')])
429 revparts = tuple([int(i) for i in revision.split('.')])
430 if len(revparts) < 2: # bad tags
430 if len(revparts) < 2: # bad tags
431 continue
431 continue
432 if revparts[-2] == 0 and revparts[-1] % 2 == 0:
432 if revparts[-2] == 0 and revparts[-1] % 2 == 0:
433 # normal branch
433 # normal branch
434 if revparts[:-2] == e.revision:
434 if revparts[:-2] == e.revision:
435 branchpoints.add(branch)
435 branchpoints.add(branch)
436 elif revparts == (1, 1, 1): # vendor branch
436 elif revparts == (1, 1, 1): # vendor branch
437 if revparts in e.branches:
437 if revparts in e.branches:
438 branchpoints.add(branch)
438 branchpoints.add(branch)
439 e.branchpoints = branchpoints
439 e.branchpoints = branchpoints
440
440
441 log.append(e)
441 log.append(e)
442
442
443 if len(log) % 100 == 0:
443 if len(log) % 100 == 0:
444 ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')
444 ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')
445
445
446 log.sort(key=lambda x: (x.rcs, x.revision))
446 log.sort(key=lambda x: (x.rcs, x.revision))
447
447
448 # find parent revisions of individual files
448 # find parent revisions of individual files
449 versions = {}
449 versions = {}
450 for e in log:
450 for e in log:
451 branch = e.revision[:-1]
451 branch = e.revision[:-1]
452 p = versions.get((e.rcs, branch), None)
452 p = versions.get((e.rcs, branch), None)
453 if p is None:
453 if p is None:
454 p = e.revision[:-2]
454 p = e.revision[:-2]
455 e.parent = p
455 e.parent = p
456 versions[(e.rcs, branch)] = e.revision
456 versions[(e.rcs, branch)] = e.revision
457
457
458 # update the log cache
458 # update the log cache
459 if cache:
459 if cache:
460 if log:
460 if log:
461 # join up the old and new logs
461 # join up the old and new logs
462 log.sort(key=lambda x: x.date)
462 log.sort(key=lambda x: x.date)
463
463
464 if oldlog and oldlog[-1].date >= log[0].date:
464 if oldlog and oldlog[-1].date >= log[0].date:
465 raise logerror(_('log cache overlaps with new log entries,'
465 raise logerror(_('log cache overlaps with new log entries,'
466 ' re-run without cache.'))
466 ' re-run without cache.'))
467
467
468 log = oldlog + log
468 log = oldlog + log
469
469
470 # write the new cachefile
470 # write the new cachefile
471 ui.note(_('writing cvs log cache %s\n') % cachefile)
471 ui.note(_('writing cvs log cache %s\n') % cachefile)
472 pickle.dump(log, open(cachefile, 'w'))
472 pickle.dump(log, open(cachefile, 'w'))
473 else:
473 else:
474 log = oldlog
474 log = oldlog
475
475
476 ui.status(_('%d log entries\n') % len(log))
476 ui.status(_('%d log entries\n') % len(log))
477
477
478 hook.hook(ui, None, "cvslog", True, log=log)
478 hook.hook(ui, None, "cvslog", True, log=log)
479
479
480 return log
480 return log
481
481
482
482
483 class changeset(object):
483 class changeset(object):
484 '''Class changeset has the following attributes:
484 '''Class changeset has the following attributes:
485 .id - integer identifying this changeset (list index)
485 .id - integer identifying this changeset (list index)
486 .author - author name as CVS knows it
486 .author - author name as CVS knows it
487 .branch - name of branch this changeset is on, or None
487 .branch - name of branch this changeset is on, or None
488 .comment - commit message
488 .comment - commit message
489 .commitid - CVS commitid or None
489 .commitid - CVS commitid or None
490 .date - the commit date as a (time,tz) tuple
490 .date - the commit date as a (time,tz) tuple
491 .entries - list of logentry objects in this changeset
491 .entries - list of logentry objects in this changeset
492 .parents - list of one or two parent changesets
492 .parents - list of one or two parent changesets
493 .tags - list of tags on this changeset
493 .tags - list of tags on this changeset
494 .synthetic - from synthetic revision "file ... added on branch ..."
494 .synthetic - from synthetic revision "file ... added on branch ..."
495 .mergepoint- the branch that has been merged from or None
495 .mergepoint- the branch that has been merged from or None
496 .branchpoints- the branches that start at the current entry or empty
496 .branchpoints- the branches that start at the current entry or empty
497 '''
497 '''
498 def __init__(self, **entries):
498 def __init__(self, **entries):
499 self.synthetic = False
499 self.synthetic = False
500 self.__dict__.update(entries)
500 self.__dict__.update(entries)
501
501
502 def __repr__(self):
502 def __repr__(self):
503 items = ("%s=%r"%(k, self.__dict__[k]) for k in sorted(self.__dict__))
503 items = ("%s=%r"%(k, self.__dict__[k]) for k in sorted(self.__dict__))
504 return "%s(%s)"%(type(self).__name__, ", ".join(items))
504 return "%s(%s)"%(type(self).__name__, ", ".join(items))
505
505
def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
    '''Convert log into changesets.

    log is a list of logentry objects (one per CVS file revision).
    fuzz is the maximum gap, in seconds, between file commits that may
    still be grouped into one changeset when no CVS commitids exist.
    mergefrom/mergeto are regex pattern strings used to recognise merge
    markers in commit messages; None selects the built-in defaults, and
    any other false value disables that kind of merge detection.

    Returns the list of changeset objects, numbered from 1, with
    .parents and .tags filled in and synthetic changesets removed.
    '''

    ui.status(_('creating changesets\n'))

    # Merge changesets
    # Sorting by this key places all log entries that could belong to
    # the same changeset next to each other, so one linear pass below
    # can group them.
    log.sort(key=lambda x: (x.commitid, x.comment, x.author, x.branch, x.date,
                            x.branchpoints))

    changesets = []
    files = set()
    c = None
    for i, e in enumerate(log):

        # Check if log entry belongs to the current changeset or not.

        # Since CVS is file-centric, two different file revisions with
        # different branchpoints should be treated as belonging to two
        # different changesets (and the ordering is important and not
        # honoured by cvsps at this point).
        #
        # Consider the following case:
        # foo 1.1 branchpoints: [MYBRANCH]
        # bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2]
        #
        # Here foo is part only of MYBRANCH, but not MYBRANCH2, e.g. a
        # later version of foo may be in MYBRANCH2, so foo should be the
        # first changeset and bar the next and MYBRANCH and MYBRANCH2
        # should both start off of the bar changeset. No provisions are
        # made to ensure that this is, in fact, what happens.
        if not (c and e.branchpoints == c.branchpoints and
                (# cvs commitids
                 (e.commitid is not None and e.commitid == c.commitid) or
                 (# no commitids, use fuzzy commit detection
                  (e.commitid is None or c.commitid is None) and
                   e.comment == c.comment and
                   e.author == c.author and
                   e.branch == c.branch and
                   ((c.date[0] + c.date[1]) <=
                    (e.date[0] + e.date[1]) <=
                    (c.date[0] + c.date[1]) + fuzz) and
                   e.file not in files))):
            # Start a new changeset seeded from this log entry.
            c = changeset(comment=e.comment, author=e.author,
                          branch=e.branch, date=e.date,
                          entries=[], mergepoint=e.mergepoint,
                          branchpoints=e.branchpoints, commitid=e.commitid)
            changesets.append(c)

            files = set()
            # Progress feedback every 100 changesets.
            if len(changesets) % 100 == 0:
                t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
                ui.status(util.ellipsis(t, 80) + '\n')

        c.entries.append(e)
        files.add(e.file)
        c.date = e.date # changeset date is date of latest commit in it

    # Mark synthetic changesets

    for c in changesets:
        # Synthetic revisions always get their own changeset, because
        # the log message includes the filename. E.g. if you add file3
        # and file4 on a branch, you get four log entries and three
        # changesets:
        # "File file3 was added on branch ..." (synthetic, 1 entry)
        # "File file4 was added on branch ..." (synthetic, 1 entry)
        # "Add file3 and file4 to fix ..." (real, 2 entries)
        # Hence the check for 1 entry here.
        c.synthetic = len(c.entries) == 1 and c.entries[0].synthetic

    # Sort files in each changeset

    def entitycompare(l, r):
        'Mimic cvsps sorting order'
        # Compare path component by component; a shorter path that is a
        # prefix of the other sorts first.
        l = l.file.split('/')
        r = r.file.split('/')
        nl = len(l)
        nr = len(r)
        n = min(nl, nr)
        for i in range(n):
            if i + 1 == nl and nl < nr:
                return -1
            elif i + 1 == nr and nl > nr:
                return +1
            elif l[i] < r[i]:
                return -1
            elif l[i] > r[i]:
                return +1
        return 0

    for c in changesets:
        c.entries.sort(entitycompare)

    # Sort changesets by date

    def cscmp(l, r):
        # Primary key: commit time (time + tz offset).
        d = sum(l.date) - sum(r.date)
        if d:
            return d

        # detect vendor branches and initial commits on a branch
        le = {}
        for e in l.entries:
            le[e.rcs] = e.revision
        # NOTE: 're' here shadows the module-level regex module within
        # this comparison function.
        re = {}
        for e in r.entries:
            re[e.rcs] = e.revision

        d = 0
        # If r contains the parent revision of an entry in l, then l
        # must come after r (and vice versa below).
        for e in l.entries:
            if re.get(e.rcs, None) == e.parent:
                assert not d
                d = 1
                break

        for e in r.entries:
            if le.get(e.rcs, None) == e.parent:
                assert not d
                d = -1
                break

        return d

    changesets.sort(cscmp)

    # Collect tags

    globaltags = {}
    for c in changesets:
        for e in c.entries:
            for tag in e.tags:
                # remember which is the latest changeset to have this tag
                globaltags[tag] = c

    for c in changesets:
        tags = set()
        for e in c.entries:
            tags.update(e.tags)
        # remember tags only if this is the latest changeset to have it
        c.tags = sorted(tag for tag in tags if globaltags[tag] is c)

    # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
    # by inserting dummy changesets with two parents, and handle
    # {{mergefrombranch BRANCHNAME}} by setting two parents.

    if mergeto is None:
        mergeto = r'{{mergetobranch ([-\w]+)}}'
    if mergeto:
        mergeto = re.compile(mergeto)

    if mergefrom is None:
        mergefrom = r'{{mergefrombranch ([-\w]+)}}'
    if mergefrom:
        mergefrom = re.compile(mergefrom)

    # NOTE(review): 'versions' is filled below but never read inside
    # this function — possibly vestigial; confirm before removing.
    versions = {} # changeset index where we saw any particular file version
    branches = {} # changeset index where we saw a branch
    n = len(changesets)
    i = 0
    while i < n:
        c = changesets[i]

        for f in c.entries:
            versions[(f.rcs, f.revision)] = i

        p = None
        if c.branch in branches:
            p = branches[c.branch]
        else:
            # first changeset on a new branch
            # the parent is a changeset with the branch in its
            # branchpoints such that it is the latest possible
            # commit without any intervening, unrelated commits.

            for candidate in xrange(i):
                if c.branch not in changesets[candidate].branchpoints:
                    if p is not None:
                        break
                    continue
                p = candidate

        c.parents = []
        if p is not None:
            p = changesets[p]

            # Ensure no changeset has a synthetic changeset as a parent.
            while p.synthetic:
                assert len(p.parents) <= 1, \
                       _('synthetic changeset cannot have multiple parents')
                if p.parents:
                    p = p.parents[0]
                else:
                    p = None
                    break

            if p is not None:
                c.parents.append(p)

        if c.mergepoint:
            # A 'HEAD' mergepoint is normalized to None, which is the
            # key used for the trunk entry in 'branches' (presumably —
            # confirm against how c.branch is set by the log parser).
            if c.mergepoint == 'HEAD':
                c.mergepoint = None
            c.parents.append(changesets[branches[c.mergepoint]])

        if mergefrom:
            m = mergefrom.search(c.comment)
            if m:
                m = m.group(1)
                if m == 'HEAD':
                    m = None
                try:
                    candidate = changesets[branches[m]]
                except KeyError:
                    # On KeyError 'm in branches' below is False, so the
                    # possibly-unbound 'candidate' is never evaluated.
                    ui.warn(_("warning: CVS commit message references "
                              "non-existent branch %r:\n%s\n")
                            % (m, c.comment))
                if m in branches and c.branch != m and not candidate.synthetic:
                    c.parents.append(candidate)

        if mergeto:
            m = mergeto.search(c.comment)
            if m:
                if m.groups():
                    m = m.group(1)
                    if m == 'HEAD':
                        m = None
                else:
                    m = None # if no group found then merge to HEAD
                if m in branches and c.branch != m:
                    # insert empty changeset for merge
                    cc = changeset(
                        author=c.author, branch=m, date=c.date,
                        comment='convert-repo: CVS merge from branch %s'
                                % c.branch,
                        entries=[], tags=[],
                        parents=[changesets[branches[m]], c])
                    changesets.insert(i + 1, cc)
                    branches[m] = i + 1

                    # adjust our loop counters now we have inserted a new entry
                    n += 1
                    i += 2
                    continue

        branches[c.branch] = i
        i += 1

    # Drop synthetic changesets (safe now that we have ensured no other
    # changesets can have them as parents).
    i = 0
    while i < len(changesets):
        if changesets[i].synthetic:
            del changesets[i]
        else:
            i += 1

    # Number changesets

    for i, c in enumerate(changesets):
        c.id = i + 1

    ui.status(_('%d changeset entries\n') % len(changesets))

    hook.hook(ui, None, "cvschangesets", True, changesets=changesets)

    return changesets
771
771
772
772
def debugcvsps(ui, *args, **opts):
    '''Read CVS rlog for current directory or named path in
    repository, and convert the log to changesets based on matching
    commit log entries and dates.

    args are optional repository paths passed to createlog; opts is
    the command option dict ("new_cache", "update_cache", "revisions",
    "root", "fuzz", "ancestors", "branches", "parents", "prefix").
    Output deliberately mimics the external cvsps tool, including its
    trailing whitespace, for drop-in compatibility.
    '''
    # Translate the cache-related flags into the createlog cache mode.
    if opts["new_cache"]:
        cache = "write"
    elif opts["update_cache"]:
        cache = "update"
    else:
        cache = None

    revisions = opts["revisions"]

    try:
        if args:
            # Concatenate the logs of every named path.
            log = []
            for d in args:
                log += createlog(ui, d, root=opts["root"], cache=cache)
        else:
            log = createlog(ui, root=opts["root"], cache=cache)
    except logerror, e:
        ui.write("%r\n"%e)
        return

    changesets = createchangeset(ui, log, opts["fuzz"])
    del log

    # Print changesets (optionally filtered)

    # 'off' is true until the start revision/tag (if any) is reached.
    off = len(revisions)
    branches = {} # latest version number in each branch
    ancestors = {} # parent branch
    for cs in changesets:

        if opts["ancestors"]:
            if cs.branch not in branches and cs.parents and cs.parents[0].id:
                ancestors[cs.branch] = (changesets[cs.parents[0].id - 1].branch,
                                        cs.parents[0].id)
            branches[cs.branch] = cs.id

        # limit by branches
        if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
            continue

        if not off:
            # Note: trailing spaces on several lines here are needed to have
            # bug-for-bug compatibility with cvsps.
            ui.write('---------------------\n')
            ui.write(('PatchSet %d \n' % cs.id))
            ui.write(('Date: %s\n' % util.datestr(cs.date,
                                                  '%Y/%m/%d %H:%M:%S %1%2')))
            ui.write(('Author: %s\n' % cs.author))
            ui.write(('Branch: %s\n' % (cs.branch or 'HEAD')))
            ui.write(('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
                                        ','.join(cs.tags) or '(none)')))
            if cs.branchpoints:
                ui.write(('Branchpoints: %s \n') % ', '.join(cs.branchpoints))
            if opts["parents"] and cs.parents:
                if len(cs.parents) > 1:
                    ui.write(('Parents: %s\n' %
                              (','.join([str(p.id) for p in cs.parents]))))
                else:
                    ui.write(('Parent: %d\n' % cs.parents[0].id))

            if opts["ancestors"]:
                # Walk up the parent-branch chain recorded above.
                b = cs.branch
                r = []
                while b:
                    b, c = ancestors[b]
                    r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
                if r:
                    ui.write(('Ancestors: %s\n' % (','.join(r))))

            ui.write(('Log:\n'))
            ui.write('%s\n\n' % cs.comment)
            ui.write(('Members: \n'))
            for f in cs.entries:
                fn = f.file
                # Strip the user-supplied path prefix from file names.
                if fn.startswith(opts["prefix"]):
                    fn = fn[len(opts["prefix"]):]
                ui.write('\t%s:%s->%s%s \n' % (
                        fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
                        '.'.join([str(x) for x in f.revision]),
                        ['', '(DEAD)'][f.dead]))
            ui.write('\n')

        # have we seen the start tag?
        if revisions and off:
            if revisions[0] == str(cs.id) or \
                revisions[0] in cs.tags:
                off = False

        # see if we reached the end tag
        if len(revisions) > 1 and not off:
            if revisions[1] == str(cs.id) or \
                revisions[1] in cs.tags:
                break
General Comments 0
You need to be logged in to leave comments. Login now