##// END OF EJS Templates
convert: don't use mutable default argument value...
Pierre-Yves David -
r31408:6e3c79bc default
parent child Browse files
Show More
@@ -1,921 +1,921 b''
1 # Mercurial built-in replacement for cvsps.
1 # Mercurial built-in replacement for cvsps.
2 #
2 #
3 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
3 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from __future__ import absolute_import
7 from __future__ import absolute_import
8
8
9 import os
9 import os
10 import re
10 import re
11
11
12 from mercurial.i18n import _
12 from mercurial.i18n import _
13 from mercurial import (
13 from mercurial import (
14 encoding,
14 encoding,
15 hook,
15 hook,
16 pycompat,
16 pycompat,
17 util,
17 util,
18 )
18 )
19
19
20 pickle = util.pickle
20 pickle = util.pickle
21
21
class logentry(object):
    '''A single file revision parsed from CVS (r)log output.

    Attributes (assigned via keyword arguments and filled in during
    parsing):
    .author - author name as CVS knows it
    .branch - name of branch this revision is on
    .branches - revision tuple of branches starting at this revision
    .comment - commit message
    .commitid - CVS commitid or None
    .date - the commit date as a (time, tz) tuple
    .dead - true if file revision is dead
    .file - Name of file
    .lines - a tuple (+lines, -lines) or None
    .parent - Previous revision of this entry
    .rcs - name of file as returned from CVS
    .revision - revision number as tuple
    .tags - list of tags on the file
    .synthetic - is this a synthetic "file ... added on ..." revision?
    .mergepoint - the branch that has been merged from (if present in
                  rlog output) or None
    .branchpoints - the branches that start at the current entry or empty
    '''
    def __init__(self, **entries):
        # Not every rlog entry carries the synthetic marker, so default it.
        self.synthetic = False
        self.__dict__.update(entries)

    def __repr__(self):
        # Deterministic ordering of attributes makes reprs comparable.
        fields = ", ".join("%s=%r" % (name, self.__dict__[name])
                           for name in sorted(self.__dict__))
        return "%s(%s)" % (type(self).__name__, fields)
49
49
class logerror(Exception):
    '''Raised when CVS (r)log output cannot be obtained or parsed.'''
    pass
52
52
def getrepopath(cvspath):
    """Return the repository path from a CVS path.

    >>> getrepopath('/foo/bar')
    '/foo/bar'
    >>> getrepopath('c:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:10/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:10c:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:c:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
    '/foo/bar'
    >>> getrepopath('user@server/path/to/repository')
    '/path/to/repository'
    """
    # According to the CVS manual, CVS paths are expressed like:
    #   [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
    #
    # We only need the last ':'-separated component; within it, the
    # repository path is everything from the first '/' that follows the
    # '@' sign (or from the first '/' at all, when there is no '@').
    lastpart = cvspath.split(':')[-1]
    atidx = lastpart.find('@')
    searchfrom = atidx if atidx != -1 else 0
    return lastpart[lastpart.find('/', searchfrom):]
91
91
def createlog(ui, directory=None, root="", rlog=True, cache=None):
    '''Collect the CVS rlog.

    ui        - Mercurial ui object used for status/note/debug output
    directory - directory to log; None means "use the current CVS sandbox"
                (reads CVS/Repository and CVS/Root in the cwd)
    root      - CVSROOT to use; if empty, falls back to the sandbox Root
                file or the CVSROOT environment variable
    rlog      - if true, run 'cvs rlog', otherwise 'cvs log'
    cache     - falsy for no caching, or 'update' to read and extend the
                pickle cache under ~/.hg.cvsps

    Parses the (r)log output with a small state machine, returns a list
    of logentry objects, and fires the "cvslog" hook with that list.
    '''

    # Because we store many duplicate commit log messages, reusing strings
    # saves a lot of memory and pickle storage space.
    _scache = {}
    def scache(s):
        "return a shared version of a string"
        return _scache.setdefault(s, s)

    ui.status(_('collecting CVS rlog\n'))

    log = []      # list of logentry objects containing the CVS state

    # patterns to match in CVS (r)log output, by state of use
    re_00 = re.compile('RCS file: (.+)$')
    re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
    re_02 = re.compile('cvs (r?log|server): (.+)\n$')
    re_03 = re.compile("(Cannot access.+CVSROOT)|"
                       "(can't create temporary directory.+)$")
    re_10 = re.compile('Working file: (.+)$')
    re_20 = re.compile('symbolic names:')
    re_30 = re.compile('\t(.+): ([\\d.]+)$')
    re_31 = re.compile('----------------------------$')
    re_32 = re.compile('======================================='
                       '======================================$')
    re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
    re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
                       r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
                       r'(\s+commitid:\s+([^;]+);)?'
                       r'(.*mergepoint:\s+([^;]+);)?')
    re_70 = re.compile('branches: (.+);$')

    file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')

    prefix = ''   # leading path to strip of what we get from CVS

    if directory is None:
        # Current working directory

        # Get the real directory in the repository
        # NOTE(review): file handle is not explicitly closed; relies on
        # refcounting/GC to release it — confirm acceptable here.
        try:
            prefix = open(os.path.join('CVS','Repository')).read().strip()
            directory = prefix
            if prefix == ".":
                prefix = ""
        except IOError:
            raise logerror(_('not a CVS sandbox'))

        if prefix and not prefix.endswith(pycompat.ossep):
            prefix += pycompat.ossep

        # Use the Root file in the sandbox, if it exists
        try:
            root = open(os.path.join('CVS','Root')).read().strip()
        except IOError:
            pass

    if not root:
        root = encoding.environ.get('CVSROOT', '')

    # read log cache if one exists
    oldlog = []
    date = None

    if cache:
        cachedir = os.path.expanduser('~/.hg.cvsps')
        if not os.path.exists(cachedir):
            os.mkdir(cachedir)

        # The cvsps cache pickle needs a uniquified name, based on the
        # repository location. The address may have all sort of nasties
        # in it, slashes, colons and such. So here we take just the
        # alphanumeric characters, concatenated in a way that does not
        # mix up the various components, so that
        #    :pserver:user@server:/path
        # and
        #    /pserver/user/server/path
        # are mapped to different cache file names.
        cachefile = root.split(":") + [directory, "cache"]
        cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
        cachefile = os.path.join(cachedir,
                                 '.'.join([s for s in cachefile if s]))

        if cache == 'update':
            try:
                ui.note(_('reading cvs log cache %s\n') % cachefile)
                oldlog = pickle.load(open(cachefile))
                # Discard caches written by older versions that lack the
                # attributes current code expects on every entry.
                for e in oldlog:
                    if not (util.safehasattr(e, 'branchpoints') and
                            util.safehasattr(e, 'commitid') and
                            util.safehasattr(e, 'mergepoint')):
                        ui.status(_('ignoring old cache\n'))
                        oldlog = []
                        break

                ui.note(_('cache has %d log entries\n') % len(oldlog))
            except Exception as e:
                # Best-effort: a broken/missing cache just means a full
                # rlog run, so only note the error.
                ui.note(_('error reading cache: %r\n') % e)

        if oldlog:
            date = oldlog[-1].date    # last commit date as a (time,tz) tuple
            date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')

    # build the CVS commandline
    cmd = ['cvs', '-q']
    if root:
        cmd.append('-d%s' % root)
        p = util.normpath(getrepopath(root))
        if not p.endswith('/'):
            p += '/'
        if prefix:
            # looks like normpath replaces "" by "."
            prefix = p + util.normpath(prefix)
        else:
            prefix = p
    cmd.append(['log', 'rlog'][rlog])
    if date:
        # no space between option and date string
        cmd.append('-d>%s' % date)
    cmd.append(directory)

    # state machine begins here; states are:
    #   0: consume until 'RCS file'        5: expect revision number
    #   1: expect 'Working file' (log)     6: expect date/author/state
    #   2: expect 'symbolic names'         7: branches or comment start
    #   3: read symbolic names as tags     8: read commit log message
    #   4: expect '------' separator
    tags = {}     # dictionary of revisions on current file with their tags
    branchmap = {} # mapping between branch names and revision numbers
    rcsmap = {}
    state = 0
    store = False # set when a new record can be appended

    cmd = [util.shellquote(arg) for arg in cmd]
    ui.note(_("running %s\n") % (' '.join(cmd)))
    ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root))

    pfp = util.popen(' '.join(cmd))
    # One line of lookahead (peek) is needed to disambiguate the '-----'
    # separator in states 7 and 8.
    peek = pfp.readline()
    while True:
        line = peek
        if line == '':
            break
        peek = pfp.readline()
        if line.endswith('\n'):
            line = line[:-1]
        #ui.debug('state=%d line=%r\n' % (state, line))

        if state == 0:
            # initial state, consume input until we see 'RCS file'
            match = re_00.match(line)
            if match:
                rcs = match.group(1)
                tags = {}
                if rlog:
                    # rlog prints repository paths: strip the ',v' suffix,
                    # the repository prefix, and any 'Attic' component.
                    filename = util.normpath(rcs[:-2])
                    if filename.startswith(prefix):
                        filename = filename[len(prefix):]
                    if filename.startswith('/'):
                        filename = filename[1:]
                    if filename.startswith('Attic/'):
                        filename = filename[6:]
                    else:
                        filename = filename.replace('/Attic/', '/')
                    state = 2
                    continue
                state = 1
                continue
            match = re_01.match(line)
            if match:
                raise logerror(match.group(1))
            match = re_02.match(line)
            if match:
                raise logerror(match.group(2))
            if re_03.match(line):
                raise logerror(line)

        elif state == 1:
            # expect 'Working file' (only when using log instead of rlog)
            match = re_10.match(line)
            assert match, _('RCS file must be followed by working file')
            filename = util.normpath(match.group(1))
            state = 2

        elif state == 2:
            # expect 'symbolic names'
            if re_20.match(line):
                branchmap = {}
                state = 3

        elif state == 3:
            # read the symbolic names and store as tags
            match = re_30.match(line)
            if match:
                rev = [int(x) for x in match.group(2).split('.')]

                # Convert magic branch number to an odd-numbered one
                revn = len(rev)
                if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
                    rev = rev[:-2] + rev[-1:]
                rev = tuple(rev)

                if rev not in tags:
                    tags[rev] = []
                tags[rev].append(match.group(1))
                branchmap[match.group(1)] = match.group(2)

            elif re_31.match(line):
                state = 5
            elif re_32.match(line):
                state = 0

        elif state == 4:
            # expecting '------' separator before first revision
            if re_31.match(line):
                state = 5
            else:
                assert not re_32.match(line), _('must have at least '
                                                'some revisions')

        elif state == 5:
            # expecting revision number and possibly (ignored) lock indication
            # we create the logentry here from values stored in states 0 to 4,
            # as this state is re-entered for subsequent revisions of a file.
            match = re_50.match(line)
            assert match, _('expected revision number')
            e = logentry(rcs=scache(rcs),
                         file=scache(filename),
                         revision=tuple([int(x) for x in
                                         match.group(1).split('.')]),
                         branches=[],
                         parent=None,
                         commitid=None,
                         mergepoint=None,
                         branchpoints=set())

            state = 6

        elif state == 6:
            # expecting date, author, state, lines changed
            match = re_60.match(line)
            assert match, _('revision must be followed by date line')
            d = match.group(1)
            if d[2] == '/':
                # Y2K
                d = '19' + d

            if len(d.split()) != 3:
                # cvs log dates always in GMT
                d = d + ' UTC'
            e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
                                        '%Y/%m/%d %H:%M:%S',
                                        '%Y-%m-%d %H:%M:%S'])
            e.author = scache(match.group(2))
            e.dead = match.group(3).lower() == 'dead'

            # groups 5/6 are the optional '+n'/'-m' line counts
            if match.group(5):
                if match.group(6):
                    e.lines = (int(match.group(5)), int(match.group(6)))
                else:
                    e.lines = (int(match.group(5)), 0)
            elif match.group(6):
                e.lines = (0, int(match.group(6)))
            else:
                e.lines = None

            if match.group(7): # cvs 1.12 commitid
                e.commitid = match.group(8)

            if match.group(9): # cvsnt mergepoint
                myrev = match.group(10).split('.')
                if len(myrev) == 2: # head
                    e.mergepoint = 'HEAD'
                else:
                    # convert to a magic branch number and look up its name
                    myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
                    branches = [b for b in branchmap if branchmap[b] == myrev]
                    assert len(branches) == 1, ('unknown branch: %s'
                                                % e.mergepoint)
                    e.mergepoint = branches[0]

            e.comment = []
            state = 7

        elif state == 7:
            # read the revision numbers of branches that start at this revision
            # or store the commit log message otherwise
            m = re_70.match(line)
            if m:
                e.branches = [tuple([int(y) for y in x.strip().split('.')])
                              for x in m.group(1).split(';')]
                state = 8
            elif re_31.match(line) and re_50.match(peek):
                # '-----' followed by a revision line ends this entry
                state = 5
                store = True
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        elif state == 8:
            # store commit log message
            if re_31.match(line):
                # a '-----' line is only a separator if the next line is a
                # revision line; otherwise it is part of the message
                cpeek = peek
                if cpeek.endswith('\n'):
                    cpeek = cpeek[:-1]
                if re_50.match(cpeek):
                    state = 5
                    store = True
                else:
                    e.comment.append(line)
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        # When a file is added on a branch B1, CVS creates a synthetic
        # dead trunk revision 1.1 so that the branch has a root.
        # Likewise, if you merge such a file to a later branch B2 (one
        # that already existed when the file was added on B1), CVS
        # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
        # these revisions now, but mark them synthetic so
        # createchangeset() can take care of them.
        if (store and
              e.dead and
              e.revision[-1] == 1 and      # 1.1 or 1.1.x.1
              len(e.comment) == 1 and
              file_added_re.match(e.comment[0])):
            ui.debug('found synthetic revision in %s: %r\n'
                     % (e.rcs, e.comment[0]))
            e.synthetic = True

        if store:
            # clean up the results and save in the log.
            store = False
            e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
            e.comment = scache('\n'.join(e.comment))

            # even-length revisions with > 3 parts are branch revisions;
            # their branch name is the tag on the enclosing branch number
            revn = len(e.revision)
            if revn > 3 and (revn % 2) == 0:
                e.branch = tags.get(e.revision[:-1], [None])[0]
            else:
                e.branch = None

            # find the branches starting from this revision
            branchpoints = set()
            for branch, revision in branchmap.iteritems():
                revparts = tuple([int(i) for i in revision.split('.')])
                if len(revparts) < 2: # bad tags
                    continue
                if revparts[-2] == 0 and revparts[-1] % 2 == 0:
                    # normal branch
                    if revparts[:-2] == e.revision:
                        branchpoints.add(branch)
                elif revparts == (1, 1, 1): # vendor branch
                    if revparts in e.branches:
                        branchpoints.add(branch)
            e.branchpoints = branchpoints

            log.append(e)

            # remember the non-Attic name so cached entries for files that
            # later moved to/from the Attic can be matched up below
            rcsmap[e.rcs.replace('/Attic/', '/')] = e.rcs

            if len(log) % 100 == 0:
                ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')

    log.sort(key=lambda x: (x.rcs, x.revision))

    # find parent revisions of individual files
    versions = {}
    for e in sorted(oldlog, key=lambda x: (x.rcs, x.revision)):
        rcs = e.rcs.replace('/Attic/', '/')
        if rcs in rcsmap:
            e.rcs = rcsmap[rcs]
        branch = e.revision[:-1]
        versions[(e.rcs, branch)] = e.revision

    for e in log:
        branch = e.revision[:-1]
        p = versions.get((e.rcs, branch), None)
        if p is None:
            # first revision on this branch: parent is the branchpoint
            p = e.revision[:-2]
        e.parent = p
        versions[(e.rcs, branch)] = e.revision

    # update the log cache
    if cache:
        if log:
            # join up the old and new logs
            log.sort(key=lambda x: x.date)

            if oldlog and oldlog[-1].date >= log[0].date:
                raise logerror(_('log cache overlaps with new log entries,'
                                 ' re-run without cache.'))

            log = oldlog + log

            # write the new cachefile
            # NOTE(review): opened in text mode ('w') and not explicitly
            # closed; presumably protocol-0 pickle on py2 — confirm.
            ui.note(_('writing cvs log cache %s\n') % cachefile)
            pickle.dump(log, open(cachefile, 'w'))
        else:
            log = oldlog

    ui.status(_('%d log entries\n') % len(log))

    hook.hook(ui, None, "cvslog", True, log=log)

    return log
497
497
498
498
class changeset(object):
    '''A group of logentry objects making up one converted changeset.

    Attributes (assigned via keyword arguments and during grouping):
    .id - integer identifying this changeset (list index)
    .author - author name as CVS knows it
    .branch - name of branch this changeset is on, or None
    .comment - commit message
    .commitid - CVS commitid or None
    .date - the commit date as a (time,tz) tuple
    .entries - list of logentry objects in this changeset
    .parents - list of one or two parent changesets
    .tags - list of tags on this changeset
    .synthetic - from synthetic revision "file ... added on branch ..."
    .mergepoint- the branch that has been merged from or None
    .branchpoints- the branches that start at the current entry or empty
    '''
    def __init__(self, **entries):
        # id is assigned later, once the changeset's final position in
        # the converted list is known.
        self.id = None
        self.synthetic = False
        self.__dict__.update(entries)

    def __repr__(self):
        # Deterministic ordering of attributes makes reprs comparable.
        fields = ", ".join("%s=%r" % (name, self.__dict__[name])
                           for name in sorted(self.__dict__))
        return "%s(%s)" % (type(self).__name__, fields)
522
522
def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
    '''Convert log into changesets.

    log is a list of logentry objects (one per CVS file revision).
    Entries are grouped into changeset objects, matched by CVS commitid
    when available, otherwise fuzzily by identical comment/author/branch
    within 'fuzz' seconds.  mergefrom/mergeto are regex patterns (None
    selects the default {{merge...branch NAME}} markers) used to detect
    merge annotations in commit messages.  Returns the list of numbered
    changesets in commit order.
    '''

    ui.status(_('creating changesets\n'))

    # try to order commitids by date
    mindate = {}
    for e in log:
        if e.commitid:
            # Default to e.date for the first occurrence of a commitid:
            # min() against a None default would always yield None under
            # Python 2 (None sorts before everything), losing the date.
            mindate[e.commitid] = min(e.date, mindate.get(e.commitid, e.date))

    # Merge changesets
    log.sort(key=lambda x: (mindate.get(x.commitid), x.commitid, x.comment,
                            x.author, x.branch, x.date, x.branchpoints))

    changesets = []
    files = set()
    c = None
    for i, e in enumerate(log):

        # Check if log entry belongs to the current changeset or not.

        # Since CVS is file-centric, two different file revisions with
        # different branchpoints should be treated as belonging to two
        # different changesets (and the ordering is important and not
        # honoured by cvsps at this point).
        #
        # Consider the following case:
        # foo 1.1 branchpoints: [MYBRANCH]
        # bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2]
        #
        # Here foo is part only of MYBRANCH, but not MYBRANCH2, e.g. a
        # later version of foo may be in MYBRANCH2, so foo should be the
        # first changeset and bar the next and MYBRANCH and MYBRANCH2
        # should both start off of the bar changeset. No provisions are
        # made to ensure that this is, in fact, what happens.
        if not (c and e.branchpoints == c.branchpoints and
                (# cvs commitids
                 (e.commitid is not None and e.commitid == c.commitid) or
                 (# no commitids, use fuzzy commit detection
                  (e.commitid is None or c.commitid is None) and
                  e.comment == c.comment and
                  e.author == c.author and
                  e.branch == c.branch and
                  ((c.date[0] + c.date[1]) <=
                   (e.date[0] + e.date[1]) <=
                   (c.date[0] + c.date[1]) + fuzz) and
                  e.file not in files))):
            c = changeset(comment=e.comment, author=e.author,
                          branch=e.branch, date=e.date,
                          entries=[], mergepoint=e.mergepoint,
                          branchpoints=e.branchpoints, commitid=e.commitid)
            changesets.append(c)

            files = set()
            if len(changesets) % 100 == 0:
                # periodic progress output
                t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
                ui.status(util.ellipsis(t, 80) + '\n')

        c.entries.append(e)
        files.add(e.file)
        c.date = e.date # changeset date is date of latest commit in it

    # Mark synthetic changesets

    for c in changesets:
        # Synthetic revisions always get their own changeset, because
        # the log message includes the filename. E.g. if you add file3
        # and file4 on a branch, you get four log entries and three
        # changesets:
        #   "File file3 was added on branch ..." (synthetic, 1 entry)
        #   "File file4 was added on branch ..." (synthetic, 1 entry)
        #   "Add file3 and file4 to fix ..."     (real, 2 entries)
        # Hence the check for 1 entry here.
        c.synthetic = len(c.entries) == 1 and c.entries[0].synthetic

    # Sort files in each changeset

    def entitycompare(l, r):
        'Mimic cvsps sorting order'
        l = l.file.split('/')
        r = r.file.split('/')
        nl = len(l)
        nr = len(r)
        n = min(nl, nr)
        for i in range(n):
            # shorter paths (files closer to the root) sort first
            if i + 1 == nl and nl < nr:
                return -1
            elif i + 1 == nr and nl > nr:
                return +1
            elif l[i] < r[i]:
                return -1
            elif l[i] > r[i]:
                return +1
        return 0

    for c in changesets:
        # Python 2 cmp-style sort
        c.entries.sort(entitycompare)

    # Sort changesets by date

    odd = set()
    def cscmp(l, r):
        # Primary key: sum of (time, tz), i.e. local commit time.
        d = sum(l.date) - sum(r.date)
        if d:
            return d

        # detect vendor branches and initial commits on a branch
        lrevs = {}
        for e in l.entries:
            lrevs[e.rcs] = e.revision
        rrevs = {}
        for e in r.entries:
            rrevs[e.rcs] = e.revision

        d = 0
        for e in l.entries:
            if rrevs.get(e.rcs, None) == e.parent:
                assert not d
                d = 1
                break

        for e in r.entries:
            if lrevs.get(e.rcs, None) == e.parent:
                if d:
                    # contradictory ordering: record it (reported by the
                    # caller once ids are assigned) and prefer r first
                    odd.add((l, r))
                d = -1
                break
        # By this point, the changesets are sufficiently compared that
        # we don't really care about ordering. However, this leaves
        # some race conditions in the tests, so we compare on the
        # number of files modified, the files contained in each
        # changeset, and the branchpoints in the change to ensure test
        # output remains stable.

        # recommended replacement for cmp from
        # https://docs.python.org/3.0/whatsnew/3.0.html
        c = lambda x, y: (x > y) - (x < y)
        # Sort bigger changes first.
        if not d:
            d = c(len(l.entries), len(r.entries))
        # Try sorting by filename in the change.
        if not d:
            d = c([e.file for e in l.entries], [e.file for e in r.entries])
        # Try and put changes without a branch point before ones with
        # a branch point.
        if not d:
            d = c(len(l.branchpoints), len(r.branchpoints))
        return d

    # Python 2 cmp-style sort
    changesets.sort(cscmp)

    # Collect tags

    globaltags = {}
    for c in changesets:
        for e in c.entries:
            for tag in e.tags:
                # remember which is the latest changeset to have this tag
                globaltags[tag] = c

    for c in changesets:
        tags = set()
        for e in c.entries:
            tags.update(e.tags)
        # remember tags only if this is the latest changeset to have it
        c.tags = sorted(tag for tag in tags if globaltags[tag] is c)

    # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
    # by inserting dummy changesets with two parents, and handle
    # {{mergefrombranch BRANCHNAME}} by setting two parents.

    if mergeto is None:
        mergeto = r'{{mergetobranch ([-\w]+)}}'
    if mergeto:
        mergeto = re.compile(mergeto)

    if mergefrom is None:
        mergefrom = r'{{mergefrombranch ([-\w]+)}}'
    if mergefrom:
        mergefrom = re.compile(mergefrom)

    versions = {} # changeset index where we saw any particular file version
    branches = {} # changeset index where we saw a branch
    n = len(changesets)
    i = 0
    while i < n:
        c = changesets[i]

        for f in c.entries:
            versions[(f.rcs, f.revision)] = i

        p = None
        if c.branch in branches:
            p = branches[c.branch]
        else:
            # first changeset on a new branch
            # the parent is a changeset with the branch in its
            # branchpoints such that it is the latest possible
            # commit without any intervening, unrelated commits.

            for candidate in xrange(i):
                if c.branch not in changesets[candidate].branchpoints:
                    if p is not None:
                        break
                    continue
                p = candidate

        c.parents = []
        if p is not None:
            p = changesets[p]

            # Ensure no changeset has a synthetic changeset as a parent.
            while p.synthetic:
                assert len(p.parents) <= 1, \
                       _('synthetic changeset cannot have multiple parents')
                if p.parents:
                    p = p.parents[0]
                else:
                    p = None
                    break

            if p is not None:
                c.parents.append(p)

        if c.mergepoint:
            # 'HEAD' maps to the trunk, which is keyed as None in branches
            if c.mergepoint == 'HEAD':
                c.mergepoint = None
            c.parents.append(changesets[branches[c.mergepoint]])

        if mergefrom:
            m = mergefrom.search(c.comment)
            if m:
                m = m.group(1)
                if m == 'HEAD':
                    m = None
                try:
                    candidate = changesets[branches[m]]
                except KeyError:
                    ui.warn(_("warning: CVS commit message references "
                              "non-existent branch %r:\n%s\n")
                            % (m, c.comment))
                # if the branch lookup failed, 'm in branches' is false and
                # the unbound 'candidate' is never evaluated
                if m in branches and c.branch != m and not candidate.synthetic:
                    c.parents.append(candidate)

        if mergeto:
            m = mergeto.search(c.comment)
            if m:
                if m.groups():
                    m = m.group(1)
                    if m == 'HEAD':
                        m = None
                else:
                    m = None # if no group found then merge to HEAD
                if m in branches and c.branch != m:
                    # insert empty changeset for merge
                    cc = changeset(
                        author=c.author, branch=m, date=c.date,
                        comment='convert-repo: CVS merge from branch %s'
                                % c.branch,
                        entries=[], tags=[],
                        parents=[changesets[branches[m]], c])
                    changesets.insert(i + 1, cc)
                    branches[m] = i + 1

                    # adjust our loop counters now we have inserted a new entry
                    n += 1
                    i += 2
                    continue

        branches[c.branch] = i
        i += 1

    # Drop synthetic changesets (safe now that we have ensured no other
    # changesets can have them as parents).
    i = 0
    while i < len(changesets):
        if changesets[i].synthetic:
            del changesets[i]
        else:
            i += 1

    # Number changesets

    for i, c in enumerate(changesets):
        c.id = i + 1

    if odd:
        # changesets cscmp found to be mutually ordered both ways
        for l, r in odd:
            if l.id is not None and r.id is not None:
                ui.warn(_('changeset %d is both before and after %d\n')
                        % (l.id, r.id))

    ui.status(_('%d changeset entries\n') % len(changesets))

    hook.hook(ui, None, "cvschangesets", True, changesets=changesets)

    return changesets
821
821
822
822
def debugcvsps(ui, *args, **opts):
    '''Read CVS rlog for current directory or named path in
    repository, and convert the log to changesets based on matching
    commit log entries and dates.
    '''
    # Choose the rlog cache mode: write a fresh cache, update an
    # existing one, or (default) run without a cache.
    if opts["new_cache"]:
        cache = "write"
    elif opts["update_cache"]:
        cache = "update"
    else:
        cache = None

    # Optional start/end revision filter (0, 1 or 2 entries).
    revisions = opts["revisions"]

    try:
        if args:
            # Concatenate the logs of every named path.
            log = []
            for d in args:
                log += createlog(ui, d, root=opts["root"], cache=cache)
        else:
            log = createlog(ui, root=opts["root"], cache=cache)
    except logerror as e:
        ui.write("%r\n"%e)
        return

    changesets = createchangeset(ui, log, opts["fuzz"])
    del log

    # Print changesets (optionally filtered)

    # 'off' is truthy while we are before the requested start revision.
    off = len(revisions)
    branches = {} # latest version number in each branch
    ancestors = {} # parent branch
    for cs in changesets:

        if opts["ancestors"]:
            # Record each branch's parent branch the first time the
            # branch is seen, and track the latest changeset per branch.
            if cs.branch not in branches and cs.parents and cs.parents[0].id:
                ancestors[cs.branch] = (changesets[cs.parents[0].id - 1].branch,
                                        cs.parents[0].id)
            branches[cs.branch] = cs.id

        # limit by branches
        if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
            continue

        if not off:
            # Note: trailing spaces on several lines here are needed to have
            # bug-for-bug compatibility with cvsps.
            ui.write('---------------------\n')
            ui.write(('PatchSet %d \n' % cs.id))
            ui.write(('Date: %s\n' % util.datestr(cs.date,
                                                  '%Y/%m/%d %H:%M:%S %1%2')))
            ui.write(('Author: %s\n' % cs.author))
            ui.write(('Branch: %s\n' % (cs.branch or 'HEAD')))
            # "Tag:" vs "Tags:" depending on the count.
            ui.write(('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
                                  ','.join(cs.tags) or '(none)')))
            if cs.branchpoints:
                ui.write(('Branchpoints: %s \n') %
                         ', '.join(sorted(cs.branchpoints)))
            if opts["parents"] and cs.parents:
                if len(cs.parents) > 1:
                    ui.write(('Parents: %s\n' %
                             (','.join([str(p.id) for p in cs.parents]))))
                else:
                    ui.write(('Parent: %d\n' % cs.parents[0].id))

            if opts["ancestors"]:
                # Walk up the branch ancestry collected above and print
                # the chain of branch:changeset:latest triples.
                b = cs.branch
                r = []
                while b:
                    b, c = ancestors[b]
                    r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
                if r:
                    ui.write(('Ancestors: %s\n' % (','.join(r))))

            ui.write(('Log:\n'))
            ui.write('%s\n\n' % cs.comment)
            ui.write(('Members: \n'))
            for f in cs.entries:
                fn = f.file
                # Strip the configured module prefix from file names.
                if fn.startswith(opts["prefix"]):
                    fn = fn[len(opts["prefix"]):]
                ui.write('\t%s:%s->%s%s \n' % (
                        fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
                        '.'.join([str(x) for x in f.revision]),
                        ['', '(DEAD)'][f.dead]))
            ui.write('\n')

        # have we seen the start tag?
        if revisions and off:
            if revisions[0] == str(cs.id) or \
                revisions[0] in cs.tags:
                off = False

        # see if we reached the end tag
        if len(revisions) > 1 and not off:
            if revisions[1] == str(cs.id) or \
                revisions[1] in cs.tags:
                break
General Comments 0
You need to be logged in to leave comments. Login now