##// END OF EJS Templates
cvsps: add two more tiebreakers in cscmp...
Augie Fackler -
r22267:90cf454e default
parent child Browse files
Show More
@@ -1,885 +1,897 b''
1 # Mercurial built-in replacement for cvsps.
1 # Mercurial built-in replacement for cvsps.
2 #
2 #
3 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
3 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import os
8 import os
9 import re
9 import re
10 import cPickle as pickle
10 import cPickle as pickle
11 from mercurial.i18n import _
11 from mercurial.i18n import _
12 from mercurial import hook
12 from mercurial import hook
13 from mercurial import util
13 from mercurial import util
14
14
class logentry(object):
    '''One CVS file revision as parsed from "cvs (r)log" output.

    Class logentry has the following attributes:
        .author    - author name as CVS knows it
        .branch    - name of branch this revision is on
        .branches  - revision tuple of branches starting at this revision
        .comment   - commit message
        .commitid  - CVS commitid or None
        .date      - the commit date as a (time, tz) tuple
        .dead      - true if file revision is dead
        .file      - Name of file
        .lines     - a tuple (+lines, -lines) or None
        .parent    - Previous revision of this entry
        .rcs       - name of file as returned from CVS
        .revision  - revision number as tuple
        .tags      - list of tags on the file
        .synthetic - is this a synthetic "file ... added on ..." revision?
        .mergepoint - the branch that has been merged from (if present in
                      rlog output) or None
        .branchpoints - the branches that start at the current entry or empty
    '''
    def __init__(self, **entries):
        # default to a real revision; createlog() flips this for the
        # synthetic dead revisions CVS fabricates for branch roots
        self.synthetic = False
        self.__dict__.update(entries)

    def __repr__(self):
        # deterministic repr: attributes in sorted order
        items = ("%s=%r"%(k, self.__dict__[k]) for k in sorted(self.__dict__))
        return "%s(%s)"%(type(self).__name__, ", ".join(items))
42
42
class logerror(Exception):
    # raised for fatal problems while collecting or caching the CVS log
    pass
45
45
def getrepopath(cvspath):
    """Return the repository path from a CVS path.

    >>> getrepopath('/foo/bar')
    '/foo/bar'
    >>> getrepopath('c:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:10/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:10c:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:c:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
    '/foo/bar'
    >>> getrepopath('user@server/path/to/repository')
    '/path/to/repository'
    """
    # According to CVS manual, CVS paths are expressed like:
    # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
    #
    # CVSpath is splitted into parts and then position of the first occurrence
    # of the '/' char after the '@' is located. The solution is the rest of the
    # string after that '/' sign including it

    parts = cvspath.split(':')
    atposition = parts[-1].find('@')
    start = 0

    # skip past user@host so a '/' inside the user name cannot fool us
    if atposition != -1:
        start = atposition

    repopath = parts[-1][parts[-1].find('/', start):]
    return repopath
84
84
85 def createlog(ui, directory=None, root="", rlog=True, cache=None):
85 def createlog(ui, directory=None, root="", rlog=True, cache=None):
86 '''Collect the CVS rlog'''
86 '''Collect the CVS rlog'''
87
87
88 # Because we store many duplicate commit log messages, reusing strings
88 # Because we store many duplicate commit log messages, reusing strings
89 # saves a lot of memory and pickle storage space.
89 # saves a lot of memory and pickle storage space.
90 _scache = {}
90 _scache = {}
91 def scache(s):
91 def scache(s):
92 "return a shared version of a string"
92 "return a shared version of a string"
93 return _scache.setdefault(s, s)
93 return _scache.setdefault(s, s)
94
94
95 ui.status(_('collecting CVS rlog\n'))
95 ui.status(_('collecting CVS rlog\n'))
96
96
97 log = [] # list of logentry objects containing the CVS state
97 log = [] # list of logentry objects containing the CVS state
98
98
99 # patterns to match in CVS (r)log output, by state of use
99 # patterns to match in CVS (r)log output, by state of use
100 re_00 = re.compile('RCS file: (.+)$')
100 re_00 = re.compile('RCS file: (.+)$')
101 re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
101 re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
102 re_02 = re.compile('cvs (r?log|server): (.+)\n$')
102 re_02 = re.compile('cvs (r?log|server): (.+)\n$')
103 re_03 = re.compile("(Cannot access.+CVSROOT)|"
103 re_03 = re.compile("(Cannot access.+CVSROOT)|"
104 "(can't create temporary directory.+)$")
104 "(can't create temporary directory.+)$")
105 re_10 = re.compile('Working file: (.+)$')
105 re_10 = re.compile('Working file: (.+)$')
106 re_20 = re.compile('symbolic names:')
106 re_20 = re.compile('symbolic names:')
107 re_30 = re.compile('\t(.+): ([\\d.]+)$')
107 re_30 = re.compile('\t(.+): ([\\d.]+)$')
108 re_31 = re.compile('----------------------------$')
108 re_31 = re.compile('----------------------------$')
109 re_32 = re.compile('======================================='
109 re_32 = re.compile('======================================='
110 '======================================$')
110 '======================================$')
111 re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
111 re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
112 re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
112 re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
113 r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
113 r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
114 r'(\s+commitid:\s+([^;]+);)?'
114 r'(\s+commitid:\s+([^;]+);)?'
115 r'(.*mergepoint:\s+([^;]+);)?')
115 r'(.*mergepoint:\s+([^;]+);)?')
116 re_70 = re.compile('branches: (.+);$')
116 re_70 = re.compile('branches: (.+);$')
117
117
118 file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')
118 file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')
119
119
120 prefix = '' # leading path to strip of what we get from CVS
120 prefix = '' # leading path to strip of what we get from CVS
121
121
122 if directory is None:
122 if directory is None:
123 # Current working directory
123 # Current working directory
124
124
125 # Get the real directory in the repository
125 # Get the real directory in the repository
126 try:
126 try:
127 prefix = open(os.path.join('CVS','Repository')).read().strip()
127 prefix = open(os.path.join('CVS','Repository')).read().strip()
128 directory = prefix
128 directory = prefix
129 if prefix == ".":
129 if prefix == ".":
130 prefix = ""
130 prefix = ""
131 except IOError:
131 except IOError:
132 raise logerror(_('not a CVS sandbox'))
132 raise logerror(_('not a CVS sandbox'))
133
133
134 if prefix and not prefix.endswith(os.sep):
134 if prefix and not prefix.endswith(os.sep):
135 prefix += os.sep
135 prefix += os.sep
136
136
137 # Use the Root file in the sandbox, if it exists
137 # Use the Root file in the sandbox, if it exists
138 try:
138 try:
139 root = open(os.path.join('CVS','Root')).read().strip()
139 root = open(os.path.join('CVS','Root')).read().strip()
140 except IOError:
140 except IOError:
141 pass
141 pass
142
142
143 if not root:
143 if not root:
144 root = os.environ.get('CVSROOT', '')
144 root = os.environ.get('CVSROOT', '')
145
145
146 # read log cache if one exists
146 # read log cache if one exists
147 oldlog = []
147 oldlog = []
148 date = None
148 date = None
149
149
150 if cache:
150 if cache:
151 cachedir = os.path.expanduser('~/.hg.cvsps')
151 cachedir = os.path.expanduser('~/.hg.cvsps')
152 if not os.path.exists(cachedir):
152 if not os.path.exists(cachedir):
153 os.mkdir(cachedir)
153 os.mkdir(cachedir)
154
154
155 # The cvsps cache pickle needs a uniquified name, based on the
155 # The cvsps cache pickle needs a uniquified name, based on the
156 # repository location. The address may have all sort of nasties
156 # repository location. The address may have all sort of nasties
157 # in it, slashes, colons and such. So here we take just the
157 # in it, slashes, colons and such. So here we take just the
158 # alphanumeric characters, concatenated in a way that does not
158 # alphanumeric characters, concatenated in a way that does not
159 # mix up the various components, so that
159 # mix up the various components, so that
160 # :pserver:user@server:/path
160 # :pserver:user@server:/path
161 # and
161 # and
162 # /pserver/user/server/path
162 # /pserver/user/server/path
163 # are mapped to different cache file names.
163 # are mapped to different cache file names.
164 cachefile = root.split(":") + [directory, "cache"]
164 cachefile = root.split(":") + [directory, "cache"]
165 cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
165 cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
166 cachefile = os.path.join(cachedir,
166 cachefile = os.path.join(cachedir,
167 '.'.join([s for s in cachefile if s]))
167 '.'.join([s for s in cachefile if s]))
168
168
169 if cache == 'update':
169 if cache == 'update':
170 try:
170 try:
171 ui.note(_('reading cvs log cache %s\n') % cachefile)
171 ui.note(_('reading cvs log cache %s\n') % cachefile)
172 oldlog = pickle.load(open(cachefile))
172 oldlog = pickle.load(open(cachefile))
173 for e in oldlog:
173 for e in oldlog:
174 if not (util.safehasattr(e, 'branchpoints') and
174 if not (util.safehasattr(e, 'branchpoints') and
175 util.safehasattr(e, 'commitid') and
175 util.safehasattr(e, 'commitid') and
176 util.safehasattr(e, 'mergepoint')):
176 util.safehasattr(e, 'mergepoint')):
177 ui.status(_('ignoring old cache\n'))
177 ui.status(_('ignoring old cache\n'))
178 oldlog = []
178 oldlog = []
179 break
179 break
180
180
181 ui.note(_('cache has %d log entries\n') % len(oldlog))
181 ui.note(_('cache has %d log entries\n') % len(oldlog))
182 except Exception, e:
182 except Exception, e:
183 ui.note(_('error reading cache: %r\n') % e)
183 ui.note(_('error reading cache: %r\n') % e)
184
184
185 if oldlog:
185 if oldlog:
186 date = oldlog[-1].date # last commit date as a (time,tz) tuple
186 date = oldlog[-1].date # last commit date as a (time,tz) tuple
187 date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
187 date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
188
188
189 # build the CVS commandline
189 # build the CVS commandline
190 cmd = ['cvs', '-q']
190 cmd = ['cvs', '-q']
191 if root:
191 if root:
192 cmd.append('-d%s' % root)
192 cmd.append('-d%s' % root)
193 p = util.normpath(getrepopath(root))
193 p = util.normpath(getrepopath(root))
194 if not p.endswith('/'):
194 if not p.endswith('/'):
195 p += '/'
195 p += '/'
196 if prefix:
196 if prefix:
197 # looks like normpath replaces "" by "."
197 # looks like normpath replaces "" by "."
198 prefix = p + util.normpath(prefix)
198 prefix = p + util.normpath(prefix)
199 else:
199 else:
200 prefix = p
200 prefix = p
201 cmd.append(['log', 'rlog'][rlog])
201 cmd.append(['log', 'rlog'][rlog])
202 if date:
202 if date:
203 # no space between option and date string
203 # no space between option and date string
204 cmd.append('-d>%s' % date)
204 cmd.append('-d>%s' % date)
205 cmd.append(directory)
205 cmd.append(directory)
206
206
207 # state machine begins here
207 # state machine begins here
208 tags = {} # dictionary of revisions on current file with their tags
208 tags = {} # dictionary of revisions on current file with their tags
209 branchmap = {} # mapping between branch names and revision numbers
209 branchmap = {} # mapping between branch names and revision numbers
210 state = 0
210 state = 0
211 store = False # set when a new record can be appended
211 store = False # set when a new record can be appended
212
212
213 cmd = [util.shellquote(arg) for arg in cmd]
213 cmd = [util.shellquote(arg) for arg in cmd]
214 ui.note(_("running %s\n") % (' '.join(cmd)))
214 ui.note(_("running %s\n") % (' '.join(cmd)))
215 ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root))
215 ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root))
216
216
217 pfp = util.popen(' '.join(cmd))
217 pfp = util.popen(' '.join(cmd))
218 peek = pfp.readline()
218 peek = pfp.readline()
219 while True:
219 while True:
220 line = peek
220 line = peek
221 if line == '':
221 if line == '':
222 break
222 break
223 peek = pfp.readline()
223 peek = pfp.readline()
224 if line.endswith('\n'):
224 if line.endswith('\n'):
225 line = line[:-1]
225 line = line[:-1]
226 #ui.debug('state=%d line=%r\n' % (state, line))
226 #ui.debug('state=%d line=%r\n' % (state, line))
227
227
228 if state == 0:
228 if state == 0:
229 # initial state, consume input until we see 'RCS file'
229 # initial state, consume input until we see 'RCS file'
230 match = re_00.match(line)
230 match = re_00.match(line)
231 if match:
231 if match:
232 rcs = match.group(1)
232 rcs = match.group(1)
233 tags = {}
233 tags = {}
234 if rlog:
234 if rlog:
235 filename = util.normpath(rcs[:-2])
235 filename = util.normpath(rcs[:-2])
236 if filename.startswith(prefix):
236 if filename.startswith(prefix):
237 filename = filename[len(prefix):]
237 filename = filename[len(prefix):]
238 if filename.startswith('/'):
238 if filename.startswith('/'):
239 filename = filename[1:]
239 filename = filename[1:]
240 if filename.startswith('Attic/'):
240 if filename.startswith('Attic/'):
241 filename = filename[6:]
241 filename = filename[6:]
242 else:
242 else:
243 filename = filename.replace('/Attic/', '/')
243 filename = filename.replace('/Attic/', '/')
244 state = 2
244 state = 2
245 continue
245 continue
246 state = 1
246 state = 1
247 continue
247 continue
248 match = re_01.match(line)
248 match = re_01.match(line)
249 if match:
249 if match:
250 raise logerror(match.group(1))
250 raise logerror(match.group(1))
251 match = re_02.match(line)
251 match = re_02.match(line)
252 if match:
252 if match:
253 raise logerror(match.group(2))
253 raise logerror(match.group(2))
254 if re_03.match(line):
254 if re_03.match(line):
255 raise logerror(line)
255 raise logerror(line)
256
256
257 elif state == 1:
257 elif state == 1:
258 # expect 'Working file' (only when using log instead of rlog)
258 # expect 'Working file' (only when using log instead of rlog)
259 match = re_10.match(line)
259 match = re_10.match(line)
260 assert match, _('RCS file must be followed by working file')
260 assert match, _('RCS file must be followed by working file')
261 filename = util.normpath(match.group(1))
261 filename = util.normpath(match.group(1))
262 state = 2
262 state = 2
263
263
264 elif state == 2:
264 elif state == 2:
265 # expect 'symbolic names'
265 # expect 'symbolic names'
266 if re_20.match(line):
266 if re_20.match(line):
267 branchmap = {}
267 branchmap = {}
268 state = 3
268 state = 3
269
269
270 elif state == 3:
270 elif state == 3:
271 # read the symbolic names and store as tags
271 # read the symbolic names and store as tags
272 match = re_30.match(line)
272 match = re_30.match(line)
273 if match:
273 if match:
274 rev = [int(x) for x in match.group(2).split('.')]
274 rev = [int(x) for x in match.group(2).split('.')]
275
275
276 # Convert magic branch number to an odd-numbered one
276 # Convert magic branch number to an odd-numbered one
277 revn = len(rev)
277 revn = len(rev)
278 if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
278 if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
279 rev = rev[:-2] + rev[-1:]
279 rev = rev[:-2] + rev[-1:]
280 rev = tuple(rev)
280 rev = tuple(rev)
281
281
282 if rev not in tags:
282 if rev not in tags:
283 tags[rev] = []
283 tags[rev] = []
284 tags[rev].append(match.group(1))
284 tags[rev].append(match.group(1))
285 branchmap[match.group(1)] = match.group(2)
285 branchmap[match.group(1)] = match.group(2)
286
286
287 elif re_31.match(line):
287 elif re_31.match(line):
288 state = 5
288 state = 5
289 elif re_32.match(line):
289 elif re_32.match(line):
290 state = 0
290 state = 0
291
291
292 elif state == 4:
292 elif state == 4:
293 # expecting '------' separator before first revision
293 # expecting '------' separator before first revision
294 if re_31.match(line):
294 if re_31.match(line):
295 state = 5
295 state = 5
296 else:
296 else:
297 assert not re_32.match(line), _('must have at least '
297 assert not re_32.match(line), _('must have at least '
298 'some revisions')
298 'some revisions')
299
299
300 elif state == 5:
300 elif state == 5:
301 # expecting revision number and possibly (ignored) lock indication
301 # expecting revision number and possibly (ignored) lock indication
302 # we create the logentry here from values stored in states 0 to 4,
302 # we create the logentry here from values stored in states 0 to 4,
303 # as this state is re-entered for subsequent revisions of a file.
303 # as this state is re-entered for subsequent revisions of a file.
304 match = re_50.match(line)
304 match = re_50.match(line)
305 assert match, _('expected revision number')
305 assert match, _('expected revision number')
306 e = logentry(rcs=scache(rcs),
306 e = logentry(rcs=scache(rcs),
307 file=scache(filename),
307 file=scache(filename),
308 revision=tuple([int(x) for x in
308 revision=tuple([int(x) for x in
309 match.group(1).split('.')]),
309 match.group(1).split('.')]),
310 branches=[],
310 branches=[],
311 parent=None,
311 parent=None,
312 commitid=None,
312 commitid=None,
313 mergepoint=None,
313 mergepoint=None,
314 branchpoints=set())
314 branchpoints=set())
315
315
316 state = 6
316 state = 6
317
317
318 elif state == 6:
318 elif state == 6:
319 # expecting date, author, state, lines changed
319 # expecting date, author, state, lines changed
320 match = re_60.match(line)
320 match = re_60.match(line)
321 assert match, _('revision must be followed by date line')
321 assert match, _('revision must be followed by date line')
322 d = match.group(1)
322 d = match.group(1)
323 if d[2] == '/':
323 if d[2] == '/':
324 # Y2K
324 # Y2K
325 d = '19' + d
325 d = '19' + d
326
326
327 if len(d.split()) != 3:
327 if len(d.split()) != 3:
328 # cvs log dates always in GMT
328 # cvs log dates always in GMT
329 d = d + ' UTC'
329 d = d + ' UTC'
330 e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
330 e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
331 '%Y/%m/%d %H:%M:%S',
331 '%Y/%m/%d %H:%M:%S',
332 '%Y-%m-%d %H:%M:%S'])
332 '%Y-%m-%d %H:%M:%S'])
333 e.author = scache(match.group(2))
333 e.author = scache(match.group(2))
334 e.dead = match.group(3).lower() == 'dead'
334 e.dead = match.group(3).lower() == 'dead'
335
335
336 if match.group(5):
336 if match.group(5):
337 if match.group(6):
337 if match.group(6):
338 e.lines = (int(match.group(5)), int(match.group(6)))
338 e.lines = (int(match.group(5)), int(match.group(6)))
339 else:
339 else:
340 e.lines = (int(match.group(5)), 0)
340 e.lines = (int(match.group(5)), 0)
341 elif match.group(6):
341 elif match.group(6):
342 e.lines = (0, int(match.group(6)))
342 e.lines = (0, int(match.group(6)))
343 else:
343 else:
344 e.lines = None
344 e.lines = None
345
345
346 if match.group(7): # cvs 1.12 commitid
346 if match.group(7): # cvs 1.12 commitid
347 e.commitid = match.group(8)
347 e.commitid = match.group(8)
348
348
349 if match.group(9): # cvsnt mergepoint
349 if match.group(9): # cvsnt mergepoint
350 myrev = match.group(10).split('.')
350 myrev = match.group(10).split('.')
351 if len(myrev) == 2: # head
351 if len(myrev) == 2: # head
352 e.mergepoint = 'HEAD'
352 e.mergepoint = 'HEAD'
353 else:
353 else:
354 myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
354 myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
355 branches = [b for b in branchmap if branchmap[b] == myrev]
355 branches = [b for b in branchmap if branchmap[b] == myrev]
356 assert len(branches) == 1, ('unknown branch: %s'
356 assert len(branches) == 1, ('unknown branch: %s'
357 % e.mergepoint)
357 % e.mergepoint)
358 e.mergepoint = branches[0]
358 e.mergepoint = branches[0]
359
359
360 e.comment = []
360 e.comment = []
361 state = 7
361 state = 7
362
362
363 elif state == 7:
363 elif state == 7:
364 # read the revision numbers of branches that start at this revision
364 # read the revision numbers of branches that start at this revision
365 # or store the commit log message otherwise
365 # or store the commit log message otherwise
366 m = re_70.match(line)
366 m = re_70.match(line)
367 if m:
367 if m:
368 e.branches = [tuple([int(y) for y in x.strip().split('.')])
368 e.branches = [tuple([int(y) for y in x.strip().split('.')])
369 for x in m.group(1).split(';')]
369 for x in m.group(1).split(';')]
370 state = 8
370 state = 8
371 elif re_31.match(line) and re_50.match(peek):
371 elif re_31.match(line) and re_50.match(peek):
372 state = 5
372 state = 5
373 store = True
373 store = True
374 elif re_32.match(line):
374 elif re_32.match(line):
375 state = 0
375 state = 0
376 store = True
376 store = True
377 else:
377 else:
378 e.comment.append(line)
378 e.comment.append(line)
379
379
380 elif state == 8:
380 elif state == 8:
381 # store commit log message
381 # store commit log message
382 if re_31.match(line):
382 if re_31.match(line):
383 cpeek = peek
383 cpeek = peek
384 if cpeek.endswith('\n'):
384 if cpeek.endswith('\n'):
385 cpeek = cpeek[:-1]
385 cpeek = cpeek[:-1]
386 if re_50.match(cpeek):
386 if re_50.match(cpeek):
387 state = 5
387 state = 5
388 store = True
388 store = True
389 else:
389 else:
390 e.comment.append(line)
390 e.comment.append(line)
391 elif re_32.match(line):
391 elif re_32.match(line):
392 state = 0
392 state = 0
393 store = True
393 store = True
394 else:
394 else:
395 e.comment.append(line)
395 e.comment.append(line)
396
396
397 # When a file is added on a branch B1, CVS creates a synthetic
397 # When a file is added on a branch B1, CVS creates a synthetic
398 # dead trunk revision 1.1 so that the branch has a root.
398 # dead trunk revision 1.1 so that the branch has a root.
399 # Likewise, if you merge such a file to a later branch B2 (one
399 # Likewise, if you merge such a file to a later branch B2 (one
400 # that already existed when the file was added on B1), CVS
400 # that already existed when the file was added on B1), CVS
401 # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
401 # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
402 # these revisions now, but mark them synthetic so
402 # these revisions now, but mark them synthetic so
403 # createchangeset() can take care of them.
403 # createchangeset() can take care of them.
404 if (store and
404 if (store and
405 e.dead and
405 e.dead and
406 e.revision[-1] == 1 and # 1.1 or 1.1.x.1
406 e.revision[-1] == 1 and # 1.1 or 1.1.x.1
407 len(e.comment) == 1 and
407 len(e.comment) == 1 and
408 file_added_re.match(e.comment[0])):
408 file_added_re.match(e.comment[0])):
409 ui.debug('found synthetic revision in %s: %r\n'
409 ui.debug('found synthetic revision in %s: %r\n'
410 % (e.rcs, e.comment[0]))
410 % (e.rcs, e.comment[0]))
411 e.synthetic = True
411 e.synthetic = True
412
412
413 if store:
413 if store:
414 # clean up the results and save in the log.
414 # clean up the results and save in the log.
415 store = False
415 store = False
416 e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
416 e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
417 e.comment = scache('\n'.join(e.comment))
417 e.comment = scache('\n'.join(e.comment))
418
418
419 revn = len(e.revision)
419 revn = len(e.revision)
420 if revn > 3 and (revn % 2) == 0:
420 if revn > 3 and (revn % 2) == 0:
421 e.branch = tags.get(e.revision[:-1], [None])[0]
421 e.branch = tags.get(e.revision[:-1], [None])[0]
422 else:
422 else:
423 e.branch = None
423 e.branch = None
424
424
425 # find the branches starting from this revision
425 # find the branches starting from this revision
426 branchpoints = set()
426 branchpoints = set()
427 for branch, revision in branchmap.iteritems():
427 for branch, revision in branchmap.iteritems():
428 revparts = tuple([int(i) for i in revision.split('.')])
428 revparts = tuple([int(i) for i in revision.split('.')])
429 if len(revparts) < 2: # bad tags
429 if len(revparts) < 2: # bad tags
430 continue
430 continue
431 if revparts[-2] == 0 and revparts[-1] % 2 == 0:
431 if revparts[-2] == 0 and revparts[-1] % 2 == 0:
432 # normal branch
432 # normal branch
433 if revparts[:-2] == e.revision:
433 if revparts[:-2] == e.revision:
434 branchpoints.add(branch)
434 branchpoints.add(branch)
435 elif revparts == (1, 1, 1): # vendor branch
435 elif revparts == (1, 1, 1): # vendor branch
436 if revparts in e.branches:
436 if revparts in e.branches:
437 branchpoints.add(branch)
437 branchpoints.add(branch)
438 e.branchpoints = branchpoints
438 e.branchpoints = branchpoints
439
439
440 log.append(e)
440 log.append(e)
441
441
442 if len(log) % 100 == 0:
442 if len(log) % 100 == 0:
443 ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')
443 ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')
444
444
445 log.sort(key=lambda x: (x.rcs, x.revision))
445 log.sort(key=lambda x: (x.rcs, x.revision))
446
446
447 # find parent revisions of individual files
447 # find parent revisions of individual files
448 versions = {}
448 versions = {}
449 for e in log:
449 for e in log:
450 branch = e.revision[:-1]
450 branch = e.revision[:-1]
451 p = versions.get((e.rcs, branch), None)
451 p = versions.get((e.rcs, branch), None)
452 if p is None:
452 if p is None:
453 p = e.revision[:-2]
453 p = e.revision[:-2]
454 e.parent = p
454 e.parent = p
455 versions[(e.rcs, branch)] = e.revision
455 versions[(e.rcs, branch)] = e.revision
456
456
457 # update the log cache
457 # update the log cache
458 if cache:
458 if cache:
459 if log:
459 if log:
460 # join up the old and new logs
460 # join up the old and new logs
461 log.sort(key=lambda x: x.date)
461 log.sort(key=lambda x: x.date)
462
462
463 if oldlog and oldlog[-1].date >= log[0].date:
463 if oldlog and oldlog[-1].date >= log[0].date:
464 raise logerror(_('log cache overlaps with new log entries,'
464 raise logerror(_('log cache overlaps with new log entries,'
465 ' re-run without cache.'))
465 ' re-run without cache.'))
466
466
467 log = oldlog + log
467 log = oldlog + log
468
468
469 # write the new cachefile
469 # write the new cachefile
470 ui.note(_('writing cvs log cache %s\n') % cachefile)
470 ui.note(_('writing cvs log cache %s\n') % cachefile)
471 pickle.dump(log, open(cachefile, 'w'))
471 pickle.dump(log, open(cachefile, 'w'))
472 else:
472 else:
473 log = oldlog
473 log = oldlog
474
474
475 ui.status(_('%d log entries\n') % len(log))
475 ui.status(_('%d log entries\n') % len(log))
476
476
477 hook.hook(ui, None, "cvslog", True, log=log)
477 hook.hook(ui, None, "cvslog", True, log=log)
478
478
479 return log
479 return log
480
480
481
481
class changeset(object):
    '''One converted changeset, grouping several file-level logentries.

    Class changeset has the following attributes:
        .id        - integer identifying this changeset (list index)
        .author    - author name as CVS knows it
        .branch    - name of branch this changeset is on, or None
        .comment   - commit message
        .commitid  - CVS commitid or None
        .date      - the commit date as a (time,tz) tuple
        .entries   - list of logentry objects in this changeset
        .parents   - list of one or two parent changesets
        .tags      - list of tags on this changeset
        .synthetic - from synthetic revision "file ... added on branch ..."
        .mergepoint - the branch that has been merged from or None
        .branchpoints - the branches that start at the current entry or empty
    '''
    def __init__(self, **entries):
        # id is assigned later, once the changeset's list position is known
        self.id = None
        self.synthetic = False
        self.__dict__.update(entries)

    def __repr__(self):
        # deterministic repr: attributes in sorted order
        items = ("%s=%r"%(k, self.__dict__[k]) for k in sorted(self.__dict__))
        return "%s(%s)"%(type(self).__name__, ", ".join(items))
505
505
def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
    '''Convert log into changesets.

    log is a list of logentry objects, one per file revision.  File
    revisions are grouped into changesets by CVS commitid when one is
    present; otherwise fuzzy matching is used: same comment, author and
    branch, with commit times no more than fuzz seconds apart.

    mergefrom and mergeto are regular expression patterns (None selects
    the defaults) recognizing "{{mergefrombranch X}}" /
    "{{mergetobranch X}}" markers in commit messages, which add merge
    parents (and, for mergeto, an inserted dummy merge changeset).

    Returns the list of changeset objects, numbered from 1 in .id.
    '''

    ui.status(_('creating changesets\n'))

    # try to order commitids by date
    mindate = {}
    for e in log:
        if e.commitid:
            # Record the earliest date seen for each commitid.  Using
            # mindate.get(e.commitid) with no default here would make
            # min() compare against None on first sight, and in
            # Python 2 min(date, None) is None -- which would silently
            # discard the date and defeat the ordering below.
            if e.commitid in mindate:
                mindate[e.commitid] = min(e.date, mindate[e.commitid])
            else:
                mindate[e.commitid] = e.date

    # Merge changesets
    log.sort(key=lambda x: (mindate.get(x.commitid), x.commitid, x.comment,
                            x.author, x.branch, x.date, x.branchpoints))

    changesets = []
    files = set()
    c = None
    for i, e in enumerate(log):

        # Check if log entry belongs to the current changeset or not.

        # Since CVS is file-centric, two different file revisions with
        # different branchpoints should be treated as belonging to two
        # different changesets (and the ordering is important and not
        # honoured by cvsps at this point).
        #
        # Consider the following case:
        # foo 1.1 branchpoints: [MYBRANCH]
        # bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2]
        #
        # Here foo is part only of MYBRANCH, but not MYBRANCH2, e.g. a
        # later version of foo may be in MYBRANCH2, so foo should be the
        # first changeset and bar the next and MYBRANCH and MYBRANCH2
        # should both start off of the bar changeset. No provisions are
        # made to ensure that this is, in fact, what happens.
        if not (c and e.branchpoints == c.branchpoints and
                (# cvs commitids
                 (e.commitid is not None and e.commitid == c.commitid) or
                 (# no commitids, use fuzzy commit detection
                  (e.commitid is None or c.commitid is None) and
                  e.comment == c.comment and
                  e.author == c.author and
                  e.branch == c.branch and
                  ((c.date[0] + c.date[1]) <=
                   (e.date[0] + e.date[1]) <=
                   (c.date[0] + c.date[1]) + fuzz) and
                  e.file not in files))):
            # Entry does not fit the current changeset: start a new one.
            c = changeset(comment=e.comment, author=e.author,
                          branch=e.branch, date=e.date,
                          entries=[], mergepoint=e.mergepoint,
                          branchpoints=e.branchpoints, commitid=e.commitid)
            changesets.append(c)

            files = set()
            if len(changesets) % 100 == 0:
                # Progress output every 100 changesets.
                t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
                ui.status(util.ellipsis(t, 80) + '\n')

        c.entries.append(e)
        files.add(e.file)
        c.date = e.date # changeset date is date of latest commit in it

    # Mark synthetic changesets

    for c in changesets:
        # Synthetic revisions always get their own changeset, because
        # the log message includes the filename. E.g. if you add file3
        # and file4 on a branch, you get four log entries and three
        # changesets:
        #   "File file3 was added on branch ..." (synthetic, 1 entry)
        #   "File file4 was added on branch ..." (synthetic, 1 entry)
        #   "Add file3 and file4 to fix ..."     (real, 2 entries)
        # Hence the check for 1 entry here.
        c.synthetic = len(c.entries) == 1 and c.entries[0].synthetic

    # Sort files in each changeset

    def entitycompare(l, r):
        'Mimic cvsps sorting order'
        l = l.file.split('/')
        r = r.file.split('/')
        nl = len(l)
        nr = len(r)
        n = min(nl, nr)
        for i in range(n):
            if i + 1 == nl and nl < nr:
                return -1
            elif i + 1 == nr and nl > nr:
                return +1
            elif l[i] < r[i]:
                return -1
            elif l[i] > r[i]:
                return +1
        return 0

    for c in changesets:
        c.entries.sort(entitycompare)

    # Sort changesets by date

    odd = set()
    def cscmp(l, r, odd=odd):
        d = sum(l.date) - sum(r.date)
        if d:
            return d

        # detect vendor branches and initial commits on a branch
        # (renamed from le/re so the local dict does not shadow the
        # imported 're' module)
        lrevs = {}
        for e in l.entries:
            lrevs[e.rcs] = e.revision
        rrevs = {}
        for e in r.entries:
            rrevs[e.rcs] = e.revision

        d = 0
        for e in l.entries:
            if rrevs.get(e.rcs, None) == e.parent:
                assert not d
                d = 1
                break

        for e in r.entries:
            if lrevs.get(e.rcs, None) == e.parent:
                if d:
                    # each side contains a parent of the other; remember
                    # the pair so a warning can be issued once ids exist
                    odd.add((l, r))
                d = -1
                break
        # By this point, the changesets are sufficiently compared that
        # we don't really care about ordering. However, this leaves
        # some race conditions in the tests, so we compare on the
        # number of files modified and the number of branchpoints in
        # each changeset to ensure test output remains stable.

        # recommended replacement for cmp from
        # https://docs.python.org/3.0/whatsnew/3.0.html
        pycmp = lambda x, y: (x > y) - (x < y)
        if not d:
            d = pycmp(len(l.entries), len(r.entries))
        if not d:
            d = pycmp(len(l.branchpoints), len(r.branchpoints))
        return d

    changesets.sort(cscmp)

    # Collect tags

    globaltags = {}
    for c in changesets:
        for e in c.entries:
            for tag in e.tags:
                # remember which is the latest changeset to have this tag
                globaltags[tag] = c

    for c in changesets:
        tags = set()
        for e in c.entries:
            tags.update(e.tags)
        # remember tags only if this is the latest changeset to have it
        c.tags = sorted(tag for tag in tags if globaltags[tag] is c)

    # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
    # by inserting dummy changesets with two parents, and handle
    # {{mergefrombranch BRANCHNAME}} by setting two parents.

    if mergeto is None:
        mergeto = r'{{mergetobranch ([-\w]+)}}'
    if mergeto:
        mergeto = re.compile(mergeto)

    if mergefrom is None:
        mergefrom = r'{{mergefrombranch ([-\w]+)}}'
    if mergefrom:
        mergefrom = re.compile(mergefrom)

    versions = {} # changeset index where we saw any particular file version
    branches = {} # changeset index where we saw a branch
    n = len(changesets)
    i = 0
    while i < n:
        c = changesets[i]

        for f in c.entries:
            versions[(f.rcs, f.revision)] = i

        p = None
        if c.branch in branches:
            p = branches[c.branch]
        else:
            # first changeset on a new branch
            # the parent is a changeset with the branch in its
            # branchpoints such that it is the latest possible
            # commit without any intervening, unrelated commits.

            for candidate in xrange(i):
                if c.branch not in changesets[candidate].branchpoints:
                    if p is not None:
                        break
                    continue
                p = candidate

        c.parents = []
        if p is not None:
            p = changesets[p]

            # Ensure no changeset has a synthetic changeset as a parent.
            while p.synthetic:
                assert len(p.parents) <= 1, \
                    _('synthetic changeset cannot have multiple parents')
                if p.parents:
                    p = p.parents[0]
                else:
                    p = None
                    break

            if p is not None:
                c.parents.append(p)

        if c.mergepoint:
            if c.mergepoint == 'HEAD':
                c.mergepoint = None
            c.parents.append(changesets[branches[c.mergepoint]])

        if mergefrom:
            m = mergefrom.search(c.comment)
            if m:
                m = m.group(1)
                if m == 'HEAD':
                    m = None
                try:
                    candidate = changesets[branches[m]]
                except KeyError:
                    ui.warn(_("warning: CVS commit message references "
                              "non-existent branch %r:\n%s\n")
                            % (m, c.comment))
                if m in branches and c.branch != m and not candidate.synthetic:
                    c.parents.append(candidate)

        if mergeto:
            m = mergeto.search(c.comment)
            if m:
                if m.groups():
                    m = m.group(1)
                    if m == 'HEAD':
                        m = None
                else:
                    m = None # if no group found then merge to HEAD
                if m in branches and c.branch != m:
                    # insert empty changeset for merge
                    cc = changeset(
                        author=c.author, branch=m, date=c.date,
                        comment='convert-repo: CVS merge from branch %s'
                        % c.branch,
                        entries=[], tags=[],
                        parents=[changesets[branches[m]], c])
                    changesets.insert(i + 1, cc)
                    branches[m] = i + 1

                    # adjust our loop counters now we have inserted a new entry
                    n += 1
                    i += 2
                    continue

        branches[c.branch] = i
        i += 1

    # Drop synthetic changesets (safe now that we have ensured no other
    # changesets can have them as parents).
    i = 0
    while i < len(changesets):
        if changesets[i].synthetic:
            del changesets[i]
        else:
            i += 1

    # Number changesets

    for i, c in enumerate(changesets):
        c.id = i + 1

    if odd:
        # Pairs flagged by cscmp: each changeset looked like a parent of
        # the other, so the ordering between them is ambiguous.
        for l, r in odd:
            if l.id is not None and r.id is not None:
                ui.warn(_('changeset %d is both before and after %d\n')
                        % (l.id, r.id))

    ui.status(_('%d changeset entries\n') % len(changesets))

    hook.hook(ui, None, "cvschangesets", True, changesets=changesets)

    return changesets
785
797
786
798
def debugcvsps(ui, *args, **opts):
    '''Read CVS rlog for current directory or named path in
    repository, and convert the log to changesets based on matching
    commit log entries and dates.
    '''
    # Translate the cache flags into the mode string createlog expects:
    # "write" builds a fresh cache, "update" refreshes an existing one,
    # None disables caching altogether.
    if opts["new_cache"]:
        cachemode = "write"
    elif opts["update_cache"]:
        cachemode = "update"
    else:
        cachemode = None

    revisions = opts["revisions"]

    try:
        if args:
            log = []
            for directory in args:
                log += createlog(ui, directory, root=opts["root"],
                                 cache=cachemode)
        else:
            log = createlog(ui, root=opts["root"], cache=cachemode)
    except logerror as err:
        ui.write("%r\n" % err)
        return

    changesets = createchangeset(ui, log, opts["fuzz"])
    del log

    # Print changesets (optionally filtered)

    # Non-zero while we have not yet reached the requested start tag.
    skip = len(revisions)
    branches = {} # latest version number in each branch
    ancestors = {} # parent branch
    for cs in changesets:

        if opts["ancestors"]:
            if cs.branch not in branches and cs.parents and cs.parents[0].id:
                ancestors[cs.branch] = (changesets[cs.parents[0].id - 1].branch,
                                        cs.parents[0].id)
            branches[cs.branch] = cs.id

        # limit by branches
        if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
            continue

        if not skip:
            # Note: trailing spaces on several lines here are needed to have
            #       bug-for-bug compatibility with cvsps.
            ui.write('---------------------\n')
            ui.write(('PatchSet %d \n' % cs.id))
            ui.write(('Date: %s\n' % util.datestr(cs.date,
                                                  '%Y/%m/%d %H:%M:%S %1%2')))
            ui.write(('Author: %s\n' % cs.author))
            ui.write(('Branch: %s\n' % (cs.branch or 'HEAD')))
            ui.write(('Tag%s: %s \n' % ('s' if len(cs.tags) > 1 else '',
                                        ','.join(cs.tags) or '(none)')))
            if cs.branchpoints:
                ui.write(('Branchpoints: %s \n') %
                         ', '.join(sorted(cs.branchpoints)))
            if opts["parents"] and cs.parents:
                if len(cs.parents) > 1:
                    ui.write(('Parents: %s\n' %
                              (','.join([str(p.id) for p in cs.parents]))))
                else:
                    ui.write(('Parent: %d\n' % cs.parents[0].id))

            if opts["ancestors"]:
                # Walk the ancestors map back towards HEAD, collecting
                # branch:parent-id:latest-id triples on the way.
                b = cs.branch
                chain = []
                while b:
                    b, parentid = ancestors[b]
                    chain.append('%s:%d:%d'
                                 % (b or "HEAD", parentid, branches[b]))
                if chain:
                    ui.write(('Ancestors: %s\n' % (','.join(chain))))

            ui.write(('Log:\n'))
            ui.write('%s\n\n' % cs.comment)
            ui.write(('Members: \n'))
            for entry in cs.entries:
                filename = entry.file
                if filename.startswith(opts["prefix"]):
                    filename = filename[len(opts["prefix"]):]
                ui.write('\t%s:%s->%s%s \n' % (
                    filename,
                    '.'.join([str(x) for x in entry.parent]) or 'INITIAL',
                    '.'.join([str(x) for x in entry.revision]),
                    '(DEAD)' if entry.dead else ''))
            ui.write('\n')

        # have we seen the start tag?
        if revisions and skip:
            if revisions[0] == str(cs.id) or revisions[0] in cs.tags:
                skip = False

        # see if we reached the end tag
        if len(revisions) > 1 and not skip:
            if revisions[1] == str(cs.id) or revisions[1] in cs.tags:
                break
General Comments 0
You need to be logged in to leave comments. Login now