##// END OF EJS Templates
cvsps: fix crash when log message refers to non-existent branch (issue1615).
Greg Ward -
r8171:4e5bd9b9 default
parent child Browse files
Show More
@@ -1,777 +1,782 b''
1 #
1 #
2 # Mercurial built-in replacement for cvsps.
2 # Mercurial built-in replacement for cvsps.
3 #
3 #
4 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
4 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
5 #
5 #
6 # This software may be used and distributed according to the terms
6 # This software may be used and distributed according to the terms
7 # of the GNU General Public License, incorporated herein by reference.
7 # of the GNU General Public License, incorporated herein by reference.
8
8
9 import os
9 import os
10 import re
10 import re
11 import cPickle as pickle
11 import cPickle as pickle
12 from mercurial import util
12 from mercurial import util
13 from mercurial.i18n import _
13 from mercurial.i18n import _
14
14
def listsort(list, key):
    "helper to sort by key in Python 2.3"
    # Python 2.4+ sort() understands key= directly; Python 2.3 raises
    # TypeError for the unknown keyword, in which case we fall back to a
    # cmp-based sort that applies key() to both operands per comparison.
    try:
        list.sort(key=key)
        return
    except TypeError:
        pass
    list.sort(lambda l, r: cmp(key(l), key(r)))
21
21
class logentry(object):
    '''A single file revision extracted from CVS (r)log output.

    Instances are plain attribute bags; createlog() fills in:
      .author    - author name as CVS knows it
      .branch    - name of branch this revision is on
      .branches  - revision tuple of branches starting at this revision
      .comment   - commit message
      .date      - the commit date as a (time, tz) tuple
      .dead      - true if file revision is dead
      .file      - name of file
      .lines     - a tuple (+lines, -lines) or None
      .parent    - previous revision of this entry
      .rcs       - name of file as returned from CVS
      .revision  - revision number as tuple
      .tags      - list of tags on the file
      .synthetic - is this a synthetic "file ... added on ..." revision?
      .mergepoint- the branch merged from (if present in rlog output)
    '''
    def __init__(self, **entries):
        # Accept arbitrary keyword arguments and expose them as attributes.
        self.__dict__.update(entries)

    def __repr__(self):
        dotted = ".".join(map(str, self.revision))
        return "<%s at 0x%x: %s %s>" % (self.__class__.__name__,
                                        id(self), self.file, dotted)
47
47
class logerror(Exception):
    '''Raised for unrecoverable problems while gathering the CVS log,
    e.g. running outside a CVS sandbox or a log cache that overlaps
    the newly fetched entries.'''
50
50
def getrepopath(cvspath):
    """Return the repository path from a CVS path.

    >>> getrepopath('/foo/bar')
    '/foo/bar'
    >>> getrepopath('c:/foo/bar')
    'c:/foo/bar'
    >>> getrepopath(':pserver:10/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:10c:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:c:/foo/bar')
    'c:/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
    'c:/foo/bar'
    """
    # Per the CVS manual, CVS paths look like:
    #   [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
    # Windows absolute paths ('c:/...') also contain a colon, which makes
    # parsing ambiguous.  We assume drive letters are exactly one character
    # and any other CVS component before the repository path is at least
    # two characters long.
    atoms = cvspath.split(':')
    if len(atoms) == 1:
        # No colon at all: already a bare repository path.
        return cvspath
    head, tail = atoms[-2], atoms[-1]
    if len(head) <= 1:
        # A one-character (or empty) component right before the path is
        # taken to be a Windows drive letter; glue it back on.
        return head + ':' + tail
    # Otherwise the last component is the path, possibly preceded by a
    # port number (':pserver:host:2401/path').  A port immediately
    # followed by a drive letter would be ambiguous; we assume that
    # never happens and strip leading digits.
    return tail.lstrip('0123456789')
89
89
def createlog(ui, directory=None, root="", rlog=True, cache=None):
    '''Collect the CVS rlog.

    Runs 'cvs rlog' (or 'cvs log' when rlog is False) and parses its
    output with a small state machine, returning a list of logentry
    objects, one per file revision.

    ui        - Mercurial ui object used for status/note/debug output
    directory - directory to log; None means the current CVS sandbox
    root      - CVSROOT to use; falls back to CVS/Root, then $CVSROOT
    rlog      - use 'cvs rlog' (repository-side) instead of 'cvs log'
    cache     - falsy for no caching; 'update' reads an existing on-disk
                cache and only fetches newer entries

    Raises logerror when not run inside a CVS sandbox (directory=None)
    or when the log cache overlaps the newly fetched entries.
    '''

    # Because we store many duplicate commit log messages, reusing strings
    # saves a lot of memory and pickle storage space.
    _scache = {}
    def scache(s):
        "return a shared version of a string"
        return _scache.setdefault(s, s)

    ui.status(_('collecting CVS rlog\n'))

    log = []      # list of logentry objects containing the CVS state

    # patterns to match in CVS (r)log output, by state of use
    re_00 = re.compile('RCS file: (.+)$')
    re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
    re_02 = re.compile('cvs (r?log|server): (.+)\n$')
    re_03 = re.compile("(Cannot access.+CVSROOT)|(can't create temporary directory.+)$")
    re_10 = re.compile('Working file: (.+)$')
    re_20 = re.compile('symbolic names:')
    re_30 = re.compile('\t(.+): ([\\d.]+)$')
    re_31 = re.compile('----------------------------$')
    re_32 = re.compile('=============================================================================$')
    re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
    re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?(.*mergepoint:\s+([^;]+);)?')
    re_70 = re.compile('branches: (.+);$')

    # matches the synthetic log message CVS writes for dead placeholder
    # revisions ("file X was added on branch Y")
    file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')

    prefix = ''   # leading path to strip of what we get from CVS

    if directory is None:
        # Current working directory

        # Get the real directory in the repository
        try:
            prefix = file(os.path.join('CVS','Repository')).read().strip()
            if prefix == ".":
                prefix = ""
            directory = prefix
        except IOError:
            raise logerror('Not a CVS sandbox')

        if prefix and not prefix.endswith(os.sep):
            prefix += os.sep

        # Use the Root file in the sandbox, if it exists
        try:
            root = file(os.path.join('CVS','Root')).read().strip()
        except IOError:
            pass

    if not root:
        root = os.environ.get('CVSROOT', '')

    # read log cache if one exists
    oldlog = []
    date = None

    if cache:
        cachedir = os.path.expanduser('~/.hg.cvsps')
        if not os.path.exists(cachedir):
            os.mkdir(cachedir)

        # The cvsps cache pickle needs a uniquified name, based on the
        # repository location. The address may have all sort of nasties
        # in it, slashes, colons and such. So here we take just the
        # alphanumerics, concatenated in a way that does not mix up the
        # various components, so that
        #    :pserver:user@server:/path
        # and
        #    /pserver/user/server/path
        # are mapped to different cache file names.
        cachefile = root.split(":") + [directory, "cache"]
        cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
        cachefile = os.path.join(cachedir,
                                 '.'.join([s for s in cachefile if s]))

        if cache == 'update':
            try:
                ui.note(_('reading cvs log cache %s\n') % cachefile)
                oldlog = pickle.load(file(cachefile))
                ui.note(_('cache has %d log entries\n') % len(oldlog))
            except Exception, e:
                # best-effort: a missing/corrupt cache just means a full
                # log fetch, not an error
                ui.note(_('error reading cache: %r\n') % e)

        if oldlog:
            date = oldlog[-1].date    # last commit date as a (time,tz) tuple
            date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')

    # build the CVS commandline
    cmd = ['cvs', '-q']
    if root:
        cmd.append('-d%s' % root)
        p = util.normpath(getrepopath(root))
        if not p.endswith('/'):
            p += '/'
        prefix = p + util.normpath(prefix)
    cmd.append(['log', 'rlog'][rlog])
    if date:
        # no space between option and date string
        cmd.append('-d>%s' % date)
    cmd.append(directory)

    # state machine begins here
    #   0: scanning for 'RCS file'        4: waiting for '-----' separator
    #   1: expecting 'Working file'       5: expecting 'revision N.N'
    #   2: expecting 'symbolic names:'    6: expecting the date/author line
    #   3: reading tags                   7/8: accumulating the log message
    tags = {}     # dictionary of revisions on current file with their tags
    branchmap = {} # mapping between branch names and revision numbers
    state = 0
    store = False # set when a new record can be appended

    cmd = [util.shellquote(arg) for arg in cmd]
    ui.note(_("running %s\n") % (' '.join(cmd)))
    ui.debug(_("prefix=%r directory=%r root=%r\n") % (prefix, directory, root))

    pfp = util.popen(' '.join(cmd))
    peek = pfp.readline()
    while True:
        line = peek
        if line == '':
            break
        # one-line lookahead: state 7 needs to peek at the next line to
        # tell a revision separator from a '-----' line inside a message
        peek = pfp.readline()
        if line.endswith('\n'):
            line = line[:-1]
        #ui.debug('state=%d line=%r\n' % (state, line))

        if state == 0:
            # initial state, consume input until we see 'RCS file'
            match = re_00.match(line)
            if match:
                rcs = match.group(1)
                tags = {}
                if rlog:
                    # rlog prints the repository-side RCS path; strip the
                    # trailing ',v' and the repository prefix to recover
                    # the working-file name
                    filename = util.normpath(rcs[:-2])
                    if filename.startswith(prefix):
                        filename = filename[len(prefix):]
                    if filename.startswith('/'):
                        filename = filename[1:]
                    if filename.startswith('Attic/'):
                        filename = filename[6:]
                    else:
                        filename = filename.replace('/Attic/', '/')
                    state = 2
                    continue
                state = 1
                continue
            match = re_01.match(line)
            if match:
                raise Exception(match.group(1))
            match = re_02.match(line)
            if match:
                raise Exception(match.group(2))
            if re_03.match(line):
                raise Exception(line)

        elif state == 1:
            # expect 'Working file' (only when using log instead of rlog)
            match = re_10.match(line)
            assert match, _('RCS file must be followed by working file')
            filename = util.normpath(match.group(1))
            state = 2

        elif state == 2:
            # expect 'symbolic names'
            if re_20.match(line):
                branchmap = {}
                state = 3

        elif state == 3:
            # read the symbolic names and store as tags
            match = re_30.match(line)
            if match:
                rev = [int(x) for x in match.group(2).split('.')]

                # Convert magic branch number to an odd-numbered one
                revn = len(rev)
                if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
                    rev = rev[:-2] + rev[-1:]
                rev = tuple(rev)

                if rev not in tags:
                    tags[rev] = []
                tags[rev].append(match.group(1))
                branchmap[match.group(1)] = match.group(2)

            elif re_31.match(line):
                state = 5
            elif re_32.match(line):
                state = 0

        elif state == 4:
            # expecting '------' separator before first revision
            if re_31.match(line):
                state = 5
            else:
                assert not re_32.match(line), _('must have at least some revisions')

        elif state == 5:
            # expecting revision number and possibly (ignored) lock indication
            # we create the logentry here from values stored in states 0 to 4,
            # as this state is re-entered for subsequent revisions of a file.
            match = re_50.match(line)
            assert match, _('expected revision number')
            e = logentry(rcs=scache(rcs), file=scache(filename),
                    revision=tuple([int(x) for x in match.group(1).split('.')]),
                    branches=[], parent=None,
                    synthetic=False)
            state = 6

        elif state == 6:
            # expecting date, author, state, lines changed
            match = re_60.match(line)
            assert match, _('revision must be followed by date line')
            d = match.group(1)
            if d[2] == '/':
                # Y2K
                d = '19' + d

            if len(d.split()) != 3:
                # cvs log dates always in GMT
                d = d + ' UTC'
            e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S'])
            e.author = scache(match.group(2))
            e.dead = match.group(3).lower() == 'dead'

            # groups 5/6 are the optional '+N'/'-N' of 'lines: +N -N'
            if match.group(5):
                if match.group(6):
                    e.lines = (int(match.group(5)), int(match.group(6)))
                else:
                    e.lines = (int(match.group(5)), 0)
            elif match.group(6):
                e.lines = (0, int(match.group(6)))
            else:
                e.lines = None

            if match.group(7): # cvsnt mergepoint
                myrev = match.group(8).split('.')
                if len(myrev) == 2: # head
                    e.mergepoint = 'HEAD'
                else:
                    # rebuild the magic branch number and look up its name
                    myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
                    branches = [b for b in branchmap if branchmap[b] == myrev]
                    # NOTE(review): e.mergepoint is not set yet at this
                    # point, so if this assert fires the '%' formatting
                    # itself raises AttributeError; the message probably
                    # meant to use match.group(8) -- confirm upstream.
                    assert len(branches) == 1, 'unknown branch: %s' % e.mergepoint
                    e.mergepoint = branches[0]
            else:
                e.mergepoint = None
            e.comment = []
            state = 7

        elif state == 7:
            # read the revision numbers of branches that start at this revision
            # or store the commit log message otherwise
            m = re_70.match(line)
            if m:
                e.branches = [tuple([int(y) for y in x.strip().split('.')])
                              for x in m.group(1).split(';')]
                state = 8
            elif re_31.match(line) and re_50.match(peek):
                state = 5
                store = True
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        elif state == 8:
            # store commit log message
            if re_31.match(line):
                state = 5
                store = True
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        # When a file is added on a branch B1, CVS creates a synthetic
        # dead trunk revision 1.1 so that the branch has a root.
        # Likewise, if you merge such a file to a later branch B2 (one
        # that already existed when the file was added on B1), CVS
        # creates a synthetic dead revision 1.1.x.1 on B2.  Don't drop
        # these revisions now, but mark them synthetic so
        # createchangeset() can take care of them.
        if (store and
              e.dead and
              e.revision[-1] == 1 and      # 1.1 or 1.1.x.1
              len(e.comment) == 1 and
              file_added_re.match(e.comment[0])):
            ui.debug(_('found synthetic revision in %s: %r\n')
                     % (e.rcs, e.comment[0]))
            e.synthetic = True

        if store:
            # clean up the results and save in the log.
            store = False
            e.tags = util.sort([scache(x) for x in tags.get(e.revision, [])])
            e.comment = scache('\n'.join(e.comment))

            # even-length revision tuples longer than (1, 1) denote a
            # branch revision; the branch name is the tag on the prefix
            revn = len(e.revision)
            if revn > 3 and (revn % 2) == 0:
                e.branch = tags.get(e.revision[:-1], [None])[0]
            else:
                e.branch = None

            log.append(e)

            if len(log) % 100 == 0:
                ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')

    listsort(log, key=lambda x:(x.rcs, x.revision))

    # find parent revisions of individual files
    versions = {}
    for e in log:
        branch = e.revision[:-1]
        p = versions.get((e.rcs, branch), None)
        if p is None:
            p = e.revision[:-2]
        e.parent = p
        versions[(e.rcs, branch)] = e.revision

    # update the log cache
    if cache:
        if log:
            # join up the old and new logs
            listsort(log, key=lambda x:x.date)

            if oldlog and oldlog[-1].date >= log[0].date:
                raise logerror('Log cache overlaps with new log entries,'
                               ' re-run without cache.')

            log = oldlog + log

            # write the new cachefile
            ui.note(_('writing cvs log cache %s\n') % cachefile)
            pickle.dump(log, file(cachefile, 'w'))
        else:
            log = oldlog

    ui.status(_('%d log entries\n') % len(log))

    return log
433
433
434
434
class changeset(object):
    '''A changeset assembled from individual CVS log entries.

    Instances are plain attribute bags; createchangeset() fills in:
      .id        - integer identifying this changeset (list index)
      .author    - author name as CVS knows it
      .branch    - name of branch this changeset is on, or None
      .comment   - commit message
      .date      - the commit date as a (time,tz) tuple
      .entries   - list of logentry objects in this changeset
      .parents   - list of one or two parent changesets
      .tags      - list of tags on this changeset
      .synthetic - from synthetic revision "file ... added on branch ..."
      .mergepoint- the branch merged from (if present in rlog output)
    '''
    def __init__(self, **entries):
        # Same attribute-bag convention as logentry.
        self.__dict__.update(entries)

    def __repr__(self):
        ident = getattr(self, 'id', "(no id)")
        return "<%s at 0x%x: %s>" % (self.__class__.__name__,
                                     id(self), ident)
455
455
456 def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
456 def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
457 '''Convert log into changesets.'''
457 '''Convert log into changesets.'''
458
458
459 ui.status(_('creating changesets\n'))
459 ui.status(_('creating changesets\n'))
460
460
461 # Merge changesets
461 # Merge changesets
462
462
463 listsort(log, key=lambda x:(x.comment, x.author, x.branch, x.date))
463 listsort(log, key=lambda x:(x.comment, x.author, x.branch, x.date))
464
464
465 changesets = []
465 changesets = []
466 files = {}
466 files = {}
467 c = None
467 c = None
468 for i, e in enumerate(log):
468 for i, e in enumerate(log):
469
469
470 # Check if log entry belongs to the current changeset or not.
470 # Check if log entry belongs to the current changeset or not.
471 if not (c and
471 if not (c and
472 e.comment == c.comment and
472 e.comment == c.comment and
473 e.author == c.author and
473 e.author == c.author and
474 e.branch == c.branch and
474 e.branch == c.branch and
475 ((c.date[0] + c.date[1]) <=
475 ((c.date[0] + c.date[1]) <=
476 (e.date[0] + e.date[1]) <=
476 (e.date[0] + e.date[1]) <=
477 (c.date[0] + c.date[1]) + fuzz) and
477 (c.date[0] + c.date[1]) + fuzz) and
478 e.file not in files):
478 e.file not in files):
479 c = changeset(comment=e.comment, author=e.author,
479 c = changeset(comment=e.comment, author=e.author,
480 branch=e.branch, date=e.date, entries=[],
480 branch=e.branch, date=e.date, entries=[],
481 mergepoint=getattr(e, 'mergepoint', None))
481 mergepoint=getattr(e, 'mergepoint', None))
482 changesets.append(c)
482 changesets.append(c)
483 files = {}
483 files = {}
484 if len(changesets) % 100 == 0:
484 if len(changesets) % 100 == 0:
485 t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
485 t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
486 ui.status(util.ellipsis(t, 80) + '\n')
486 ui.status(util.ellipsis(t, 80) + '\n')
487
487
488 c.entries.append(e)
488 c.entries.append(e)
489 files[e.file] = True
489 files[e.file] = True
490 c.date = e.date # changeset date is date of latest commit in it
490 c.date = e.date # changeset date is date of latest commit in it
491
491
492 # Mark synthetic changesets
492 # Mark synthetic changesets
493
493
494 for c in changesets:
494 for c in changesets:
495 # Synthetic revisions always get their own changeset, because
495 # Synthetic revisions always get their own changeset, because
496 # the log message includes the filename. E.g. if you add file3
496 # the log message includes the filename. E.g. if you add file3
497 # and file4 on a branch, you get four log entries and three
497 # and file4 on a branch, you get four log entries and three
498 # changesets:
498 # changesets:
499 # "File file3 was added on branch ..." (synthetic, 1 entry)
499 # "File file3 was added on branch ..." (synthetic, 1 entry)
500 # "File file4 was added on branch ..." (synthetic, 1 entry)
500 # "File file4 was added on branch ..." (synthetic, 1 entry)
501 # "Add file3 and file4 to fix ..." (real, 2 entries)
501 # "Add file3 and file4 to fix ..." (real, 2 entries)
502 # Hence the check for 1 entry here.
502 # Hence the check for 1 entry here.
503 synth = getattr(c.entries[0], 'synthetic', None)
503 synth = getattr(c.entries[0], 'synthetic', None)
504 c.synthetic = (len(c.entries) == 1 and synth)
504 c.synthetic = (len(c.entries) == 1 and synth)
505
505
506 # Sort files in each changeset
506 # Sort files in each changeset
507
507
508 for c in changesets:
508 for c in changesets:
509 def pathcompare(l, r):
509 def pathcompare(l, r):
510 'Mimic cvsps sorting order'
510 'Mimic cvsps sorting order'
511 l = l.split('/')
511 l = l.split('/')
512 r = r.split('/')
512 r = r.split('/')
513 nl = len(l)
513 nl = len(l)
514 nr = len(r)
514 nr = len(r)
515 n = min(nl, nr)
515 n = min(nl, nr)
516 for i in range(n):
516 for i in range(n):
517 if i + 1 == nl and nl < nr:
517 if i + 1 == nl and nl < nr:
518 return -1
518 return -1
519 elif i + 1 == nr and nl > nr:
519 elif i + 1 == nr and nl > nr:
520 return +1
520 return +1
521 elif l[i] < r[i]:
521 elif l[i] < r[i]:
522 return -1
522 return -1
523 elif l[i] > r[i]:
523 elif l[i] > r[i]:
524 return +1
524 return +1
525 return 0
525 return 0
526 def entitycompare(l, r):
526 def entitycompare(l, r):
527 return pathcompare(l.file, r.file)
527 return pathcompare(l.file, r.file)
528
528
529 c.entries.sort(entitycompare)
529 c.entries.sort(entitycompare)
530
530
531 # Sort changesets by date
531 # Sort changesets by date
532
532
533 def cscmp(l, r):
533 def cscmp(l, r):
534 d = sum(l.date) - sum(r.date)
534 d = sum(l.date) - sum(r.date)
535 if d:
535 if d:
536 return d
536 return d
537
537
538 # detect vendor branches and initial commits on a branch
538 # detect vendor branches and initial commits on a branch
539 le = {}
539 le = {}
540 for e in l.entries:
540 for e in l.entries:
541 le[e.rcs] = e.revision
541 le[e.rcs] = e.revision
542 re = {}
542 re = {}
543 for e in r.entries:
543 for e in r.entries:
544 re[e.rcs] = e.revision
544 re[e.rcs] = e.revision
545
545
546 d = 0
546 d = 0
547 for e in l.entries:
547 for e in l.entries:
548 if re.get(e.rcs, None) == e.parent:
548 if re.get(e.rcs, None) == e.parent:
549 assert not d
549 assert not d
550 d = 1
550 d = 1
551 break
551 break
552
552
553 for e in r.entries:
553 for e in r.entries:
554 if le.get(e.rcs, None) == e.parent:
554 if le.get(e.rcs, None) == e.parent:
555 assert not d
555 assert not d
556 d = -1
556 d = -1
557 break
557 break
558
558
559 return d
559 return d
560
560
561 changesets.sort(cscmp)
561 changesets.sort(cscmp)
562
562
563 # Collect tags
563 # Collect tags
564
564
565 globaltags = {}
565 globaltags = {}
566 for c in changesets:
566 for c in changesets:
567 tags = {}
567 tags = {}
568 for e in c.entries:
568 for e in c.entries:
569 for tag in e.tags:
569 for tag in e.tags:
570 # remember which is the latest changeset to have this tag
570 # remember which is the latest changeset to have this tag
571 globaltags[tag] = c
571 globaltags[tag] = c
572
572
573 for c in changesets:
573 for c in changesets:
574 tags = {}
574 tags = {}
575 for e in c.entries:
575 for e in c.entries:
576 for tag in e.tags:
576 for tag in e.tags:
577 tags[tag] = True
577 tags[tag] = True
578 # remember tags only if this is the latest changeset to have it
578 # remember tags only if this is the latest changeset to have it
579 c.tags = util.sort([tag for tag in tags if globaltags[tag] is c])
579 c.tags = util.sort([tag for tag in tags if globaltags[tag] is c])
580
580
581 # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
581 # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
582 # by inserting dummy changesets with two parents, and handle
582 # by inserting dummy changesets with two parents, and handle
583 # {{mergefrombranch BRANCHNAME}} by setting two parents.
583 # {{mergefrombranch BRANCHNAME}} by setting two parents.
584
584
585 if mergeto is None:
585 if mergeto is None:
586 mergeto = r'{{mergetobranch ([-\w]+)}}'
586 mergeto = r'{{mergetobranch ([-\w]+)}}'
587 if mergeto:
587 if mergeto:
588 mergeto = re.compile(mergeto)
588 mergeto = re.compile(mergeto)
589
589
590 if mergefrom is None:
590 if mergefrom is None:
591 mergefrom = r'{{mergefrombranch ([-\w]+)}}'
591 mergefrom = r'{{mergefrombranch ([-\w]+)}}'
592 if mergefrom:
592 if mergefrom:
593 mergefrom = re.compile(mergefrom)
593 mergefrom = re.compile(mergefrom)
594
594
595 versions = {} # changeset index where we saw any particular file version
595 versions = {} # changeset index where we saw any particular file version
596 branches = {} # changeset index where we saw a branch
596 branches = {} # changeset index where we saw a branch
597 n = len(changesets)
597 n = len(changesets)
598 i = 0
598 i = 0
599 while i<n:
599 while i<n:
600 c = changesets[i]
600 c = changesets[i]
601
601
602 for f in c.entries:
602 for f in c.entries:
603 versions[(f.rcs, f.revision)] = i
603 versions[(f.rcs, f.revision)] = i
604
604
605 p = None
605 p = None
606 if c.branch in branches:
606 if c.branch in branches:
607 p = branches[c.branch]
607 p = branches[c.branch]
608 else:
608 else:
609 for f in c.entries:
609 for f in c.entries:
610 p = max(p, versions.get((f.rcs, f.parent), None))
610 p = max(p, versions.get((f.rcs, f.parent), None))
611
611
612 c.parents = []
612 c.parents = []
613 if p is not None:
613 if p is not None:
614 p = changesets[p]
614 p = changesets[p]
615
615
616 # Ensure no changeset has a synthetic changeset as a parent.
616 # Ensure no changeset has a synthetic changeset as a parent.
617 while p.synthetic:
617 while p.synthetic:
618 assert len(p.parents) <= 1, \
618 assert len(p.parents) <= 1, \
619 _('synthetic changeset cannot have multiple parents')
619 _('synthetic changeset cannot have multiple parents')
620 if p.parents:
620 if p.parents:
621 p = p.parents[0]
621 p = p.parents[0]
622 else:
622 else:
623 p = None
623 p = None
624 break
624 break
625
625
626 if p is not None:
626 if p is not None:
627 c.parents.append(p)
627 c.parents.append(p)
628
628
629 if c.mergepoint:
629 if c.mergepoint:
630 if c.mergepoint == 'HEAD':
630 if c.mergepoint == 'HEAD':
631 c.mergepoint = None
631 c.mergepoint = None
632 c.parents.append(changesets[branches[c.mergepoint]])
632 c.parents.append(changesets[branches[c.mergepoint]])
633
633
634 if mergefrom:
634 if mergefrom:
635 m = mergefrom.search(c.comment)
635 m = mergefrom.search(c.comment)
636 if m:
636 if m:
637 m = m.group(1)
637 m = m.group(1)
638 if m == 'HEAD':
638 if m == 'HEAD':
639 m = None
639 m = None
640 try:
640 candidate = changesets[branches[m]]
641 candidate = changesets[branches[m]]
642 except KeyError:
643 ui.warn(_("warning: CVS commit message references "
644 "non-existent branch %r:\n%s\n")
645 % (m, c.comment))
641 if m in branches and c.branch != m and not candidate.synthetic:
646 if m in branches and c.branch != m and not candidate.synthetic:
642 c.parents.append(candidate)
647 c.parents.append(candidate)
643
648
644 if mergeto:
649 if mergeto:
645 m = mergeto.search(c.comment)
650 m = mergeto.search(c.comment)
646 if m:
651 if m:
647 try:
652 try:
648 m = m.group(1)
653 m = m.group(1)
649 if m == 'HEAD':
654 if m == 'HEAD':
650 m = None
655 m = None
651 except:
656 except:
652 m = None # if no group found then merge to HEAD
657 m = None # if no group found then merge to HEAD
653 if m in branches and c.branch != m:
658 if m in branches and c.branch != m:
654 # insert empty changeset for merge
659 # insert empty changeset for merge
655 cc = changeset(author=c.author, branch=m, date=c.date,
660 cc = changeset(author=c.author, branch=m, date=c.date,
656 comment='convert-repo: CVS merge from branch %s' % c.branch,
661 comment='convert-repo: CVS merge from branch %s' % c.branch,
657 entries=[], tags=[], parents=[changesets[branches[m]], c])
662 entries=[], tags=[], parents=[changesets[branches[m]], c])
658 changesets.insert(i + 1, cc)
663 changesets.insert(i + 1, cc)
659 branches[m] = i + 1
664 branches[m] = i + 1
660
665
661 # adjust our loop counters now we have inserted a new entry
666 # adjust our loop counters now we have inserted a new entry
662 n += 1
667 n += 1
663 i += 2
668 i += 2
664 continue
669 continue
665
670
666 branches[c.branch] = i
671 branches[c.branch] = i
667 i += 1
672 i += 1
668
673
669 # Drop synthetic changesets (safe now that we have ensured no other
674 # Drop synthetic changesets (safe now that we have ensured no other
670 # changesets can have them as parents).
675 # changesets can have them as parents).
671 i = 0
676 i = 0
672 while i < len(changesets):
677 while i < len(changesets):
673 if changesets[i].synthetic:
678 if changesets[i].synthetic:
674 del changesets[i]
679 del changesets[i]
675 else:
680 else:
676 i += 1
681 i += 1
677
682
678 # Number changesets
683 # Number changesets
679
684
680 for i, c in enumerate(changesets):
685 for i, c in enumerate(changesets):
681 c.id = i + 1
686 c.id = i + 1
682
687
683 ui.status(_('%d changeset entries\n') % len(changesets))
688 ui.status(_('%d changeset entries\n') % len(changesets))
684
689
685 return changesets
690 return changesets
686
691
687
692
688 def debugcvsps(ui, *args, **opts):
693 def debugcvsps(ui, *args, **opts):
689 '''Read CVS rlog for current directory or named path in repository, and
694 '''Read CVS rlog for current directory or named path in repository, and
690 convert the log to changesets based on matching commit log entries and dates.'''
695 convert the log to changesets based on matching commit log entries and dates.'''
691
696
692 if opts["new_cache"]:
697 if opts["new_cache"]:
693 cache = "write"
698 cache = "write"
694 elif opts["update_cache"]:
699 elif opts["update_cache"]:
695 cache = "update"
700 cache = "update"
696 else:
701 else:
697 cache = None
702 cache = None
698
703
699 revisions = opts["revisions"]
704 revisions = opts["revisions"]
700
705
701 try:
706 try:
702 if args:
707 if args:
703 log = []
708 log = []
704 for d in args:
709 for d in args:
705 log += createlog(ui, d, root=opts["root"], cache=cache)
710 log += createlog(ui, d, root=opts["root"], cache=cache)
706 else:
711 else:
707 log = createlog(ui, root=opts["root"], cache=cache)
712 log = createlog(ui, root=opts["root"], cache=cache)
708 except logerror, e:
713 except logerror, e:
709 ui.write("%r\n"%e)
714 ui.write("%r\n"%e)
710 return
715 return
711
716
712 changesets = createchangeset(ui, log, opts["fuzz"])
717 changesets = createchangeset(ui, log, opts["fuzz"])
713 del log
718 del log
714
719
715 # Print changesets (optionally filtered)
720 # Print changesets (optionally filtered)
716
721
717 off = len(revisions)
722 off = len(revisions)
718 branches = {} # latest version number in each branch
723 branches = {} # latest version number in each branch
719 ancestors = {} # parent branch
724 ancestors = {} # parent branch
720 for cs in changesets:
725 for cs in changesets:
721
726
722 if opts["ancestors"]:
727 if opts["ancestors"]:
723 if cs.branch not in branches and cs.parents and cs.parents[0].id:
728 if cs.branch not in branches and cs.parents and cs.parents[0].id:
724 ancestors[cs.branch] = changesets[cs.parents[0].id-1].branch, cs.parents[0].id
729 ancestors[cs.branch] = changesets[cs.parents[0].id-1].branch, cs.parents[0].id
725 branches[cs.branch] = cs.id
730 branches[cs.branch] = cs.id
726
731
727 # limit by branches
732 # limit by branches
728 if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
733 if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
729 continue
734 continue
730
735
731 if not off:
736 if not off:
732 # Note: trailing spaces on several lines here are needed to have
737 # Note: trailing spaces on several lines here are needed to have
733 # bug-for-bug compatibility with cvsps.
738 # bug-for-bug compatibility with cvsps.
734 ui.write('---------------------\n')
739 ui.write('---------------------\n')
735 ui.write('PatchSet %d \n' % cs.id)
740 ui.write('PatchSet %d \n' % cs.id)
736 ui.write('Date: %s\n' % util.datestr(cs.date, '%Y/%m/%d %H:%M:%S %1%2'))
741 ui.write('Date: %s\n' % util.datestr(cs.date, '%Y/%m/%d %H:%M:%S %1%2'))
737 ui.write('Author: %s\n' % cs.author)
742 ui.write('Author: %s\n' % cs.author)
738 ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
743 ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
739 ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags)>1],
744 ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags)>1],
740 ','.join(cs.tags) or '(none)'))
745 ','.join(cs.tags) or '(none)'))
741 if opts["parents"] and cs.parents:
746 if opts["parents"] and cs.parents:
742 if len(cs.parents)>1:
747 if len(cs.parents)>1:
743 ui.write('Parents: %s\n' % (','.join([str(p.id) for p in cs.parents])))
748 ui.write('Parents: %s\n' % (','.join([str(p.id) for p in cs.parents])))
744 else:
749 else:
745 ui.write('Parent: %d\n' % cs.parents[0].id)
750 ui.write('Parent: %d\n' % cs.parents[0].id)
746
751
747 if opts["ancestors"]:
752 if opts["ancestors"]:
748 b = cs.branch
753 b = cs.branch
749 r = []
754 r = []
750 while b:
755 while b:
751 b, c = ancestors[b]
756 b, c = ancestors[b]
752 r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
757 r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
753 if r:
758 if r:
754 ui.write('Ancestors: %s\n' % (','.join(r)))
759 ui.write('Ancestors: %s\n' % (','.join(r)))
755
760
756 ui.write('Log:\n')
761 ui.write('Log:\n')
757 ui.write('%s\n\n' % cs.comment)
762 ui.write('%s\n\n' % cs.comment)
758 ui.write('Members: \n')
763 ui.write('Members: \n')
759 for f in cs.entries:
764 for f in cs.entries:
760 fn = f.file
765 fn = f.file
761 if fn.startswith(opts["prefix"]):
766 if fn.startswith(opts["prefix"]):
762 fn = fn[len(opts["prefix"]):]
767 fn = fn[len(opts["prefix"]):]
763 ui.write('\t%s:%s->%s%s \n' % (fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
768 ui.write('\t%s:%s->%s%s \n' % (fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
764 '.'.join([str(x) for x in f.revision]), ['', '(DEAD)'][f.dead]))
769 '.'.join([str(x) for x in f.revision]), ['', '(DEAD)'][f.dead]))
765 ui.write('\n')
770 ui.write('\n')
766
771
767 # have we seen the start tag?
772 # have we seen the start tag?
768 if revisions and off:
773 if revisions and off:
769 if revisions[0] == str(cs.id) or \
774 if revisions[0] == str(cs.id) or \
770 revisions[0] in cs.tags:
775 revisions[0] in cs.tags:
771 off = False
776 off = False
772
777
773 # see if we reached the end tag
778 # see if we reached the end tag
774 if len(revisions)>1 and not off:
779 if len(revisions)>1 and not off:
775 if revisions[1] == str(cs.id) or \
780 if revisions[1] == str(cs.id) or \
776 revisions[1] in cs.tags:
781 revisions[1] in cs.tags:
777 break
782 break
@@ -1,108 +1,116 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 # Test config convert.cvsps.mergefrom config setting.
3 # Test config convert.cvsps.mergefrom config setting.
4 # (Should test similar mergeto feature, but I don't understand it yet.)
4 # (Should test similar mergeto feature, but I don't understand it yet.)
5 # Requires builtin cvsps.
5 # Requires builtin cvsps.
6
6
7 "$TESTDIR/hghave" cvs || exit 80
7 "$TESTDIR/hghave" cvs || exit 80
8
8
9 export CVSROOT=`pwd`/cvsrepo
9 export CVSROOT=`pwd`/cvsrepo
10
10
11 # XXX copied from test-convert-cvs-synthetic
11 # XXX copied from test-convert-cvs-synthetic
12 cvscall()
12 cvscall()
13 {
13 {
14 echo cvs -f "$@"
14 echo cvs -f "$@"
15 cvs -f "$@"
15 cvs -f "$@"
16 }
16 }
17
17
18 # output of 'cvs ci' varies unpredictably, so just discard it
18 # output of 'cvs ci' varies unpredictably, so just discard it
19 # XXX copied from test-convert-cvs-synthetic
19 # XXX copied from test-convert-cvs-synthetic
20 cvsci()
20 cvsci()
21 {
21 {
22 echo cvs -f ci "$@"
22 echo cvs -f ci "$@"
23 cvs -f ci "$@" >/dev/null 2>&1
23 cvs -f ci "$@" >/dev/null 2>&1
24 }
24 }
25
25
26 # XXX copied from test-convert-cvs-synthetic
26 # XXX copied from test-convert-cvs-synthetic
27 filterpath()
27 filterpath()
28 {
28 {
29 eval "$@" | sed "s:$CVSROOT:*REPO*:g"
29 eval "$@" | sed "s:$CVSROOT:*REPO*:g"
30 }
30 }
31
31
32 echo "[extensions]" >> $HGRCPATH
32 echo "[extensions]" >> $HGRCPATH
33 echo "convert = " >> $HGRCPATH
33 echo "convert = " >> $HGRCPATH
34 echo "graphlog = " >> $HGRCPATH
34 echo "[convert]" >> $HGRCPATH
35 echo "[convert]" >> $HGRCPATH
35 echo "cvsps=builtin" >> $HGRCPATH
36 echo "cvsps=builtin" >> $HGRCPATH
37 echo "cvsps.cache=0" >> $HGRCPATH
36 echo "cvsps.mergefrom=\[MERGE from (\S+)\]" >> $HGRCPATH
38 echo "cvsps.mergefrom=\[MERGE from (\S+)\]" >> $HGRCPATH
37
39
38 echo % create cvs repository with one project
40 echo % create cvs repository with one project
39 mkdir cvsrepo
41 mkdir cvsrepo
40
42
41 filterpath cvscall -q -d "$CVSROOT" init
43 filterpath cvscall -q -d "$CVSROOT" init
42 mkdir cvsrepo/proj
44 mkdir cvsrepo/proj
43
45
44 echo % populate cvs repository
46 echo % populate cvs repository
45 cvscall -Q co proj
47 cvscall -Q co proj
46 cd proj
48 cd proj
47 touch file1
49 touch file1
48 cvscall -Q add file1
50 cvscall -Q add file1
49 cvsci -m"add file1 on trunk"
51 cvsci -m"add file1 on trunk"
50
52
51 echo % create two release branches
53 echo % create two release branches
52 cvscall -q tag -b v1_0
54 cvscall -q tag -b v1_0
53 cvscall -q tag -b v1_1
55 cvscall -q tag -b v1_1
54
56
55 echo % modify file1 on branch v1_0
57 echo % modify file1 on branch v1_0
56 filterpath cvscall -Q update -rv1_0
58 filterpath cvscall -Q update -rv1_0
57 echo "change" >> file1
59 echo "change" >> file1
58 cvsci -m"add text"
60 cvsci -m"add text"
59
61
60 echo % make unrelated change on v1_1
62 echo % make unrelated change on v1_1
61 cvscall -Q update -rv1_1
63 cvscall -Q update -rv1_1
62 touch unrelated
64 touch unrelated
63 cvscall -Q add unrelated
65 cvscall -Q add unrelated
64 cvsci -m"unrelated change"
66 cvsci -m"unrelated change"
65
67
66 echo % merge file1 to v1_1
68 echo % merge file1 to v1_1
67 filterpath cvscall -Q update -jv1_0
69 filterpath cvscall -Q update -jv1_0
68 cvsci -m"add text [MERGE from v1_0]"
70 cvsci -m"add text [MERGE from v1_0]"
69
71
70 echo % merge change to trunk
72 echo % merge change to trunk
71 cvscall -Q update -A
73 cvscall -Q update -A
72 filterpath cvscall -Q update -jv1_1
74 filterpath cvscall -Q update -jv1_1
73 cvsci -m"add text [MERGE from v1_1]"
75 cvsci -m"add text [MERGE from v1_1]"
74
76
75 echo % non-merged change on trunk
77 echo % non-merged change on trunk
76 echo "foo" > file2
78 echo "foo" > file2
77 cvscall -Q add file2
79 cvscall -Q add file2
78 cvsci -m"add file2 on trunk" file2
80 cvsci -m"add file2 on trunk" file2
79
81
80 # this will create rev 1.3
82 # this will create rev 1.3
81 echo % change on trunk to backport
83 echo % change on trunk to backport
82 echo "backport me" >> file1
84 echo "backport me" >> file1
83 cvsci -m"add other text" file1
85 cvsci -m"add other text" file1
84 cvscall log file1 | sed -n '/^date: / d; /^revision /,$ p;'
86 cvscall log file1 | sed -n '/^date: / d; /^revision /,$ p;'
85
87
86 # XXX how many ways are there to spell "trunk" with CVS?
88 # XXX how many ways are there to spell "trunk" with CVS?
87 echo % backport trunk change to v1_1
89 echo % backport trunk change to v1_1
88 cvscall -Q update -rv1_1
90 cvscall -Q update -rv1_1
89 filterpath cvscall -Q update -j1.2 -j1.3 file1
91 filterpath cvscall -Q update -j1.2 -j1.3 file1
90 cvsci -m"add other text [MERGE from HEAD]" file1
92 cvsci -m"add other text [MERGE from HEAD]" file1
91
93
92 set -e
94 set -e
95 echo "% fix bug on v1_1, merge to trunk with error"
96 cvscall -Q update -rv1_1
97 echo "merge forward" >> file1
98 cvscall -Q tag unmerged
99 cvsci -m"fix file1"
100 cvscall -Q update -A
101 filterpath cvscall -Q update -junmerged -jv1_1
102 # note the typo in the commit log message
103 cvsci -m"fix file1 [MERGE from v1-1]"
104 cvs -Q tag -d unmerged
105
106 set -e
93 echo % convert to hg
107 echo % convert to hg
94 cd ..
108 cd ..
95 filterpath hg convert proj proj.hg
109 filterpath hg convert proj proj.hg
96
110
97 echo % complete log
111 echo % complete log
98 template="{rev}: '{branches}' {desc}\n"
112 template="{rev}: '{branches}' {desc}\n"
99 hg -R proj.hg log --template="$template"
113 hg -R proj.hg log --template="$template"
100
114
101 echo % parents of rev 3
115 echo % graphical log
102 hg -R proj.hg parents --template="$template" -r3
116 hg -R proj.hg glog --template="$template"
103 echo % parents of rev 4
104 hg -R proj.hg parents --template="$template" -r4
105 echo % parents of rev 5
106 hg -R proj.hg parents --template="$template" -r5
107 echo % parents of rev 7
108 hg -R proj.hg parents --template="$template" -r7
@@ -1,101 +1,128 b''
1 % create cvs repository with one project
1 % create cvs repository with one project
2 cvs -f -q -d *REPO* init
2 cvs -f -q -d *REPO* init
3 % populate cvs repository
3 % populate cvs repository
4 cvs -f -Q co proj
4 cvs -f -Q co proj
5 cvs -f -Q add file1
5 cvs -f -Q add file1
6 cvs -f ci -madd file1 on trunk
6 cvs -f ci -madd file1 on trunk
7 % create two release branches
7 % create two release branches
8 cvs -f -q tag -b v1_0
8 cvs -f -q tag -b v1_0
9 T file1
9 T file1
10 cvs -f -q tag -b v1_1
10 cvs -f -q tag -b v1_1
11 T file1
11 T file1
12 % modify file1 on branch v1_0
12 % modify file1 on branch v1_0
13 cvs -f -Q update -rv1_0
13 cvs -f -Q update -rv1_0
14 cvs -f ci -madd text
14 cvs -f ci -madd text
15 % make unrelated change on v1_1
15 % make unrelated change on v1_1
16 cvs -f -Q update -rv1_1
16 cvs -f -Q update -rv1_1
17 cvs -f -Q add unrelated
17 cvs -f -Q add unrelated
18 cvs -f ci -munrelated change
18 cvs -f ci -munrelated change
19 % merge file1 to v1_1
19 % merge file1 to v1_1
20 cvs -f -Q update -jv1_0
20 cvs -f -Q update -jv1_0
21 RCS file: *REPO*/proj/file1,v
21 RCS file: *REPO*/proj/file1,v
22 retrieving revision 1.1
22 retrieving revision 1.1
23 retrieving revision 1.1.2.1
23 retrieving revision 1.1.2.1
24 Merging differences between 1.1 and 1.1.2.1 into file1
24 Merging differences between 1.1 and 1.1.2.1 into file1
25 cvs -f ci -madd text [MERGE from v1_0]
25 cvs -f ci -madd text [MERGE from v1_0]
26 % merge change to trunk
26 % merge change to trunk
27 cvs -f -Q update -A
27 cvs -f -Q update -A
28 cvs -f -Q update -jv1_1
28 cvs -f -Q update -jv1_1
29 RCS file: *REPO*/proj/file1,v
29 RCS file: *REPO*/proj/file1,v
30 retrieving revision 1.1
30 retrieving revision 1.1
31 retrieving revision 1.1.4.1
31 retrieving revision 1.1.4.1
32 Merging differences between 1.1 and 1.1.4.1 into file1
32 Merging differences between 1.1 and 1.1.4.1 into file1
33 cvs -f ci -madd text [MERGE from v1_1]
33 cvs -f ci -madd text [MERGE from v1_1]
34 % non-merged change on trunk
34 % non-merged change on trunk
35 cvs -f -Q add file2
35 cvs -f -Q add file2
36 cvs -f ci -madd file2 on trunk file2
36 cvs -f ci -madd file2 on trunk file2
37 % change on trunk to backport
37 % change on trunk to backport
38 cvs -f ci -madd other text file1
38 cvs -f ci -madd other text file1
39 revision 1.3
39 revision 1.3
40 add other text
40 add other text
41 ----------------------------
41 ----------------------------
42 revision 1.2
42 revision 1.2
43 add text [MERGE from v1_1]
43 add text [MERGE from v1_1]
44 ----------------------------
44 ----------------------------
45 revision 1.1
45 revision 1.1
46 branches: 1.1.2; 1.1.4;
46 branches: 1.1.2; 1.1.4;
47 add file1 on trunk
47 add file1 on trunk
48 ----------------------------
48 ----------------------------
49 revision 1.1.4.1
49 revision 1.1.4.1
50 add text [MERGE from v1_0]
50 add text [MERGE from v1_0]
51 ----------------------------
51 ----------------------------
52 revision 1.1.2.1
52 revision 1.1.2.1
53 add text
53 add text
54 =============================================================================
54 =============================================================================
55 % backport trunk change to v1_1
55 % backport trunk change to v1_1
56 cvs -f -Q update -rv1_1
56 cvs -f -Q update -rv1_1
57 cvs -f -Q update -j1.2 -j1.3 file1
57 cvs -f -Q update -j1.2 -j1.3 file1
58 RCS file: *REPO*/proj/file1,v
58 RCS file: *REPO*/proj/file1,v
59 retrieving revision 1.2
59 retrieving revision 1.2
60 retrieving revision 1.3
60 retrieving revision 1.3
61 Merging differences between 1.2 and 1.3 into file1
61 Merging differences between 1.2 and 1.3 into file1
62 cvs -f ci -madd other text [MERGE from HEAD] file1
62 cvs -f ci -madd other text [MERGE from HEAD] file1
63 % fix bug on v1_1, merge to trunk with error
64 cvs -f -Q update -rv1_1
65 cvs -f -Q tag unmerged
66 cvs -f ci -mfix file1
67 cvs -f -Q update -A
68 cvs -f -Q update -junmerged -jv1_1
69 RCS file: *REPO*/proj/file1,v
70 retrieving revision 1.1.4.2
71 retrieving revision 1.1.4.3
72 Merging differences between 1.1.4.2 and 1.1.4.3 into file1
73 cvs -f ci -mfix file1 [MERGE from v1-1]
63 % convert to hg
74 % convert to hg
75 warning: CVS commit message references non-existent branch 'v1-1':
76 fix file1 [MERGE from v1-1]
64 initializing destination proj.hg repository
77 initializing destination proj.hg repository
65 connecting to *REPO*
78 connecting to *REPO*
66 scanning source...
79 scanning source...
67 using builtin cvsps
80 using builtin cvsps
68 collecting CVS rlog
81 collecting CVS rlog
69 10 log entries
82 12 log entries
70 creating changesets
83 creating changesets
71 8 changeset entries
84 10 changeset entries
72 sorting...
85 sorting...
73 converting...
86 converting...
74 7 add file1 on trunk
87 9 add file1 on trunk
75 6 add text
88 8 add text
76 5 unrelated change
89 7 unrelated change
77 4 add text [MERGE from v1_0]
90 6 add text [MERGE from v1_0]
78 3 add text [MERGE from v1_1]
91 5 add text [MERGE from v1_1]
79 2 add file2 on trunk
92 4 add file2 on trunk
80 1 add other text
93 3 add other text
81 0 add other text [MERGE from HEAD]
94 2 add other text [MERGE from HEAD]
95 1 fix file1
96 0 fix file1 [MERGE from v1-1]
82 % complete log
97 % complete log
98 9: '' fix file1 [MERGE from v1-1]
99 8: 'v1_1' fix file1
83 7: 'v1_1' add other text [MERGE from HEAD]
100 7: 'v1_1' add other text [MERGE from HEAD]
84 6: '' add other text
101 6: '' add other text
85 5: '' add file2 on trunk
102 5: '' add file2 on trunk
86 4: '' add text [MERGE from v1_1]
103 4: '' add text [MERGE from v1_1]
87 3: 'v1_1' add text [MERGE from v1_0]
104 3: 'v1_1' add text [MERGE from v1_0]
88 2: 'v1_1' unrelated change
105 2: 'v1_1' unrelated change
89 1: 'v1_0' add text
106 1: 'v1_0' add text
90 0: '' add file1 on trunk
107 0: '' add file1 on trunk
91 % parents of rev 3
108 % graphical log
92 2: 'v1_1' unrelated change
109 o 9: '' fix file1 [MERGE from v1-1]
93 1: 'v1_0' add text
110 |
94 % parents of rev 4
111 | o 8: 'v1_1' fix file1
95 0: '' add file1 on trunk
112 | |
96 3: 'v1_1' add text [MERGE from v1_0]
113 | o 7: 'v1_1' add other text [MERGE from HEAD]
97 % parents of rev 5
114 |/|
98 4: '' add text [MERGE from v1_1]
115 o | 6: '' add other text
99 % parents of rev 7
116 | |
100 3: 'v1_1' add text [MERGE from v1_0]
117 o | 5: '' add file2 on trunk
101 6: '' add other text
118 | |
119 o | 4: '' add text [MERGE from v1_1]
120 |\|
121 | o 3: 'v1_1' add text [MERGE from v1_0]
122 | |\
123 +---o 2: 'v1_1' unrelated change
124 | |
125 | o 1: 'v1_0' add text
126 |/
127 o 0: '' add file1 on trunk
128
General Comments 0
You need to be logged in to leave comments. Login now