##// END OF EJS Templates
cvsps: fix traceback involving 'synthetic'...
Benoit Boissinot -
r10701:35893dcf stable
parent child Browse files
Show More
@@ -1,845 +1,845 b''
1 # Mercurial built-in replacement for cvsps.
1 # Mercurial built-in replacement for cvsps.
2 #
2 #
3 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
3 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import os
8 import os
9 import re
9 import re
10 import cPickle as pickle
10 import cPickle as pickle
11 from mercurial import util
11 from mercurial import util
12 from mercurial.i18n import _
12 from mercurial.i18n import _
13 from mercurial import hook
13 from mercurial import hook
14
14
class logentry(object):
    '''One file revision parsed out of CVS (r)log output.

    Attributes (filled in by createlog as parsing proceeds):
    .author - author name as CVS knows it
    .branch - name of branch this revision is on
    .branches - revision tuple of branches starting at this revision
    .comment - commit message
    .date - the commit date as a (time, tz) tuple
    .dead - true if file revision is dead
    .file - Name of file
    .lines - a tuple (+lines, -lines) or None
    .parent - Previous revision of this entry
    .rcs - name of file as returned from CVS
    .revision - revision number as tuple
    .tags - list of tags on the file
    .synthetic - is this a synthetic "file ... added on ..." revision?
    .mergepoint- the branch that has been merged from
                 (if present in rlog output)
    .branchpoints- the branches that start at the current entry
    '''
    def __init__(self, **fields):
        # Default synthetic to False so entries unpickled from an old
        # cache (or constructed without the keyword) still have it.
        self.synthetic = False
        self.__dict__.update(fields)

    def __repr__(self):
        dotted = ".".join(map(str, self.revision))
        return "<%s at 0x%x: %s %s>" % (
            self.__class__.__name__, id(self), self.file, dotted)
class logerror(Exception):
    '''Raised when CVS (r)log output cannot be obtained or parsed.'''
45
46
def getrepopath(cvspath):
    """Return the repository path from a CVS path.

    >>> getrepopath('/foo/bar')
    '/foo/bar'
    >>> getrepopath('c:/foo/bar')
    'c:/foo/bar'
    >>> getrepopath(':pserver:10/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:10c:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:c:/foo/bar')
    'c:/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
    'c:/foo/bar'
    """
    # CVS paths look like
    #   [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
    # but Windows absolute paths start with a drive letter like 'c:',
    # which also contains a colon.  We assume drive letters are exactly
    # one character and every other colon-separated CVS component is at
    # least two characters, and disambiguate on that.
    pieces = cvspath.split(':')
    if len(pieces) == 1:
        # No colon at all: already a plain repository path.
        return pieces[0]
    tail = pieces[-1]
    penultimate = pieces[-2]
    if len(penultimate) > 1:
        # The component before the path is host/port/etc., not a drive
        # letter.  A port number directly followed by a Windows drive
        # letter is ambiguous; we assume that never happens and strip
        # any leading port digits from the path.
        return tail.lstrip('0123456789')
    # Single-character component: treat it as a drive letter and
    # reattach it to the path.
    return penultimate + ':' + tail
84
85
def createlog(ui, directory=None, root="", rlog=True, cache=None):
    '''Collect the CVS rlog.

    Runs 'cvs rlog' (or 'cvs log' when rlog is False) via a shell pipe
    and parses its output with a state machine, returning a list of
    logentry objects sorted by (rcs, revision).  When cache is set, the
    parsed log is pickled under ~/.hg.cvsps; cache == 'update' also
    reads a previous pickle and only asks CVS for newer entries.
    Raises logerror when not in a CVS sandbox or when the cache overlaps
    new entries.  Fires the "cvslog" hook with the resulting log.
    '''

    # Because we store many duplicate commit log messages, reusing strings
    # saves a lot of memory and pickle storage space.
    _scache = {}
    def scache(s):
        "return a shared version of a string"
        return _scache.setdefault(s, s)

    ui.status(_('collecting CVS rlog\n'))

    log = []      # list of logentry objects containing the CVS state

    # patterns to match in CVS (r)log output, by state of use
    re_00 = re.compile('RCS file: (.+)$')
    re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
    re_02 = re.compile('cvs (r?log|server): (.+)\n$')
    re_03 = re.compile("(Cannot access.+CVSROOT)|"
                       "(can't create temporary directory.+)$")
    re_10 = re.compile('Working file: (.+)$')
    re_20 = re.compile('symbolic names:')
    re_30 = re.compile('\t(.+): ([\\d.]+)$')
    re_31 = re.compile('----------------------------$')
    re_32 = re.compile('======================================='
                       '======================================$')
    re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
    re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
                       r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
                       r'(.*mergepoint:\s+([^;]+);)?')
    re_70 = re.compile('branches: (.+);$')

    file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')

    prefix = ''   # leading path to strip of what we get from CVS

    if directory is None:
        # Current working directory

        # Get the real directory in the repository
        try:
            prefix = open(os.path.join('CVS','Repository')).read().strip()
            directory = prefix
            if prefix == ".":
                prefix = ""
        except IOError:
            raise logerror('Not a CVS sandbox')

        if prefix and not prefix.endswith(os.sep):
            prefix += os.sep

        # Use the Root file in the sandbox, if it exists
        try:
            root = open(os.path.join('CVS','Root')).read().strip()
        except IOError:
            pass

    if not root:
        root = os.environ.get('CVSROOT', '')

    # read log cache if one exists
    oldlog = []
    date = None

    if cache:
        cachedir = os.path.expanduser('~/.hg.cvsps')
        if not os.path.exists(cachedir):
            os.mkdir(cachedir)

        # The cvsps cache pickle needs a uniquified name, based on the
        # repository location. The address may have all sort of nasties
        # in it, slashes, colons and such. So here we take just the
        # alphanumerics, concatenated in a way that does not mix up the
        # various components, so that
        #    :pserver:user@server:/path
        # and
        #    /pserver/user/server/path
        # are mapped to different cache file names.
        cachefile = root.split(":") + [directory, "cache"]
        cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
        cachefile = os.path.join(cachedir,
                                 '.'.join([s for s in cachefile if s]))

        if cache == 'update':
            try:
                ui.note(_('reading cvs log cache %s\n') % cachefile)
                # NOTE(review): the cache is loaded with pickle and is
                # assumed to be a trusted local file written by us below.
                oldlog = pickle.load(open(cachefile))
                ui.note(_('cache has %d log entries\n') % len(oldlog))
            except Exception, e:
                # best-effort: any cache problem just means a full rlog
                ui.note(_('error reading cache: %r\n') % e)

            if oldlog:
                date = oldlog[-1].date    # last commit date as a (time,tz) tuple
                date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')

    # build the CVS commandline
    cmd = ['cvs', '-q']
    if root:
        cmd.append('-d%s' % root)
        p = util.normpath(getrepopath(root))
        if not p.endswith('/'):
            p += '/'
        if prefix:
            # looks like normpath replaces "" by "."
            prefix = p + util.normpath(prefix)
        else:
            prefix = p
    cmd.append(['log', 'rlog'][rlog])
    if date:
        # no space between option and date string
        cmd.append('-d>%s' % date)
    cmd.append(directory)

    # state machine begins here
    tags = {}     # dictionary of revisions on current file with their tags
    branchmap = {} # mapping between branch names and revision numbers
    state = 0
    store = False # set when a new record can be appended

    cmd = [util.shellquote(arg) for arg in cmd]
    ui.note(_("running %s\n") % (' '.join(cmd)))
    ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root))

    pfp = util.popen(' '.join(cmd))
    # one line of lookahead: state 7 needs to peek at the next line to
    # distinguish a '-----' revision separator from message content
    peek = pfp.readline()
    while True:
        line = peek
        if line == '':
            break
        peek = pfp.readline()
        if line.endswith('\n'):
            line = line[:-1]
        #ui.debug('state=%d line=%r\n' % (state, line))

        if state == 0:
            # initial state, consume input until we see 'RCS file'
            match = re_00.match(line)
            if match:
                rcs = match.group(1)
                tags = {}
                if rlog:
                    # strip the trailing ',v' and our repository prefix
                    filename = util.normpath(rcs[:-2])
                    if filename.startswith(prefix):
                        filename = filename[len(prefix):]
                    if filename.startswith('/'):
                        filename = filename[1:]
                    if filename.startswith('Attic/'):
                        filename = filename[6:]
                    else:
                        filename = filename.replace('/Attic/', '/')
                    state = 2
                    continue
                state = 1
                continue
            match = re_01.match(line)
            if match:
                raise Exception(match.group(1))
            match = re_02.match(line)
            if match:
                raise Exception(match.group(2))
            if re_03.match(line):
                raise Exception(line)

        elif state == 1:
            # expect 'Working file' (only when using log instead of rlog)
            match = re_10.match(line)
            assert match, _('RCS file must be followed by working file')
            filename = util.normpath(match.group(1))
            state = 2

        elif state == 2:
            # expect 'symbolic names'
            if re_20.match(line):
                branchmap = {}
                state = 3

        elif state == 3:
            # read the symbolic names and store as tags
            match = re_30.match(line)
            if match:
                rev = [int(x) for x in match.group(2).split('.')]

                # Convert magic branch number to an odd-numbered one
                revn = len(rev)
                if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
                    rev = rev[:-2] + rev[-1:]
                rev = tuple(rev)

                if rev not in tags:
                    tags[rev] = []
                tags[rev].append(match.group(1))
                branchmap[match.group(1)] = match.group(2)

            elif re_31.match(line):
                state = 5
            elif re_32.match(line):
                state = 0

        elif state == 4:
            # expecting '------' separator before first revision
            if re_31.match(line):
                state = 5
            else:
                assert not re_32.match(line), _('must have at least '
                                                'some revisions')

        elif state == 5:
            # expecting revision number and possibly (ignored) lock indication
            # we create the logentry here from values stored in states 0 to 4,
            # as this state is re-entered for subsequent revisions of a file.
            # logentry.__init__ defaults .synthetic to False.
            match = re_50.match(line)
            assert match, _('expected revision number')
            e = logentry(rcs=scache(rcs), file=scache(filename),
                    revision=tuple([int(x) for x in match.group(1).split('.')]),
                    branches=[], parent=None)
            state = 6

        elif state == 6:
            # expecting date, author, state, lines changed
            match = re_60.match(line)
            assert match, _('revision must be followed by date line')
            d = match.group(1)
            if d[2] == '/':
                # Y2K
                d = '19' + d

            if len(d.split()) != 3:
                # cvs log dates always in GMT
                d = d + ' UTC'
            e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
                                        '%Y/%m/%d %H:%M:%S',
                                        '%Y-%m-%d %H:%M:%S'])
            e.author = scache(match.group(2))
            e.dead = match.group(3).lower() == 'dead'

            if match.group(5):
                if match.group(6):
                    e.lines = (int(match.group(5)), int(match.group(6)))
                else:
                    e.lines = (int(match.group(5)), 0)
            elif match.group(6):
                e.lines = (0, int(match.group(6)))
            else:
                e.lines = None

            if match.group(7): # cvsnt mergepoint
                myrev = match.group(8).split('.')
                if len(myrev) == 2: # head
                    e.mergepoint = 'HEAD'
                else:
                    myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
                    branches = [b for b in branchmap if branchmap[b] == myrev]
                    # NOTE(review): e.mergepoint is not yet assigned here, so
                    # a failing assert would raise AttributeError building the
                    # message — confirm intended.
                    assert len(branches) == 1, 'unknown branch: %s' % e.mergepoint
                    e.mergepoint = branches[0]
            else:
                e.mergepoint = None
            e.comment = []
            state = 7

        elif state == 7:
            # read the revision numbers of branches that start at this revision
            # or store the commit log message otherwise
            m = re_70.match(line)
            if m:
                e.branches = [tuple([int(y) for y in x.strip().split('.')])
                              for x in m.group(1).split(';')]
                state = 8
            elif re_31.match(line) and re_50.match(peek):
                state = 5
                store = True
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        elif state == 8:
            # store commit log message
            if re_31.match(line):
                state = 5
                store = True
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        # When a file is added on a branch B1, CVS creates a synthetic
        # dead trunk revision 1.1 so that the branch has a root.
        # Likewise, if you merge such a file to a later branch B2 (one
        # that already existed when the file was added on B1), CVS
        # creates a synthetic dead revision 1.1.x.1 on B2.  Don't drop
        # these revisions now, but mark them synthetic so
        # createchangeset() can take care of them.
        if (store and
              e.dead and
              e.revision[-1] == 1 and      # 1.1 or 1.1.x.1
              len(e.comment) == 1 and
              file_added_re.match(e.comment[0])):
            ui.debug('found synthetic revision in %s: %r\n'
                     % (e.rcs, e.comment[0]))
            e.synthetic = True

        if store:
            # clean up the results and save in the log.
            store = False
            e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
            e.comment = scache('\n'.join(e.comment))

            revn = len(e.revision)
            if revn > 3 and (revn % 2) == 0:
                e.branch = tags.get(e.revision[:-1], [None])[0]
            else:
                e.branch = None

            # find the branches starting from this revision
            branchpoints = set()
            for branch, revision in branchmap.iteritems():
                revparts = tuple([int(i) for i in revision.split('.')])
                if revparts[-2] == 0 and revparts[-1] % 2 == 0:
                    # normal branch
                    if revparts[:-2] == e.revision:
                        branchpoints.add(branch)
                elif revparts == (1, 1, 1): # vendor branch
                    if revparts in e.branches:
                        branchpoints.add(branch)
            e.branchpoints = branchpoints

            log.append(e)

            if len(log) % 100 == 0:
                ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')

    log.sort(key=lambda x: (x.rcs, x.revision))

    # find parent revisions of individual files
    versions = {}
    for e in log:
        branch = e.revision[:-1]
        p = versions.get((e.rcs, branch), None)
        if p is None:
            p = e.revision[:-2]
        e.parent = p
        versions[(e.rcs, branch)] = e.revision

    # update the log cache
    if cache:
        if log:
            # join up the old and new logs
            log.sort(key=lambda x: x.date)

            if oldlog and oldlog[-1].date >= log[0].date:
                raise logerror('Log cache overlaps with new log entries,'
                               ' re-run without cache.')

            log = oldlog + log

            # write the new cachefile
            ui.note(_('writing cvs log cache %s\n') % cachefile)
            pickle.dump(log, open(cachefile, 'w'))
        else:
            log = oldlog

    ui.status(_('%d log entries\n') % len(log))

    hook.hook(ui, None, "cvslog", True, log=log)

    return log
454
454
455
455
class changeset(object):
    '''A group of file revisions merged into one commit.

    Attributes:
    .id - integer identifying this changeset (list index)
    .author - author name as CVS knows it
    .branch - name of branch this changeset is on, or None
    .comment - commit message
    .date - the commit date as a (time,tz) tuple
    .entries - list of logentry objects in this changeset
    .parents - list of one or two parent changesets
    .tags - list of tags on this changeset
    .synthetic - from synthetic revision "file ... added on branch ..."
    .mergepoint- the branch that has been merged from
                 (if present in rlog output)
    .branchpoints- the branches that start at the current entry
    '''
    def __init__(self, **fields):
        # Default synthetic to False so changesets built without the
        # keyword (or unpickled from an old cache) still carry it.
        self.synthetic = False
        self.__dict__.update(fields)

    def __repr__(self):
        ident = getattr(self, 'id', "(no id)")
        return "<%s at 0x%x: %s>" % (
            self.__class__.__name__, id(self), ident)
478
479
479 def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
480 def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
480 '''Convert log into changesets.'''
481 '''Convert log into changesets.'''
481
482
482 ui.status(_('creating changesets\n'))
483 ui.status(_('creating changesets\n'))
483
484
484 # Merge changesets
485 # Merge changesets
485
486
486 log.sort(key=lambda x: (x.comment, x.author, x.branch, x.date))
487 log.sort(key=lambda x: (x.comment, x.author, x.branch, x.date))
487
488
488 changesets = []
489 changesets = []
489 files = set()
490 files = set()
490 c = None
491 c = None
491 for i, e in enumerate(log):
492 for i, e in enumerate(log):
492
493
493 # Check if log entry belongs to the current changeset or not.
494 # Check if log entry belongs to the current changeset or not.
494
495
495 # Since CVS is file centric, two different file revisions with
496 # Since CVS is file centric, two different file revisions with
496 # different branchpoints should be treated as belonging to two
497 # different branchpoints should be treated as belonging to two
497 # different changesets (and the ordering is important and not
498 # different changesets (and the ordering is important and not
498 # honoured by cvsps at this point).
499 # honoured by cvsps at this point).
499 #
500 #
500 # Consider the following case:
501 # Consider the following case:
501 # foo 1.1 branchpoints: [MYBRANCH]
502 # foo 1.1 branchpoints: [MYBRANCH]
502 # bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2]
503 # bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2]
503 #
504 #
504 # Here foo is part only of MYBRANCH, but not MYBRANCH2, e.g. a
505 # Here foo is part only of MYBRANCH, but not MYBRANCH2, e.g. a
505 # later version of foo may be in MYBRANCH2, so foo should be the
506 # later version of foo may be in MYBRANCH2, so foo should be the
506 # first changeset and bar the next and MYBRANCH and MYBRANCH2
507 # first changeset and bar the next and MYBRANCH and MYBRANCH2
507 # should both start off of the bar changeset. No provisions are
508 # should both start off of the bar changeset. No provisions are
508 # made to ensure that this is, in fact, what happens.
509 # made to ensure that this is, in fact, what happens.
509 if not (c and
510 if not (c and
510 e.comment == c.comment and
511 e.comment == c.comment and
511 e.author == c.author and
512 e.author == c.author and
512 e.branch == c.branch and
513 e.branch == c.branch and
513 (not hasattr(e, 'branchpoints') or
514 (not hasattr(e, 'branchpoints') or
514 not hasattr (c, 'branchpoints') or
515 not hasattr (c, 'branchpoints') or
515 e.branchpoints == c.branchpoints) and
516 e.branchpoints == c.branchpoints) and
516 ((c.date[0] + c.date[1]) <=
517 ((c.date[0] + c.date[1]) <=
517 (e.date[0] + e.date[1]) <=
518 (e.date[0] + e.date[1]) <=
518 (c.date[0] + c.date[1]) + fuzz) and
519 (c.date[0] + c.date[1]) + fuzz) and
519 e.file not in files):
520 e.file not in files):
520 c = changeset(comment=e.comment, author=e.author,
521 c = changeset(comment=e.comment, author=e.author,
521 branch=e.branch, date=e.date, entries=[],
522 branch=e.branch, date=e.date, entries=[],
522 mergepoint=getattr(e, 'mergepoint', None),
523 mergepoint=getattr(e, 'mergepoint', None),
523 branchpoints=getattr(e, 'branchpoints', set()))
524 branchpoints=getattr(e, 'branchpoints', set()))
524 changesets.append(c)
525 changesets.append(c)
525 files = set()
526 files = set()
526 if len(changesets) % 100 == 0:
527 if len(changesets) % 100 == 0:
527 t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
528 t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
528 ui.status(util.ellipsis(t, 80) + '\n')
529 ui.status(util.ellipsis(t, 80) + '\n')
529
530
530 c.entries.append(e)
531 c.entries.append(e)
531 files.add(e.file)
532 files.add(e.file)
532 c.date = e.date # changeset date is date of latest commit in it
533 c.date = e.date # changeset date is date of latest commit in it
533
534
534 # Mark synthetic changesets
535 # Mark synthetic changesets
535
536
536 for c in changesets:
537 for c in changesets:
537 # Synthetic revisions always get their own changeset, because
538 # Synthetic revisions always get their own changeset, because
538 # the log message includes the filename. E.g. if you add file3
539 # the log message includes the filename. E.g. if you add file3
539 # and file4 on a branch, you get four log entries and three
540 # and file4 on a branch, you get four log entries and three
540 # changesets:
541 # changesets:
541 # "File file3 was added on branch ..." (synthetic, 1 entry)
542 # "File file3 was added on branch ..." (synthetic, 1 entry)
542 # "File file4 was added on branch ..." (synthetic, 1 entry)
543 # "File file4 was added on branch ..." (synthetic, 1 entry)
543 # "Add file3 and file4 to fix ..." (real, 2 entries)
544 # "Add file3 and file4 to fix ..." (real, 2 entries)
544 # Hence the check for 1 entry here.
545 # Hence the check for 1 entry here.
545 synth = getattr(c.entries[0], 'synthetic', None)
546 c.synthetic = len(c.entries) == 1 and c.entries[0].synthetic
546 c.synthetic = (len(c.entries) == 1 and synth)
547
547
548 # Sort files in each changeset
548 # Sort files in each changeset
549
549
550 for c in changesets:
550 for c in changesets:
551 def pathcompare(l, r):
551 def pathcompare(l, r):
552 'Mimic cvsps sorting order'
552 'Mimic cvsps sorting order'
553 l = l.split('/')
553 l = l.split('/')
554 r = r.split('/')
554 r = r.split('/')
555 nl = len(l)
555 nl = len(l)
556 nr = len(r)
556 nr = len(r)
557 n = min(nl, nr)
557 n = min(nl, nr)
558 for i in range(n):
558 for i in range(n):
559 if i + 1 == nl and nl < nr:
559 if i + 1 == nl and nl < nr:
560 return -1
560 return -1
561 elif i + 1 == nr and nl > nr:
561 elif i + 1 == nr and nl > nr:
562 return +1
562 return +1
563 elif l[i] < r[i]:
563 elif l[i] < r[i]:
564 return -1
564 return -1
565 elif l[i] > r[i]:
565 elif l[i] > r[i]:
566 return +1
566 return +1
567 return 0
567 return 0
568 def entitycompare(l, r):
568 def entitycompare(l, r):
569 return pathcompare(l.file, r.file)
569 return pathcompare(l.file, r.file)
570
570
571 c.entries.sort(entitycompare)
571 c.entries.sort(entitycompare)
572
572
573 # Sort changesets by date
573 # Sort changesets by date
574
574
575 def cscmp(l, r):
575 def cscmp(l, r):
576 d = sum(l.date) - sum(r.date)
576 d = sum(l.date) - sum(r.date)
577 if d:
577 if d:
578 return d
578 return d
579
579
580 # detect vendor branches and initial commits on a branch
580 # detect vendor branches and initial commits on a branch
581 le = {}
581 le = {}
582 for e in l.entries:
582 for e in l.entries:
583 le[e.rcs] = e.revision
583 le[e.rcs] = e.revision
584 re = {}
584 re = {}
585 for e in r.entries:
585 for e in r.entries:
586 re[e.rcs] = e.revision
586 re[e.rcs] = e.revision
587
587
588 d = 0
588 d = 0
589 for e in l.entries:
589 for e in l.entries:
590 if re.get(e.rcs, None) == e.parent:
590 if re.get(e.rcs, None) == e.parent:
591 assert not d
591 assert not d
592 d = 1
592 d = 1
593 break
593 break
594
594
595 for e in r.entries:
595 for e in r.entries:
596 if le.get(e.rcs, None) == e.parent:
596 if le.get(e.rcs, None) == e.parent:
597 assert not d
597 assert not d
598 d = -1
598 d = -1
599 break
599 break
600
600
601 return d
601 return d
602
602
603 changesets.sort(cscmp)
603 changesets.sort(cscmp)
604
604
605 # Collect tags
605 # Collect tags
606
606
607 globaltags = {}
607 globaltags = {}
608 for c in changesets:
608 for c in changesets:
609 for e in c.entries:
609 for e in c.entries:
610 for tag in e.tags:
610 for tag in e.tags:
611 # remember which is the latest changeset to have this tag
611 # remember which is the latest changeset to have this tag
612 globaltags[tag] = c
612 globaltags[tag] = c
613
613
614 for c in changesets:
614 for c in changesets:
615 tags = set()
615 tags = set()
616 for e in c.entries:
616 for e in c.entries:
617 tags.update(e.tags)
617 tags.update(e.tags)
618 # remember tags only if this is the latest changeset to have it
618 # remember tags only if this is the latest changeset to have it
619 c.tags = sorted(tag for tag in tags if globaltags[tag] is c)
619 c.tags = sorted(tag for tag in tags if globaltags[tag] is c)
620
620
621 # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
621 # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
622 # by inserting dummy changesets with two parents, and handle
622 # by inserting dummy changesets with two parents, and handle
623 # {{mergefrombranch BRANCHNAME}} by setting two parents.
623 # {{mergefrombranch BRANCHNAME}} by setting two parents.
624
624
625 if mergeto is None:
625 if mergeto is None:
626 mergeto = r'{{mergetobranch ([-\w]+)}}'
626 mergeto = r'{{mergetobranch ([-\w]+)}}'
627 if mergeto:
627 if mergeto:
628 mergeto = re.compile(mergeto)
628 mergeto = re.compile(mergeto)
629
629
630 if mergefrom is None:
630 if mergefrom is None:
631 mergefrom = r'{{mergefrombranch ([-\w]+)}}'
631 mergefrom = r'{{mergefrombranch ([-\w]+)}}'
632 if mergefrom:
632 if mergefrom:
633 mergefrom = re.compile(mergefrom)
633 mergefrom = re.compile(mergefrom)
634
634
635 versions = {} # changeset index where we saw any particular file version
635 versions = {} # changeset index where we saw any particular file version
636 branches = {} # changeset index where we saw a branch
636 branches = {} # changeset index where we saw a branch
637 n = len(changesets)
637 n = len(changesets)
638 i = 0
638 i = 0
639 while i < n:
639 while i < n:
640 c = changesets[i]
640 c = changesets[i]
641
641
642 for f in c.entries:
642 for f in c.entries:
643 versions[(f.rcs, f.revision)] = i
643 versions[(f.rcs, f.revision)] = i
644
644
645 p = None
645 p = None
646 if c.branch in branches:
646 if c.branch in branches:
647 p = branches[c.branch]
647 p = branches[c.branch]
648 else:
648 else:
649 # first changeset on a new branch
649 # first changeset on a new branch
650 # the parent is a changeset with the branch in its
650 # the parent is a changeset with the branch in its
651 # branchpoints such that it is the latest possible
651 # branchpoints such that it is the latest possible
652 # commit without any intervening, unrelated commits.
652 # commit without any intervening, unrelated commits.
653
653
654 for candidate in xrange(i):
654 for candidate in xrange(i):
655 if c.branch not in changesets[candidate].branchpoints:
655 if c.branch not in changesets[candidate].branchpoints:
656 if p is not None:
656 if p is not None:
657 break
657 break
658 continue
658 continue
659 p = candidate
659 p = candidate
660
660
661 c.parents = []
661 c.parents = []
662 if p is not None:
662 if p is not None:
663 p = changesets[p]
663 p = changesets[p]
664
664
665 # Ensure no changeset has a synthetic changeset as a parent.
665 # Ensure no changeset has a synthetic changeset as a parent.
666 while p.synthetic:
666 while p.synthetic:
667 assert len(p.parents) <= 1, \
667 assert len(p.parents) <= 1, \
668 _('synthetic changeset cannot have multiple parents')
668 _('synthetic changeset cannot have multiple parents')
669 if p.parents:
669 if p.parents:
670 p = p.parents[0]
670 p = p.parents[0]
671 else:
671 else:
672 p = None
672 p = None
673 break
673 break
674
674
675 if p is not None:
675 if p is not None:
676 c.parents.append(p)
676 c.parents.append(p)
677
677
678 if c.mergepoint:
678 if c.mergepoint:
679 if c.mergepoint == 'HEAD':
679 if c.mergepoint == 'HEAD':
680 c.mergepoint = None
680 c.mergepoint = None
681 c.parents.append(changesets[branches[c.mergepoint]])
681 c.parents.append(changesets[branches[c.mergepoint]])
682
682
683 if mergefrom:
683 if mergefrom:
684 m = mergefrom.search(c.comment)
684 m = mergefrom.search(c.comment)
685 if m:
685 if m:
686 m = m.group(1)
686 m = m.group(1)
687 if m == 'HEAD':
687 if m == 'HEAD':
688 m = None
688 m = None
689 try:
689 try:
690 candidate = changesets[branches[m]]
690 candidate = changesets[branches[m]]
691 except KeyError:
691 except KeyError:
692 ui.warn(_("warning: CVS commit message references "
692 ui.warn(_("warning: CVS commit message references "
693 "non-existent branch %r:\n%s\n")
693 "non-existent branch %r:\n%s\n")
694 % (m, c.comment))
694 % (m, c.comment))
695 if m in branches and c.branch != m and not candidate.synthetic:
695 if m in branches and c.branch != m and not candidate.synthetic:
696 c.parents.append(candidate)
696 c.parents.append(candidate)
697
697
698 if mergeto:
698 if mergeto:
699 m = mergeto.search(c.comment)
699 m = mergeto.search(c.comment)
700 if m:
700 if m:
701 try:
701 try:
702 m = m.group(1)
702 m = m.group(1)
703 if m == 'HEAD':
703 if m == 'HEAD':
704 m = None
704 m = None
705 except:
705 except:
706 m = None # if no group found then merge to HEAD
706 m = None # if no group found then merge to HEAD
707 if m in branches and c.branch != m:
707 if m in branches and c.branch != m:
708 # insert empty changeset for merge
708 # insert empty changeset for merge
709 cc = changeset(
709 cc = changeset(
710 author=c.author, branch=m, date=c.date,
710 author=c.author, branch=m, date=c.date,
711 comment='convert-repo: CVS merge from branch %s'
711 comment='convert-repo: CVS merge from branch %s'
712 % c.branch,
712 % c.branch,
713 entries=[], tags=[],
713 entries=[], tags=[],
714 parents=[changesets[branches[m]], c])
714 parents=[changesets[branches[m]], c])
715 changesets.insert(i + 1, cc)
715 changesets.insert(i + 1, cc)
716 branches[m] = i + 1
716 branches[m] = i + 1
717
717
718 # adjust our loop counters now we have inserted a new entry
718 # adjust our loop counters now we have inserted a new entry
719 n += 1
719 n += 1
720 i += 2
720 i += 2
721 continue
721 continue
722
722
723 branches[c.branch] = i
723 branches[c.branch] = i
724 i += 1
724 i += 1
725
725
726 # Drop synthetic changesets (safe now that we have ensured no other
726 # Drop synthetic changesets (safe now that we have ensured no other
727 # changesets can have them as parents).
727 # changesets can have them as parents).
728 i = 0
728 i = 0
729 while i < len(changesets):
729 while i < len(changesets):
730 if changesets[i].synthetic:
730 if changesets[i].synthetic:
731 del changesets[i]
731 del changesets[i]
732 else:
732 else:
733 i += 1
733 i += 1
734
734
735 # Number changesets
735 # Number changesets
736
736
737 for i, c in enumerate(changesets):
737 for i, c in enumerate(changesets):
738 c.id = i + 1
738 c.id = i + 1
739
739
740 ui.status(_('%d changeset entries\n') % len(changesets))
740 ui.status(_('%d changeset entries\n') % len(changesets))
741
741
742 hook.hook(ui, None, "cvschangesets", True, changesets=changesets)
742 hook.hook(ui, None, "cvschangesets", True, changesets=changesets)
743
743
744 return changesets
744 return changesets
745
745
746
746
747 def debugcvsps(ui, *args, **opts):
747 def debugcvsps(ui, *args, **opts):
748 '''Read CVS rlog for current directory or named path in
748 '''Read CVS rlog for current directory or named path in
749 repository, and convert the log to changesets based on matching
749 repository, and convert the log to changesets based on matching
750 commit log entries and dates.
750 commit log entries and dates.
751 '''
751 '''
752 if opts["new_cache"]:
752 if opts["new_cache"]:
753 cache = "write"
753 cache = "write"
754 elif opts["update_cache"]:
754 elif opts["update_cache"]:
755 cache = "update"
755 cache = "update"
756 else:
756 else:
757 cache = None
757 cache = None
758
758
759 revisions = opts["revisions"]
759 revisions = opts["revisions"]
760
760
761 try:
761 try:
762 if args:
762 if args:
763 log = []
763 log = []
764 for d in args:
764 for d in args:
765 log += createlog(ui, d, root=opts["root"], cache=cache)
765 log += createlog(ui, d, root=opts["root"], cache=cache)
766 else:
766 else:
767 log = createlog(ui, root=opts["root"], cache=cache)
767 log = createlog(ui, root=opts["root"], cache=cache)
768 except logerror, e:
768 except logerror, e:
769 ui.write("%r\n"%e)
769 ui.write("%r\n"%e)
770 return
770 return
771
771
772 changesets = createchangeset(ui, log, opts["fuzz"])
772 changesets = createchangeset(ui, log, opts["fuzz"])
773 del log
773 del log
774
774
775 # Print changesets (optionally filtered)
775 # Print changesets (optionally filtered)
776
776
777 off = len(revisions)
777 off = len(revisions)
778 branches = {} # latest version number in each branch
778 branches = {} # latest version number in each branch
779 ancestors = {} # parent branch
779 ancestors = {} # parent branch
780 for cs in changesets:
780 for cs in changesets:
781
781
782 if opts["ancestors"]:
782 if opts["ancestors"]:
783 if cs.branch not in branches and cs.parents and cs.parents[0].id:
783 if cs.branch not in branches and cs.parents and cs.parents[0].id:
784 ancestors[cs.branch] = (changesets[cs.parents[0].id - 1].branch,
784 ancestors[cs.branch] = (changesets[cs.parents[0].id - 1].branch,
785 cs.parents[0].id)
785 cs.parents[0].id)
786 branches[cs.branch] = cs.id
786 branches[cs.branch] = cs.id
787
787
788 # limit by branches
788 # limit by branches
789 if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
789 if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
790 continue
790 continue
791
791
792 if not off:
792 if not off:
793 # Note: trailing spaces on several lines here are needed to have
793 # Note: trailing spaces on several lines here are needed to have
794 # bug-for-bug compatibility with cvsps.
794 # bug-for-bug compatibility with cvsps.
795 ui.write('---------------------\n')
795 ui.write('---------------------\n')
796 ui.write('PatchSet %d \n' % cs.id)
796 ui.write('PatchSet %d \n' % cs.id)
797 ui.write('Date: %s\n' % util.datestr(cs.date,
797 ui.write('Date: %s\n' % util.datestr(cs.date,
798 '%Y/%m/%d %H:%M:%S %1%2'))
798 '%Y/%m/%d %H:%M:%S %1%2'))
799 ui.write('Author: %s\n' % cs.author)
799 ui.write('Author: %s\n' % cs.author)
800 ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
800 ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
801 ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
801 ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
802 ','.join(cs.tags) or '(none)'))
802 ','.join(cs.tags) or '(none)'))
803 branchpoints = getattr(cs, 'branchpoints', None)
803 branchpoints = getattr(cs, 'branchpoints', None)
804 if branchpoints:
804 if branchpoints:
805 ui.write('Branchpoints: %s \n' % ', '.join(branchpoints))
805 ui.write('Branchpoints: %s \n' % ', '.join(branchpoints))
806 if opts["parents"] and cs.parents:
806 if opts["parents"] and cs.parents:
807 if len(cs.parents) > 1:
807 if len(cs.parents) > 1:
808 ui.write('Parents: %s\n' %
808 ui.write('Parents: %s\n' %
809 (','.join([str(p.id) for p in cs.parents])))
809 (','.join([str(p.id) for p in cs.parents])))
810 else:
810 else:
811 ui.write('Parent: %d\n' % cs.parents[0].id)
811 ui.write('Parent: %d\n' % cs.parents[0].id)
812
812
813 if opts["ancestors"]:
813 if opts["ancestors"]:
814 b = cs.branch
814 b = cs.branch
815 r = []
815 r = []
816 while b:
816 while b:
817 b, c = ancestors[b]
817 b, c = ancestors[b]
818 r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
818 r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
819 if r:
819 if r:
820 ui.write('Ancestors: %s\n' % (','.join(r)))
820 ui.write('Ancestors: %s\n' % (','.join(r)))
821
821
822 ui.write('Log:\n')
822 ui.write('Log:\n')
823 ui.write('%s\n\n' % cs.comment)
823 ui.write('%s\n\n' % cs.comment)
824 ui.write('Members: \n')
824 ui.write('Members: \n')
825 for f in cs.entries:
825 for f in cs.entries:
826 fn = f.file
826 fn = f.file
827 if fn.startswith(opts["prefix"]):
827 if fn.startswith(opts["prefix"]):
828 fn = fn[len(opts["prefix"]):]
828 fn = fn[len(opts["prefix"]):]
829 ui.write('\t%s:%s->%s%s \n' % (
829 ui.write('\t%s:%s->%s%s \n' % (
830 fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
830 fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
831 '.'.join([str(x) for x in f.revision]),
831 '.'.join([str(x) for x in f.revision]),
832 ['', '(DEAD)'][f.dead]))
832 ['', '(DEAD)'][f.dead]))
833 ui.write('\n')
833 ui.write('\n')
834
834
835 # have we seen the start tag?
835 # have we seen the start tag?
836 if revisions and off:
836 if revisions and off:
837 if revisions[0] == str(cs.id) or \
837 if revisions[0] == str(cs.id) or \
838 revisions[0] in cs.tags:
838 revisions[0] in cs.tags:
839 off = False
839 off = False
840
840
841 # see if we reached the end tag
841 # see if we reached the end tag
842 if len(revisions) > 1 and not off:
842 if len(revisions) > 1 and not off:
843 if revisions[1] == str(cs.id) or \
843 if revisions[1] == str(cs.id) or \
844 revisions[1] in cs.tags:
844 revisions[1] in cs.tags:
845 break
845 break
General Comments 0
You need to be logged in to leave comments. Login now