convert/cvsps: wrap long lines
Martin Geisler
r8661:883f14fc default
@@ -1,780 +1,792 @@
#
# Mercurial built-in replacement for cvsps.
#
# Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2, incorporated herein by reference.

import os
import re
import cPickle as pickle
from mercurial import util
from mercurial.i18n import _

def listsort(list, key):
    "helper to sort by key in Python 2.3"
    try:
        list.sort(key=key)
    except TypeError:
        list.sort(lambda l, r: cmp(key(l), key(r)))

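# Illustrative sketch (hypothetical data, not from the original source):
# on Python 2.4+ list.sort() accepts key= directly; on Python 2.3 the
# TypeError branch sorts through cmp() with identical results, e.g.
#
#   revs = [(1, 3), (1, 1), (1, 2)]
#   listsort(revs, key=lambda r: r[-1])
#   # revs == [(1, 1), (1, 2), (1, 3)]
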
class logentry(object):
    '''Class logentry has the following attributes:
        .author    - author name as CVS knows it
        .branch    - name of branch this revision is on
        .branches  - revision tuple of branches starting at this revision
        .comment   - commit message
        .date      - the commit date as a (time, tz) tuple
        .dead      - true if file revision is dead
        .file      - Name of file
        .lines     - a tuple (+lines, -lines) or None
        .parent    - Previous revision of this entry
        .rcs       - name of file as returned from CVS
        .revision  - revision number as tuple
        .tags      - list of tags on the file
        .synthetic - is this a synthetic "file ... added on ..." revision?
-        .mergepoint- the branch that has been merged from (if present in rlog output)
+        .mergepoint- the branch that has been merged from
+                     (if present in rlog output)
    '''
    def __init__(self, **entries):
        self.__dict__.update(entries)

    def __repr__(self):
        return "<%s at 0x%x: %s %s>" % (self.__class__.__name__,
                                        id(self),
                                        self.file,
                                        ".".join(map(str, self.revision)))

class logerror(Exception):
    pass

def getrepopath(cvspath):
    """Return the repository path from a CVS path.

    >>> getrepopath('/foo/bar')
    '/foo/bar'
    >>> getrepopath('c:/foo/bar')
    'c:/foo/bar'
    >>> getrepopath(':pserver:10/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:10c:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:c:/foo/bar')
    'c:/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
    'c:/foo/bar'
    """
    # According to the CVS manual, CVS paths are expressed like:
    # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
    #
    # Unfortunately, Windows absolute paths start with a drive letter
    # like 'c:', making it harder to parse. Here we assume that drive
    # letters are only one character long and any CVS component before
    # the repository path is at least 2 characters long, and use this
    # to disambiguate.
    parts = cvspath.split(':')
    if len(parts) == 1:
        return parts[0]
    # Here there is an ambiguous case if we have a port number
    # immediately followed by a Windows drive letter. We assume this
    # never happens and decide it must be a CVS path component,
    # therefore ignoring it.
    if len(parts[-2]) > 1:
        return parts[-1].lstrip('0123456789')
    return parts[-2] + ':' + parts[-1]

def createlog(ui, directory=None, root="", rlog=True, cache=None):
    '''Collect the CVS rlog'''

    # Because we store many duplicate commit log messages, reusing strings
    # saves a lot of memory and pickle storage space.
    _scache = {}
    def scache(s):
        "return a shared version of a string"
        return _scache.setdefault(s, s)

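    # Illustrative sketch (not from the original source): scache() is a
    # plain string-interning cache -- equal strings collapse to a single
    # shared object, so duplicated commit messages are stored only once:
    #
    #   a = scache('same log message')
    #   b = scache('same log message')
    #   assert a is b
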
    ui.status(_('collecting CVS rlog\n'))

    log = []      # list of logentry objects containing the CVS state

    # patterns to match in CVS (r)log output, by state of use
    re_00 = re.compile('RCS file: (.+)$')
    re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
    re_02 = re.compile('cvs (r?log|server): (.+)\n$')
-    re_03 = re.compile("(Cannot access.+CVSROOT)|(can't create temporary directory.+)$")
+    re_03 = re.compile("(Cannot access.+CVSROOT)|"
+                       "(can't create temporary directory.+)$")
    re_10 = re.compile('Working file: (.+)$')
    re_20 = re.compile('symbolic names:')
    re_30 = re.compile('\t(.+): ([\\d.]+)$')
    re_31 = re.compile('----------------------------$')
-    re_32 = re.compile('=============================================================================$')
+    re_32 = re.compile('======================================='
+                       '======================================$')
    re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
-    re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?(.*mergepoint:\s+([^;]+);)?')
+    re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
+                       r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
+                       r'(.*mergepoint:\s+([^;]+);)?')
    re_70 = re.compile('branches: (.+);$')

    file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')

    prefix = ''   # leading path to strip off what we get from CVS

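    # Illustrative sketch (hypothetical rlog excerpt, not from the original
    # source) of the lines these patterns are written against:
    #
    #   RCS file: /cvsroot/proj/src/main.c,v                   (re_00)
    #   symbolic names:                                        (re_20)
    #   \tRELENG_1: 1.1.0.2                                    (re_30, tab-led)
    #   ----------------------------                           (re_31)
    #   revision 1.2                                           (re_50)
    #   date: 2008/03/27 12:00:00;  author: frank;  state: Exp;  lines: +2 -1;
    #                                                          (re_60)
    #   branches:  1.2.2;                                      (re_70)
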
    if directory is None:
        # Current working directory

        # Get the real directory in the repository
        try:
            prefix = file(os.path.join('CVS','Repository')).read().strip()
            if prefix == ".":
                prefix = ""
            directory = prefix
        except IOError:
            raise logerror('Not a CVS sandbox')

        if prefix and not prefix.endswith(os.sep):
            prefix += os.sep

        # Use the Root file in the sandbox, if it exists
        try:
            root = file(os.path.join('CVS','Root')).read().strip()
        except IOError:
            pass

    if not root:
        root = os.environ.get('CVSROOT', '')

    # read log cache if one exists
    oldlog = []
    date = None

    if cache:
        cachedir = os.path.expanduser('~/.hg.cvsps')
        if not os.path.exists(cachedir):
            os.mkdir(cachedir)

        # The cvsps cache pickle needs a uniquified name, based on the
        # repository location. The address may have all sorts of nasties
        # in it, slashes, colons and such. So here we take just the
        # alphanumerics, concatenated in a way that does not mix up the
        # various components, so that
        #    :pserver:user@server:/path
        # and
        #    /pserver/user/server/path
        # are mapped to different cache file names.
        cachefile = root.split(":") + [directory, "cache"]
        cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
        cachefile = os.path.join(cachedir,
                                 '.'.join([s for s in cachefile if s]))

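    # Illustrative sketch (hypothetical values, not from the original
    # source): with root=':pserver:user@server:/path' and directory='proj',
    # the non-empty components reduce to
    #   ['pserver', 'user-server', 'path', 'proj', 'cache']
    # and the pickle lands in
    #   ~/.hg.cvsps/pserver.user-server.path.proj.cache
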
        if cache == 'update':
            try:
                ui.note(_('reading cvs log cache %s\n') % cachefile)
                oldlog = pickle.load(file(cachefile))
                ui.note(_('cache has %d log entries\n') % len(oldlog))
            except Exception, e:
                ui.note(_('error reading cache: %r\n') % e)

            if oldlog:
                date = oldlog[-1].date    # last commit date as a (time,tz) tuple
                date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')

    # build the CVS commandline
    cmd = ['cvs', '-q']
    if root:
        cmd.append('-d%s' % root)
        p = util.normpath(getrepopath(root))
        if not p.endswith('/'):
            p += '/'
        prefix = p + util.normpath(prefix)
    cmd.append(['log', 'rlog'][rlog])
    if date:
        # no space between option and date string
        cmd.append('-d>%s' % date)
    cmd.append(directory)

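    # Illustrative sketch (hypothetical values, not from the original
    # source): with root=':pserver:user@server:/path', rlog=True and a
    # cached date, the list built above corresponds to the command
    #   cvs -q -d:pserver:user@server:/path rlog '-d>2008/03/27 12:00:00 +0000' proj
    # (each argument is shell-quoted just before the pipe is opened below).
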
    # state machine begins here
    tags = {}      # dictionary of revisions on current file with their tags
    branchmap = {} # mapping between branch names and revision numbers
    state = 0
    store = False  # set when a new record can be appended

    cmd = [util.shellquote(arg) for arg in cmd]
    ui.note(_("running %s\n") % (' '.join(cmd)))
    ui.debug(_("prefix=%r directory=%r root=%r\n") % (prefix, directory, root))

    pfp = util.popen(' '.join(cmd))
    peek = pfp.readline()
    while True:
        line = peek
        if line == '':
            break
        peek = pfp.readline()
        if line.endswith('\n'):
            line = line[:-1]
        #ui.debug('state=%d line=%r\n' % (state, line))

        if state == 0:
            # initial state, consume input until we see 'RCS file'
            match = re_00.match(line)
            if match:
                rcs = match.group(1)
                tags = {}
                if rlog:
                    filename = util.normpath(rcs[:-2])
                    if filename.startswith(prefix):
                        filename = filename[len(prefix):]
                    if filename.startswith('/'):
                        filename = filename[1:]
                    if filename.startswith('Attic/'):
                        filename = filename[6:]
                    else:
                        filename = filename.replace('/Attic/', '/')
                    state = 2
                    continue
                state = 1
                continue
            match = re_01.match(line)
            if match:
                raise Exception(match.group(1))
            match = re_02.match(line)
            if match:
                raise Exception(match.group(2))
            if re_03.match(line):
                raise Exception(line)

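        # Illustrative sketch (hypothetical paths, not from the original
        # source): with prefix='/cvsroot/proj/', an rlog header of
        #   RCS file: /cvsroot/proj/src/Attic/old.c,v
        # normalizes to the working name 'src/old.c' -- the ',v' suffix,
        # the repository prefix and the Attic component are all stripped.
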
        elif state == 1:
            # expect 'Working file' (only when using log instead of rlog)
            match = re_10.match(line)
            assert match, _('RCS file must be followed by working file')
            filename = util.normpath(match.group(1))
            state = 2

        elif state == 2:
            # expect 'symbolic names'
            if re_20.match(line):
                branchmap = {}
                state = 3

        elif state == 3:
            # read the symbolic names and store as tags
            match = re_30.match(line)
            if match:
                rev = [int(x) for x in match.group(2).split('.')]

                # Convert magic branch number to an odd-numbered one
                revn = len(rev)
                if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
                    rev = rev[:-2] + rev[-1:]
                rev = tuple(rev)
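                # Illustrative sketch (not from the original source): a
                # branch tag is stored by CVS as a "magic" even-length
                # number with a zero next-to-last component; the rewrite
                # above drops that zero, e.g.
                #   [1, 2, 0, 4] -> (1, 2, 4)
                # so the tag lines up with real revisions on that branch.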

                if rev not in tags:
                    tags[rev] = []
                tags[rev].append(match.group(1))
                branchmap[match.group(1)] = match.group(2)

            elif re_31.match(line):
                state = 5
            elif re_32.match(line):
                state = 0

        elif state == 4:
            # expecting '------' separator before first revision
            if re_31.match(line):
                state = 5
            else:
-                assert not re_32.match(line), _('must have at least some revisions')
+                assert not re_32.match(line), _('must have at least '
+                                                'some revisions')

        elif state == 5:
            # expecting revision number and possibly (ignored) lock indication
            # we create the logentry here from values stored in states 0 to 4,
            # as this state is re-entered for subsequent revisions of a file.
            match = re_50.match(line)
            assert match, _('expected revision number')
            e = logentry(rcs=scache(rcs), file=scache(filename),
                         revision=tuple([int(x) for x in match.group(1).split('.')]),
                         branches=[], parent=None,
                         synthetic=False)
            state = 6

        elif state == 6:
            # expecting date, author, state, lines changed
            match = re_60.match(line)
            assert match, _('revision must be followed by date line')
            d = match.group(1)
            if d[2] == '/':
                # Y2K
                d = '19' + d

            if len(d.split()) != 3:
                # cvs log dates always in GMT
                d = d + ' UTC'
-            e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S'])
+            e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
+                                        '%Y/%m/%d %H:%M:%S',
+                                        '%Y-%m-%d %H:%M:%S'])
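            # Illustrative sketch (hypothetical dates, not from the
            # original source): pre-2000 servers emit two-digit years,
            # so '99/05/01 12:00:00' becomes '1999/05/01 12:00:00' via
            # the Y2K fixup above, and a date with no timezone field is
            # given an explicit ' UTC' before util.parsedate() runs.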
            e.author = scache(match.group(2))
            e.dead = match.group(3).lower() == 'dead'

            if match.group(5):
                if match.group(6):
                    e.lines = (int(match.group(5)), int(match.group(6)))
                else:
                    e.lines = (int(match.group(5)), 0)
            elif match.group(6):
                e.lines = (0, int(match.group(6)))
            else:
                e.lines = None

            if match.group(7): # cvsnt mergepoint
                myrev = match.group(8).split('.')
                if len(myrev) == 2: # head
                    e.mergepoint = 'HEAD'
                else:
                    myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
                    branches = [b for b in branchmap if branchmap[b] == myrev]
                    assert len(branches) == 1, 'unknown branch: %s' % e.mergepoint
                    e.mergepoint = branches[0]
            else:
                e.mergepoint = None
            e.comment = []
            state = 7

        elif state == 7:
            # read the revision numbers of branches that start at this revision
            # or store the commit log message otherwise
            m = re_70.match(line)
            if m:
                e.branches = [tuple([int(y) for y in x.strip().split('.')])
                              for x in m.group(1).split(';')]
                state = 8
            elif re_31.match(line) and re_50.match(peek):
                state = 5
                store = True
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        elif state == 8:
            # store commit log message
            if re_31.match(line):
                state = 5
                store = True
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        # When a file is added on a branch B1, CVS creates a synthetic
        # dead trunk revision 1.1 so that the branch has a root.
        # Likewise, if you merge such a file to a later branch B2 (one
        # that already existed when the file was added on B1), CVS
        # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
        # these revisions now, but mark them synthetic so
        # createchangeset() can take care of them.
        if (store and
            e.dead and
            e.revision[-1] == 1 and      # 1.1 or 1.1.x.1
            len(e.comment) == 1 and
            file_added_re.match(e.comment[0])):
            ui.debug(_('found synthetic revision in %s: %r\n')
                     % (e.rcs, e.comment[0]))
            e.synthetic = True

        if store:
            # clean up the results and save in the log.
            store = False
            e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
            e.comment = scache('\n'.join(e.comment))

            revn = len(e.revision)
            if revn > 3 and (revn % 2) == 0:
                e.branch = tags.get(e.revision[:-1], [None])[0]
            else:
                e.branch = None

            log.append(e)

            if len(log) % 100 == 0:
                ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')

    listsort(log, key=lambda x:(x.rcs, x.revision))

    # find parent revisions of individual files
    versions = {}
    for e in log:
        branch = e.revision[:-1]
        p = versions.get((e.rcs, branch), None)
        if p is None:
            p = e.revision[:-2]
        e.parent = p
        versions[(e.rcs, branch)] = e.revision

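    # Illustrative sketch (hypothetical revisions, not from the original
    # source): given entries 1.1, 1.2 and 1.2.2.1 for one file, the loop
    # above assigns
    #   parent(1.2)     = 1.1   (previous revision on the same branch)
    #   parent(1.2.2.1) = 1.2   (branch root, via e.revision[:-2])
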
    # update the log cache
    if cache:
        if log:
            # join up the old and new logs
            listsort(log, key=lambda x:x.date)

            if oldlog and oldlog[-1].date >= log[0].date:
                raise logerror('Log cache overlaps with new log entries,'
                               ' re-run without cache.')

            log = oldlog + log

            # write the new cachefile
            ui.note(_('writing cvs log cache %s\n') % cachefile)
            pickle.dump(log, file(cachefile, 'w'))
        else:
            log = oldlog

    ui.status(_('%d log entries\n') % len(log))

    return log


class changeset(object):
    '''Class changeset has the following attributes:
        .id        - integer identifying this changeset (list index)
        .author    - author name as CVS knows it
        .branch    - name of branch this changeset is on, or None
        .comment   - commit message
        .date      - the commit date as a (time,tz) tuple
        .entries   - list of logentry objects in this changeset
        .parents   - list of one or two parent changesets
        .tags      - list of tags on this changeset
        .synthetic - from synthetic revision "file ... added on branch ..."
-        .mergepoint- the branch that has been merged from (if present in rlog output)
+        .mergepoint- the branch that has been merged from
+                     (if present in rlog output)
    '''
    def __init__(self, **entries):
        self.__dict__.update(entries)

    def __repr__(self):
        return "<%s at 0x%x: %s>" % (self.__class__.__name__,
                                     id(self),
                                     getattr(self, 'id', "(no id)"))

def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
    '''Convert log into changesets.'''

    ui.status(_('creating changesets\n'))

    # Merge changesets

    listsort(log, key=lambda x:(x.comment, x.author, x.branch, x.date))

    changesets = []
    files = set()
    c = None
    for i, e in enumerate(log):

        # Check if log entry belongs to the current changeset or not.
        if not (c and
                e.comment == c.comment and
                e.author == c.author and
                e.branch == c.branch and
                ((c.date[0] + c.date[1]) <=
                 (e.date[0] + e.date[1]) <=
                 (c.date[0] + c.date[1]) + fuzz) and
                e.file not in files):
            c = changeset(comment=e.comment, author=e.author,
                          branch=e.branch, date=e.date, entries=[],
                          mergepoint=getattr(e, 'mergepoint', None))
            changesets.append(c)
            files = set()
            if len(changesets) % 100 == 0:
                t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
                ui.status(util.ellipsis(t, 80) + '\n')

        c.entries.append(e)
        files.add(e.file)
        c.date = e.date # changeset date is date of latest commit in it

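    # Illustrative sketch (hypothetical timestamps, not from the original
    # source): with fuzz=60, entries sharing comment/author/branch at
    # 12:00:00 and 12:00:45 join one changeset; because c.date tracks the
    # latest entry appended, 12:01:30 still chains on, while an entry more
    # than fuzz seconds after the last one starts a new changeset.
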
    # Mark synthetic changesets

    for c in changesets:
        # Synthetic revisions always get their own changeset, because
        # the log message includes the filename. E.g. if you add file3
        # and file4 on a branch, you get four log entries and three
        # changesets:
        #   "File file3 was added on branch ..." (synthetic, 1 entry)
        #   "File file4 was added on branch ..." (synthetic, 1 entry)
        #   "Add file3 and file4 to fix ..."     (real, 2 entries)
        # Hence the check for 1 entry here.
        synth = getattr(c.entries[0], 'synthetic', None)
        c.synthetic = (len(c.entries) == 1 and synth)

    # Sort files in each changeset

    for c in changesets:
        def pathcompare(l, r):
            'Mimic cvsps sorting order'
            l = l.split('/')
            r = r.split('/')
            nl = len(l)
            nr = len(r)
            n = min(nl, nr)
            for i in range(n):
                if i + 1 == nl and nl < nr:
                    return -1
                elif i + 1 == nr and nl > nr:
                    return +1
                elif l[i] < r[i]:
                    return -1
                elif l[i] > r[i]:
                    return +1
            return 0
        def entitycompare(l, r):
            return pathcompare(l.file, r.file)

        c.entries.sort(entitycompare)

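    # Illustrative sketch (hypothetical paths, not from the original
    # source): pathcompare() ranks a path that ends before its peer as
    # smaller, regardless of the component text, so
    #   pathcompare('a/z', 'a/b/c') == -1   (shorter path ends first)
    #   pathcompare('a/b', 'a/c')   == -1   (ordinary component compare)
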
    # Sort changesets by date

    def cscmp(l, r):
        d = sum(l.date) - sum(r.date)
        if d:
            return d

        # detect vendor branches and initial commits on a branch
        le = {}
        for e in l.entries:
            le[e.rcs] = e.revision
        re = {}
        for e in r.entries:
            re[e.rcs] = e.revision

        d = 0
        for e in l.entries:
            if re.get(e.rcs, None) == e.parent:
                assert not d
                d = 1
                break

        for e in r.entries:
            if le.get(e.rcs, None) == e.parent:
                assert not d
                d = -1
                break

        return d

    changesets.sort(cscmp)

    # Collect tags

    globaltags = {}
    for c in changesets:
        for e in c.entries:
            for tag in e.tags:
                # remember which is the latest changeset to have this tag
                globaltags[tag] = c

    for c in changesets:
        tags = set()
        for e in c.entries:
            tags.update(e.tags)
        # remember tags only if this is the latest changeset to have it
        c.tags = sorted(tag for tag in tags if globaltags[tag] is c)

    # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
    # by inserting dummy changesets with two parents, and handle
    # {{mergefrombranch BRANCHNAME}} by setting two parents.

    if mergeto is None:
        mergeto = r'{{mergetobranch ([-\w]+)}}'
    if mergeto:
        mergeto = re.compile(mergeto)

    if mergefrom is None:
        mergefrom = r'{{mergefrombranch ([-\w]+)}}'
    if mergefrom:
        mergefrom = re.compile(mergefrom)

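    # Illustrative sketch (hypothetical commit message, not from the
    # original source): a CVS log message such as
    #   fix overflow {{mergetobranch RELENG_1}}
    # matches the default mergeto pattern with group(1) == 'RELENG_1' and
    # triggers the dummy merge changeset inserted in the loop below;
    # passing an empty pattern disables the feature.
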
    versions = {} # changeset index where we saw any particular file version
    branches = {} # changeset index where we saw a branch
    n = len(changesets)
    i = 0
    while i<n:
        c = changesets[i]

        for f in c.entries:
            versions[(f.rcs, f.revision)] = i

        p = None
        if c.branch in branches:
            p = branches[c.branch]
        else:
            for f in c.entries:
                p = max(p, versions.get((f.rcs, f.parent), None))

        c.parents = []
        if p is not None:
            p = changesets[p]

            # Ensure no changeset has a synthetic changeset as a parent.
            while p.synthetic:
                assert len(p.parents) <= 1, \
                       _('synthetic changeset cannot have multiple parents')
                if p.parents:
                    p = p.parents[0]
                else:
                    p = None
                    break

            if p is not None:
                c.parents.append(p)

        if c.mergepoint:
            if c.mergepoint == 'HEAD':
                c.mergepoint = None
            c.parents.append(changesets[branches[c.mergepoint]])

        if mergefrom:
            m = mergefrom.search(c.comment)
            if m:
                m = m.group(1)
                if m == 'HEAD':
                    m = None
                try:
                    candidate = changesets[branches[m]]
                except KeyError:
                    ui.warn(_("warning: CVS commit message references "
                              "non-existent branch %r:\n%s\n")
                            % (m, c.comment))
                if m in branches and c.branch != m and not candidate.synthetic:
                    c.parents.append(candidate)

        if mergeto:
            m = mergeto.search(c.comment)
            if m:
                try:
                    m = m.group(1)
                    if m == 'HEAD':
                        m = None
                except:
                    m = None # if no group found then merge to HEAD
                if m in branches and c.branch != m:
                    # insert empty changeset for merge
                    cc = changeset(author=c.author, branch=m, date=c.date,
                                   comment='convert-repo: CVS merge from branch %s' % c.branch,
                                   entries=[], tags=[],
                                   parents=[changesets[branches[m]], c])
                    changesets.insert(i + 1, cc)
                    branches[m] = i + 1

                    # adjust our loop counters now we have inserted a new entry
                    n += 1
                    i += 2
                    continue

        branches[c.branch] = i
        i += 1

    # Drop synthetic changesets (safe now that we have ensured no other
    # changesets can have them as parents).
    i = 0
    while i < len(changesets):
        if changesets[i].synthetic:
            del changesets[i]
        else:
            i += 1

    # Number changesets

    for i, c in enumerate(changesets):
        c.id = i + 1

    ui.status(_('%d changeset entries\n') % len(changesets))

    return changesets


def debugcvsps(ui, *args, **opts):
-    '''Read CVS rlog for current directory or named path in repository, and
-    convert the log to changesets based on matching commit log entries and dates.'''
+    '''Read CVS rlog for current directory or named path in
+    repository, and convert the log to changesets based on matching
+    commit log entries and dates.
+    '''
    if opts["new_cache"]:
        cache = "write"
    elif opts["update_cache"]:
        cache = "update"
    else:
        cache = None

    revisions = opts["revisions"]

    try:
        if args:
            log = []
            for d in args:
                log += createlog(ui, d, root=opts["root"], cache=cache)
        else:
            log = createlog(ui, root=opts["root"], cache=cache)
    except logerror, e:
        ui.write("%r\n"%e)
        return

    changesets = createchangeset(ui, log, opts["fuzz"])
    del log

    # Print changesets (optionally filtered)

    off = len(revisions)
    branches = {}  # latest version number in each branch
    ancestors = {} # parent branch
    for cs in changesets:

        if opts["ancestors"]:
            if cs.branch not in branches and cs.parents and cs.parents[0].id:
-                ancestors[cs.branch] = changesets[cs.parents[0].id-1].branch, cs.parents[0].id
+                ancestors[cs.branch] = (changesets[cs.parents[0].id-1].branch,
+                                        cs.parents[0].id)
            branches[cs.branch] = cs.id

        # limit by branches
        if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
            continue

        if not off:
            # Note: trailing spaces on several lines here are needed to have
            # bug-for-bug compatibility with cvsps.
            ui.write('---------------------\n')
            ui.write('PatchSet %d \n' % cs.id)
-            ui.write('Date: %s\n' % util.datestr(cs.date, '%Y/%m/%d %H:%M:%S %1%2'))
+            ui.write('Date: %s\n' % util.datestr(cs.date,
+                                                 '%Y/%m/%d %H:%M:%S %1%2'))
            ui.write('Author: %s\n' % cs.author)
            ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
            ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags)>1],
                                       ','.join(cs.tags) or '(none)'))
            if opts["parents"] and cs.parents:
                if len(cs.parents)>1:
                    ui.write('Parents: %s\n' % (','.join([str(p.id) for p in cs.parents])))
                else:
                    ui.write('Parent: %d\n' % cs.parents[0].id)

            if opts["ancestors"]:
                b = cs.branch
                r = []
                while b:
                    b, c = ancestors[b]
                    r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
                if r:
                    ui.write('Ancestors: %s\n' % (','.join(r)))

            ui.write('Log:\n')
            ui.write('%s\n\n' % cs.comment)
            ui.write('Members: \n')
            for f in cs.entries:
                fn = f.file
                if fn.startswith(opts["prefix"]):
                    fn = fn[len(opts["prefix"]):]
                ui.write('\t%s:%s->%s%s \n' % (fn,
                         '.'.join([str(x) for x in f.parent]) or 'INITIAL',
                         '.'.join([str(x) for x in f.revision]),
                         ['', '(DEAD)'][f.dead]))
            ui.write('\n')

        # have we seen the start tag?
        if revisions and off:
            if revisions[0] == str(cs.id) or \
               revisions[0] in cs.tags:
                off = False

        # see if we reached the end tag
        if len(revisions)>1 and not off:
            if revisions[1] == str(cs.id) or \
               revisions[1] in cs.tags:
                break
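
# Illustrative sketch (hypothetical output, not from the original source):
# for each selected changeset, debugcvsps emits a cvsps-compatible block
# along these lines, trailing spaces included:
#
#   ---------------------
#   PatchSet 12 
#   Date: 2008/03/27 12:00:45 +0000
#   Author: frank
#   Branch: HEAD
#   Tag: (none) 
#   Log:
#   Add file3 and file4 to fix ...
#
#   Members: 
#   	src/file3.c:INITIAL->1.1 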