##// END OF EJS Templates
cvsps: update docstring for changeset class.
Greg Ward -
r8079:fb162c47 default
parent child Browse files
Show More
@@ -1,765 +1,766 b''
1 #
1 #
2 # Mercurial built-in replacement for cvsps.
2 # Mercurial built-in replacement for cvsps.
3 #
3 #
4 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
4 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
5 #
5 #
6 # This software may be used and distributed according to the terms
6 # This software may be used and distributed according to the terms
7 # of the GNU General Public License, incorporated herein by reference.
7 # of the GNU General Public License, incorporated herein by reference.
8
8
9 import os
9 import os
10 import re
10 import re
11 import cPickle as pickle
11 import cPickle as pickle
12 from mercurial import util
12 from mercurial import util
13 from mercurial.i18n import _
13 from mercurial.i18n import _
14
14
def listsort(list, key):
    """Sort *list* in place by *key*, coping with Python 2.3.

    Python 2.3's list.sort() lacks the key= argument, which shows up as
    a TypeError; fall back to an equivalent comparison function there.
    """
    try:
        # Python 2.4+: key-based sort is available directly.
        list.sort(key=key)
    except TypeError:
        # Python 2.3: emulate key-based sorting with a cmp function.
        list.sort(lambda a, b: cmp(key(a), key(b)))
21
21
class logentry(object):
    '''One CVS file revision, as parsed from (r)log output.

    Attributes (all supplied as keyword arguments by the parser):
    .author    - author name as CVS knows it
    .branch    - name of branch this revision is on
    .branches  - revision tuple of branches starting at this revision
    .comment   - commit message
    .date      - the commit date as a (time, tz) tuple
    .dead      - true if file revision is dead
    .file      - name of file
    .lines     - a tuple (+lines, -lines) or None
    .parent    - previous revision of this entry
    .rcs       - name of file as returned from CVS
    .revision  - revision number as tuple
    .tags      - list of tags on the file
    .synthetic - is this a synthetic "file ... added on ..." revision?
    .mergepoint- the branch that has been merged from (if present in rlog
                 output)
    '''
    def __init__(self, **entries):
        # Accept arbitrary keyword arguments and expose them as attributes.
        for name, value in entries.items():
            setattr(self, name, value)
41
41
class logerror(Exception):
    """Raised when the CVS (r)log output cannot be obtained or used."""
44
44
def getrepopath(cvspath):
    """Return the repository path from a CVS path.

    >>> getrepopath('/foo/bar')
    '/foo/bar'
    >>> getrepopath('c:/foo/bar')
    'c:/foo/bar'
    >>> getrepopath(':pserver:10/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:10c:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:c:/foo/bar')
    'c:/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
    'c:/foo/bar'
    """
    # Per the CVS manual, a CVS path looks like:
    #   [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
    # Windows drive letters ('c:') complicate the parse.  We assume a
    # drive letter is exactly one character long, while any other CVS
    # component before the repository path is at least two characters,
    # and disambiguate on that.
    pieces = cvspath.split(':')
    if len(pieces) == 1:
        # No colon at all: already a plain repository path.
        return pieces[0]
    # Ambiguous case: a port number immediately followed by a Windows
    # drive letter.  We assume that never happens, decide the component
    # is a CVS one, and strip the leading port digits from the path.
    if len(pieces[-2]) > 1:
        return pieces[-1].lstrip('0123456789')
    # A one-character component before the last: a Windows drive letter
    # that the split tore off -- glue it back on.
    return pieces[-2] + ':' + pieces[-1]
83
83
def createlog(ui, directory=None, root="", rlog=True, cache=None):
    '''Collect the CVS rlog.

    Runs "cvs rlog" (or "cvs log" when rlog is False), parses its output
    with a line-oriented state machine and returns a list of logentry
    objects, one per file revision.

    ui        - ui object used for status/note/debug output
    directory - directory to log; None means the current CVS sandbox
    root      - CVSROOT to use; falls back to CVS/Root or $CVSROOT
    rlog      - use "cvs rlog" (repository-side) instead of "cvs log"
    cache     - if set, persist the parsed log under ~/.hg.cvsps;
                'update' additionally reads the existing cache and only
                asks CVS for entries newer than the cached ones
    '''

    # Because we store many duplicate commit log messages, reusing strings
    # saves a lot of memory and pickle storage space.
    _scache = {}
    def scache(s):
        "return a shared version of a string"
        return _scache.setdefault(s, s)

    ui.status(_('collecting CVS rlog\n'))

    log = []      # list of logentry objects containing the CVS state

    # patterns to match in CVS (r)log output, by state of use
    re_00 = re.compile('RCS file: (.+)$')
    re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
    re_02 = re.compile('cvs (r?log|server): (.+)\n$')
    re_03 = re.compile("(Cannot access.+CVSROOT)|(can't create temporary directory.+)$")
    re_10 = re.compile('Working file: (.+)$')
    re_20 = re.compile('symbolic names:')
    re_30 = re.compile('\t(.+): ([\\d.]+)$')
    re_31 = re.compile('----------------------------$')
    re_32 = re.compile('=============================================================================$')
    re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
    re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?(.*mergepoint:\s+([^;]+);)?')
    re_70 = re.compile('branches: (.+);$')

    # matches the synthetic log message CVS writes when a file is first
    # added on a branch (see the long comment before the 'store' check)
    file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')

    prefix = ''   # leading path to strip of what we get from CVS

    if directory is None:
        # Current working directory

        # Get the real directory in the repository
        try:
            prefix = file(os.path.join('CVS','Repository')).read().strip()
            if prefix == ".":
                prefix = ""
            directory = prefix
        except IOError:
            raise logerror('Not a CVS sandbox')

        if prefix and not prefix.endswith(os.sep):
            prefix += os.sep

        # Use the Root file in the sandbox, if it exists
        try:
            root = file(os.path.join('CVS','Root')).read().strip()
        except IOError:
            pass

    if not root:
        root = os.environ.get('CVSROOT', '')

    # read log cache if one exists
    oldlog = []
    date = None

    if cache:
        cachedir = os.path.expanduser('~/.hg.cvsps')
        if not os.path.exists(cachedir):
            os.mkdir(cachedir)

        # The cvsps cache pickle needs a uniquified name, based on the
        # repository location. The address may have all sort of nasties
        # in it, slashes, colons and such. So here we take just the
        # alphanumerics, concatenated in a way that does not mix up the
        # various components, so that
        #    :pserver:user@server:/path
        # and
        #    /pserver/user/server/path
        # are mapped to different cache file names.
        cachefile = root.split(":") + [directory, "cache"]
        cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
        cachefile = os.path.join(cachedir,
                                 '.'.join([s for s in cachefile if s]))

        if cache == 'update':
            try:
                ui.note(_('reading cvs log cache %s\n') % cachefile)
                oldlog = pickle.load(file(cachefile))
                ui.note(_('cache has %d log entries\n') % len(oldlog))
            except Exception, e:
                # best-effort: a missing/corrupt cache just means a full rlog
                ui.note(_('error reading cache: %r\n') % e)

            if oldlog:
                date = oldlog[-1].date    # last commit date as a (time,tz) tuple
                date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')

    # build the CVS commandline
    cmd = ['cvs', '-q']
    if root:
        cmd.append('-d%s' % root)
        p = util.normpath(getrepopath(root))
        if not p.endswith('/'):
            p += '/'
        prefix = p + util.normpath(prefix)
    cmd.append(['log', 'rlog'][rlog])
    if date:
        # no space between option and date string
        cmd.append('-d>%s' % date)
    cmd.append(directory)

    # state machine begins here
    # states, as annotated on the branches below:
    #   0: expecting 'RCS file'
    #   1: expecting 'Working file' (plain "log" only)
    #   2: expecting 'symbolic names'
    #   3: reading symbolic names (tags/branches)
    #   4: expecting '------' separator before first revision
    #   5: expecting a revision number
    #   6: expecting the date/author/state line
    #   7: reading branches or the commit log message
    #   8: reading the commit log message
    tags = {}     # dictionary of revisions on current file with their tags
    branchmap = {} # mapping between branch names and revision numbers
    state = 0
    store = False # set when a new record can be appended

    cmd = [util.shellquote(arg) for arg in cmd]
    ui.note(_("running %s\n") % (' '.join(cmd)))
    ui.debug(_("prefix=%r directory=%r root=%r\n") % (prefix, directory, root))

    pfp = util.popen(' '.join(cmd))
    peek = pfp.readline()
    while True:
        line = peek
        if line == '':
            break
        peek = pfp.readline()
        if line.endswith('\n'):
            line = line[:-1]
        #ui.debug('state=%d line=%r\n' % (state, line))

        if state == 0:
            # initial state, consume input until we see 'RCS file'
            match = re_00.match(line)
            if match:
                rcs = match.group(1)
                tags = {}
                if rlog:
                    # rlog prints repository paths: strip the ',v' suffix,
                    # the repository prefix and any Attic/ component.
                    filename = util.normpath(rcs[:-2])
                    if filename.startswith(prefix):
                        filename = filename[len(prefix):]
                    if filename.startswith('/'):
                        filename = filename[1:]
                    if filename.startswith('Attic/'):
                        filename = filename[6:]
                    else:
                        filename = filename.replace('/Attic/', '/')
                    state = 2
                    continue
                state = 1
                continue
            match = re_01.match(line)
            if match:
                raise Exception(match.group(1))
            match = re_02.match(line)
            if match:
                raise Exception(match.group(2))
            if re_03.match(line):
                raise Exception(line)

        elif state == 1:
            # expect 'Working file' (only when using log instead of rlog)
            match = re_10.match(line)
            assert match, _('RCS file must be followed by working file')
            filename = util.normpath(match.group(1))
            state = 2

        elif state == 2:
            # expect 'symbolic names'
            if re_20.match(line):
                branchmap = {}
                state = 3

        elif state == 3:
            # read the symbolic names and store as tags
            match = re_30.match(line)
            if match:
                rev = [int(x) for x in match.group(2).split('.')]

                # Convert magic branch number to an odd-numbered one
                revn = len(rev)
                if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
                    rev = rev[:-2] + rev[-1:]
                rev = tuple(rev)

                if rev not in tags:
                    tags[rev] = []
                tags[rev].append(match.group(1))
                branchmap[match.group(1)] = match.group(2)

            elif re_31.match(line):
                state = 5
            elif re_32.match(line):
                state = 0

        elif state == 4:
            # expecting '------' separator before first revision
            if re_31.match(line):
                state = 5
            else:
                assert not re_32.match(line), _('must have at least some revisions')

        elif state == 5:
            # expecting revision number and possibly (ignored) lock indication
            # we create the logentry here from values stored in states 0 to 4,
            # as this state is re-entered for subsequent revisions of a file.
            match = re_50.match(line)
            assert match, _('expected revision number')
            e = logentry(rcs=scache(rcs), file=scache(filename),
                    revision=tuple([int(x) for x in match.group(1).split('.')]),
                    branches=[], parent=None,
                    synthetic=False)
            state = 6

        elif state == 6:
            # expecting date, author, state, lines changed
            match = re_60.match(line)
            assert match, _('revision must be followed by date line')
            d = match.group(1)
            if d[2] == '/':
                # Y2K
                d = '19' + d

            if len(d.split()) != 3:
                # cvs log dates always in GMT
                d = d + ' UTC'
            e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S'])
            e.author = scache(match.group(2))
            e.dead = match.group(3).lower() == 'dead'

            # groups 5/6 are the '+N'/'-N' line counts, either may be absent
            if match.group(5):
                if match.group(6):
                    e.lines = (int(match.group(5)), int(match.group(6)))
                else:
                    e.lines = (int(match.group(5)), 0)
            elif match.group(6):
                e.lines = (0, int(match.group(6)))
            else:
                e.lines = None

            if match.group(7): # cvsnt mergepoint
                myrev = match.group(8).split('.')
                if len(myrev) == 2: # head
                    e.mergepoint = 'HEAD'
                else:
                    myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
                    branches = [b for b in branchmap if branchmap[b] == myrev]
                    # NOTE(review): if this assert ever fires, e.mergepoint
                    # has not been assigned yet -- the message would raise
                    # AttributeError instead; confirm and fix upstream.
                    assert len(branches) == 1, 'unknown branch: %s' % e.mergepoint
                    e.mergepoint = branches[0]
            else:
                e.mergepoint = None
            e.comment = []
            state = 7

        elif state == 7:
            # read the revision numbers of branches that start at this revision
            # or store the commit log message otherwise
            m = re_70.match(line)
            if m:
                e.branches = [tuple([int(y) for y in x.strip().split('.')])
                              for x in m.group(1).split(';')]
                state = 8
            elif re_31.match(line) and re_50.match(peek):
                state = 5
                store = True
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        elif state == 8:
            # store commit log message
            if re_31.match(line):
                state = 5
                store = True
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        # When a file is added on a branch B1, CVS creates a synthetic
        # dead trunk revision 1.1 so that the branch has a root.
        # Likewise, if you merge such a file to a later branch B2 (one
        # that already existed when the file was added on B1), CVS
        # creates a synthetic dead revision 1.1.x.1 on B2.  Don't drop
        # these revisions now, but mark them synthetic so
        # createchangeset() can take care of them.
        if (store and
              e.dead and
              e.revision[-1] == 1 and      # 1.1 or 1.1.x.1
              len(e.comment) == 1 and
              file_added_re.match(e.comment[0])):
            ui.debug(_('found synthetic revision in %s: %r\n')
                     % (e.rcs, e.comment[0]))
            e.synthetic = True

        if store:
            # clean up the results and save in the log.
            store = False
            e.tags = util.sort([scache(x) for x in tags.get(e.revision, [])])
            e.comment = scache('\n'.join(e.comment))

            revn = len(e.revision)
            if revn > 3 and (revn % 2) == 0:
                # even-length revision of depth > 2: a branch revision;
                # its branch name, if any, is the tag on the branch number
                e.branch = tags.get(e.revision[:-1], [None])[0]
            else:
                e.branch = None

            log.append(e)

            if len(log) % 100 == 0:
                ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')

    listsort(log, key=lambda x:(x.rcs, x.revision))

    # find parent revisions of individual files
    versions = {}
    for e in log:
        branch = e.revision[:-1]
        p = versions.get((e.rcs, branch), None)
        if p is None:
            p = e.revision[:-2]
        e.parent = p
        versions[(e.rcs, branch)] = e.revision

    # update the log cache
    if cache:
        if log:
            # join up the old and new logs
            listsort(log, key=lambda x:x.date)

            if oldlog and oldlog[-1].date >= log[0].date:
                raise logerror('Log cache overlaps with new log entries,'
                               ' re-run without cache.')

            log = oldlog + log

            # write the new cachefile
            ui.note(_('writing cvs log cache %s\n') % cachefile)
            pickle.dump(log, file(cachefile, 'w'))
        else:
            log = oldlog

    ui.status(_('%d log entries\n') % len(log))

    return log
427
427
428
428
class changeset(object):
    '''A group of logentry objects that were committed together.

    Attributes:
    .id        - integer identifying this changeset (list index)
    .author    - author name as CVS knows it
    .branch    - name of branch this changeset is on, or None
    .comment   - commit message
    .date      - the commit date as a (time,tz) tuple
    .entries   - list of logentry objects in this changeset
    .parents   - list of one or two parent changesets
    .tags      - list of tags on this changeset
    .synthetic - from synthetic revision "file ... added on branch ..."
    .mergepoint- the branch that has been merged from (if present in rlog
                 output)
    '''
    def __init__(self, **entries):
        # Accept arbitrary keyword arguments and expose them as attributes.
        for name, value in entries.items():
            setattr(self, name, value)
444
444 def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
445 def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
445 '''Convert log into changesets.'''
446 '''Convert log into changesets.'''
446
447
447 ui.status(_('creating changesets\n'))
448 ui.status(_('creating changesets\n'))
448
449
449 # Merge changesets
450 # Merge changesets
450
451
451 listsort(log, key=lambda x:(x.comment, x.author, x.branch, x.date))
452 listsort(log, key=lambda x:(x.comment, x.author, x.branch, x.date))
452
453
453 changesets = []
454 changesets = []
454 files = {}
455 files = {}
455 c = None
456 c = None
456 for i, e in enumerate(log):
457 for i, e in enumerate(log):
457
458
458 # Check if log entry belongs to the current changeset or not.
459 # Check if log entry belongs to the current changeset or not.
459 if not (c and
460 if not (c and
460 e.comment == c.comment and
461 e.comment == c.comment and
461 e.author == c.author and
462 e.author == c.author and
462 e.branch == c.branch and
463 e.branch == c.branch and
463 ((c.date[0] + c.date[1]) <=
464 ((c.date[0] + c.date[1]) <=
464 (e.date[0] + e.date[1]) <=
465 (e.date[0] + e.date[1]) <=
465 (c.date[0] + c.date[1]) + fuzz) and
466 (c.date[0] + c.date[1]) + fuzz) and
466 e.file not in files):
467 e.file not in files):
467 c = changeset(comment=e.comment, author=e.author,
468 c = changeset(comment=e.comment, author=e.author,
468 branch=e.branch, date=e.date, entries=[],
469 branch=e.branch, date=e.date, entries=[],
469 mergepoint=getattr(e, 'mergepoint', None))
470 mergepoint=getattr(e, 'mergepoint', None))
470 changesets.append(c)
471 changesets.append(c)
471 files = {}
472 files = {}
472 if len(changesets) % 100 == 0:
473 if len(changesets) % 100 == 0:
473 t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
474 t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
474 ui.status(util.ellipsis(t, 80) + '\n')
475 ui.status(util.ellipsis(t, 80) + '\n')
475
476
476 c.entries.append(e)
477 c.entries.append(e)
477 files[e.file] = True
478 files[e.file] = True
478 c.date = e.date # changeset date is date of latest commit in it
479 c.date = e.date # changeset date is date of latest commit in it
479
480
480 # Mark synthetic changesets
481 # Mark synthetic changesets
481
482
482 for c in changesets:
483 for c in changesets:
483 # Synthetic revisions always get their own changeset, because
484 # Synthetic revisions always get their own changeset, because
484 # the log message includes the filename. E.g. if you add file3
485 # the log message includes the filename. E.g. if you add file3
485 # and file4 on a branch, you get four log entries and three
486 # and file4 on a branch, you get four log entries and three
486 # changesets:
487 # changesets:
487 # "File file3 was added on branch ..." (synthetic, 1 entry)
488 # "File file3 was added on branch ..." (synthetic, 1 entry)
488 # "File file4 was added on branch ..." (synthetic, 1 entry)
489 # "File file4 was added on branch ..." (synthetic, 1 entry)
489 # "Add file3 and file4 to fix ..." (real, 2 entries)
490 # "Add file3 and file4 to fix ..." (real, 2 entries)
490 # Hence the check for 1 entry here.
491 # Hence the check for 1 entry here.
491 synth = getattr(c.entries[0], 'synthetic', None)
492 synth = getattr(c.entries[0], 'synthetic', None)
492 c.synthetic = (len(c.entries) == 1 and synth)
493 c.synthetic = (len(c.entries) == 1 and synth)
493
494
494 # Sort files in each changeset
495 # Sort files in each changeset
495
496
496 for c in changesets:
497 for c in changesets:
497 def pathcompare(l, r):
498 def pathcompare(l, r):
498 'Mimic cvsps sorting order'
499 'Mimic cvsps sorting order'
499 l = l.split('/')
500 l = l.split('/')
500 r = r.split('/')
501 r = r.split('/')
501 nl = len(l)
502 nl = len(l)
502 nr = len(r)
503 nr = len(r)
503 n = min(nl, nr)
504 n = min(nl, nr)
504 for i in range(n):
505 for i in range(n):
505 if i + 1 == nl and nl < nr:
506 if i + 1 == nl and nl < nr:
506 return -1
507 return -1
507 elif i + 1 == nr and nl > nr:
508 elif i + 1 == nr and nl > nr:
508 return +1
509 return +1
509 elif l[i] < r[i]:
510 elif l[i] < r[i]:
510 return -1
511 return -1
511 elif l[i] > r[i]:
512 elif l[i] > r[i]:
512 return +1
513 return +1
513 return 0
514 return 0
514 def entitycompare(l, r):
515 def entitycompare(l, r):
515 return pathcompare(l.file, r.file)
516 return pathcompare(l.file, r.file)
516
517
517 c.entries.sort(entitycompare)
518 c.entries.sort(entitycompare)
518
519
519 # Sort changesets by date
520 # Sort changesets by date
520
521
521 def cscmp(l, r):
522 def cscmp(l, r):
522 d = sum(l.date) - sum(r.date)
523 d = sum(l.date) - sum(r.date)
523 if d:
524 if d:
524 return d
525 return d
525
526
526 # detect vendor branches and initial commits on a branch
527 # detect vendor branches and initial commits on a branch
527 le = {}
528 le = {}
528 for e in l.entries:
529 for e in l.entries:
529 le[e.rcs] = e.revision
530 le[e.rcs] = e.revision
530 re = {}
531 re = {}
531 for e in r.entries:
532 for e in r.entries:
532 re[e.rcs] = e.revision
533 re[e.rcs] = e.revision
533
534
534 d = 0
535 d = 0
535 for e in l.entries:
536 for e in l.entries:
536 if re.get(e.rcs, None) == e.parent:
537 if re.get(e.rcs, None) == e.parent:
537 assert not d
538 assert not d
538 d = 1
539 d = 1
539 break
540 break
540
541
541 for e in r.entries:
542 for e in r.entries:
542 if le.get(e.rcs, None) == e.parent:
543 if le.get(e.rcs, None) == e.parent:
543 assert not d
544 assert not d
544 d = -1
545 d = -1
545 break
546 break
546
547
547 return d
548 return d
548
549
549 changesets.sort(cscmp)
550 changesets.sort(cscmp)
550
551
551 # Collect tags
552 # Collect tags
552
553
553 globaltags = {}
554 globaltags = {}
554 for c in changesets:
555 for c in changesets:
555 tags = {}
556 tags = {}
556 for e in c.entries:
557 for e in c.entries:
557 for tag in e.tags:
558 for tag in e.tags:
558 # remember which is the latest changeset to have this tag
559 # remember which is the latest changeset to have this tag
559 globaltags[tag] = c
560 globaltags[tag] = c
560
561
561 for c in changesets:
562 for c in changesets:
562 tags = {}
563 tags = {}
563 for e in c.entries:
564 for e in c.entries:
564 for tag in e.tags:
565 for tag in e.tags:
565 tags[tag] = True
566 tags[tag] = True
566 # remember tags only if this is the latest changeset to have it
567 # remember tags only if this is the latest changeset to have it
567 c.tags = util.sort([tag for tag in tags if globaltags[tag] is c])
568 c.tags = util.sort([tag for tag in tags if globaltags[tag] is c])
568
569
569 # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
570 # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
570 # by inserting dummy changesets with two parents, and handle
571 # by inserting dummy changesets with two parents, and handle
571 # {{mergefrombranch BRANCHNAME}} by setting two parents.
572 # {{mergefrombranch BRANCHNAME}} by setting two parents.
572
573
573 if mergeto is None:
574 if mergeto is None:
574 mergeto = r'{{mergetobranch ([-\w]+)}}'
575 mergeto = r'{{mergetobranch ([-\w]+)}}'
575 if mergeto:
576 if mergeto:
576 mergeto = re.compile(mergeto)
577 mergeto = re.compile(mergeto)
577
578
578 if mergefrom is None:
579 if mergefrom is None:
579 mergefrom = r'{{mergefrombranch ([-\w]+)}}'
580 mergefrom = r'{{mergefrombranch ([-\w]+)}}'
580 if mergefrom:
581 if mergefrom:
581 mergefrom = re.compile(mergefrom)
582 mergefrom = re.compile(mergefrom)
582
583
583 versions = {} # changeset index where we saw any particular file version
584 versions = {} # changeset index where we saw any particular file version
584 branches = {} # changeset index where we saw a branch
585 branches = {} # changeset index where we saw a branch
585 n = len(changesets)
586 n = len(changesets)
586 i = 0
587 i = 0
587 while i<n:
588 while i<n:
588 c = changesets[i]
589 c = changesets[i]
589
590
590 for f in c.entries:
591 for f in c.entries:
591 versions[(f.rcs, f.revision)] = i
592 versions[(f.rcs, f.revision)] = i
592
593
593 p = None
594 p = None
594 if c.branch in branches:
595 if c.branch in branches:
595 p = branches[c.branch]
596 p = branches[c.branch]
596 else:
597 else:
597 for f in c.entries:
598 for f in c.entries:
598 p = max(p, versions.get((f.rcs, f.parent), None))
599 p = max(p, versions.get((f.rcs, f.parent), None))
599
600
600 c.parents = []
601 c.parents = []
601 if p is not None:
602 if p is not None:
602 p = changesets[p]
603 p = changesets[p]
603
604
604 # Ensure no changeset has a synthetic changeset as a parent.
605 # Ensure no changeset has a synthetic changeset as a parent.
605 while p.synthetic:
606 while p.synthetic:
606 assert len(p.parents) <= 1, \
607 assert len(p.parents) <= 1, \
607 _('synthetic changeset cannot have multiple parents')
608 _('synthetic changeset cannot have multiple parents')
608 if p.parents:
609 if p.parents:
609 p = p.parents[0]
610 p = p.parents[0]
610 else:
611 else:
611 p = None
612 p = None
612 break
613 break
613
614
614 if p is not None:
615 if p is not None:
615 c.parents.append(p)
616 c.parents.append(p)
616
617
617 if c.mergepoint:
618 if c.mergepoint:
618 if c.mergepoint == 'HEAD':
619 if c.mergepoint == 'HEAD':
619 c.mergepoint = None
620 c.mergepoint = None
620 c.parents.append(changesets[branches[c.mergepoint]])
621 c.parents.append(changesets[branches[c.mergepoint]])
621
622
622 if mergefrom:
623 if mergefrom:
623 m = mergefrom.search(c.comment)
624 m = mergefrom.search(c.comment)
624 if m:
625 if m:
625 m = m.group(1)
626 m = m.group(1)
626 if m == 'HEAD':
627 if m == 'HEAD':
627 m = None
628 m = None
628 candidate = changesets[branches[m]]
629 candidate = changesets[branches[m]]
629 if m in branches and c.branch != m and not candidate.synthetic:
630 if m in branches and c.branch != m and not candidate.synthetic:
630 c.parents.append(candidate)
631 c.parents.append(candidate)
631
632
632 if mergeto:
633 if mergeto:
633 m = mergeto.search(c.comment)
634 m = mergeto.search(c.comment)
634 if m:
635 if m:
635 try:
636 try:
636 m = m.group(1)
637 m = m.group(1)
637 if m == 'HEAD':
638 if m == 'HEAD':
638 m = None
639 m = None
639 except:
640 except:
640 m = None # if no group found then merge to HEAD
641 m = None # if no group found then merge to HEAD
641 if m in branches and c.branch != m:
642 if m in branches and c.branch != m:
642 # insert empty changeset for merge
643 # insert empty changeset for merge
643 cc = changeset(author=c.author, branch=m, date=c.date,
644 cc = changeset(author=c.author, branch=m, date=c.date,
644 comment='convert-repo: CVS merge from branch %s' % c.branch,
645 comment='convert-repo: CVS merge from branch %s' % c.branch,
645 entries=[], tags=[], parents=[changesets[branches[m]], c])
646 entries=[], tags=[], parents=[changesets[branches[m]], c])
646 changesets.insert(i + 1, cc)
647 changesets.insert(i + 1, cc)
647 branches[m] = i + 1
648 branches[m] = i + 1
648
649
649 # adjust our loop counters now we have inserted a new entry
650 # adjust our loop counters now we have inserted a new entry
650 n += 1
651 n += 1
651 i += 2
652 i += 2
652 continue
653 continue
653
654
654 branches[c.branch] = i
655 branches[c.branch] = i
655 i += 1
656 i += 1
656
657
657 # Drop synthetic changesets (safe now that we have ensured no other
658 # Drop synthetic changesets (safe now that we have ensured no other
658 # changesets can have them as parents).
659 # changesets can have them as parents).
659 i = 0
660 i = 0
660 while i < len(changesets):
661 while i < len(changesets):
661 if changesets[i].synthetic:
662 if changesets[i].synthetic:
662 del changesets[i]
663 del changesets[i]
663 else:
664 else:
664 i += 1
665 i += 1
665
666
666 # Number changesets
667 # Number changesets
667
668
668 for i, c in enumerate(changesets):
669 for i, c in enumerate(changesets):
669 c.id = i + 1
670 c.id = i + 1
670
671
671 ui.status(_('%d changeset entries\n') % len(changesets))
672 ui.status(_('%d changeset entries\n') % len(changesets))
672
673
673 return changesets
674 return changesets
674
675
675
676
676 def debugcvsps(ui, *args, **opts):
677 def debugcvsps(ui, *args, **opts):
677 '''Read CVS rlog for current directory or named path in repository, and
678 '''Read CVS rlog for current directory or named path in repository, and
678 convert the log to changesets based on matching commit log entries and dates.'''
679 convert the log to changesets based on matching commit log entries and dates.'''
679
680
680 if opts["new_cache"]:
681 if opts["new_cache"]:
681 cache = "write"
682 cache = "write"
682 elif opts["update_cache"]:
683 elif opts["update_cache"]:
683 cache = "update"
684 cache = "update"
684 else:
685 else:
685 cache = None
686 cache = None
686
687
687 revisions = opts["revisions"]
688 revisions = opts["revisions"]
688
689
689 try:
690 try:
690 if args:
691 if args:
691 log = []
692 log = []
692 for d in args:
693 for d in args:
693 log += createlog(ui, d, root=opts["root"], cache=cache)
694 log += createlog(ui, d, root=opts["root"], cache=cache)
694 else:
695 else:
695 log = createlog(ui, root=opts["root"], cache=cache)
696 log = createlog(ui, root=opts["root"], cache=cache)
696 except logerror, e:
697 except logerror, e:
697 ui.write("%r\n"%e)
698 ui.write("%r\n"%e)
698 return
699 return
699
700
700 changesets = createchangeset(ui, log, opts["fuzz"])
701 changesets = createchangeset(ui, log, opts["fuzz"])
701 del log
702 del log
702
703
703 # Print changesets (optionally filtered)
704 # Print changesets (optionally filtered)
704
705
705 off = len(revisions)
706 off = len(revisions)
706 branches = {} # latest version number in each branch
707 branches = {} # latest version number in each branch
707 ancestors = {} # parent branch
708 ancestors = {} # parent branch
708 for cs in changesets:
709 for cs in changesets:
709
710
710 if opts["ancestors"]:
711 if opts["ancestors"]:
711 if cs.branch not in branches and cs.parents and cs.parents[0].id:
712 if cs.branch not in branches and cs.parents and cs.parents[0].id:
712 ancestors[cs.branch] = changesets[cs.parents[0].id-1].branch, cs.parents[0].id
713 ancestors[cs.branch] = changesets[cs.parents[0].id-1].branch, cs.parents[0].id
713 branches[cs.branch] = cs.id
714 branches[cs.branch] = cs.id
714
715
715 # limit by branches
716 # limit by branches
716 if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
717 if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
717 continue
718 continue
718
719
719 if not off:
720 if not off:
720 # Note: trailing spaces on several lines here are needed to have
721 # Note: trailing spaces on several lines here are needed to have
721 # bug-for-bug compatibility with cvsps.
722 # bug-for-bug compatibility with cvsps.
722 ui.write('---------------------\n')
723 ui.write('---------------------\n')
723 ui.write('PatchSet %d \n' % cs.id)
724 ui.write('PatchSet %d \n' % cs.id)
724 ui.write('Date: %s\n' % util.datestr(cs.date, '%Y/%m/%d %H:%M:%S %1%2'))
725 ui.write('Date: %s\n' % util.datestr(cs.date, '%Y/%m/%d %H:%M:%S %1%2'))
725 ui.write('Author: %s\n' % cs.author)
726 ui.write('Author: %s\n' % cs.author)
726 ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
727 ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
727 ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags)>1],
728 ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags)>1],
728 ','.join(cs.tags) or '(none)'))
729 ','.join(cs.tags) or '(none)'))
729 if opts["parents"] and cs.parents:
730 if opts["parents"] and cs.parents:
730 if len(cs.parents)>1:
731 if len(cs.parents)>1:
731 ui.write('Parents: %s\n' % (','.join([str(p.id) for p in cs.parents])))
732 ui.write('Parents: %s\n' % (','.join([str(p.id) for p in cs.parents])))
732 else:
733 else:
733 ui.write('Parent: %d\n' % cs.parents[0].id)
734 ui.write('Parent: %d\n' % cs.parents[0].id)
734
735
735 if opts["ancestors"]:
736 if opts["ancestors"]:
736 b = cs.branch
737 b = cs.branch
737 r = []
738 r = []
738 while b:
739 while b:
739 b, c = ancestors[b]
740 b, c = ancestors[b]
740 r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
741 r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
741 if r:
742 if r:
742 ui.write('Ancestors: %s\n' % (','.join(r)))
743 ui.write('Ancestors: %s\n' % (','.join(r)))
743
744
744 ui.write('Log:\n')
745 ui.write('Log:\n')
745 ui.write('%s\n\n' % cs.comment)
746 ui.write('%s\n\n' % cs.comment)
746 ui.write('Members: \n')
747 ui.write('Members: \n')
747 for f in cs.entries:
748 for f in cs.entries:
748 fn = f.file
749 fn = f.file
749 if fn.startswith(opts["prefix"]):
750 if fn.startswith(opts["prefix"]):
750 fn = fn[len(opts["prefix"]):]
751 fn = fn[len(opts["prefix"]):]
751 ui.write('\t%s:%s->%s%s \n' % (fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
752 ui.write('\t%s:%s->%s%s \n' % (fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
752 '.'.join([str(x) for x in f.revision]), ['', '(DEAD)'][f.dead]))
753 '.'.join([str(x) for x in f.revision]), ['', '(DEAD)'][f.dead]))
753 ui.write('\n')
754 ui.write('\n')
754
755
755 # have we seen the start tag?
756 # have we seen the start tag?
756 if revisions and off:
757 if revisions and off:
757 if revisions[0] == str(cs.id) or \
758 if revisions[0] == str(cs.id) or \
758 revisions[0] in cs.tags:
759 revisions[0] in cs.tags:
759 off = False
760 off = False
760
761
761 # see if we reached the end tag
762 # see if we reached the end tag
762 if len(revisions)>1 and not off:
763 if len(revisions)>1 and not off:
763 if revisions[1] == str(cs.id) or \
764 if revisions[1] == str(cs.id) or \
764 revisions[1] in cs.tags:
765 revisions[1] in cs.tags:
765 break
766 break
General Comments 0
You need to be logged in to leave comments. Login now