##// END OF EJS Templates
convert: simple fix for non-existent synthetic/mergepoint attributes
Rocco Rutte -
r7969:a969b147 default
parent child Browse files
Show More
@@ -1,764 +1,765 b''
1 #
1 #
2 # Mercurial built-in replacement for cvsps.
2 # Mercurial built-in replacement for cvsps.
3 #
3 #
4 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
4 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
5 #
5 #
6 # This software may be used and distributed according to the terms
6 # This software may be used and distributed according to the terms
7 # of the GNU General Public License, incorporated herein by reference.
7 # of the GNU General Public License, incorporated herein by reference.
8
8
9 import os
9 import os
10 import re
10 import re
11 import cPickle as pickle
11 import cPickle as pickle
12 from mercurial import util
12 from mercurial import util
13 from mercurial.i18n import _
13 from mercurial.i18n import _
14
14
def listsort(list, key):
    "helper to sort by key in Python 2.3"
    try:
        # Python 2.4 and later: sort() accepts a 'key' argument directly.
        list.sort(key=key)
    except TypeError:
        # Python 2.3: emulate key-based sorting with a comparison function.
        def compare(left, right):
            return cmp(key(left), key(right))
        list.sort(compare)
21
21
class logentry(object):
    '''One file revision parsed from CVS (r)log output.

    Instances carry the following attributes:
    .author    - author name as CVS knows it
    .branch    - name of branch this revision is on
    .branches  - revision tuple of branches starting at this revision
    .comment   - commit message
    .date      - the commit date as a (time, tz) tuple
    .dead      - true if file revision is dead
    .file      - name of file
    .lines     - a tuple (+lines, -lines) or None
    .parent    - previous revision of this entry
    .rcs       - name of file as returned from CVS
    .revision  - revision number as tuple
    .tags      - list of tags on the file
    .synthetic - is this a synthetic "file ... added on ..." revision?
    .mergepoint- the branch that has been merged from (if present in rlog
                 output)
    '''
    def __init__(self, **entries):
        # Store every keyword argument as an instance attribute.
        for name, value in entries.items():
            setattr(self, name, value)
41
41
class logerror(Exception):
    '''Raised when CVS (r)log output cannot be collected or parsed.'''
44
44
def getrepopath(cvspath):
    """Return the repository path from a CVS path.

    >>> getrepopath('/foo/bar')
    '/foo/bar'
    >>> getrepopath('c:/foo/bar')
    'c:/foo/bar'
    >>> getrepopath(':pserver:10/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:10c:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:c:/foo/bar')
    'c:/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
    'c:/foo/bar'
    """
    # Per the CVS manual, CVS paths look like:
    #   [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
    # The wrinkle is that Windows absolute paths begin with a drive
    # letter such as 'c:', which also contains a colon.  We assume drive
    # letters are exactly one character long and that every other CVS
    # component preceding the repository path is at least two characters
    # long, and use that to tell them apart.
    components = cvspath.split(':')
    if len(components) == 1:
        # No colon at all: the string is already a plain repository path.
        return components[0]
    # If the next-to-last component is longer than one character it
    # cannot be a drive letter, so the final component holds the path,
    # possibly with a port number glued to its front (which we strip).
    # A port number immediately followed by a drive letter would be
    # ambiguous; we assume that never happens.
    if len(components[-2]) > 1:
        return components[-1].lstrip('0123456789')
    # One-character component: treat it as a drive letter and rejoin it.
    return components[-2] + ':' + components[-1]
83
83
def createlog(ui, directory=None, root="", rlog=True, cache=None):
    '''Collect the CVS rlog.

    Runs ``cvs rlog`` (or ``cvs log`` when rlog is False) on *directory*,
    parses its output with a small state machine and returns a list of
    logentry objects.  When *cache* is set, results are stored under
    ~/.hg.cvsps; with cache == 'update' an existing cache is read first
    and only newer revisions are fetched from CVS.

    Raises logerror if the directory is not a CVS sandbox or if a cached
    log overlaps with the freshly fetched entries.
    '''

    # Because we store many duplicate commit log messages, reusing strings
    # saves a lot of memory and pickle storage space.
    _scache = {}
    def scache(s):
        "return a shared version of a string"
        return _scache.setdefault(s, s)

    ui.status(_('collecting CVS rlog\n'))

    log = []      # list of logentry objects containing the CVS state

    # patterns to match in CVS (r)log output, by state of use
    re_00 = re.compile('RCS file: (.+)$')
    re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
    re_02 = re.compile('cvs (r?log|server): (.+)\n$')
    re_03 = re.compile("(Cannot access.+CVSROOT)|(can't create temporary directory.+)$")
    re_10 = re.compile('Working file: (.+)$')
    re_20 = re.compile('symbolic names:')
    re_30 = re.compile('\t(.+): ([\\d.]+)$')
    re_31 = re.compile('----------------------------$')
    re_32 = re.compile('=============================================================================$')
    re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
    re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?(.*mergepoint:\s+([^;]+);)?')
    re_70 = re.compile('branches: (.+);$')

    # matches the log message CVS writes for synthetic "added on branch"
    # revisions; used below to flag them
    file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')

    prefix = ''   # leading path to strip of what we get from CVS

    if directory is None:
        # Current working directory

        # Get the real directory in the repository
        try:
            prefix = file(os.path.join('CVS','Repository')).read().strip()
            if prefix == ".":
                prefix = ""
            directory = prefix
        except IOError:
            raise logerror('Not a CVS sandbox')

        if prefix and not prefix.endswith(os.sep):
            prefix += os.sep

        # Use the Root file in the sandbox, if it exists
        try:
            root = file(os.path.join('CVS','Root')).read().strip()
        except IOError:
            pass

    if not root:
        root = os.environ.get('CVSROOT', '')

    # read log cache if one exists
    oldlog = []
    date = None

    if cache:
        cachedir = os.path.expanduser('~/.hg.cvsps')
        if not os.path.exists(cachedir):
            os.mkdir(cachedir)

        # The cvsps cache pickle needs a uniquified name, based on the
        # repository location. The address may have all sort of nasties
        # in it, slashes, colons and such. So here we take just the
        # alphanumerics, concatenated in a way that does not mix up the
        # various components, so that
        #    :pserver:user@server:/path
        # and
        #    /pserver/user/server/path
        # are mapped to different cache file names.
        cachefile = root.split(":") + [directory, "cache"]
        cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
        cachefile = os.path.join(cachedir,
                                 '.'.join([s for s in cachefile if s]))

        if cache == 'update':
            try:
                ui.note(_('reading cvs log cache %s\n') % cachefile)
                # NOTE(review): unpickling a user-local cache file; assumed
                # trusted since it lives under the user's home directory.
                oldlog = pickle.load(file(cachefile))
                ui.note(_('cache has %d log entries\n') % len(oldlog))
            except Exception, e:
                # best-effort: a broken or missing cache just means we
                # fetch the full log again
                ui.note(_('error reading cache: %r\n') % e)

            if oldlog:
                date = oldlog[-1].date    # last commit date as a (time,tz) tuple
                date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')

    # build the CVS commandline
    cmd = ['cvs', '-q']
    if root:
        cmd.append('-d%s' % root)
        p = util.normpath(getrepopath(root))
        if not p.endswith('/'):
            p += '/'
        prefix = p + util.normpath(prefix)
    cmd.append(['log', 'rlog'][rlog])
    if date:
        # no space between option and date string
        cmd.append('-d>%s' % date)
    cmd.append(directory)

    # state machine begins here
    # States: 0 scan for 'RCS file', 1 expect 'Working file', 2 expect
    # 'symbolic names', 3 read tags, 4 expect first '-----' separator,
    # 5 expect revision number, 6 expect date/author line, 7 read
    # branches/comment, 8 read comment only.
    tags = {}     # dictionary of revisions on current file with their tags
    branchmap = {} # mapping between branch names and revision numbers
    state = 0
    store = False # set when a new record can be appended

    cmd = [util.shellquote(arg) for arg in cmd]
    ui.note(_("running %s\n") % (' '.join(cmd)))
    ui.debug(_("prefix=%r directory=%r root=%r\n") % (prefix, directory, root))

    pfp = util.popen(' '.join(cmd))
    peek = pfp.readline()
    while True:
        line = peek
        if line == '':
            break
        peek = pfp.readline()
        if line.endswith('\n'):
            line = line[:-1]
        #ui.debug('state=%d line=%r\n' % (state, line))

        if state == 0:
            # initial state, consume input until we see 'RCS file'
            match = re_00.match(line)
            if match:
                rcs = match.group(1)
                tags = {}
                if rlog:
                    # rlog prints repository paths; strip the ',v' RCS
                    # suffix, the repository prefix and any Attic
                    # component to recover the working file name
                    filename = util.normpath(rcs[:-2])
                    if filename.startswith(prefix):
                        filename = filename[len(prefix):]
                    if filename.startswith('/'):
                        filename = filename[1:]
                    if filename.startswith('Attic/'):
                        filename = filename[6:]
                    else:
                        filename = filename.replace('/Attic/', '/')
                    state = 2
                    continue
                state = 1
                continue
            match = re_01.match(line)
            if match:
                raise Exception(match.group(1))
            match = re_02.match(line)
            if match:
                raise Exception(match.group(2))
            if re_03.match(line):
                raise Exception(line)

        elif state == 1:
            # expect 'Working file' (only when using log instead of rlog)
            match = re_10.match(line)
            assert match, _('RCS file must be followed by working file')
            filename = util.normpath(match.group(1))
            state = 2

        elif state == 2:
            # expect 'symbolic names'
            if re_20.match(line):
                branchmap = {}
                state = 3

        elif state == 3:
            # read the symbolic names and store as tags
            match = re_30.match(line)
            if match:
                rev = [int(x) for x in match.group(2).split('.')]

                # Convert magic branch number to an odd-numbered one
                revn = len(rev)
                if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
                    rev = rev[:-2] + rev[-1:]
                rev = tuple(rev)

                if rev not in tags:
                    tags[rev] = []
                tags[rev].append(match.group(1))
                branchmap[match.group(1)] = match.group(2)

            elif re_31.match(line):
                state = 5
            elif re_32.match(line):
                state = 0

        elif state == 4:
            # expecting '------' separator before first revision
            if re_31.match(line):
                state = 5
            else:
                assert not re_32.match(line), _('must have at least some revisions')

        elif state == 5:
            # expecting revision number and possibly (ignored) lock indication
            # we create the logentry here from values stored in states 0 to 4,
            # as this state is re-entered for subsequent revisions of a file.
            match = re_50.match(line)
            assert match, _('expected revision number')
            e = logentry(rcs=scache(rcs), file=scache(filename),
                    revision=tuple([int(x) for x in match.group(1).split('.')]),
                    branches=[], parent=None,
                    synthetic=False)
            state = 6

        elif state == 6:
            # expecting date, author, state, lines changed
            match = re_60.match(line)
            assert match, _('revision must be followed by date line')
            d = match.group(1)
            if d[2] == '/':
                # Y2K
                d = '19' + d

            if len(d.split()) != 3:
                # cvs log dates always in GMT
                d = d + ' UTC'
            e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S'])
            e.author = scache(match.group(2))
            e.dead = match.group(3).lower() == 'dead'

            # group 5/6 are the '+N'/'-N' parts of 'lines: +N -N'
            if match.group(5):
                if match.group(6):
                    e.lines = (int(match.group(5)), int(match.group(6)))
                else:
                    e.lines = (int(match.group(5)), 0)
            elif match.group(6):
                e.lines = (0, int(match.group(6)))
            else:
                e.lines = None

            if match.group(7): # cvsnt mergepoint
                myrev = match.group(8).split('.')
                if len(myrev) == 2: # head
                    e.mergepoint = 'HEAD'
                else:
                    # rewrite x.y.z.w into the magic branch form x.y.0.z
                    # so it can be looked up in branchmap
                    myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
                    branches = [b for b in branchmap if branchmap[b] == myrev]
                    # NOTE(review): e.mergepoint is referenced in this
                    # assertion message before it is assigned below — if
                    # the assert ever fires it would raise AttributeError
                    # instead; should probably use myrev here.
                    assert len(branches) == 1, 'unknown branch: %s' % e.mergepoint
                    e.mergepoint = branches[0]
            else:
                e.mergepoint = None
            e.comment = []
            state = 7

        elif state == 7:
            # read the revision numbers of branches that start at this revision
            # or store the commit log message otherwise
            m = re_70.match(line)
            if m:
                e.branches = [tuple([int(y) for y in x.strip().split('.')])
                              for x in m.group(1).split(';')]
                state = 8
            elif re_31.match(line) and re_50.match(peek):
                # '-----' separator followed by a revision line: the
                # current entry is complete (peek disambiguates dashes
                # inside commit messages)
                state = 5
                store = True
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        elif state == 8:
            # store commit log message
            if re_31.match(line):
                state = 5
                store = True
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        # When a file is added on a branch B1, CVS creates a synthetic
        # dead trunk revision 1.1 so that the branch has a root.
        # Likewise, if you merge such a file to a later branch B2 (one
        # that already existed when the file was added on B1), CVS
        # creates a synthetic dead revision 1.1.x.1 on B2.  Don't drop
        # these revisions now, but mark them synthetic so
        # createchangeset() can take care of them.
        if (store and
              e.dead and
              e.revision[-1] == 1 and      # 1.1 or 1.1.x.1
              len(e.comment) == 1 and
              file_added_re.match(e.comment[0])):
            ui.debug(_('found synthetic rev in %s: %r\n')
                     % (e.rcs, e.comment[0]))
            e.synthetic = True

        if store:
            # clean up the results and save in the log.
            store = False
            e.tags = util.sort([scache(x) for x in tags.get(e.revision, [])])
            e.comment = scache('\n'.join(e.comment))

            # even-length revision tuples longer than 3 denote branch
            # revisions; derive the branch name from the tag on the
            # branch-number prefix
            revn = len(e.revision)
            if revn > 3 and (revn % 2) == 0:
                e.branch = tags.get(e.revision[:-1], [None])[0]
            else:
                e.branch = None

            log.append(e)

            if len(log) % 100 == 0:
                ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')

    listsort(log, key=lambda x:(x.rcs, x.revision))

    # find parent revisions of individual files
    versions = {}
    for e in log:
        branch = e.revision[:-1]
        p = versions.get((e.rcs, branch), None)
        if p is None:
            p = e.revision[:-2]
        e.parent = p
        versions[(e.rcs, branch)] = e.revision

    # update the log cache
    if cache:
        if log:
            # join up the old and new logs
            listsort(log, key=lambda x:x.date)

            if oldlog and oldlog[-1].date >= log[0].date:
                raise logerror('Log cache overlaps with new log entries,'
                               ' re-run without cache.')

            log = oldlog + log

            # write the new cachefile
            ui.note(_('writing cvs log cache %s\n') % cachefile)
            pickle.dump(log, file(cachefile, 'w'))
        else:
            log = oldlog

    ui.status(_('%d log entries\n') % len(log))

    return log
427
427
428
428
class changeset(object):
    '''A group of logentry objects merged into one changeset.

    Instances carry the following attributes:
    .author    - author name as CVS knows it
    .branch    - name of branch this changeset is on, or None
    .comment   - commit message
    .date      - the commit date as a (time,tz) tuple
    .entries   - list of logentry objects in this changeset
    .parents   - list of one or two parent changesets
    .tags      - list of tags on this changeset
    .synthetic - from synthetic revision "file ... added on branch ..."
    .mergepoint- the branch that has been merged from (if present in rlog
                 output)
    '''
    def __init__(self, **entries):
        # Mirror every keyword argument onto the instance.
        for name, value in entries.items():
            setattr(self, name, value)
443
443
444 def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
444 def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
445 '''Convert log into changesets.'''
445 '''Convert log into changesets.'''
446
446
447 ui.status(_('creating changesets\n'))
447 ui.status(_('creating changesets\n'))
448
448
449 # Merge changesets
449 # Merge changesets
450
450
451 listsort(log, key=lambda x:(x.comment, x.author, x.branch, x.date))
451 listsort(log, key=lambda x:(x.comment, x.author, x.branch, x.date))
452
452
453 changesets = []
453 changesets = []
454 files = {}
454 files = {}
455 c = None
455 c = None
456 for i, e in enumerate(log):
456 for i, e in enumerate(log):
457
457
458 # Check if log entry belongs to the current changeset or not.
458 # Check if log entry belongs to the current changeset or not.
459 if not (c and
459 if not (c and
460 e.comment == c.comment and
460 e.comment == c.comment and
461 e.author == c.author and
461 e.author == c.author and
462 e.branch == c.branch and
462 e.branch == c.branch and
463 ((c.date[0] + c.date[1]) <=
463 ((c.date[0] + c.date[1]) <=
464 (e.date[0] + e.date[1]) <=
464 (e.date[0] + e.date[1]) <=
465 (c.date[0] + c.date[1]) + fuzz) and
465 (c.date[0] + c.date[1]) + fuzz) and
466 e.file not in files):
466 e.file not in files):
467 c = changeset(comment=e.comment, author=e.author,
467 c = changeset(comment=e.comment, author=e.author,
468 branch=e.branch, date=e.date, entries=[],
468 branch=e.branch, date=e.date, entries=[],
469 mergepoint=e.mergepoint)
469 mergepoint=getattr(e, 'mergepoint', None))
470 changesets.append(c)
470 changesets.append(c)
471 files = {}
471 files = {}
472 if len(changesets) % 100 == 0:
472 if len(changesets) % 100 == 0:
473 t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
473 t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
474 ui.status(util.ellipsis(t, 80) + '\n')
474 ui.status(util.ellipsis(t, 80) + '\n')
475
475
476 c.entries.append(e)
476 c.entries.append(e)
477 files[e.file] = True
477 files[e.file] = True
478 c.date = e.date # changeset date is date of latest commit in it
478 c.date = e.date # changeset date is date of latest commit in it
479
479
480 # Mark synthetic changesets
480 # Mark synthetic changesets
481
481
482 for c in changesets:
482 for c in changesets:
483 # Synthetic revisions always get their own changeset, because
483 # Synthetic revisions always get their own changeset, because
484 # the log message includes the filename. E.g. if you add file3
484 # the log message includes the filename. E.g. if you add file3
485 # and file4 on a branch, you get four log entries and three
485 # and file4 on a branch, you get four log entries and three
486 # changesets:
486 # changesets:
487 # "File file3 was added on branch ..." (synthetic, 1 entry)
487 # "File file3 was added on branch ..." (synthetic, 1 entry)
488 # "File file4 was added on branch ..." (synthetic, 1 entry)
488 # "File file4 was added on branch ..." (synthetic, 1 entry)
489 # "Add file3 and file4 to fix ..." (real, 2 entries)
489 # "Add file3 and file4 to fix ..." (real, 2 entries)
490 # Hence the check for 1 entry here.
490 # Hence the check for 1 entry here.
491 c.synthetic = (len(c.entries) == 1 and c.entries[0].synthetic)
491 synth = getattr(c.entries[0], 'synthetic', None)
492 c.synthetic = (len(c.entries) == 1 and synth)
492
493
493 # Sort files in each changeset
494 # Sort files in each changeset
494
495
495 for c in changesets:
496 for c in changesets:
496 def pathcompare(l, r):
497 def pathcompare(l, r):
497 'Mimic cvsps sorting order'
498 'Mimic cvsps sorting order'
498 l = l.split('/')
499 l = l.split('/')
499 r = r.split('/')
500 r = r.split('/')
500 nl = len(l)
501 nl = len(l)
501 nr = len(r)
502 nr = len(r)
502 n = min(nl, nr)
503 n = min(nl, nr)
503 for i in range(n):
504 for i in range(n):
504 if i + 1 == nl and nl < nr:
505 if i + 1 == nl and nl < nr:
505 return -1
506 return -1
506 elif i + 1 == nr and nl > nr:
507 elif i + 1 == nr and nl > nr:
507 return +1
508 return +1
508 elif l[i] < r[i]:
509 elif l[i] < r[i]:
509 return -1
510 return -1
510 elif l[i] > r[i]:
511 elif l[i] > r[i]:
511 return +1
512 return +1
512 return 0
513 return 0
513 def entitycompare(l, r):
514 def entitycompare(l, r):
514 return pathcompare(l.file, r.file)
515 return pathcompare(l.file, r.file)
515
516
516 c.entries.sort(entitycompare)
517 c.entries.sort(entitycompare)
517
518
518 # Sort changesets by date
519 # Sort changesets by date
519
520
520 def cscmp(l, r):
521 def cscmp(l, r):
521 d = sum(l.date) - sum(r.date)
522 d = sum(l.date) - sum(r.date)
522 if d:
523 if d:
523 return d
524 return d
524
525
525 # detect vendor branches and initial commits on a branch
526 # detect vendor branches and initial commits on a branch
526 le = {}
527 le = {}
527 for e in l.entries:
528 for e in l.entries:
528 le[e.rcs] = e.revision
529 le[e.rcs] = e.revision
529 re = {}
530 re = {}
530 for e in r.entries:
531 for e in r.entries:
531 re[e.rcs] = e.revision
532 re[e.rcs] = e.revision
532
533
533 d = 0
534 d = 0
534 for e in l.entries:
535 for e in l.entries:
535 if re.get(e.rcs, None) == e.parent:
536 if re.get(e.rcs, None) == e.parent:
536 assert not d
537 assert not d
537 d = 1
538 d = 1
538 break
539 break
539
540
540 for e in r.entries:
541 for e in r.entries:
541 if le.get(e.rcs, None) == e.parent:
542 if le.get(e.rcs, None) == e.parent:
542 assert not d
543 assert not d
543 d = -1
544 d = -1
544 break
545 break
545
546
546 return d
547 return d
547
548
548 changesets.sort(cscmp)
549 changesets.sort(cscmp)
549
550
550 # Collect tags
551 # Collect tags
551
552
552 globaltags = {}
553 globaltags = {}
553 for c in changesets:
554 for c in changesets:
554 tags = {}
555 tags = {}
555 for e in c.entries:
556 for e in c.entries:
556 for tag in e.tags:
557 for tag in e.tags:
557 # remember which is the latest changeset to have this tag
558 # remember which is the latest changeset to have this tag
558 globaltags[tag] = c
559 globaltags[tag] = c
559
560
560 for c in changesets:
561 for c in changesets:
561 tags = {}
562 tags = {}
562 for e in c.entries:
563 for e in c.entries:
563 for tag in e.tags:
564 for tag in e.tags:
564 tags[tag] = True
565 tags[tag] = True
565 # remember tags only if this is the latest changeset to have it
566 # remember tags only if this is the latest changeset to have it
566 c.tags = util.sort([tag for tag in tags if globaltags[tag] is c])
567 c.tags = util.sort([tag for tag in tags if globaltags[tag] is c])
567
568
568 # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
569 # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
569 # by inserting dummy changesets with two parents, and handle
570 # by inserting dummy changesets with two parents, and handle
570 # {{mergefrombranch BRANCHNAME}} by setting two parents.
571 # {{mergefrombranch BRANCHNAME}} by setting two parents.
571
572
572 if mergeto is None:
573 if mergeto is None:
573 mergeto = r'{{mergetobranch ([-\w]+)}}'
574 mergeto = r'{{mergetobranch ([-\w]+)}}'
574 if mergeto:
575 if mergeto:
575 mergeto = re.compile(mergeto)
576 mergeto = re.compile(mergeto)
576
577
577 if mergefrom is None:
578 if mergefrom is None:
578 mergefrom = r'{{mergefrombranch ([-\w]+)}}'
579 mergefrom = r'{{mergefrombranch ([-\w]+)}}'
579 if mergefrom:
580 if mergefrom:
580 mergefrom = re.compile(mergefrom)
581 mergefrom = re.compile(mergefrom)
581
582
582 versions = {} # changeset index where we saw any particular file version
583 versions = {} # changeset index where we saw any particular file version
583 branches = {} # changeset index where we saw a branch
584 branches = {} # changeset index where we saw a branch
584 n = len(changesets)
585 n = len(changesets)
585 i = 0
586 i = 0
586 while i<n:
587 while i<n:
587 c = changesets[i]
588 c = changesets[i]
588
589
589 for f in c.entries:
590 for f in c.entries:
590 versions[(f.rcs, f.revision)] = i
591 versions[(f.rcs, f.revision)] = i
591
592
592 p = None
593 p = None
593 if c.branch in branches:
594 if c.branch in branches:
594 p = branches[c.branch]
595 p = branches[c.branch]
595 else:
596 else:
596 for f in c.entries:
597 for f in c.entries:
597 p = max(p, versions.get((f.rcs, f.parent), None))
598 p = max(p, versions.get((f.rcs, f.parent), None))
598
599
599 c.parents = []
600 c.parents = []
600 if p is not None:
601 if p is not None:
601 p = changesets[p]
602 p = changesets[p]
602
603
603 # Ensure no changeset has a synthetic changeset as a parent.
604 # Ensure no changeset has a synthetic changeset as a parent.
604 while p.synthetic:
605 while p.synthetic:
605 assert len(p.parents) <= 1, \
606 assert len(p.parents) <= 1, \
606 _('synthetic changeset cannot have multiple parents')
607 _('synthetic changeset cannot have multiple parents')
607 if p.parents:
608 if p.parents:
608 p = p.parents[0]
609 p = p.parents[0]
609 else:
610 else:
610 p = None
611 p = None
611 break
612 break
612
613
613 if p is not None:
614 if p is not None:
614 c.parents.append(p)
615 c.parents.append(p)
615
616
616 if c.mergepoint:
617 if c.mergepoint:
617 if c.mergepoint == 'HEAD':
618 if c.mergepoint == 'HEAD':
618 c.mergepoint = None
619 c.mergepoint = None
619 c.parents.append(changesets[branches[c.mergepoint]])
620 c.parents.append(changesets[branches[c.mergepoint]])
620
621
621 if mergefrom:
622 if mergefrom:
622 m = mergefrom.search(c.comment)
623 m = mergefrom.search(c.comment)
623 if m:
624 if m:
624 m = m.group(1)
625 m = m.group(1)
625 if m == 'HEAD':
626 if m == 'HEAD':
626 m = None
627 m = None
627 candidate = changesets[branches[m]]
628 candidate = changesets[branches[m]]
628 if m in branches and c.branch != m and not candidate.synthetic:
629 if m in branches and c.branch != m and not candidate.synthetic:
629 c.parents.append(candidate)
630 c.parents.append(candidate)
630
631
631 if mergeto:
632 if mergeto:
632 m = mergeto.search(c.comment)
633 m = mergeto.search(c.comment)
633 if m:
634 if m:
634 try:
635 try:
635 m = m.group(1)
636 m = m.group(1)
636 if m == 'HEAD':
637 if m == 'HEAD':
637 m = None
638 m = None
638 except:
639 except:
639 m = None # if no group found then merge to HEAD
640 m = None # if no group found then merge to HEAD
640 if m in branches and c.branch != m:
641 if m in branches and c.branch != m:
641 # insert empty changeset for merge
642 # insert empty changeset for merge
642 cc = changeset(author=c.author, branch=m, date=c.date,
643 cc = changeset(author=c.author, branch=m, date=c.date,
643 comment='convert-repo: CVS merge from branch %s' % c.branch,
644 comment='convert-repo: CVS merge from branch %s' % c.branch,
644 entries=[], tags=[], parents=[changesets[branches[m]], c])
645 entries=[], tags=[], parents=[changesets[branches[m]], c])
645 changesets.insert(i + 1, cc)
646 changesets.insert(i + 1, cc)
646 branches[m] = i + 1
647 branches[m] = i + 1
647
648
648 # adjust our loop counters now we have inserted a new entry
649 # adjust our loop counters now we have inserted a new entry
649 n += 1
650 n += 1
650 i += 2
651 i += 2
651 continue
652 continue
652
653
653 branches[c.branch] = i
654 branches[c.branch] = i
654 i += 1
655 i += 1
655
656
656 # Drop synthetic changesets (safe now that we have ensured no other
657 # Drop synthetic changesets (safe now that we have ensured no other
657 # changesets can have them as parents).
658 # changesets can have them as parents).
658 i = 0
659 i = 0
659 while i < len(changesets):
660 while i < len(changesets):
660 if changesets[i].synthetic:
661 if changesets[i].synthetic:
661 del changesets[i]
662 del changesets[i]
662 else:
663 else:
663 i += 1
664 i += 1
664
665
665 # Number changesets
666 # Number changesets
666
667
667 for i, c in enumerate(changesets):
668 for i, c in enumerate(changesets):
668 c.id = i + 1
669 c.id = i + 1
669
670
670 ui.status(_('%d changeset entries\n') % len(changesets))
671 ui.status(_('%d changeset entries\n') % len(changesets))
671
672
672 return changesets
673 return changesets
673
674
674
675
675 def debugcvsps(ui, *args, **opts):
676 def debugcvsps(ui, *args, **opts):
676 '''Read CVS rlog for current directory or named path in repository, and
677 '''Read CVS rlog for current directory or named path in repository, and
677 convert the log to changesets based on matching commit log entries and dates.'''
678 convert the log to changesets based on matching commit log entries and dates.'''
678
679
679 if opts["new_cache"]:
680 if opts["new_cache"]:
680 cache = "write"
681 cache = "write"
681 elif opts["update_cache"]:
682 elif opts["update_cache"]:
682 cache = "update"
683 cache = "update"
683 else:
684 else:
684 cache = None
685 cache = None
685
686
686 revisions = opts["revisions"]
687 revisions = opts["revisions"]
687
688
688 try:
689 try:
689 if args:
690 if args:
690 log = []
691 log = []
691 for d in args:
692 for d in args:
692 log += createlog(ui, d, root=opts["root"], cache=cache)
693 log += createlog(ui, d, root=opts["root"], cache=cache)
693 else:
694 else:
694 log = createlog(ui, root=opts["root"], cache=cache)
695 log = createlog(ui, root=opts["root"], cache=cache)
695 except logerror, e:
696 except logerror, e:
696 ui.write("%r\n"%e)
697 ui.write("%r\n"%e)
697 return
698 return
698
699
699 changesets = createchangeset(ui, log, opts["fuzz"])
700 changesets = createchangeset(ui, log, opts["fuzz"])
700 del log
701 del log
701
702
702 # Print changesets (optionally filtered)
703 # Print changesets (optionally filtered)
703
704
704 off = len(revisions)
705 off = len(revisions)
705 branches = {} # latest version number in each branch
706 branches = {} # latest version number in each branch
706 ancestors = {} # parent branch
707 ancestors = {} # parent branch
707 for cs in changesets:
708 for cs in changesets:
708
709
709 if opts["ancestors"]:
710 if opts["ancestors"]:
710 if cs.branch not in branches and cs.parents and cs.parents[0].id:
711 if cs.branch not in branches and cs.parents and cs.parents[0].id:
711 ancestors[cs.branch] = changesets[cs.parents[0].id-1].branch, cs.parents[0].id
712 ancestors[cs.branch] = changesets[cs.parents[0].id-1].branch, cs.parents[0].id
712 branches[cs.branch] = cs.id
713 branches[cs.branch] = cs.id
713
714
714 # limit by branches
715 # limit by branches
715 if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
716 if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
716 continue
717 continue
717
718
718 if not off:
719 if not off:
719 # Note: trailing spaces on several lines here are needed to have
720 # Note: trailing spaces on several lines here are needed to have
720 # bug-for-bug compatibility with cvsps.
721 # bug-for-bug compatibility with cvsps.
721 ui.write('---------------------\n')
722 ui.write('---------------------\n')
722 ui.write('PatchSet %d \n' % cs.id)
723 ui.write('PatchSet %d \n' % cs.id)
723 ui.write('Date: %s\n' % util.datestr(cs.date, '%Y/%m/%d %H:%M:%S %1%2'))
724 ui.write('Date: %s\n' % util.datestr(cs.date, '%Y/%m/%d %H:%M:%S %1%2'))
724 ui.write('Author: %s\n' % cs.author)
725 ui.write('Author: %s\n' % cs.author)
725 ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
726 ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
726 ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags)>1],
727 ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags)>1],
727 ','.join(cs.tags) or '(none)'))
728 ','.join(cs.tags) or '(none)'))
728 if opts["parents"] and cs.parents:
729 if opts["parents"] and cs.parents:
729 if len(cs.parents)>1:
730 if len(cs.parents)>1:
730 ui.write('Parents: %s\n' % (','.join([str(p.id) for p in cs.parents])))
731 ui.write('Parents: %s\n' % (','.join([str(p.id) for p in cs.parents])))
731 else:
732 else:
732 ui.write('Parent: %d\n' % cs.parents[0].id)
733 ui.write('Parent: %d\n' % cs.parents[0].id)
733
734
734 if opts["ancestors"]:
735 if opts["ancestors"]:
735 b = cs.branch
736 b = cs.branch
736 r = []
737 r = []
737 while b:
738 while b:
738 b, c = ancestors[b]
739 b, c = ancestors[b]
739 r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
740 r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
740 if r:
741 if r:
741 ui.write('Ancestors: %s\n' % (','.join(r)))
742 ui.write('Ancestors: %s\n' % (','.join(r)))
742
743
743 ui.write('Log:\n')
744 ui.write('Log:\n')
744 ui.write('%s\n\n' % cs.comment)
745 ui.write('%s\n\n' % cs.comment)
745 ui.write('Members: \n')
746 ui.write('Members: \n')
746 for f in cs.entries:
747 for f in cs.entries:
747 fn = f.file
748 fn = f.file
748 if fn.startswith(opts["prefix"]):
749 if fn.startswith(opts["prefix"]):
749 fn = fn[len(opts["prefix"]):]
750 fn = fn[len(opts["prefix"]):]
750 ui.write('\t%s:%s->%s%s \n' % (fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
751 ui.write('\t%s:%s->%s%s \n' % (fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
751 '.'.join([str(x) for x in f.revision]), ['', '(DEAD)'][f.dead]))
752 '.'.join([str(x) for x in f.revision]), ['', '(DEAD)'][f.dead]))
752 ui.write('\n')
753 ui.write('\n')
753
754
754 # have we seen the start tag?
755 # have we seen the start tag?
755 if revisions and off:
756 if revisions and off:
756 if revisions[0] == str(cs.id) or \
757 if revisions[0] == str(cs.id) or \
757 revisions[0] in cs.tags:
758 revisions[0] in cs.tags:
758 off = False
759 off = False
759
760
760 # see if we reached the end tag
761 # see if we reached the end tag
761 if len(revisions)>1 and not off:
762 if len(revisions)>1 and not off:
762 if revisions[1] == str(cs.id) or \
763 if revisions[1] == str(cs.id) or \
763 revisions[1] in cs.tags:
764 revisions[1] in cs.tags:
764 break
765 break
General Comments 0
You need to be logged in to leave comments. Login now