cvsps: make debugging easier by adding __repr__() methods.
Greg Ward
r8080:19229b0b default
@@ -1,766 +1,777 @@
 #
 # Mercurial built-in replacement for cvsps.
 #
 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
 #
 # This software may be used and distributed according to the terms
 # of the GNU General Public License, incorporated herein by reference.

 import os
 import re
 import cPickle as pickle
 from mercurial import util
 from mercurial.i18n import _

 def listsort(list, key):
     "helper to sort by key in Python 2.3"
     try:
         list.sort(key=key)
     except TypeError:
         list.sort(lambda l, r: cmp(key(l), key(r)))

 class logentry(object):
     '''Class logentry has the following attributes:
         .author    - author name as CVS knows it
         .branch    - name of branch this revision is on
         .branches  - revision tuple of branches starting at this revision
         .comment   - commit message
         .date      - the commit date as a (time, tz) tuple
         .dead      - true if file revision is dead
         .file      - name of file
         .lines     - a tuple (+lines, -lines) or None
         .parent    - previous revision of this entry
         .rcs       - name of file as returned from CVS
         .revision  - revision number as tuple
         .tags      - list of tags on the file
         .synthetic - is this a synthetic "file ... added on ..." revision?
         .mergepoint- the branch that has been merged from (if present in rlog output)
     '''
     def __init__(self, **entries):
         self.__dict__.update(entries)

+    def __repr__(self):
+        return "<%s at 0x%x: %s %s>" % (self.__class__.__name__,
+                                        id(self),
+                                        self.file,
+                                        ".".join(map(str, self.revision)))
+
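This __repr__ is the point of the changeset: the default <logentry object at 0x...> string tells you nothing in a debugger, while this format names the file and the revision. A sketch of what it prints, with a made-up entry:

    >>> e = logentry(file='src/main.c', revision=(1, 2, 2, 3))
    >>> repr(e)            # doctest: +ELLIPSIS
    '<logentry at 0x...: src/main.c 1.2.2.3>'

(the 0x address comes from id(self) and varies from run to run).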
 class logerror(Exception):
     pass

 def getrepopath(cvspath):
     """Return the repository path from a CVS path.

     >>> getrepopath('/foo/bar')
     '/foo/bar'
     >>> getrepopath('c:/foo/bar')
     'c:/foo/bar'
     >>> getrepopath(':pserver:10/foo/bar')
     '/foo/bar'
     >>> getrepopath(':pserver:10c:/foo/bar')
     '/foo/bar'
     >>> getrepopath(':pserver:/foo/bar')
     '/foo/bar'
     >>> getrepopath(':pserver:c:/foo/bar')
     'c:/foo/bar'
     >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
     '/foo/bar'
     >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
     'c:/foo/bar'
     """
     # According to the CVS manual, CVS paths are expressed like:
     # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
     #
     # Unfortunately, Windows absolute paths start with a drive letter
     # like 'c:' making it harder to parse. Here we assume that drive
     # letters are only one character long and any CVS component before
     # the repository path is at least 2 characters long, and use this
     # to disambiguate.
     parts = cvspath.split(':')
     if len(parts) == 1:
         return parts[0]
     # Here there is an ambiguous case if we have a port number
     # immediately followed by a Windows drive letter. We assume this
     # never happens and decide it must be a CVS path component,
     # therefore ignoring it.
     if len(parts[-2]) > 1:
         return parts[-1].lstrip('0123456789')
     return parts[-2] + ':' + parts[-1]

 def createlog(ui, directory=None, root="", rlog=True, cache=None):
     '''Collect the CVS rlog'''

     # Because we store many duplicate commit log messages, reusing strings
     # saves a lot of memory and pickle storage space.
     _scache = {}
     def scache(s):
         "return a shared version of a string"
         return _scache.setdefault(s, s)

     ui.status(_('collecting CVS rlog\n'))

     log = []      # list of logentry objects containing the CVS state

     # patterns to match in CVS (r)log output, by state of use
     re_00 = re.compile('RCS file: (.+)$')
     re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
     re_02 = re.compile('cvs (r?log|server): (.+)\n$')
     re_03 = re.compile("(Cannot access.+CVSROOT)|(can't create temporary directory.+)$")
     re_10 = re.compile('Working file: (.+)$')
     re_20 = re.compile('symbolic names:')
     re_30 = re.compile('\t(.+): ([\\d.]+)$')
     re_31 = re.compile('----------------------------$')
     re_32 = re.compile('=============================================================================$')
     re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
     re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?(.*mergepoint:\s+([^;]+);)?')
     re_70 = re.compile('branches: (.+);$')

     file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')

     prefix = ''   # leading path to strip off what we get from CVS

     if directory is None:
         # Current working directory

         # Get the real directory in the repository
         try:
             prefix = file(os.path.join('CVS','Repository')).read().strip()
             if prefix == ".":
                 prefix = ""
             directory = prefix
         except IOError:
             raise logerror('Not a CVS sandbox')

         if prefix and not prefix.endswith(os.sep):
             prefix += os.sep

         # Use the Root file in the sandbox, if it exists
         try:
             root = file(os.path.join('CVS','Root')).read().strip()
         except IOError:
             pass

     if not root:
         root = os.environ.get('CVSROOT', '')

     # read log cache if one exists
     oldlog = []
     date = None

     if cache:
         cachedir = os.path.expanduser('~/.hg.cvsps')
         if not os.path.exists(cachedir):
             os.mkdir(cachedir)

         # The cvsps cache pickle needs a uniquified name, based on the
         # repository location. The address may have all sorts of nasties
         # in it, slashes, colons and such. So here we take just the
         # alphanumerics, concatenated in a way that does not mix up the
         # various components, so that
         #    :pserver:user@server:/path
         # and
         #    /pserver/user/server/path
         # are mapped to different cache file names.
         cachefile = root.split(":") + [directory, "cache"]
         cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
         cachefile = os.path.join(cachedir,
                                  '.'.join([s for s in cachefile if s]))

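To make the mangling above concrete, here is a sketch with a made-up pserver root and module name; the two filter steps and the \w+ scan reduce everything to dots and dashes:

    >>> import re
    >>> parts = ':pserver:user@server:/path'.split(':') + ['proj', 'cache']
    >>> parts = ['-'.join(re.findall(r'\w+', s)) for s in parts if s]
    >>> '.'.join([s for s in parts if s])
    'pserver.user-server.path.proj.cache'

so the pickle for that repository would live at ~/.hg.cvsps/pserver.user-server.path.proj.cache.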
         if cache == 'update':
             try:
                 ui.note(_('reading cvs log cache %s\n') % cachefile)
                 oldlog = pickle.load(file(cachefile))
                 ui.note(_('cache has %d log entries\n') % len(oldlog))
             except Exception, e:
                 ui.note(_('error reading cache: %r\n') % e)

             if oldlog:
                 date = oldlog[-1].date    # last commit date as a (time,tz) tuple
                 date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')

     # build the CVS commandline
     cmd = ['cvs', '-q']
     if root:
         cmd.append('-d%s' % root)
         p = util.normpath(getrepopath(root))
         if not p.endswith('/'):
             p += '/'
         prefix = p + util.normpath(prefix)
     cmd.append(['log', 'rlog'][rlog])
     if date:
         # no space between option and date string
         cmd.append('-d>%s' % date)
     cmd.append(directory)

     # state machine begins here
     tags = {}      # dictionary of revisions on current file with their tags
     branchmap = {} # mapping between branch names and revision numbers
     state = 0
     store = False  # set when a new record can be appended

     cmd = [util.shellquote(arg) for arg in cmd]
     ui.note(_("running %s\n") % (' '.join(cmd)))
     ui.debug(_("prefix=%r directory=%r root=%r\n") % (prefix, directory, root))

     pfp = util.popen(' '.join(cmd))
     peek = pfp.readline()
     while True:
         line = peek
         if line == '':
             break
         peek = pfp.readline()
         if line.endswith('\n'):
             line = line[:-1]
         #ui.debug('state=%d line=%r\n' % (state, line))

         if state == 0:
             # initial state, consume input until we see 'RCS file'
             match = re_00.match(line)
             if match:
                 rcs = match.group(1)
                 tags = {}
                 if rlog:
                     filename = util.normpath(rcs[:-2])
                     if filename.startswith(prefix):
                         filename = filename[len(prefix):]
                     if filename.startswith('/'):
                         filename = filename[1:]
                     if filename.startswith('Attic/'):
                         filename = filename[6:]
                     else:
                         filename = filename.replace('/Attic/', '/')
                     state = 2
                     continue
                 state = 1
                 continue
             match = re_01.match(line)
             if match:
                 raise Exception(match.group(1))
             match = re_02.match(line)
             if match:
                 raise Exception(match.group(2))
             if re_03.match(line):
                 raise Exception(line)

         elif state == 1:
             # expect 'Working file' (only when using log instead of rlog)
             match = re_10.match(line)
             assert match, _('RCS file must be followed by working file')
             filename = util.normpath(match.group(1))
             state = 2

         elif state == 2:
             # expect 'symbolic names'
             if re_20.match(line):
                 branchmap = {}
                 state = 3

         elif state == 3:
             # read the symbolic names and store as tags
             match = re_30.match(line)
             if match:
                 rev = [int(x) for x in match.group(2).split('.')]

                 # Convert magic branch number to an odd-numbered one
                 revn = len(rev)
                 if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
                     rev = rev[:-2] + rev[-1:]
                 rev = tuple(rev)

                 if rev not in tags:
                     tags[rev] = []
                 tags[rev].append(match.group(1))
                 branchmap[match.group(1)] = match.group(2)

             elif re_31.match(line):
                 state = 5
             elif re_32.match(line):
                 state = 0

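For reference, CVS marks a branch tag with a "magic" revision number whose next-to-last component is 0; the conversion in state 3 above strips that marker so the tag lines up with real revisions on the branch. A minimal sketch with a made-up tag value:

    >>> rev = [int(x) for x in '1.2.0.4'.split('.')]
    >>> if len(rev) > 3 and len(rev) % 2 == 0 and rev[-2] == 0:
    ...     rev = rev[:-2] + rev[-1:]
    ...
    >>> tuple(rev)
    (1, 2, 4)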
         elif state == 4:
             # expecting '------' separator before first revision
             if re_31.match(line):
                 state = 5
             else:
                 assert not re_32.match(line), _('must have at least some revisions')

         elif state == 5:
             # expecting revision number and possibly (ignored) lock indication
             # we create the logentry here from values stored in states 0 to 4,
             # as this state is re-entered for subsequent revisions of a file.
             match = re_50.match(line)
             assert match, _('expected revision number')
             e = logentry(rcs=scache(rcs), file=scache(filename),
                          revision=tuple([int(x) for x in match.group(1).split('.')]),
                          branches=[], parent=None,
                          synthetic=False)
             state = 6

         elif state == 6:
             # expecting date, author, state, lines changed
             match = re_60.match(line)
             assert match, _('revision must be followed by date line')
             d = match.group(1)
             if d[2] == '/':
                 # Y2K
                 d = '19' + d

             if len(d.split()) != 3:
                 # cvs log dates always in GMT
                 d = d + ' UTC'
             e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S'])
             e.author = scache(match.group(2))
             e.dead = match.group(3).lower() == 'dead'

             if match.group(5):
                 if match.group(6):
                     e.lines = (int(match.group(5)), int(match.group(6)))
                 else:
                     e.lines = (int(match.group(5)), 0)
             elif match.group(6):
                 e.lines = (0, int(match.group(6)))
             else:
                 e.lines = None

             if match.group(7): # cvsnt mergepoint
                 myrev = match.group(8).split('.')
                 if len(myrev) == 2: # head
                     e.mergepoint = 'HEAD'
                 else:
                     myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
                     branches = [b for b in branchmap if branchmap[b] == myrev]
                     assert len(branches) == 1, 'unknown branch: %s' % e.mergepoint
                     e.mergepoint = branches[0]
             else:
                 e.mergepoint = None
             e.comment = []
             state = 7

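The date normalization in state 6 is easiest to follow with a sample value; assuming a pre-Y2K rlog date with no timezone field:

    >>> d = '99/05/01 12:34:56'
    >>> if d[2] == '/':
    ...     d = '19' + d
    ...
    >>> if len(d.split()) != 3:
    ...     d = d + ' UTC'
    ...
    >>> d
    '1999/05/01 12:34:56 UTC'

which then parses against the '%Y/%m/%d %H:%M:%S' format handed to util.parsedate().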
         elif state == 7:
             # read the revision numbers of branches that start at this revision
             # or store the commit log message otherwise
             m = re_70.match(line)
             if m:
                 e.branches = [tuple([int(y) for y in x.strip().split('.')])
                               for x in m.group(1).split(';')]
                 state = 8
             elif re_31.match(line) and re_50.match(peek):
                 state = 5
                 store = True
             elif re_32.match(line):
                 state = 0
                 store = True
             else:
                 e.comment.append(line)

         elif state == 8:
             # store commit log message
             if re_31.match(line):
                 state = 5
                 store = True
             elif re_32.match(line):
                 state = 0
                 store = True
             else:
                 e.comment.append(line)

         # When a file is added on a branch B1, CVS creates a synthetic
         # dead trunk revision 1.1 so that the branch has a root.
         # Likewise, if you merge such a file to a later branch B2 (one
         # that already existed when the file was added on B1), CVS
         # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
         # these revisions now, but mark them synthetic so
         # createchangeset() can take care of them.
         if (store and
             e.dead and
             e.revision[-1] == 1 and      # 1.1 or 1.1.x.1
             len(e.comment) == 1 and
             file_added_re.match(e.comment[0])):
             ui.debug(_('found synthetic revision in %s: %r\n')
                      % (e.rcs, e.comment[0]))
             e.synthetic = True

         if store:
             # clean up the results and save in the log.
             store = False
             e.tags = util.sort([scache(x) for x in tags.get(e.revision, [])])
             e.comment = scache('\n'.join(e.comment))

             revn = len(e.revision)
             if revn > 3 and (revn % 2) == 0:
                 e.branch = tags.get(e.revision[:-1], [None])[0]
             else:
                 e.branch = None

             log.append(e)

             if len(log) % 100 == 0:
                 ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')

     listsort(log, key=lambda x:(x.rcs, x.revision))

     # find parent revisions of individual files
     versions = {}
     for e in log:
         branch = e.revision[:-1]
         p = versions.get((e.rcs, branch), None)
         if p is None:
             p = e.revision[:-2]
         e.parent = p
         versions[(e.rcs, branch)] = e.revision

     # update the log cache
     if cache:
         if log:
             # join up the old and new logs
             listsort(log, key=lambda x:x.date)

             if oldlog and oldlog[-1].date >= log[0].date:
                 raise logerror('Log cache overlaps with new log entries,'
                                ' re-run without cache.')

             log = oldlog + log

             # write the new cachefile
             ui.note(_('writing cvs log cache %s\n') % cachefile)
             pickle.dump(log, file(cachefile, 'w'))
         else:
             log = oldlog

     ui.status(_('%d log entries\n') % len(log))

     return log


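The parent-finding loop at the end of createlog() threads each file's revisions together per branch: the first revision seen on a branch falls back to e.revision[:-2], the fork point, and every later revision points at its predecessor. With made-up revisions of a single file, the chains come out as:

    revision       parent
    (1, 1)         ()
    (1, 2)         (1, 1)
    (1, 2, 2, 1)   (1, 2)
    (1, 2, 2, 2)   (1, 2, 2, 1)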
 class changeset(object):
     '''Class changeset has the following attributes:
         .id        - integer identifying this changeset (list index)
         .author    - author name as CVS knows it
         .branch    - name of branch this changeset is on, or None
         .comment   - commit message
         .date      - the commit date as a (time,tz) tuple
         .entries   - list of logentry objects in this changeset
         .parents   - list of one or two parent changesets
         .tags      - list of tags on this changeset
         .synthetic - from synthetic revision "file ... added on branch ..."
         .mergepoint- the branch that has been merged from (if present in rlog output)
     '''
     def __init__(self, **entries):
         self.__dict__.update(entries)

+    def __repr__(self):
+        return "<%s at 0x%x: %s>" % (self.__class__.__name__,
+                                     id(self),
+                                     getattr(self, 'id', "(no id)"))
+
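As with logentry, the new method keeps debugger output short. Until the numbering pass at the end of createchangeset() assigns .id, the getattr() fallback is shown; a sketch:

    >>> c = changeset(entries=[])
    >>> repr(c)            # doctest: +ELLIPSIS
    '<changeset at 0x...: (no id)>'
    >>> c.id = 5
    >>> repr(c)            # doctest: +ELLIPSIS
    '<changeset at 0x...: 5>'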
 def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
     '''Convert log into changesets.'''

     ui.status(_('creating changesets\n'))

     # Merge changesets

     listsort(log, key=lambda x:(x.comment, x.author, x.branch, x.date))

     changesets = []
     files = {}
     c = None
     for i, e in enumerate(log):

         # Check if log entry belongs to the current changeset or not.
         if not (c and
                 e.comment == c.comment and
                 e.author == c.author and
                 e.branch == c.branch and
                 ((c.date[0] + c.date[1]) <=
                  (e.date[0] + e.date[1]) <=
                  (c.date[0] + c.date[1]) + fuzz) and
                 e.file not in files):
             c = changeset(comment=e.comment, author=e.author,
                           branch=e.branch, date=e.date, entries=[],
                           mergepoint=getattr(e, 'mergepoint', None))
             changesets.append(c)
             files = {}
             if len(changesets) % 100 == 0:
                 t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
                 ui.status(util.ellipsis(t, 80) + '\n')

         c.entries.append(e)
         files[e.file] = True
         c.date = e.date       # changeset date is date of latest commit in it

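The grouping condition above is easiest to check with numbers. Two entries with the same comment, author and branch coalesce when the second lands within fuzz seconds of the changeset's current date (and the file is not already in it); a sketch with made-up timestamps and a zero tz offset:

    >>> c_date = (1230000000, 0)    # (time, tz) of the current changeset
    >>> e_date = (1230000030, 0)    # entry committed 30 seconds later
    >>> fuzz = 60
    >>> sum(c_date) <= sum(e_date) <= sum(c_date) + fuzz
    True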
     # Mark synthetic changesets

     for c in changesets:
         # Synthetic revisions always get their own changeset, because
         # the log message includes the filename. E.g. if you add file3
         # and file4 on a branch, you get four log entries and three
         # changesets:
         #   "File file3 was added on branch ..." (synthetic, 1 entry)
         #   "File file4 was added on branch ..." (synthetic, 1 entry)
         #   "Add file3 and file4 to fix ..."     (real, 2 entries)
         # Hence the check for 1 entry here.
         synth = getattr(c.entries[0], 'synthetic', None)
         c.synthetic = (len(c.entries) == 1 and synth)

     # Sort files in each changeset

     for c in changesets:
         def pathcompare(l, r):
             'Mimic cvsps sorting order'
             l = l.split('/')
             r = r.split('/')
             nl = len(l)
             nr = len(r)
             n = min(nl, nr)
             for i in range(n):
                 if i + 1 == nl and nl < nr:
                     return -1
                 elif i + 1 == nr and nl > nr:
                     return +1
                 elif l[i] < r[i]:
                     return -1
                 elif l[i] > r[i]:
                     return +1
             return 0
         def entitycompare(l, r):
             return pathcompare(l.file, r.file)

         c.entries.sort(entitycompare)

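Called directly (a sketch; in the code pathcompare is only reached through entitycompare), the comparator shows the cvsps ordering it mimics, with files in a directory sorting ahead of anything in its subdirectories:

    >>> pathcompare('Makefile', 'lib/util.c')
    -1
    >>> pathcompare('lib/a.c', 'lib/b.c')
    -1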
     # Sort changesets by date

     def cscmp(l, r):
         d = sum(l.date) - sum(r.date)
         if d:
             return d

         # detect vendor branches and initial commits on a branch
         le = {}
         for e in l.entries:
             le[e.rcs] = e.revision
         re = {}
         for e in r.entries:
             re[e.rcs] = e.revision

         d = 0
         for e in l.entries:
             if re.get(e.rcs, None) == e.parent:
                 assert not d
                 d = 1
                 break

         for e in r.entries:
             if le.get(e.rcs, None) == e.parent:
                 assert not d
                 d = -1
                 break

         return d

     changesets.sort(cscmp)

     # Collect tags

     globaltags = {}
     for c in changesets:
         tags = {}
         for e in c.entries:
             for tag in e.tags:
                 # remember which is the latest changeset to have this tag
                 globaltags[tag] = c

     for c in changesets:
         tags = {}
         for e in c.entries:
             for tag in e.tags:
                 tags[tag] = True
         # remember tags only if this is the latest changeset to have it
         c.tags = util.sort([tag for tag in tags if globaltags[tag] is c])

     # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
     # by inserting dummy changesets with two parents, and handle
     # {{mergefrombranch BRANCHNAME}} by setting two parents.

     if mergeto is None:
         mergeto = r'{{mergetobranch ([-\w]+)}}'
     if mergeto:
         mergeto = re.compile(mergeto)

     if mergefrom is None:
         mergefrom = r'{{mergefrombranch ([-\w]+)}}'
     if mergefrom:
         mergefrom = re.compile(mergefrom)

     versions = {}    # changeset index where we saw any particular file version
     branches = {}    # changeset index where we saw a branch
     n = len(changesets)
     i = 0
     while i < n:
         c = changesets[i]

         for f in c.entries:
             versions[(f.rcs, f.revision)] = i

         p = None
         if c.branch in branches:
             p = branches[c.branch]
         else:
             for f in c.entries:
                 p = max(p, versions.get((f.rcs, f.parent), None))

         c.parents = []
         if p is not None:
             p = changesets[p]

             # Ensure no changeset has a synthetic changeset as a parent.
             while p.synthetic:
                 assert len(p.parents) <= 1, \
                        _('synthetic changeset cannot have multiple parents')
                 if p.parents:
                     p = p.parents[0]
                 else:
                     p = None
                     break

             if p is not None:
                 c.parents.append(p)

         if c.mergepoint:
             if c.mergepoint == 'HEAD':
                 c.mergepoint = None
             c.parents.append(changesets[branches[c.mergepoint]])

         if mergefrom:
             m = mergefrom.search(c.comment)
             if m:
                 m = m.group(1)
                 if m == 'HEAD':
                     m = None
                 candidate = changesets[branches[m]]
                 if m in branches and c.branch != m and not candidate.synthetic:
                     c.parents.append(candidate)

         if mergeto:
             m = mergeto.search(c.comment)
             if m:
                 try:
                     m = m.group(1)
                     if m == 'HEAD':
                         m = None
                 except:
                     m = None # if no group found then merge to HEAD
                 if m in branches and c.branch != m:
                     # insert empty changeset for merge
                     cc = changeset(author=c.author, branch=m, date=c.date,
                                    comment='convert-repo: CVS merge from branch %s' % c.branch,
                                    entries=[], tags=[],
                                    parents=[changesets[branches[m]], c])
                     changesets.insert(i + 1, cc)
                     branches[m] = i + 1

                     # adjust our loop counters now we have inserted a new entry
                     n += 1
                     i += 2
                     continue

         branches[c.branch] = i
         i += 1

     # Drop synthetic changesets (safe now that we have ensured no other
     # changesets can have them as parents).
     i = 0
     while i < len(changesets):
         if changesets[i].synthetic:
             del changesets[i]
         else:
             i += 1

     # Number changesets

     for i, c in enumerate(changesets):
         c.id = i + 1

     ui.status(_('%d changeset entries\n') % len(changesets))

     return changesets


 def debugcvsps(ui, *args, **opts):
     '''Read CVS rlog for current directory or named path in repository, and
     convert the log to changesets based on matching commit log entries and dates.'''

     if opts["new_cache"]:
         cache = "write"
     elif opts["update_cache"]:
         cache = "update"
     else:
         cache = None

     revisions = opts["revisions"]

     try:
         if args:
             log = []
             for d in args:
                 log += createlog(ui, d, root=opts["root"], cache=cache)
         else:
             log = createlog(ui, root=opts["root"], cache=cache)
     except logerror, e:
         ui.write("%r\n" % e)
         return

     changesets = createchangeset(ui, log, opts["fuzz"])
     del log

     # Print changesets (optionally filtered)

     off = len(revisions)
     branches = {}    # latest version number in each branch
     ancestors = {}   # parent branch
     for cs in changesets:

         if opts["ancestors"]:
             if cs.branch not in branches and cs.parents and cs.parents[0].id:
                 ancestors[cs.branch] = changesets[cs.parents[0].id-1].branch, cs.parents[0].id
             branches[cs.branch] = cs.id

         # limit by branches
         if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
             continue

         if not off:
             # Note: trailing spaces on several lines here are needed to have
             # bug-for-bug compatibility with cvsps.
             ui.write('---------------------\n')
             ui.write('PatchSet %d \n' % cs.id)
             ui.write('Date: %s\n' % util.datestr(cs.date, '%Y/%m/%d %H:%M:%S %1%2'))
             ui.write('Author: %s\n' % cs.author)
             ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
             ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags)>1],
                                        ','.join(cs.tags) or '(none)'))
             if opts["parents"] and cs.parents:
                 if len(cs.parents)>1:
                     ui.write('Parents: %s\n' % (','.join([str(p.id) for p in cs.parents])))
                 else:
                     ui.write('Parent: %d\n' % cs.parents[0].id)

             if opts["ancestors"]:
                 b = cs.branch
                 r = []
                 while b:
                     b, c = ancestors[b]
                     r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
                 if r:
                     ui.write('Ancestors: %s\n' % (','.join(r)))

             ui.write('Log:\n')
             ui.write('%s\n\n' % cs.comment)
             ui.write('Members: \n')
             for f in cs.entries:
                 fn = f.file
                 if fn.startswith(opts["prefix"]):
                     fn = fn[len(opts["prefix"]):]
                 ui.write('\t%s:%s->%s%s \n' % (fn,
                          '.'.join([str(x) for x in f.parent]) or 'INITIAL',
                          '.'.join([str(x) for x in f.revision]),
                          ['', '(DEAD)'][f.dead]))
             ui.write('\n')

         # have we seen the start tag?
         if revisions and off:
             if revisions[0] == str(cs.id) or \
                revisions[0] in cs.tags:
                 off = False

         # see if we reached the end tag
         if len(revisions)>1 and not off:
             if revisions[1] == str(cs.id) or \
                revisions[1] in cs.tags:
                 break
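For reference, this function backs the convert extension's hg debugcvsps command. Assuming the command-line options map one-to-one onto the opts keys read above (fuzz, parents, ancestors, root, and so on; an assumption, check hg help debugcvsps for the exact names), a session from inside a CVS checkout looks roughly like:

    $ cd ~/src/mycheckout
    $ hg debugcvsps --fuzz 60 --parents mymodule

which prints each PatchSet in the cvsps-compatible format produced by the ui.write() calls above.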