##// END OF EJS Templates
convert/cvsps: use set.update for bulk update
Martin Geisler -
r8483:221786b9 default
parent child Browse files
Show More
@@ -1,781 +1,780
1 #
1 #
2 # Mercurial built-in replacement for cvsps.
2 # Mercurial built-in replacement for cvsps.
3 #
3 #
4 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
4 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2, incorporated herein by reference.
7 # GNU General Public License version 2, incorporated herein by reference.
8
8
9 import os
9 import os
10 import re
10 import re
11 import cPickle as pickle
11 import cPickle as pickle
12 from mercurial import util
12 from mercurial import util
13 from mercurial.i18n import _
13 from mercurial.i18n import _
14
14
def listsort(list, key):
    "helper to sort by key in Python 2.3"
    def _compare(left, right):
        # Comparison-function fallback equivalent to sorting by key.
        return cmp(key(left), key(right))
    try:
        # Python 2.4+: list.sort accepts a key function directly.
        list.sort(key=key)
    except TypeError:
        # Python 2.3's list.sort only takes a comparison function.
        list.sort(_compare)
21
21
class logentry(object):
    '''One file revision parsed from the CVS (r)log output.

    Instances carry the following attributes:
    .author    - author name as CVS knows it
    .branch    - name of branch this revision is on
    .branches  - revision tuple of branches starting at this revision
    .comment   - commit message
    .date      - the commit date as a (time, tz) tuple
    .dead      - true if file revision is dead
    .file      - name of file
    .lines     - a tuple (+lines, -lines) or None
    .parent    - previous revision of this entry
    .rcs       - name of file as returned from CVS
    .revision  - revision number as tuple
    .tags      - list of tags on the file
    .synthetic - is this a synthetic "file ... added on ..." revision?
    .mergepoint- the branch that has been merged from (if present in
                 rlog output)
    '''
    def __init__(self, **entries):
        # All attributes are supplied by the caller as keyword arguments.
        self.__dict__.update(entries)

    def __repr__(self):
        dotted = ".".join(map(str, self.revision))
        return "<%s at 0x%x: %s %s>" % (self.__class__.__name__,
                                        id(self), self.file, dotted)
47
47
class logerror(Exception):
    '''Raised for unrecoverable problems while collecting or caching the
    CVS log, e.g. not being inside a CVS sandbox, or a log cache that
    overlaps freshly fetched entries.'''
    pass
50
50
def getrepopath(cvspath):
    """Return the repository path from a CVS path.

    >>> getrepopath('/foo/bar')
    '/foo/bar'
    >>> getrepopath('c:/foo/bar')
    'c:/foo/bar'
    >>> getrepopath(':pserver:10/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:10c:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:c:/foo/bar')
    'c:/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
    'c:/foo/bar'
    """
    # Per the CVS manual, CVS paths look like:
    #   [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
    #
    # Windows absolute paths complicate parsing because they start with
    # a drive letter like 'c:'.  We assume drive letters are exactly one
    # character and that every CVS component preceding the repository
    # path is at least two characters long, and disambiguate on that.
    atoms = cvspath.split(':')
    last = atoms[-1]
    if len(atoms) == 1:
        # No colon at all: a plain repository path.
        return last
    penultimate = atoms[-2]
    if len(penultimate) > 1:
        # The component before the path is not a drive letter.  It may
        # be a host possibly followed by a port number that got fused
        # onto the path; we assume a port immediately followed by a
        # drive letter never happens and strip leading digits.
        return last.lstrip('0123456789')
    # One-character component: treat it as a Windows drive letter.
    return penultimate + ':' + last
89
89
def createlog(ui, directory=None, root="", rlog=True, cache=None):
    '''Collect the CVS rlog.

    Runs "cvs rlog" (or "cvs log" when rlog is false) and parses its
    output with a state machine, producing a list of logentry objects,
    one per file revision.

    ui        - ui object used for status/note/debug output
    directory - module/path to log; None means the current CVS sandbox
                (its CVS/Repository file supplies the path, and its
                CVS/Root file may supply the root)
    root      - CVSROOT to use; when empty, falls back to the sandbox
                CVS/Root file (sandbox mode only) and then to the
                CVSROOT environment variable
    rlog      - if true, run "cvs rlog" instead of "cvs log"
    cache     - None for no caching, or 'update' to read and update a
                per-repository pickle cache under ~/.hg.cvsps

    Returns the list of logentry objects.  Raises logerror when not in
    a CVS sandbox or when the log cache overlaps new entries.
    '''

    # Because we store many duplicate commit log messages, reusing strings
    # saves a lot of memory and pickle storage space.
    _scache = {}
    def scache(s):
        "return a shared version of a string"
        return _scache.setdefault(s, s)

    ui.status(_('collecting CVS rlog\n'))

    log = []      # list of logentry objects containing the CVS state

    # patterns to match in CVS (r)log output, by state of use
    re_00 = re.compile('RCS file: (.+)$')
    re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
    re_02 = re.compile('cvs (r?log|server): (.+)\n$')
    re_03 = re.compile("(Cannot access.+CVSROOT)|(can't create temporary directory.+)$")
    re_10 = re.compile('Working file: (.+)$')
    re_20 = re.compile('symbolic names:')
    re_30 = re.compile('\t(.+): ([\\d.]+)$')
    re_31 = re.compile('----------------------------$')
    re_32 = re.compile('=============================================================================$')
    re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
    re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?(.*mergepoint:\s+([^;]+);)?')
    re_70 = re.compile('branches: (.+);$')

    # Matches the log message of CVS's synthetic dead revisions (see the
    # long comment near the bottom of the parse loop).
    file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')

    prefix = ''   # leading path to strip of what we get from CVS

    if directory is None:
        # Current working directory

        # Get the real directory in the repository
        try:
            # NOTE: file() is the Python 2 builtin open().
            prefix = file(os.path.join('CVS','Repository')).read().strip()
            if prefix == ".":
                prefix = ""
            directory = prefix
        except IOError:
            raise logerror('Not a CVS sandbox')

        if prefix and not prefix.endswith(os.sep):
            prefix += os.sep

        # Use the Root file in the sandbox, if it exists
        try:
            root = file(os.path.join('CVS','Root')).read().strip()
        except IOError:
            pass

    if not root:
        root = os.environ.get('CVSROOT', '')

    # read log cache if one exists
    oldlog = []
    date = None

    if cache:
        cachedir = os.path.expanduser('~/.hg.cvsps')
        if not os.path.exists(cachedir):
            os.mkdir(cachedir)

        # The cvsps cache pickle needs a uniquified name, based on the
        # repository location. The address may have all sort of nasties
        # in it, slashes, colons and such. So here we take just the
        # alphanumerics, concatenated in a way that does not mix up the
        # various components, so that
        #    :pserver:user@server:/path
        # and
        #    /pserver/user/server/path
        # are mapped to different cache file names.
        cachefile = root.split(":") + [directory, "cache"]
        cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
        cachefile = os.path.join(cachedir,
                                 '.'.join([s for s in cachefile if s]))

        if cache == 'update':
            try:
                ui.note(_('reading cvs log cache %s\n') % cachefile)
                oldlog = pickle.load(file(cachefile))
                ui.note(_('cache has %d log entries\n') % len(oldlog))
            except Exception, e:
                # Best-effort: a broken/missing cache only means a full
                # (uncached) rlog run.
                ui.note(_('error reading cache: %r\n') % e)

            if oldlog:
                date = oldlog[-1].date    # last commit date as a (time,tz) tuple
                date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')

    # build the CVS commandline
    cmd = ['cvs', '-q']
    if root:
        cmd.append('-d%s' % root)
        p = util.normpath(getrepopath(root))
        if not p.endswith('/'):
            p += '/'
        prefix = p + util.normpath(prefix)
    # Bool-indexed pick: False -> 'log', True -> 'rlog'.
    cmd.append(['log', 'rlog'][rlog])
    if date:
        # no space between option and date string
        cmd.append('-d>%s' % date)
    cmd.append(directory)

    # state machine begins here
    #   0: scan for 'RCS file'         1: expect 'Working file'
    #   2: expect 'symbolic names'     3: read tags/branch map
    #   4: expect '-----' separator    5: expect revision number
    #   6: expect date/author line   7/8: accumulate log message
    tags = {}     # dictionary of revisions on current file with their tags
    branchmap = {} # mapping between branch names and revision numbers
    state = 0
    store = False # set when a new record can be appended

    cmd = [util.shellquote(arg) for arg in cmd]
    ui.note(_("running %s\n") % (' '.join(cmd)))
    ui.debug(_("prefix=%r directory=%r root=%r\n") % (prefix, directory, root))

    pfp = util.popen(' '.join(cmd))
    # One line of lookahead (peek) is needed in state 7 to distinguish a
    # '-----' separator followed by a revision from one inside a message.
    peek = pfp.readline()
    while True:
        line = peek
        if line == '':
            break
        peek = pfp.readline()
        if line.endswith('\n'):
            line = line[:-1]
        #ui.debug('state=%d line=%r\n' % (state, line))

        if state == 0:
            # initial state, consume input until we see 'RCS file'
            match = re_00.match(line)
            if match:
                rcs = match.group(1)
                tags = {}
                if rlog:
                    # rlog gives repository paths: strip the ',v' RCS
                    # suffix, the repository prefix and any Attic part.
                    filename = util.normpath(rcs[:-2])
                    if filename.startswith(prefix):
                        filename = filename[len(prefix):]
                    if filename.startswith('/'):
                        filename = filename[1:]
                    if filename.startswith('Attic/'):
                        filename = filename[6:]
                    else:
                        filename = filename.replace('/Attic/', '/')
                    state = 2
                    continue
                state = 1
                continue
            match = re_01.match(line)
            if match:
                raise Exception(match.group(1))
            match = re_02.match(line)
            if match:
                raise Exception(match.group(2))
            if re_03.match(line):
                raise Exception(line)

        elif state == 1:
            # expect 'Working file' (only when using log instead of rlog)
            match = re_10.match(line)
            assert match, _('RCS file must be followed by working file')
            filename = util.normpath(match.group(1))
            state = 2

        elif state == 2:
            # expect 'symbolic names'
            if re_20.match(line):
                branchmap = {}
                state = 3

        elif state == 3:
            # read the symbolic names and store as tags
            match = re_30.match(line)
            if match:
                rev = [int(x) for x in match.group(2).split('.')]

                # Convert magic branch number to an odd-numbered one
                revn = len(rev)
                if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
                    rev = rev[:-2] + rev[-1:]
                rev = tuple(rev)

                if rev not in tags:
                    tags[rev] = []
                tags[rev].append(match.group(1))
                branchmap[match.group(1)] = match.group(2)

            elif re_31.match(line):
                state = 5
            elif re_32.match(line):
                state = 0

        elif state == 4:
            # expecting '------' separator before first revision
            if re_31.match(line):
                state = 5
            else:
                assert not re_32.match(line), _('must have at least some revisions')

        elif state == 5:
            # expecting revision number and possibly (ignored) lock indication
            # we create the logentry here from values stored in states 0 to 4,
            # as this state is re-entered for subsequent revisions of a file.
            match = re_50.match(line)
            assert match, _('expected revision number')
            e = logentry(rcs=scache(rcs), file=scache(filename),
                         revision=tuple([int(x) for x in match.group(1).split('.')]),
                         branches=[], parent=None,
                         synthetic=False)
            state = 6

        elif state == 6:
            # expecting date, author, state, lines changed
            match = re_60.match(line)
            assert match, _('revision must be followed by date line')
            d = match.group(1)
            if d[2] == '/':
                # Y2K
                d = '19' + d

            if len(d.split()) != 3:
                # cvs log dates always in GMT
                d = d + ' UTC'
            e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S'])
            e.author = scache(match.group(2))
            e.dead = match.group(3).lower() == 'dead'

            # groups 5/6 are the optional '+N'/'-N' of 'lines: +N -N'.
            if match.group(5):
                if match.group(6):
                    e.lines = (int(match.group(5)), int(match.group(6)))
                else:
                    e.lines = (int(match.group(5)), 0)
            elif match.group(6):
                e.lines = (0, int(match.group(6)))
            else:
                e.lines = None

            if match.group(7): # cvsnt mergepoint
                myrev = match.group(8).split('.')
                if len(myrev) == 2: # head
                    e.mergepoint = 'HEAD'
                else:
                    # Reconstruct the magic branch number and look up
                    # the branch name in branchmap.
                    myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
                    branches = [b for b in branchmap if branchmap[b] == myrev]
                    assert len(branches) == 1, 'unknown branch: %s' % e.mergepoint
                    e.mergepoint = branches[0]
            else:
                e.mergepoint = None
            e.comment = []
            state = 7

        elif state == 7:
            # read the revision numbers of branches that start at this revision
            # or store the commit log message otherwise
            m = re_70.match(line)
            if m:
                e.branches = [tuple([int(y) for y in x.strip().split('.')])
                              for x in m.group(1).split(';')]
                state = 8
            elif re_31.match(line) and re_50.match(peek):
                state = 5
                store = True
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        elif state == 8:
            # store commit log message
            if re_31.match(line):
                state = 5
                store = True
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        # When a file is added on a branch B1, CVS creates a synthetic
        # dead trunk revision 1.1 so that the branch has a root.
        # Likewise, if you merge such a file to a later branch B2 (one
        # that already existed when the file was added on B1), CVS
        # creates a synthetic dead revision 1.1.x.1 on B2.  Don't drop
        # these revisions now, but mark them synthetic so
        # createchangeset() can take care of them.
        if (store and
            e.dead and
            e.revision[-1] == 1 and      # 1.1 or 1.1.x.1
            len(e.comment) == 1 and
            file_added_re.match(e.comment[0])):
            ui.debug(_('found synthetic revision in %s: %r\n')
                     % (e.rcs, e.comment[0]))
            e.synthetic = True

        if store:
            # clean up the results and save in the log.
            store = False
            e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
            e.comment = scache('\n'.join(e.comment))

            # Even-length revision tuples longer than (major, minor)
            # identify a branch revision; look up its branch name.
            revn = len(e.revision)
            if revn > 3 and (revn % 2) == 0:
                e.branch = tags.get(e.revision[:-1], [None])[0]
            else:
                e.branch = None

            log.append(e)

            if len(log) % 100 == 0:
                ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')

    listsort(log, key=lambda x:(x.rcs, x.revision))

    # find parent revisions of individual files
    versions = {}
    for e in log:
        branch = e.revision[:-1]
        p = versions.get((e.rcs, branch), None)
        if p is None:
            p = e.revision[:-2]
        e.parent = p
        versions[(e.rcs, branch)] = e.revision

    # update the log cache
    if cache:
        if log:
            # join up the old and new logs
            listsort(log, key=lambda x:x.date)

            if oldlog and oldlog[-1].date >= log[0].date:
                raise logerror('Log cache overlaps with new log entries,'
                               ' re-run without cache.')

            log = oldlog + log

            # write the new cachefile
            ui.note(_('writing cvs log cache %s\n') % cachefile)
            pickle.dump(log, file(cachefile, 'w'))
        else:
            log = oldlog

    ui.status(_('%d log entries\n') % len(log))

    return log
433
433
434
434
class changeset(object):
    '''A group of file revisions that together form one converted commit.

    Instances carry the following attributes:
    .id        - integer identifying this changeset (list index)
    .author    - author name as CVS knows it
    .branch    - name of branch this changeset is on, or None
    .comment   - commit message
    .date      - the commit date as a (time,tz) tuple
    .entries   - list of logentry objects in this changeset
    .parents   - list of one or two parent changesets
    .tags      - list of tags on this changeset
    .synthetic - from synthetic revision "file ... added on branch ..."
    .mergepoint- the branch that has been merged from (if present in
                 rlog output)
    '''
    def __init__(self, **entries):
        # All attributes are supplied by the caller as keyword arguments.
        self.__dict__.update(entries)

    def __repr__(self):
        ident = getattr(self, 'id', "(no id)")
        return "<%s at 0x%x: %s>" % (self.__class__.__name__,
                                     id(self), ident)
455
455
456 def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
456 def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
457 '''Convert log into changesets.'''
457 '''Convert log into changesets.'''
458
458
459 ui.status(_('creating changesets\n'))
459 ui.status(_('creating changesets\n'))
460
460
461 # Merge changesets
461 # Merge changesets
462
462
463 listsort(log, key=lambda x:(x.comment, x.author, x.branch, x.date))
463 listsort(log, key=lambda x:(x.comment, x.author, x.branch, x.date))
464
464
465 changesets = []
465 changesets = []
466 files = set()
466 files = set()
467 c = None
467 c = None
468 for i, e in enumerate(log):
468 for i, e in enumerate(log):
469
469
470 # Check if log entry belongs to the current changeset or not.
470 # Check if log entry belongs to the current changeset or not.
471 if not (c and
471 if not (c and
472 e.comment == c.comment and
472 e.comment == c.comment and
473 e.author == c.author and
473 e.author == c.author and
474 e.branch == c.branch and
474 e.branch == c.branch and
475 ((c.date[0] + c.date[1]) <=
475 ((c.date[0] + c.date[1]) <=
476 (e.date[0] + e.date[1]) <=
476 (e.date[0] + e.date[1]) <=
477 (c.date[0] + c.date[1]) + fuzz) and
477 (c.date[0] + c.date[1]) + fuzz) and
478 e.file not in files):
478 e.file not in files):
479 c = changeset(comment=e.comment, author=e.author,
479 c = changeset(comment=e.comment, author=e.author,
480 branch=e.branch, date=e.date, entries=[],
480 branch=e.branch, date=e.date, entries=[],
481 mergepoint=getattr(e, 'mergepoint', None))
481 mergepoint=getattr(e, 'mergepoint', None))
482 changesets.append(c)
482 changesets.append(c)
483 files = set()
483 files = set()
484 if len(changesets) % 100 == 0:
484 if len(changesets) % 100 == 0:
485 t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
485 t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
486 ui.status(util.ellipsis(t, 80) + '\n')
486 ui.status(util.ellipsis(t, 80) + '\n')
487
487
488 c.entries.append(e)
488 c.entries.append(e)
489 files.add(e.file)
489 files.add(e.file)
490 c.date = e.date # changeset date is date of latest commit in it
490 c.date = e.date # changeset date is date of latest commit in it
491
491
492 # Mark synthetic changesets
492 # Mark synthetic changesets
493
493
494 for c in changesets:
494 for c in changesets:
495 # Synthetic revisions always get their own changeset, because
495 # Synthetic revisions always get their own changeset, because
496 # the log message includes the filename. E.g. if you add file3
496 # the log message includes the filename. E.g. if you add file3
497 # and file4 on a branch, you get four log entries and three
497 # and file4 on a branch, you get four log entries and three
498 # changesets:
498 # changesets:
499 # "File file3 was added on branch ..." (synthetic, 1 entry)
499 # "File file3 was added on branch ..." (synthetic, 1 entry)
500 # "File file4 was added on branch ..." (synthetic, 1 entry)
500 # "File file4 was added on branch ..." (synthetic, 1 entry)
501 # "Add file3 and file4 to fix ..." (real, 2 entries)
501 # "Add file3 and file4 to fix ..." (real, 2 entries)
502 # Hence the check for 1 entry here.
502 # Hence the check for 1 entry here.
503 synth = getattr(c.entries[0], 'synthetic', None)
503 synth = getattr(c.entries[0], 'synthetic', None)
504 c.synthetic = (len(c.entries) == 1 and synth)
504 c.synthetic = (len(c.entries) == 1 and synth)
505
505
506 # Sort files in each changeset
506 # Sort files in each changeset
507
507
508 for c in changesets:
508 for c in changesets:
509 def pathcompare(l, r):
509 def pathcompare(l, r):
510 'Mimic cvsps sorting order'
510 'Mimic cvsps sorting order'
511 l = l.split('/')
511 l = l.split('/')
512 r = r.split('/')
512 r = r.split('/')
513 nl = len(l)
513 nl = len(l)
514 nr = len(r)
514 nr = len(r)
515 n = min(nl, nr)
515 n = min(nl, nr)
516 for i in range(n):
516 for i in range(n):
517 if i + 1 == nl and nl < nr:
517 if i + 1 == nl and nl < nr:
518 return -1
518 return -1
519 elif i + 1 == nr and nl > nr:
519 elif i + 1 == nr and nl > nr:
520 return +1
520 return +1
521 elif l[i] < r[i]:
521 elif l[i] < r[i]:
522 return -1
522 return -1
523 elif l[i] > r[i]:
523 elif l[i] > r[i]:
524 return +1
524 return +1
525 return 0
525 return 0
526 def entitycompare(l, r):
526 def entitycompare(l, r):
527 return pathcompare(l.file, r.file)
527 return pathcompare(l.file, r.file)
528
528
529 c.entries.sort(entitycompare)
529 c.entries.sort(entitycompare)
530
530
531 # Sort changesets by date
531 # Sort changesets by date
532
532
533 def cscmp(l, r):
533 def cscmp(l, r):
534 d = sum(l.date) - sum(r.date)
534 d = sum(l.date) - sum(r.date)
535 if d:
535 if d:
536 return d
536 return d
537
537
538 # detect vendor branches and initial commits on a branch
538 # detect vendor branches and initial commits on a branch
539 le = {}
539 le = {}
540 for e in l.entries:
540 for e in l.entries:
541 le[e.rcs] = e.revision
541 le[e.rcs] = e.revision
542 re = {}
542 re = {}
543 for e in r.entries:
543 for e in r.entries:
544 re[e.rcs] = e.revision
544 re[e.rcs] = e.revision
545
545
546 d = 0
546 d = 0
547 for e in l.entries:
547 for e in l.entries:
548 if re.get(e.rcs, None) == e.parent:
548 if re.get(e.rcs, None) == e.parent:
549 assert not d
549 assert not d
550 d = 1
550 d = 1
551 break
551 break
552
552
553 for e in r.entries:
553 for e in r.entries:
554 if le.get(e.rcs, None) == e.parent:
554 if le.get(e.rcs, None) == e.parent:
555 assert not d
555 assert not d
556 d = -1
556 d = -1
557 break
557 break
558
558
559 return d
559 return d
560
560
561 changesets.sort(cscmp)
561 changesets.sort(cscmp)
562
562
563 # Collect tags
563 # Collect tags
564
564
565 globaltags = {}
565 globaltags = {}
566 for c in changesets:
566 for c in changesets:
567 for e in c.entries:
567 for e in c.entries:
568 for tag in e.tags:
568 for tag in e.tags:
569 # remember which is the latest changeset to have this tag
569 # remember which is the latest changeset to have this tag
570 globaltags[tag] = c
570 globaltags[tag] = c
571
571
572 for c in changesets:
572 for c in changesets:
573 tags = set()
573 tags = set()
574 for e in c.entries:
574 for e in c.entries:
575 for tag in e.tags:
575 tags.update(e.tags)
576 tags.add(tag)
577 # remember tags only if this is the latest changeset to have it
576 # remember tags only if this is the latest changeset to have it
578 c.tags = sorted(tag for tag in tags if globaltags[tag] is c)
577 c.tags = sorted(tag for tag in tags if globaltags[tag] is c)
579
578
580 # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
579 # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
581 # by inserting dummy changesets with two parents, and handle
580 # by inserting dummy changesets with two parents, and handle
582 # {{mergefrombranch BRANCHNAME}} by setting two parents.
581 # {{mergefrombranch BRANCHNAME}} by setting two parents.
583
582
584 if mergeto is None:
583 if mergeto is None:
585 mergeto = r'{{mergetobranch ([-\w]+)}}'
584 mergeto = r'{{mergetobranch ([-\w]+)}}'
586 if mergeto:
585 if mergeto:
587 mergeto = re.compile(mergeto)
586 mergeto = re.compile(mergeto)
588
587
589 if mergefrom is None:
588 if mergefrom is None:
590 mergefrom = r'{{mergefrombranch ([-\w]+)}}'
589 mergefrom = r'{{mergefrombranch ([-\w]+)}}'
591 if mergefrom:
590 if mergefrom:
592 mergefrom = re.compile(mergefrom)
591 mergefrom = re.compile(mergefrom)
593
592
594 versions = {} # changeset index where we saw any particular file version
593 versions = {} # changeset index where we saw any particular file version
595 branches = {} # changeset index where we saw a branch
594 branches = {} # changeset index where we saw a branch
596 n = len(changesets)
595 n = len(changesets)
597 i = 0
596 i = 0
598 while i<n:
597 while i<n:
599 c = changesets[i]
598 c = changesets[i]
600
599
601 for f in c.entries:
600 for f in c.entries:
602 versions[(f.rcs, f.revision)] = i
601 versions[(f.rcs, f.revision)] = i
603
602
604 p = None
603 p = None
605 if c.branch in branches:
604 if c.branch in branches:
606 p = branches[c.branch]
605 p = branches[c.branch]
607 else:
606 else:
608 for f in c.entries:
607 for f in c.entries:
609 p = max(p, versions.get((f.rcs, f.parent), None))
608 p = max(p, versions.get((f.rcs, f.parent), None))
610
609
611 c.parents = []
610 c.parents = []
612 if p is not None:
611 if p is not None:
613 p = changesets[p]
612 p = changesets[p]
614
613
615 # Ensure no changeset has a synthetic changeset as a parent.
614 # Ensure no changeset has a synthetic changeset as a parent.
616 while p.synthetic:
615 while p.synthetic:
617 assert len(p.parents) <= 1, \
616 assert len(p.parents) <= 1, \
618 _('synthetic changeset cannot have multiple parents')
617 _('synthetic changeset cannot have multiple parents')
619 if p.parents:
618 if p.parents:
620 p = p.parents[0]
619 p = p.parents[0]
621 else:
620 else:
622 p = None
621 p = None
623 break
622 break
624
623
625 if p is not None:
624 if p is not None:
626 c.parents.append(p)
625 c.parents.append(p)
627
626
628 if c.mergepoint:
627 if c.mergepoint:
629 if c.mergepoint == 'HEAD':
628 if c.mergepoint == 'HEAD':
630 c.mergepoint = None
629 c.mergepoint = None
631 c.parents.append(changesets[branches[c.mergepoint]])
630 c.parents.append(changesets[branches[c.mergepoint]])
632
631
633 if mergefrom:
632 if mergefrom:
634 m = mergefrom.search(c.comment)
633 m = mergefrom.search(c.comment)
635 if m:
634 if m:
636 m = m.group(1)
635 m = m.group(1)
637 if m == 'HEAD':
636 if m == 'HEAD':
638 m = None
637 m = None
639 try:
638 try:
640 candidate = changesets[branches[m]]
639 candidate = changesets[branches[m]]
641 except KeyError:
640 except KeyError:
642 ui.warn(_("warning: CVS commit message references "
641 ui.warn(_("warning: CVS commit message references "
643 "non-existent branch %r:\n%s\n")
642 "non-existent branch %r:\n%s\n")
644 % (m, c.comment))
643 % (m, c.comment))
645 if m in branches and c.branch != m and not candidate.synthetic:
644 if m in branches and c.branch != m and not candidate.synthetic:
646 c.parents.append(candidate)
645 c.parents.append(candidate)
647
646
648 if mergeto:
647 if mergeto:
649 m = mergeto.search(c.comment)
648 m = mergeto.search(c.comment)
650 if m:
649 if m:
651 try:
650 try:
652 m = m.group(1)
651 m = m.group(1)
653 if m == 'HEAD':
652 if m == 'HEAD':
654 m = None
653 m = None
655 except:
654 except:
656 m = None # if no group found then merge to HEAD
655 m = None # if no group found then merge to HEAD
657 if m in branches and c.branch != m:
656 if m in branches and c.branch != m:
658 # insert empty changeset for merge
657 # insert empty changeset for merge
659 cc = changeset(author=c.author, branch=m, date=c.date,
658 cc = changeset(author=c.author, branch=m, date=c.date,
660 comment='convert-repo: CVS merge from branch %s' % c.branch,
659 comment='convert-repo: CVS merge from branch %s' % c.branch,
661 entries=[], tags=[], parents=[changesets[branches[m]], c])
660 entries=[], tags=[], parents=[changesets[branches[m]], c])
662 changesets.insert(i + 1, cc)
661 changesets.insert(i + 1, cc)
663 branches[m] = i + 1
662 branches[m] = i + 1
664
663
665 # adjust our loop counters now we have inserted a new entry
664 # adjust our loop counters now we have inserted a new entry
666 n += 1
665 n += 1
667 i += 2
666 i += 2
668 continue
667 continue
669
668
670 branches[c.branch] = i
669 branches[c.branch] = i
671 i += 1
670 i += 1
672
671
673 # Drop synthetic changesets (safe now that we have ensured no other
672 # Drop synthetic changesets (safe now that we have ensured no other
674 # changesets can have them as parents).
673 # changesets can have them as parents).
675 i = 0
674 i = 0
676 while i < len(changesets):
675 while i < len(changesets):
677 if changesets[i].synthetic:
676 if changesets[i].synthetic:
678 del changesets[i]
677 del changesets[i]
679 else:
678 else:
680 i += 1
679 i += 1
681
680
682 # Number changesets
681 # Number changesets
683
682
684 for i, c in enumerate(changesets):
683 for i, c in enumerate(changesets):
685 c.id = i + 1
684 c.id = i + 1
686
685
687 ui.status(_('%d changeset entries\n') % len(changesets))
686 ui.status(_('%d changeset entries\n') % len(changesets))
688
687
689 return changesets
688 return changesets
690
689
691
690
692 def debugcvsps(ui, *args, **opts):
691 def debugcvsps(ui, *args, **opts):
693 '''Read CVS rlog for current directory or named path in repository, and
692 '''Read CVS rlog for current directory or named path in repository, and
694 convert the log to changesets based on matching commit log entries and dates.'''
693 convert the log to changesets based on matching commit log entries and dates.'''
695
694
696 if opts["new_cache"]:
695 if opts["new_cache"]:
697 cache = "write"
696 cache = "write"
698 elif opts["update_cache"]:
697 elif opts["update_cache"]:
699 cache = "update"
698 cache = "update"
700 else:
699 else:
701 cache = None
700 cache = None
702
701
703 revisions = opts["revisions"]
702 revisions = opts["revisions"]
704
703
705 try:
704 try:
706 if args:
705 if args:
707 log = []
706 log = []
708 for d in args:
707 for d in args:
709 log += createlog(ui, d, root=opts["root"], cache=cache)
708 log += createlog(ui, d, root=opts["root"], cache=cache)
710 else:
709 else:
711 log = createlog(ui, root=opts["root"], cache=cache)
710 log = createlog(ui, root=opts["root"], cache=cache)
712 except logerror, e:
711 except logerror, e:
713 ui.write("%r\n"%e)
712 ui.write("%r\n"%e)
714 return
713 return
715
714
716 changesets = createchangeset(ui, log, opts["fuzz"])
715 changesets = createchangeset(ui, log, opts["fuzz"])
717 del log
716 del log
718
717
719 # Print changesets (optionally filtered)
718 # Print changesets (optionally filtered)
720
719
721 off = len(revisions)
720 off = len(revisions)
722 branches = {} # latest version number in each branch
721 branches = {} # latest version number in each branch
723 ancestors = {} # parent branch
722 ancestors = {} # parent branch
724 for cs in changesets:
723 for cs in changesets:
725
724
726 if opts["ancestors"]:
725 if opts["ancestors"]:
727 if cs.branch not in branches and cs.parents and cs.parents[0].id:
726 if cs.branch not in branches and cs.parents and cs.parents[0].id:
728 ancestors[cs.branch] = changesets[cs.parents[0].id-1].branch, cs.parents[0].id
727 ancestors[cs.branch] = changesets[cs.parents[0].id-1].branch, cs.parents[0].id
729 branches[cs.branch] = cs.id
728 branches[cs.branch] = cs.id
730
729
731 # limit by branches
730 # limit by branches
732 if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
731 if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
733 continue
732 continue
734
733
735 if not off:
734 if not off:
736 # Note: trailing spaces on several lines here are needed to have
735 # Note: trailing spaces on several lines here are needed to have
737 # bug-for-bug compatibility with cvsps.
736 # bug-for-bug compatibility with cvsps.
738 ui.write('---------------------\n')
737 ui.write('---------------------\n')
739 ui.write('PatchSet %d \n' % cs.id)
738 ui.write('PatchSet %d \n' % cs.id)
740 ui.write('Date: %s\n' % util.datestr(cs.date, '%Y/%m/%d %H:%M:%S %1%2'))
739 ui.write('Date: %s\n' % util.datestr(cs.date, '%Y/%m/%d %H:%M:%S %1%2'))
741 ui.write('Author: %s\n' % cs.author)
740 ui.write('Author: %s\n' % cs.author)
742 ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
741 ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
743 ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags)>1],
742 ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags)>1],
744 ','.join(cs.tags) or '(none)'))
743 ','.join(cs.tags) or '(none)'))
745 if opts["parents"] and cs.parents:
744 if opts["parents"] and cs.parents:
746 if len(cs.parents)>1:
745 if len(cs.parents)>1:
747 ui.write('Parents: %s\n' % (','.join([str(p.id) for p in cs.parents])))
746 ui.write('Parents: %s\n' % (','.join([str(p.id) for p in cs.parents])))
748 else:
747 else:
749 ui.write('Parent: %d\n' % cs.parents[0].id)
748 ui.write('Parent: %d\n' % cs.parents[0].id)
750
749
751 if opts["ancestors"]:
750 if opts["ancestors"]:
752 b = cs.branch
751 b = cs.branch
753 r = []
752 r = []
754 while b:
753 while b:
755 b, c = ancestors[b]
754 b, c = ancestors[b]
756 r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
755 r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
757 if r:
756 if r:
758 ui.write('Ancestors: %s\n' % (','.join(r)))
757 ui.write('Ancestors: %s\n' % (','.join(r)))
759
758
760 ui.write('Log:\n')
759 ui.write('Log:\n')
761 ui.write('%s\n\n' % cs.comment)
760 ui.write('%s\n\n' % cs.comment)
762 ui.write('Members: \n')
761 ui.write('Members: \n')
763 for f in cs.entries:
762 for f in cs.entries:
764 fn = f.file
763 fn = f.file
765 if fn.startswith(opts["prefix"]):
764 if fn.startswith(opts["prefix"]):
766 fn = fn[len(opts["prefix"]):]
765 fn = fn[len(opts["prefix"]):]
767 ui.write('\t%s:%s->%s%s \n' % (fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
766 ui.write('\t%s:%s->%s%s \n' % (fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
768 '.'.join([str(x) for x in f.revision]), ['', '(DEAD)'][f.dead]))
767 '.'.join([str(x) for x in f.revision]), ['', '(DEAD)'][f.dead]))
769 ui.write('\n')
768 ui.write('\n')
770
769
771 # have we seen the start tag?
770 # have we seen the start tag?
772 if revisions and off:
771 if revisions and off:
773 if revisions[0] == str(cs.id) or \
772 if revisions[0] == str(cs.id) or \
774 revisions[0] in cs.tags:
773 revisions[0] in cs.tags:
775 off = False
774 off = False
776
775
777 # see if we reached the end tag
776 # see if we reached the end tag
778 if len(revisions)>1 and not off:
777 if len(revisions)>1 and not off:
779 if revisions[1] == str(cs.id) or \
778 if revisions[1] == str(cs.id) or \
780 revisions[1] in cs.tags:
779 revisions[1] in cs.tags:
781 break
780 break
General Comments 0
You need to be logged in to leave comments. Login now