##// END OF EJS Templates
cvsps: recognize and eliminate CVS' synthetic "file added" revisions.
Greg Ward -
r7862:02981000 1.2.1 default
parent child Browse files
Show More
@@ -0,0 +1,65 b''
1 #!/bin/sh
2
3 # This feature requires use of builtin cvsps!
4 "$TESTDIR/hghave" cvs || exit 80
5
6 # XXX lots of duplication with other test-convert-cvs* scripts
7
8 set -e
9
10 echo "[extensions]" >> $HGRCPATH
11 echo "convert = " >> $HGRCPATH
12 echo "[convert]" >> $HGRCPATH
13 echo "cvsps=builtin" >> $HGRCPATH
14
15 echo % create cvs repository with one project
16 mkdir cvsrepo
17 cd cvsrepo
18 export CVSROOT=`pwd`
19 export CVS_OPTIONS=-f
20 cd ..
21
22 filter='sed "s:$CVSROOT:*REPO*:g"'
23 cvscall()
24 {
25 cvs -f "$@" | eval $filter
26 }
27
28 cvscall -q -d "$CVSROOT" init
29 mkdir cvsrepo/proj
30
31 cvscall co proj
32
33 echo % create file1 on the trunk
34 cd proj
35 touch file1
36 cvscall add file1
37 cvscall ci -m"add file1 on trunk" file1
38
39 echo % create two branches
40 cvscall tag -b v1_0
41 cvscall tag -b v1_1
42
43 echo % create file2 on branch v1_0
44 cvs up -rv1_0
45 touch file2
46 cvscall add file2
47 cvscall ci -m"add file2 on branch v1_0" file2
48
49 echo % create file3, file4 on branch v1_1
50 cvs up -rv1_1
51 touch file3
52 touch file4
53 cvscall add file3 file4
54 cvscall ci -m"add file3, file4 on branch v1_1" file3 file4
55
56 echo % merge file2 from v1_0 to v1_1
57 cvscall up -jv1_0
58 cvscall ci -m"merge file2 from v1_0 to v1_1"
59
60 echo % convert to hg
61 cd ..
62 hg convert proj proj.hg | eval $filter
63
64 echo % hg log output
65 hg -R proj.hg log --template "{rev} {desc}\n"
@@ -0,0 +1,72 b''
1 % create cvs repository with one project
2 cvs checkout: Updating proj
3 % create file1 on the trunk
4 cvs add: scheduling file `file1' for addition
5 cvs add: use 'cvs commit' to add this file permanently
6 RCS file: *REPO*/proj/file1,v
7 done
8 Checking in file1;
9 *REPO*/proj/file1,v <-- file1
10 initial revision: 1.1
11 done
12 % create two branches
13 cvs tag: Tagging .
14 T file1
15 cvs tag: Tagging .
16 T file1
17 % create file2 on branch v1_0
18 cvs update: Updating .
19 cvs add: scheduling file `file2' for addition on branch `v1_0'
20 cvs add: use 'cvs commit' to add this file permanently
21 RCS file: *REPO*/proj/Attic/file2,v
22 done
23 Checking in file2;
24 *REPO*/proj/Attic/file2,v <-- file2
25 new revision: 1.1.2.1; previous revision: 1.1
26 done
27 % create file3, file4 on branch v1_1
28 cvs update: Updating .
29 cvs update: file2 is no longer in the repository
30 cvs add: scheduling file `file3' for addition on branch `v1_1'
31 cvs add: scheduling file `file4' for addition on branch `v1_1'
32 cvs add: use 'cvs commit' to add these files permanently
33 RCS file: *REPO*/proj/Attic/file3,v
34 done
35 Checking in file3;
36 *REPO*/proj/Attic/file3,v <-- file3
37 new revision: 1.1.2.1; previous revision: 1.1
38 done
39 RCS file: *REPO*/proj/Attic/file4,v
40 done
41 Checking in file4;
42 *REPO*/proj/Attic/file4,v <-- file4
43 new revision: 1.1.2.1; previous revision: 1.1
44 done
45 % merge file2 from v1_0 to v1_1
46 cvs update: Updating .
47 U file2
48 cvs commit: Examining .
49 Checking in file2;
50 *REPO*/proj/Attic/file2,v <-- file2
51 new revision: 1.1.4.2; previous revision: 1.1.4.1
52 done
53 % convert to hg
54 initializing destination proj.hg repository
55 using builtin cvsps
56 collecting CVS rlog
57 9 log entries
58 creating changesets
59 4 changeset entries
60 connecting to *REPO*
61 scanning source...
62 sorting...
63 converting...
64 3 add file1 on trunk
65 2 add file2 on branch v1_0
66 1 add file3, file4 on branch v1_1
67 0 merge file2 from v1_0 to v1_1
68 % hg log output
69 3 merge file2 from v1_0 to v1_1
70 2 add file3, file4 on branch v1_1
71 1 add file2 on branch v1_0
72 0 add file1 on trunk
@@ -1,684 +1,740 b''
1 #
1 #
2 # Mercurial built-in replacement for cvsps.
2 # Mercurial built-in replacement for cvsps.
3 #
3 #
4 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
4 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
5 #
5 #
6 # This software may be used and distributed according to the terms
6 # This software may be used and distributed according to the terms
7 # of the GNU General Public License, incorporated herein by reference.
7 # of the GNU General Public License, incorporated herein by reference.
8
8
9 import os
9 import os
10 import re
10 import re
11 import cPickle as pickle
11 import cPickle as pickle
12 from mercurial import util
12 from mercurial import util
13 from mercurial.i18n import _
13 from mercurial.i18n import _
14
14
15 def listsort(list, key):
15 def listsort(list, key):
16 "helper to sort by key in Python 2.3"
16 "helper to sort by key in Python 2.3"
17 try:
17 try:
18 list.sort(key=key)
18 list.sort(key=key)
19 except TypeError:
19 except TypeError:
20 list.sort(lambda l, r: cmp(key(l), key(r)))
20 list.sort(lambda l, r: cmp(key(l), key(r)))
21
21
22 class logentry(object):
22 class logentry(object):
23 '''Class logentry has the following attributes:
23 '''Class logentry has the following attributes:
24 .author - author name as CVS knows it
24 .author - author name as CVS knows it
25 .branch - name of branch this revision is on
25 .branch - name of branch this revision is on
26 .branches - revision tuple of branches starting at this revision
26 .branches - revision tuple of branches starting at this revision
27 .comment - commit message
27 .comment - commit message
28 .date - the commit date as a (time, tz) tuple
28 .date - the commit date as a (time, tz) tuple
29 .dead - true if file revision is dead
29 .dead - true if file revision is dead
30 .file - Name of file
30 .file - Name of file
31 .lines - a tuple (+lines, -lines) or None
31 .lines - a tuple (+lines, -lines) or None
32 .parent - Previous revision of this entry
32 .parent - Previous revision of this entry
33 .rcs - name of file as returned from CVS
33 .rcs - name of file as returned from CVS
34 .revision - revision number as tuple
34 .revision - revision number as tuple
35 .tags - list of tags on the file
35 .tags - list of tags on the file
36 .synthetic - is this a synthetic "file ... added on ..." revision?
36 '''
37 '''
37 def __init__(self, **entries):
38 def __init__(self, **entries):
38 self.__dict__.update(entries)
39 self.__dict__.update(entries)
39
40
40 class logerror(Exception):
41 class logerror(Exception):
41 pass
42 pass
42
43
43 def getrepopath(cvspath):
44 def getrepopath(cvspath):
44 """Return the repository path from a CVS path.
45 """Return the repository path from a CVS path.
45
46
46 >>> getrepopath('/foo/bar')
47 >>> getrepopath('/foo/bar')
47 '/foo/bar'
48 '/foo/bar'
48 >>> getrepopath('c:/foo/bar')
49 >>> getrepopath('c:/foo/bar')
49 'c:/foo/bar'
50 'c:/foo/bar'
50 >>> getrepopath(':pserver:10/foo/bar')
51 >>> getrepopath(':pserver:10/foo/bar')
51 '/foo/bar'
52 '/foo/bar'
52 >>> getrepopath(':pserver:10c:/foo/bar')
53 >>> getrepopath(':pserver:10c:/foo/bar')
53 '/foo/bar'
54 '/foo/bar'
54 >>> getrepopath(':pserver:/foo/bar')
55 >>> getrepopath(':pserver:/foo/bar')
55 '/foo/bar'
56 '/foo/bar'
56 >>> getrepopath(':pserver:c:/foo/bar')
57 >>> getrepopath(':pserver:c:/foo/bar')
57 'c:/foo/bar'
58 'c:/foo/bar'
58 >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
59 >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
59 '/foo/bar'
60 '/foo/bar'
60 >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
61 >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
61 'c:/foo/bar'
62 'c:/foo/bar'
62 """
63 """
63 # According to CVS manual, CVS paths are expressed like:
64 # According to CVS manual, CVS paths are expressed like:
64 # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
65 # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
65 #
66 #
66 # Unfortunately, Windows absolute paths start with a drive letter
67 # Unfortunately, Windows absolute paths start with a drive letter
67 # like 'c:' making it harder to parse. Here we assume that drive
68 # like 'c:' making it harder to parse. Here we assume that drive
68 # letters are only one character long and any CVS component before
69 # letters are only one character long and any CVS component before
69 # the repository path is at least 2 characters long, and use this
70 # the repository path is at least 2 characters long, and use this
70 # to disambiguate.
71 # to disambiguate.
71 parts = cvspath.split(':')
72 parts = cvspath.split(':')
72 if len(parts) == 1:
73 if len(parts) == 1:
73 return parts[0]
74 return parts[0]
74 # Here there is an ambiguous case if we have a port number
75 # Here there is an ambiguous case if we have a port number
75 # immediately followed by a Windows drive letter. We assume this
76 # immediately followed by a Windows drive letter. We assume this
76 # never happens and decide it must be a CVS path component,
77 # never happens and decide it must be a CVS path component,
77 # therefore ignoring it.
78 # therefore ignoring it.
78 if len(parts[-2]) > 1:
79 if len(parts[-2]) > 1:
79 return parts[-1].lstrip('0123456789')
80 return parts[-1].lstrip('0123456789')
80 return parts[-2] + ':' + parts[-1]
81 return parts[-2] + ':' + parts[-1]
81
82
82 def createlog(ui, directory=None, root="", rlog=True, cache=None):
83 def createlog(ui, directory=None, root="", rlog=True, cache=None):
83 '''Collect the CVS rlog'''
84 '''Collect the CVS rlog'''
84
85
85 # Because we store many duplicate commit log messages, reusing strings
86 # Because we store many duplicate commit log messages, reusing strings
86 # saves a lot of memory and pickle storage space.
87 # saves a lot of memory and pickle storage space.
87 _scache = {}
88 _scache = {}
88 def scache(s):
89 def scache(s):
89 "return a shared version of a string"
90 "return a shared version of a string"
90 return _scache.setdefault(s, s)
91 return _scache.setdefault(s, s)
91
92
92 ui.status(_('collecting CVS rlog\n'))
93 ui.status(_('collecting CVS rlog\n'))
93
94
94 log = [] # list of logentry objects containing the CVS state
95 log = [] # list of logentry objects containing the CVS state
95
96
96 # patterns to match in CVS (r)log output, by state of use
97 # patterns to match in CVS (r)log output, by state of use
97 re_00 = re.compile('RCS file: (.+)$')
98 re_00 = re.compile('RCS file: (.+)$')
98 re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
99 re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
99 re_02 = re.compile('cvs (r?log|server): (.+)\n$')
100 re_02 = re.compile('cvs (r?log|server): (.+)\n$')
100 re_03 = re.compile("(Cannot access.+CVSROOT)|(can't create temporary directory.+)$")
101 re_03 = re.compile("(Cannot access.+CVSROOT)|(can't create temporary directory.+)$")
101 re_10 = re.compile('Working file: (.+)$')
102 re_10 = re.compile('Working file: (.+)$')
102 re_20 = re.compile('symbolic names:')
103 re_20 = re.compile('symbolic names:')
103 re_30 = re.compile('\t(.+): ([\\d.]+)$')
104 re_30 = re.compile('\t(.+): ([\\d.]+)$')
104 re_31 = re.compile('----------------------------$')
105 re_31 = re.compile('----------------------------$')
105 re_32 = re.compile('=============================================================================$')
106 re_32 = re.compile('=============================================================================$')
106 re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
107 re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
107 re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?')
108 re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?')
108 re_70 = re.compile('branches: (.+);$')
109 re_70 = re.compile('branches: (.+);$')
109
110
111 file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')
112
110 prefix = ''   # leading path to strip off what we get from CVS
113 prefix = ''   # leading path to strip off what we get from CVS
111
114
112 if directory is None:
115 if directory is None:
113 # Current working directory
116 # Current working directory
114
117
115 # Get the real directory in the repository
118 # Get the real directory in the repository
116 try:
119 try:
117 prefix = file(os.path.join('CVS','Repository')).read().strip()
120 prefix = file(os.path.join('CVS','Repository')).read().strip()
118 if prefix == ".":
121 if prefix == ".":
119 prefix = ""
122 prefix = ""
120 directory = prefix
123 directory = prefix
121 except IOError:
124 except IOError:
122 raise logerror('Not a CVS sandbox')
125 raise logerror('Not a CVS sandbox')
123
126
124 if prefix and not prefix.endswith(os.sep):
127 if prefix and not prefix.endswith(os.sep):
125 prefix += os.sep
128 prefix += os.sep
126
129
127 # Use the Root file in the sandbox, if it exists
130 # Use the Root file in the sandbox, if it exists
128 try:
131 try:
129 root = file(os.path.join('CVS','Root')).read().strip()
132 root = file(os.path.join('CVS','Root')).read().strip()
130 except IOError:
133 except IOError:
131 pass
134 pass
132
135
133 if not root:
136 if not root:
134 root = os.environ.get('CVSROOT', '')
137 root = os.environ.get('CVSROOT', '')
135
138
136 # read log cache if one exists
139 # read log cache if one exists
137 oldlog = []
140 oldlog = []
138 date = None
141 date = None
139
142
140 if cache:
143 if cache:
141 cachedir = os.path.expanduser('~/.hg.cvsps')
144 cachedir = os.path.expanduser('~/.hg.cvsps')
142 if not os.path.exists(cachedir):
145 if not os.path.exists(cachedir):
143 os.mkdir(cachedir)
146 os.mkdir(cachedir)
144
147
145 # The cvsps cache pickle needs a uniquified name, based on the
148 # The cvsps cache pickle needs a uniquified name, based on the
146 # repository location. The address may have all sort of nasties
149 # repository location. The address may have all sort of nasties
147 # in it, slashes, colons and such. So here we take just the
150 # in it, slashes, colons and such. So here we take just the
148 # alphanumerics, concatenated in a way that does not mix up the
151 # alphanumerics, concatenated in a way that does not mix up the
149 # various components, so that
152 # various components, so that
150 # :pserver:user@server:/path
153 # :pserver:user@server:/path
151 # and
154 # and
152 # /pserver/user/server/path
155 # /pserver/user/server/path
153 # are mapped to different cache file names.
156 # are mapped to different cache file names.
154 cachefile = root.split(":") + [directory, "cache"]
157 cachefile = root.split(":") + [directory, "cache"]
155 cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
158 cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
156 cachefile = os.path.join(cachedir,
159 cachefile = os.path.join(cachedir,
157 '.'.join([s for s in cachefile if s]))
160 '.'.join([s for s in cachefile if s]))
158
161
159 if cache == 'update':
162 if cache == 'update':
160 try:
163 try:
161 ui.note(_('reading cvs log cache %s\n') % cachefile)
164 ui.note(_('reading cvs log cache %s\n') % cachefile)
162 oldlog = pickle.load(file(cachefile))
165 oldlog = pickle.load(file(cachefile))
163 ui.note(_('cache has %d log entries\n') % len(oldlog))
166 ui.note(_('cache has %d log entries\n') % len(oldlog))
164 except Exception, e:
167 except Exception, e:
165 ui.note(_('error reading cache: %r\n') % e)
168 ui.note(_('error reading cache: %r\n') % e)
166
169
167 if oldlog:
170 if oldlog:
168 date = oldlog[-1].date # last commit date as a (time,tz) tuple
171 date = oldlog[-1].date # last commit date as a (time,tz) tuple
169 date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
172 date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
170
173
171 # build the CVS commandline
174 # build the CVS commandline
172 cmd = ['cvs', '-q']
175 cmd = ['cvs', '-q']
173 if root:
176 if root:
174 cmd.append('-d%s' % root)
177 cmd.append('-d%s' % root)
175 p = util.normpath(getrepopath(root))
178 p = util.normpath(getrepopath(root))
176 if not p.endswith('/'):
179 if not p.endswith('/'):
177 p += '/'
180 p += '/'
178 prefix = p + util.normpath(prefix)
181 prefix = p + util.normpath(prefix)
179 cmd.append(['log', 'rlog'][rlog])
182 cmd.append(['log', 'rlog'][rlog])
180 if date:
183 if date:
181 # no space between option and date string
184 # no space between option and date string
182 cmd.append('-d>%s' % date)
185 cmd.append('-d>%s' % date)
183 cmd.append(directory)
186 cmd.append(directory)
184
187
185 # state machine begins here
188 # state machine begins here
186 tags = {} # dictionary of revisions on current file with their tags
189 tags = {} # dictionary of revisions on current file with their tags
187 state = 0
190 state = 0
188 store = False # set when a new record can be appended
191 store = False # set when a new record can be appended
189
192
190 cmd = [util.shellquote(arg) for arg in cmd]
193 cmd = [util.shellquote(arg) for arg in cmd]
191 ui.note(_("running %s\n") % (' '.join(cmd)))
194 ui.note(_("running %s\n") % (' '.join(cmd)))
192 ui.debug(_("prefix=%r directory=%r root=%r\n") % (prefix, directory, root))
195 ui.debug(_("prefix=%r directory=%r root=%r\n") % (prefix, directory, root))
193
196
194 pfp = util.popen(' '.join(cmd))
197 pfp = util.popen(' '.join(cmd))
195 peek = pfp.readline()
198 peek = pfp.readline()
196 while True:
199 while True:
197 line = peek
200 line = peek
198 if line == '':
201 if line == '':
199 break
202 break
200 peek = pfp.readline()
203 peek = pfp.readline()
201 if line.endswith('\n'):
204 if line.endswith('\n'):
202 line = line[:-1]
205 line = line[:-1]
203 #ui.debug('state=%d line=%r\n' % (state, line))
206 #ui.debug('state=%d line=%r\n' % (state, line))
204
207
205 if state == 0:
208 if state == 0:
206 # initial state, consume input until we see 'RCS file'
209 # initial state, consume input until we see 'RCS file'
207 match = re_00.match(line)
210 match = re_00.match(line)
208 if match:
211 if match:
209 rcs = match.group(1)
212 rcs = match.group(1)
210 tags = {}
213 tags = {}
211 if rlog:
214 if rlog:
212 filename = util.normpath(rcs[:-2])
215 filename = util.normpath(rcs[:-2])
213 if filename.startswith(prefix):
216 if filename.startswith(prefix):
214 filename = filename[len(prefix):]
217 filename = filename[len(prefix):]
215 if filename.startswith('/'):
218 if filename.startswith('/'):
216 filename = filename[1:]
219 filename = filename[1:]
217 if filename.startswith('Attic/'):
220 if filename.startswith('Attic/'):
218 filename = filename[6:]
221 filename = filename[6:]
219 else:
222 else:
220 filename = filename.replace('/Attic/', '/')
223 filename = filename.replace('/Attic/', '/')
221 state = 2
224 state = 2
222 continue
225 continue
223 state = 1
226 state = 1
224 continue
227 continue
225 match = re_01.match(line)
228 match = re_01.match(line)
226 if match:
229 if match:
227 raise Exception(match.group(1))
230 raise Exception(match.group(1))
228 match = re_02.match(line)
231 match = re_02.match(line)
229 if match:
232 if match:
230 raise Exception(match.group(2))
233 raise Exception(match.group(2))
231 if re_03.match(line):
234 if re_03.match(line):
232 raise Exception(line)
235 raise Exception(line)
233
236
234 elif state == 1:
237 elif state == 1:
235 # expect 'Working file' (only when using log instead of rlog)
238 # expect 'Working file' (only when using log instead of rlog)
236 match = re_10.match(line)
239 match = re_10.match(line)
237 assert match, _('RCS file must be followed by working file')
240 assert match, _('RCS file must be followed by working file')
238 filename = util.normpath(match.group(1))
241 filename = util.normpath(match.group(1))
239 state = 2
242 state = 2
240
243
241 elif state == 2:
244 elif state == 2:
242 # expect 'symbolic names'
245 # expect 'symbolic names'
243 if re_20.match(line):
246 if re_20.match(line):
244 state = 3
247 state = 3
245
248
246 elif state == 3:
249 elif state == 3:
247 # read the symbolic names and store as tags
250 # read the symbolic names and store as tags
248 match = re_30.match(line)
251 match = re_30.match(line)
249 if match:
252 if match:
250 rev = [int(x) for x in match.group(2).split('.')]
253 rev = [int(x) for x in match.group(2).split('.')]
251
254
252 # Convert magic branch number to an odd-numbered one
255 # Convert magic branch number to an odd-numbered one
253 revn = len(rev)
256 revn = len(rev)
254 if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
257 if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
255 rev = rev[:-2] + rev[-1:]
258 rev = rev[:-2] + rev[-1:]
256 rev = tuple(rev)
259 rev = tuple(rev)
257
260
258 if rev not in tags:
261 if rev not in tags:
259 tags[rev] = []
262 tags[rev] = []
260 tags[rev].append(match.group(1))
263 tags[rev].append(match.group(1))
261
264
262 elif re_31.match(line):
265 elif re_31.match(line):
263 state = 5
266 state = 5
264 elif re_32.match(line):
267 elif re_32.match(line):
265 state = 0
268 state = 0
266
269
267 elif state == 4:
270 elif state == 4:
268 # expecting '------' separator before first revision
271 # expecting '------' separator before first revision
269 if re_31.match(line):
272 if re_31.match(line):
270 state = 5
273 state = 5
271 else:
274 else:
272 assert not re_32.match(line), _('must have at least some revisions')
275 assert not re_32.match(line), _('must have at least some revisions')
273
276
274 elif state == 5:
277 elif state == 5:
275 # expecting revision number and possibly (ignored) lock indication
278 # expecting revision number and possibly (ignored) lock indication
276 # we create the logentry here from values stored in states 0 to 4,
279 # we create the logentry here from values stored in states 0 to 4,
277 # as this state is re-entered for subsequent revisions of a file.
280 # as this state is re-entered for subsequent revisions of a file.
278 match = re_50.match(line)
281 match = re_50.match(line)
279 assert match, _('expected revision number')
282 assert match, _('expected revision number')
280 e = logentry(rcs=scache(rcs), file=scache(filename),
283 e = logentry(rcs=scache(rcs), file=scache(filename),
281 revision=tuple([int(x) for x in match.group(1).split('.')]),
284 revision=tuple([int(x) for x in match.group(1).split('.')]),
282 branches=[], parent=None)
285 branches=[], parent=None,
286 synthetic=False)
283 state = 6
287 state = 6
284
288
285 elif state == 6:
289 elif state == 6:
286 # expecting date, author, state, lines changed
290 # expecting date, author, state, lines changed
287 match = re_60.match(line)
291 match = re_60.match(line)
288 assert match, _('revision must be followed by date line')
292 assert match, _('revision must be followed by date line')
289 d = match.group(1)
293 d = match.group(1)
290 if d[2] == '/':
294 if d[2] == '/':
291 # Y2K
295 # Y2K
292 d = '19' + d
296 d = '19' + d
293
297
294 if len(d.split()) != 3:
298 if len(d.split()) != 3:
295 # cvs log dates always in GMT
299 # cvs log dates always in GMT
296 d = d + ' UTC'
300 d = d + ' UTC'
297 e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S'])
301 e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S'])
298 e.author = scache(match.group(2))
302 e.author = scache(match.group(2))
299 e.dead = match.group(3).lower() == 'dead'
303 e.dead = match.group(3).lower() == 'dead'
300
304
301 if match.group(5):
305 if match.group(5):
302 if match.group(6):
306 if match.group(6):
303 e.lines = (int(match.group(5)), int(match.group(6)))
307 e.lines = (int(match.group(5)), int(match.group(6)))
304 else:
308 else:
305 e.lines = (int(match.group(5)), 0)
309 e.lines = (int(match.group(5)), 0)
306 elif match.group(6):
310 elif match.group(6):
307 e.lines = (0, int(match.group(6)))
311 e.lines = (0, int(match.group(6)))
308 else:
312 else:
309 e.lines = None
313 e.lines = None
310 e.comment = []
314 e.comment = []
311 state = 7
315 state = 7
312
316
313 elif state == 7:
317 elif state == 7:
314 # read the revision numbers of branches that start at this revision
318 # read the revision numbers of branches that start at this revision
315 # or store the commit log message otherwise
319 # or store the commit log message otherwise
316 m = re_70.match(line)
320 m = re_70.match(line)
317 if m:
321 if m:
318 e.branches = [tuple([int(y) for y in x.strip().split('.')])
322 e.branches = [tuple([int(y) for y in x.strip().split('.')])
319 for x in m.group(1).split(';')]
323 for x in m.group(1).split(';')]
320 state = 8
324 state = 8
321 elif re_31.match(line) and re_50.match(peek):
325 elif re_31.match(line) and re_50.match(peek):
322 state = 5
326 state = 5
323 store = True
327 store = True
324 elif re_32.match(line):
328 elif re_32.match(line):
325 state = 0
329 state = 0
326 store = True
330 store = True
327 else:
331 else:
328 e.comment.append(line)
332 e.comment.append(line)
329
333
330 elif state == 8:
334 elif state == 8:
331 # store commit log message
335 # store commit log message
332 if re_31.match(line):
336 if re_31.match(line):
333 state = 5
337 state = 5
334 store = True
338 store = True
335 elif re_32.match(line):
339 elif re_32.match(line):
336 state = 0
340 state = 0
337 store = True
341 store = True
338 else:
342 else:
339 e.comment.append(line)
343 e.comment.append(line)
340
344
345 # When a file is added on a branch B1, CVS creates a synthetic
346 # dead trunk revision 1.1 so that the branch has a root.
347 # Likewise, if you merge such a file to a later branch B2 (one
348 # that already existed when the file was added on B1), CVS
349 # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
350 # these revisions now, but mark them synthetic so
351 # createchangeset() can take care of them.
352 if (store and
353 e.dead and
354 e.revision[-1] == 1 and # 1.1 or 1.1.x.1
355 len(e.comment) == 1 and
356 file_added_re.match(e.comment[0])):
357 ui.debug(_('found synthetic rev in %s: %r\n')
358 % (e.rcs, e.comment[0]))
359 e.synthetic = True
360
341 if store:
361 if store:
342 # clean up the results and save in the log.
362 # clean up the results and save in the log.
343 store = False
363 store = False
344 e.tags = util.sort([scache(x) for x in tags.get(e.revision, [])])
364 e.tags = util.sort([scache(x) for x in tags.get(e.revision, [])])
345 e.comment = scache('\n'.join(e.comment))
365 e.comment = scache('\n'.join(e.comment))
346
366
347 revn = len(e.revision)
367 revn = len(e.revision)
348 if revn > 3 and (revn % 2) == 0:
368 if revn > 3 and (revn % 2) == 0:
349 e.branch = tags.get(e.revision[:-1], [None])[0]
369 e.branch = tags.get(e.revision[:-1], [None])[0]
350 else:
370 else:
351 e.branch = None
371 e.branch = None
352
372
353 log.append(e)
373 log.append(e)
354
374
355 if len(log) % 100 == 0:
375 if len(log) % 100 == 0:
356 ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')
376 ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')
357
377
358 listsort(log, key=lambda x:(x.rcs, x.revision))
378 listsort(log, key=lambda x:(x.rcs, x.revision))
359
379
360 # find parent revisions of individual files
380 # find parent revisions of individual files
361 versions = {}
381 versions = {}
362 for e in log:
382 for e in log:
363 branch = e.revision[:-1]
383 branch = e.revision[:-1]
364 p = versions.get((e.rcs, branch), None)
384 p = versions.get((e.rcs, branch), None)
365 if p is None:
385 if p is None:
366 p = e.revision[:-2]
386 p = e.revision[:-2]
367 e.parent = p
387 e.parent = p
368 versions[(e.rcs, branch)] = e.revision
388 versions[(e.rcs, branch)] = e.revision
369
389
370 # update the log cache
390 # update the log cache
371 if cache:
391 if cache:
372 if log:
392 if log:
373 # join up the old and new logs
393 # join up the old and new logs
374 listsort(log, key=lambda x:x.date)
394 listsort(log, key=lambda x:x.date)
375
395
376 if oldlog and oldlog[-1].date >= log[0].date:
396 if oldlog and oldlog[-1].date >= log[0].date:
377 raise logerror('Log cache overlaps with new log entries,'
397 raise logerror('Log cache overlaps with new log entries,'
378 ' re-run without cache.')
398 ' re-run without cache.')
379
399
380 log = oldlog + log
400 log = oldlog + log
381
401
382 # write the new cachefile
402 # write the new cachefile
383 ui.note(_('writing cvs log cache %s\n') % cachefile)
403 ui.note(_('writing cvs log cache %s\n') % cachefile)
384 pickle.dump(log, file(cachefile, 'w'))
404 pickle.dump(log, file(cachefile, 'w'))
385 else:
405 else:
386 log = oldlog
406 log = oldlog
387
407
388 ui.status(_('%d log entries\n') % len(log))
408 ui.status(_('%d log entries\n') % len(log))
389
409
390 return log
410 return log
391
411
392
412
393 class changeset(object):
413 class changeset(object):
394 '''Class changeset has the following attributes:
414 '''Class changeset has the following attributes:
395 .author - author name as CVS knows it
415 .author - author name as CVS knows it
396 .branch - name of branch this changeset is on, or None
416 .branch - name of branch this changeset is on, or None
397 .comment - commit message
417 .comment - commit message
398 .date - the commit date as a (time,tz) tuple
418 .date - the commit date as a (time,tz) tuple
399 .entries - list of logentry objects in this changeset
419 .entries - list of logentry objects in this changeset
400 .parents - list of one or two parent changesets
420 .parents - list of one or two parent changesets
401 .tags - list of tags on this changeset
421 .tags - list of tags on this changeset
422 .synthetic - from synthetic revision "file ... added on branch ..."
402 '''
423 '''
403 def __init__(self, **entries):
424 def __init__(self, **entries):
404 self.__dict__.update(entries)
425 self.__dict__.update(entries)
405
426
406 def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
427 def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
407 '''Convert log into changesets.'''
428 '''Convert log into changesets.'''
408
429
409 ui.status(_('creating changesets\n'))
430 ui.status(_('creating changesets\n'))
410
431
411 # Merge changesets
432 # Merge changesets
412
433
413 listsort(log, key=lambda x:(x.comment, x.author, x.branch, x.date))
434 listsort(log, key=lambda x:(x.comment, x.author, x.branch, x.date))
414
435
415 changesets = []
436 changesets = []
416 files = {}
437 files = {}
417 c = None
438 c = None
418 for i, e in enumerate(log):
439 for i, e in enumerate(log):
419
440
420 # Check if log entry belongs to the current changeset or not.
441 # Check if log entry belongs to the current changeset or not.
421 if not (c and
442 if not (c and
422 e.comment == c.comment and
443 e.comment == c.comment and
423 e.author == c.author and
444 e.author == c.author and
424 e.branch == c.branch and
445 e.branch == c.branch and
425 ((c.date[0] + c.date[1]) <=
446 ((c.date[0] + c.date[1]) <=
426 (e.date[0] + e.date[1]) <=
447 (e.date[0] + e.date[1]) <=
427 (c.date[0] + c.date[1]) + fuzz) and
448 (c.date[0] + c.date[1]) + fuzz) and
428 e.file not in files):
449 e.file not in files):
429 c = changeset(comment=e.comment, author=e.author,
450 c = changeset(comment=e.comment, author=e.author,
430 branch=e.branch, date=e.date, entries=[])
451 branch=e.branch, date=e.date, entries=[])
431 changesets.append(c)
452 changesets.append(c)
432 files = {}
453 files = {}
433 if len(changesets) % 100 == 0:
454 if len(changesets) % 100 == 0:
434 t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
455 t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
435 ui.status(util.ellipsis(t, 80) + '\n')
456 ui.status(util.ellipsis(t, 80) + '\n')
436
457
437 c.entries.append(e)
458 c.entries.append(e)
438 files[e.file] = True
459 files[e.file] = True
439 c.date = e.date # changeset date is date of latest commit in it
460 c.date = e.date # changeset date is date of latest commit in it
440
461
462 # Mark synthetic changesets
463
464 for c in changesets:
465 # Synthetic revisions always get their own changeset, because
466 # the log message includes the filename. E.g. if you add file3
467 # and file4 on a branch, you get four log entries and three
468 # changesets:
469 # "File file3 was added on branch ..." (synthetic, 1 entry)
470 # "File file4 was added on branch ..." (synthetic, 1 entry)
471 # "Add file3 and file4 to fix ..." (real, 2 entries)
472 # Hence the check for 1 entry here.
473 c.synthetic = (len(c.entries) == 1 and c.entries[0].synthetic)
474
441 # Sort files in each changeset
475 # Sort files in each changeset
442
476
443 for c in changesets:
477 for c in changesets:
444 def pathcompare(l, r):
478 def pathcompare(l, r):
445 'Mimic cvsps sorting order'
479 'Mimic cvsps sorting order'
446 l = l.split('/')
480 l = l.split('/')
447 r = r.split('/')
481 r = r.split('/')
448 nl = len(l)
482 nl = len(l)
449 nr = len(r)
483 nr = len(r)
450 n = min(nl, nr)
484 n = min(nl, nr)
451 for i in range(n):
485 for i in range(n):
452 if i + 1 == nl and nl < nr:
486 if i + 1 == nl and nl < nr:
453 return -1
487 return -1
454 elif i + 1 == nr and nl > nr:
488 elif i + 1 == nr and nl > nr:
455 return +1
489 return +1
456 elif l[i] < r[i]:
490 elif l[i] < r[i]:
457 return -1
491 return -1
458 elif l[i] > r[i]:
492 elif l[i] > r[i]:
459 return +1
493 return +1
460 return 0
494 return 0
461 def entitycompare(l, r):
495 def entitycompare(l, r):
462 return pathcompare(l.file, r.file)
496 return pathcompare(l.file, r.file)
463
497
464 c.entries.sort(entitycompare)
498 c.entries.sort(entitycompare)
465
499
466 # Sort changesets by date
500 # Sort changesets by date
467
501
468 def cscmp(l, r):
502 def cscmp(l, r):
469 d = sum(l.date) - sum(r.date)
503 d = sum(l.date) - sum(r.date)
470 if d:
504 if d:
471 return d
505 return d
472
506
473 # detect vendor branches and initial commits on a branch
507 # detect vendor branches and initial commits on a branch
474 le = {}
508 le = {}
475 for e in l.entries:
509 for e in l.entries:
476 le[e.rcs] = e.revision
510 le[e.rcs] = e.revision
477 re = {}
511 re = {}
478 for e in r.entries:
512 for e in r.entries:
479 re[e.rcs] = e.revision
513 re[e.rcs] = e.revision
480
514
481 d = 0
515 d = 0
482 for e in l.entries:
516 for e in l.entries:
483 if re.get(e.rcs, None) == e.parent:
517 if re.get(e.rcs, None) == e.parent:
484 assert not d
518 assert not d
485 d = 1
519 d = 1
486 break
520 break
487
521
488 for e in r.entries:
522 for e in r.entries:
489 if le.get(e.rcs, None) == e.parent:
523 if le.get(e.rcs, None) == e.parent:
490 assert not d
524 assert not d
491 d = -1
525 d = -1
492 break
526 break
493
527
494 return d
528 return d
495
529
496 changesets.sort(cscmp)
530 changesets.sort(cscmp)
497
531
498 # Collect tags
532 # Collect tags
499
533
500 globaltags = {}
534 globaltags = {}
501 for c in changesets:
535 for c in changesets:
502 tags = {}
536 tags = {}
503 for e in c.entries:
537 for e in c.entries:
504 for tag in e.tags:
538 for tag in e.tags:
505 # remember which is the latest changeset to have this tag
539 # remember which is the latest changeset to have this tag
506 globaltags[tag] = c
540 globaltags[tag] = c
507
541
508 for c in changesets:
542 for c in changesets:
509 tags = {}
543 tags = {}
510 for e in c.entries:
544 for e in c.entries:
511 for tag in e.tags:
545 for tag in e.tags:
512 tags[tag] = True
546 tags[tag] = True
513 # remember tags only if this is the latest changeset to have it
547 # remember tags only if this is the latest changeset to have it
514 c.tags = util.sort([tag for tag in tags if globaltags[tag] is c])
548 c.tags = util.sort([tag for tag in tags if globaltags[tag] is c])
515
549
516 # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
550 # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
517 # by inserting dummy changesets with two parents, and handle
551 # by inserting dummy changesets with two parents, and handle
518 # {{mergefrombranch BRANCHNAME}} by setting two parents.
552 # {{mergefrombranch BRANCHNAME}} by setting two parents.
519
553
520 if mergeto is None:
554 if mergeto is None:
521 mergeto = r'{{mergetobranch ([-\w]+)}}'
555 mergeto = r'{{mergetobranch ([-\w]+)}}'
522 if mergeto:
556 if mergeto:
523 mergeto = re.compile(mergeto)
557 mergeto = re.compile(mergeto)
524
558
525 if mergefrom is None:
559 if mergefrom is None:
526 mergefrom = r'{{mergefrombranch ([-\w]+)}}'
560 mergefrom = r'{{mergefrombranch ([-\w]+)}}'
527 if mergefrom:
561 if mergefrom:
528 mergefrom = re.compile(mergefrom)
562 mergefrom = re.compile(mergefrom)
529
563
530 versions = {} # changeset index where we saw any particular file version
564 versions = {} # changeset index where we saw any particular file version
531 branches = {} # changeset index where we saw a branch
565 branches = {} # changeset index where we saw a branch
532 n = len(changesets)
566 n = len(changesets)
533 i = 0
567 i = 0
534 while i<n:
568 while i<n:
535 c = changesets[i]
569 c = changesets[i]
536
570
537 for f in c.entries:
571 for f in c.entries:
538 versions[(f.rcs, f.revision)] = i
572 versions[(f.rcs, f.revision)] = i
539
573
540 p = None
574 p = None
541 if c.branch in branches:
575 if c.branch in branches:
542 p = branches[c.branch]
576 p = branches[c.branch]
543 else:
577 else:
544 for f in c.entries:
578 for f in c.entries:
545 p = max(p, versions.get((f.rcs, f.parent), None))
579 p = max(p, versions.get((f.rcs, f.parent), None))
546
580
547 c.parents = []
581 c.parents = []
548 if p is not None:
582 if p is not None:
549 c.parents.append(changesets[p])
583 p = changesets[p]
584
585 # Ensure no changeset has a synthetic changeset as a parent.
586 while p.synthetic:
587 assert len(p.parents) <= 1, \
588 _('synthetic changeset cannot have multiple parents')
589 if p.parents:
590 p = p.parents[0]
591 else:
592 p = None
593 break
594
595 if p is not None:
596 c.parents.append(p)
550
597
551 if mergefrom:
598 if mergefrom:
552 m = mergefrom.search(c.comment)
599 m = mergefrom.search(c.comment)
553 if m:
600 if m:
554 m = m.group(1)
601 m = m.group(1)
555 if m == 'HEAD':
602 if m == 'HEAD':
556 m = None
603 m = None
557 if m in branches and c.branch != m:
604 if m in branches and c.branch != m:
558 c.parents.append(changesets[branches[m]])
605 c.parents.append(changesets[branches[m]])
559
606
560 if mergeto:
607 if mergeto:
561 m = mergeto.search(c.comment)
608 m = mergeto.search(c.comment)
562 if m:
609 if m:
563 try:
610 try:
564 m = m.group(1)
611 m = m.group(1)
565 if m == 'HEAD':
612 if m == 'HEAD':
566 m = None
613 m = None
567 except:
614 except:
568 m = None # if no group found then merge to HEAD
615 m = None # if no group found then merge to HEAD
569 if m in branches and c.branch != m:
616 if m in branches and c.branch != m:
570 # insert empty changeset for merge
617 # insert empty changeset for merge
571 cc = changeset(author=c.author, branch=m, date=c.date,
618 cc = changeset(author=c.author, branch=m, date=c.date,
572 comment='convert-repo: CVS merge from branch %s' % c.branch,
619 comment='convert-repo: CVS merge from branch %s' % c.branch,
573 entries=[], tags=[], parents=[changesets[branches[m]], c])
620 entries=[], tags=[], parents=[changesets[branches[m]], c])
574 changesets.insert(i + 1, cc)
621 changesets.insert(i + 1, cc)
575 branches[m] = i + 1
622 branches[m] = i + 1
576
623
577 # adjust our loop counters now we have inserted a new entry
624 # adjust our loop counters now we have inserted a new entry
578 n += 1
625 n += 1
579 i += 2
626 i += 2
580 continue
627 continue
581
628
582 branches[c.branch] = i
629 branches[c.branch] = i
583 i += 1
630 i += 1
584
631
632 # Drop synthetic changesets (safe now that we have ensured no other
633 # changesets can have them as parents).
634 i = 0
635 while i < len(changesets):
636 if changesets[i].synthetic:
637 del changesets[i]
638 else:
639 i += 1
640
585 # Number changesets
641 # Number changesets
586
642
587 for i, c in enumerate(changesets):
643 for i, c in enumerate(changesets):
588 c.id = i + 1
644 c.id = i + 1
589
645
590 ui.status(_('%d changeset entries\n') % len(changesets))
646 ui.status(_('%d changeset entries\n') % len(changesets))
591
647
592 return changesets
648 return changesets
593
649
594
650
595 def debugcvsps(ui, *args, **opts):
651 def debugcvsps(ui, *args, **opts):
596 '''Read CVS rlog for current directory or named path in repository, and
652 '''Read CVS rlog for current directory or named path in repository, and
597 convert the log to changesets based on matching commit log entries and dates.'''
653 convert the log to changesets based on matching commit log entries and dates.'''
598
654
599 if opts["new_cache"]:
655 if opts["new_cache"]:
600 cache = "write"
656 cache = "write"
601 elif opts["update_cache"]:
657 elif opts["update_cache"]:
602 cache = "update"
658 cache = "update"
603 else:
659 else:
604 cache = None
660 cache = None
605
661
606 revisions = opts["revisions"]
662 revisions = opts["revisions"]
607
663
608 try:
664 try:
609 if args:
665 if args:
610 log = []
666 log = []
611 for d in args:
667 for d in args:
612 log += createlog(ui, d, root=opts["root"], cache=cache)
668 log += createlog(ui, d, root=opts["root"], cache=cache)
613 else:
669 else:
614 log = createlog(ui, root=opts["root"], cache=cache)
670 log = createlog(ui, root=opts["root"], cache=cache)
615 except logerror, e:
671 except logerror, e:
616 ui.write("%r\n"%e)
672 ui.write("%r\n"%e)
617 return
673 return
618
674
619 changesets = createchangeset(ui, log, opts["fuzz"])
675 changesets = createchangeset(ui, log, opts["fuzz"])
620 del log
676 del log
621
677
622 # Print changesets (optionally filtered)
678 # Print changesets (optionally filtered)
623
679
624 off = len(revisions)
680 off = len(revisions)
625 branches = {} # latest version number in each branch
681 branches = {} # latest version number in each branch
626 ancestors = {} # parent branch
682 ancestors = {} # parent branch
627 for cs in changesets:
683 for cs in changesets:
628
684
629 if opts["ancestors"]:
685 if opts["ancestors"]:
630 if cs.branch not in branches and cs.parents and cs.parents[0].id:
686 if cs.branch not in branches and cs.parents and cs.parents[0].id:
631 ancestors[cs.branch] = changesets[cs.parents[0].id-1].branch, cs.parents[0].id
687 ancestors[cs.branch] = changesets[cs.parents[0].id-1].branch, cs.parents[0].id
632 branches[cs.branch] = cs.id
688 branches[cs.branch] = cs.id
633
689
634 # limit by branches
690 # limit by branches
635 if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
691 if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
636 continue
692 continue
637
693
638 if not off:
694 if not off:
639 # Note: trailing spaces on several lines here are needed to have
695 # Note: trailing spaces on several lines here are needed to have
640 # bug-for-bug compatibility with cvsps.
696 # bug-for-bug compatibility with cvsps.
641 ui.write('---------------------\n')
697 ui.write('---------------------\n')
642 ui.write('PatchSet %d \n' % cs.id)
698 ui.write('PatchSet %d \n' % cs.id)
643 ui.write('Date: %s\n' % util.datestr(cs.date, '%Y/%m/%d %H:%M:%S %1%2'))
699 ui.write('Date: %s\n' % util.datestr(cs.date, '%Y/%m/%d %H:%M:%S %1%2'))
644 ui.write('Author: %s\n' % cs.author)
700 ui.write('Author: %s\n' % cs.author)
645 ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
701 ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
646 ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags)>1],
702 ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags)>1],
647 ','.join(cs.tags) or '(none)'))
703 ','.join(cs.tags) or '(none)'))
648 if opts["parents"] and cs.parents:
704 if opts["parents"] and cs.parents:
649 if len(cs.parents)>1:
705 if len(cs.parents)>1:
650 ui.write('Parents: %s\n' % (','.join([str(p.id) for p in cs.parents])))
706 ui.write('Parents: %s\n' % (','.join([str(p.id) for p in cs.parents])))
651 else:
707 else:
652 ui.write('Parent: %d\n' % cs.parents[0].id)
708 ui.write('Parent: %d\n' % cs.parents[0].id)
653
709
654 if opts["ancestors"]:
710 if opts["ancestors"]:
655 b = cs.branch
711 b = cs.branch
656 r = []
712 r = []
657 while b:
713 while b:
658 b, c = ancestors[b]
714 b, c = ancestors[b]
659 r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
715 r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
660 if r:
716 if r:
661 ui.write('Ancestors: %s\n' % (','.join(r)))
717 ui.write('Ancestors: %s\n' % (','.join(r)))
662
718
663 ui.write('Log:\n')
719 ui.write('Log:\n')
664 ui.write('%s\n\n' % cs.comment)
720 ui.write('%s\n\n' % cs.comment)
665 ui.write('Members: \n')
721 ui.write('Members: \n')
666 for f in cs.entries:
722 for f in cs.entries:
667 fn = f.file
723 fn = f.file
668 if fn.startswith(opts["prefix"]):
724 if fn.startswith(opts["prefix"]):
669 fn = fn[len(opts["prefix"]):]
725 fn = fn[len(opts["prefix"]):]
670 ui.write('\t%s:%s->%s%s \n' % (fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
726 ui.write('\t%s:%s->%s%s \n' % (fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
671 '.'.join([str(x) for x in f.revision]), ['', '(DEAD)'][f.dead]))
727 '.'.join([str(x) for x in f.revision]), ['', '(DEAD)'][f.dead]))
672 ui.write('\n')
728 ui.write('\n')
673
729
674 # have we seen the start tag?
730 # have we seen the start tag?
675 if revisions and off:
731 if revisions and off:
676 if revisions[0] == str(cs.id) or \
732 if revisions[0] == str(cs.id) or \
677 revisions[0] in cs.tags:
733 revisions[0] in cs.tags:
678 off = False
734 off = False
679
735
680 # see if we reached the end tag
736 # see if we reached the end tag
681 if len(revisions)>1 and not off:
737 if len(revisions)>1 and not off:
682 if revisions[1] == str(cs.id) or \
738 if revisions[1] == str(cs.id) or \
683 revisions[1] in cs.tags:
739 revisions[1] in cs.tags:
684 break
740 break
General Comments 0
You need to be logged in to leave comments. Login now