##// END OF EJS Templates
convert: added cvsnt mergepoint support
Henrik Stuart -
r7956:3e7611a8 default
parent child Browse files
Show More
@@ -0,0 +1,78 b''
#!/bin/bash
# Test the convert extension's builtin cvsps against a CVS repository
# containing cvsnt-style mergepoint metadata.  Skips (exit 80) when no
# cvs binary is available.

"$TESTDIR/hghave" cvs || exit 80

# cvscall: run cvs with -f so the user's ~/.cvsrc cannot affect the test.
cvscall()
{
    cvs -f "$@"
}

# hgcat: print the tip revision of a file from the converted repository.
hgcat()
{
    hg --cwd src-hg cat -r tip "$1"
}

# Quote $HGRCPATH: the harness may place it under a path with spaces.
echo "[extensions]" >> "$HGRCPATH"
echo "convert = " >> "$HGRCPATH"
echo "graphlog = " >> "$HGRCPATH"
echo "[convert]" >> "$HGRCPATH"
echo "cvsps=builtin" >> "$HGRCPATH"

echo % create cvs repository
mkdir cvsmaster
cd cvsmaster
CVSROOT=$(pwd)
export CVSROOT
export CVS_OPTIONS=-f
cd ..

cvscall -q -d "$CVSROOT" init

cvscall -q checkout -d cvsworktmp .
cd cvsworktmp
mkdir foo
cvscall -q add foo | sed -e 's/Directory .* added to the repository//g'
cd foo
echo foo > foo.txt
cvscall -q add foo.txt
cvscall -q ci -m "foo.txt" | sed 's/.*,v.*/checking in/g'

cd ../..
rm -rf cvsworktmp

cvscall -q checkout -d cvswork foo

cd cvswork

# Branch off HEAD and commit twice on MYBRANCH1.
cvscall -q rtag -b -R MYBRANCH1 foo
cvscall -q up -P -r MYBRANCH1
echo bar > foo.txt
cvscall -q ci -m "bar" | sed 's/.*,v.*/checking in/g'
echo baz > foo.txt
cvscall -q ci -m "baz" | sed 's/.*,v.*/checking in/g'

# Branch off MYBRANCH1 and commit on MYBRANCH1_2.
cvscall -q rtag -b -R -r MYBRANCH1 MYBRANCH1_2 foo
cvscall -q up -P -r MYBRANCH1_2

echo bazzie > foo.txt
cvscall -q ci -m "bazzie" | sed 's/.*,v.*/checking in/g'

# Branch off HEAD again, commit, then merge MYBRANCH1 into it.
cvscall -q rtag -b -R MYBRANCH1_1 foo
cvscall -q up -P -r MYBRANCH1_1

echo quux > foo.txt
cvscall -q ci -m "quux" | sed 's/.*,v.*/checking in/g'
cvscall -q up -P -jMYBRANCH1 | sed 's/RCS file: .*,v/merging MYBRANCH1/g'
echo xyzzy > foo.txt
cvscall -q ci -m "merge" | sed 's/.*,v.*/checking in/g'

# Back to HEAD and merge MYBRANCH1_2 into it.
cvscall -q up -P -A

cvscall -q up -P -jMYBRANCH1_2 | sed 's/RCS file: .*,v/merging MYBRANCH1_2/g'
cvscall -q ci -m "merge" | sed 's/.*,v.*/checking in/g'

# Install a fake cvs wrapper that answers 'rlog' with the canned fixture
# (which carries the cvsnt mergepoint fields) and delegates everything
# else to the real cvs binary.
REALCVS=$(command -v cvs)
echo "for x in \$*; do if [ \"\$x\" = \"rlog\" ]; then echo \"RCS file: $CVSROOT/foo/foo.txt,v\"; cat $TESTDIR/test-convert-cvs-builtincvsps-cvsnt-mergepoints.rlog; exit 0; fi; done; $REALCVS \$*" > cvs
chmod +x cvs
PATH=.:${PATH} hg debugcvsps --parents foo | sed -e 's/Author:.*/Author:/' -e 's/Date:.*/Date:/'

cd ..
@@ -0,0 +1,138 b''
1 % create cvs repository
2 U cvsworktmp/CVSROOT/checkoutlist
3 U cvsworktmp/CVSROOT/commitinfo
4 U cvsworktmp/CVSROOT/config
5 U cvsworktmp/CVSROOT/cvswrappers
6 U cvsworktmp/CVSROOT/loginfo
7 U cvsworktmp/CVSROOT/modules
8 U cvsworktmp/CVSROOT/notify
9 U cvsworktmp/CVSROOT/postadmin
10 U cvsworktmp/CVSROOT/postproxy
11 U cvsworktmp/CVSROOT/posttag
12 U cvsworktmp/CVSROOT/postwatch
13 U cvsworktmp/CVSROOT/preproxy
14 U cvsworktmp/CVSROOT/rcsinfo
15 U cvsworktmp/CVSROOT/taginfo
16 U cvsworktmp/CVSROOT/verifymsg
17
18 cvs add: use `cvs commit' to add this file permanently
19 checking in
20 initial revision: 1.1
21 U cvswork/foo.txt
22 checking in
23 new revision: 1.1.2.1; previous revision: 1.1
24 checking in
25 new revision: 1.1.2.2; previous revision: 1.1.2.1
26 checking in
27 new revision: 1.1.2.2.2.1; previous revision: 1.1.2.2
28 U foo.txt
29 checking in
30 new revision: 1.1.4.1; previous revision: 1.1
31 rcsmerge: warning: conflicts during merge
32 merging MYBRANCH1
33 retrieving revision 1.1
34 retrieving revision 1.1.2.2
35 Merging differences between 1.1 and 1.1.2.2 into foo.txt
36 checking in
37 new revision: 1.1.4.2; previous revision: 1.1.4.1
38 U foo.txt
39 merging MYBRANCH1_2
40 retrieving revision 1.1
41 retrieving revision 1.1.2.2.2.1
42 Merging differences between 1.1 and 1.1.2.2.2.1 into foo.txt
43 checking in
44 new revision: 1.2; previous revision: 1.1
45 collecting CVS rlog
46 7 log entries
47 creating changesets
48 7 changeset entries
49 ---------------------
50 PatchSet 1
51 Date:
52 Author:
53 Branch: HEAD
54 Tag: (none)
55 Log:
56 foo.txt
57
58 Members:
59 foo.txt:INITIAL->1.1
60
61 ---------------------
62 PatchSet 2
63 Date:
64 Author:
65 Branch: MYBRANCH1
66 Tag: (none)
67 Parent: 1
68 Log:
69 bar
70
71 Members:
72 foo.txt:1.1->1.1.2.1
73
74 ---------------------
75 PatchSet 3
76 Date:
77 Author:
78 Branch: MYBRANCH1
79 Tag: (none)
80 Parent: 2
81 Log:
82 baz
83
84 Members:
85 foo.txt:1.1.2.1->1.1.2.2
86
87 ---------------------
88 PatchSet 4
89 Date:
90 Author:
91 Branch: MYBRANCH1_1
92 Tag: (none)
93 Parent: 1
94 Log:
95 quux
96
97 Members:
98 foo.txt:1.1->1.1.4.1
99
100 ---------------------
101 PatchSet 5
102 Date:
103 Author:
104 Branch: MYBRANCH1_2
105 Tag: (none)
106 Parent: 3
107 Log:
108 bazzie
109
110 Members:
111 foo.txt:1.1.2.2->1.1.2.2.2.1
112
113 ---------------------
114 PatchSet 6
115 Date:
116 Author:
117 Branch: HEAD
118 Tag: (none)
119 Parents: 1,5
120 Log:
121 merge
122
123 Members:
124 foo.txt:1.1->1.2
125
126 ---------------------
127 PatchSet 7
128 Date:
129 Author:
130 Branch: MYBRANCH1_1
131 Tag: (none)
132 Parents: 4,3
133 Log:
134 merge
135
136 Members:
137 foo.txt:1.1.4.1->1.1.4.2
138
@@ -0,0 +1,42 b''
1 head: 1.2
2 branch:
3 locks: strict
4 access list:
5 symbolic names:
6 MYBRANCH1_2: 1.1.2.2.0.2
7 MYBRANCH1_1: 1.1.0.4
8 MYBRANCH1: 1.1.0.2
9 keyword substitution: kv
10 total revisions: 8; selected revisions: 8
11 description:
12 ----------------------------
13 revision 1.2
14 date: 2009/04/02 07:00:32; author: user; state: Exp; lines: +1 -1; kopt: kv; commitid: 14d449d462903487; mergepoint: 1.1.2.2.2.1; filename: foo.txt;
15 merge
16 ----------------------------
17 revision 1.1
18 date: 2009/04/02 06:50:43; author: user; state: Exp; kopt: kv; commitid: 17ac49d460432d04; filename: foo.txt;
19 branches: 1.1.2; 1.1.4;
20 foo.txt
21 ----------------------------
22 revision 1.1.4.2
23 date: 2009/04/02 07:02:51; author: user; state: Exp; lines: +1 -0; kopt: kv; commitid: 170049d4631b364d; mergepoint: 1.1.2.2; filename: foo.txt;
24 merge
25 ----------------------------
26 revision 1.1.4.1
27 date: 2009/04/02 06:53:42; author: user; state: Exp; lines: +1 -1; kopt: kv; commitid: dc849d460f52f49; filename: foo.txt;
28 quux
29 ----------------------------
30 revision 1.1.2.2
31 date: 2009/04/02 06:53:20; author: user; state: Exp; lines: +1 -1; kopt: kv; commitid: 8ec49d460e02f04; filename: foo.txt;
32 branches: 1.1.2.2.2;
33 baz
34 ----------------------------
35 revision 1.1.2.1
36 date: 2009/04/02 06:52:38; author: user; state: Exp; lines: +1 -1; kopt: kv; commitid: d5049d460b62e7b; filename: foo.txt;
37 bar
38 ----------------------------
39 revision 1.1.2.2.2.1
40 date: 2009/04/02 06:55:42; author: user; state: Exp; lines: +1 -1; kopt: kv; commitid: 11c849d4616d30d1; filename: foo.txt;
41 bazzie
42 =============================================================================
@@ -1,741 +1,764 b''
1 #
1 #
2 # Mercurial built-in replacement for cvsps.
2 # Mercurial built-in replacement for cvsps.
3 #
3 #
4 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
4 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
5 #
5 #
6 # This software may be used and distributed according to the terms
6 # This software may be used and distributed according to the terms
7 # of the GNU General Public License, incorporated herein by reference.
7 # of the GNU General Public License, incorporated herein by reference.
8
8
9 import os
9 import os
10 import re
10 import re
11 import cPickle as pickle
11 import cPickle as pickle
12 from mercurial import util
12 from mercurial import util
13 from mercurial.i18n import _
13 from mercurial.i18n import _
14
14
def listsort(list, key):
    """Sort ``list`` in place by ``key``; works even on Python 2.3,
    whose sort() lacks the ``key`` argument."""
    try:
        # Modern interpreters: sort() accepts a key function directly.
        list.sort(key=key)
    except TypeError:
        # Python 2.3: sort() only takes a comparison function, so
        # emulate key-based ordering by comparing extracted keys.
        list.sort(lambda l, r: cmp(key(l), key(r)))
21
21
class logentry(object):
    '''A single revision of one file, as parsed from CVS (r)log output.

    Attributes set by the parser:
    .author    - author name as CVS knows it
    .branch    - name of branch this revision is on
    .branches  - revision tuple of branches starting at this revision
    .comment   - commit message
    .date      - the commit date as a (time, tz) tuple
    .dead      - true if file revision is dead
    .file      - name of file
    .lines     - a tuple (+lines, -lines) or None
    .parent    - previous revision of this entry
    .rcs       - name of file as returned from CVS
    .revision  - revision number as tuple
    .tags      - list of tags on the file
    .synthetic - is this a synthetic "file ... added on ..." revision?
    .mergepoint- the branch that has been merged from (if present in rlog output)
    '''
    def __init__(self, **entries):
        # Any keyword arguments become instance attributes verbatim.
        self.__dict__.update(entries)
40
41
class logerror(Exception):
    """Raised for fatal problems while collecting or parsing CVS logs."""
    pass
43
44
def getrepopath(cvspath):
    """Return the repository path from a CVS path.

    >>> getrepopath('/foo/bar')
    '/foo/bar'
    >>> getrepopath('c:/foo/bar')
    'c:/foo/bar'
    >>> getrepopath(':pserver:10/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:10c:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:c:/foo/bar')
    'c:/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
    'c:/foo/bar'
    """
    # Per the CVS manual, a CVS path looks like:
    #   [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
    # Windows drive letters ('c:') complicate splitting on ':'.  We rely
    # on drive letters being exactly one character, while every other
    # CVS component before the repository path is at least two.
    pieces = cvspath.split(':')
    if len(pieces) == 1:
        # No colon at all: the string is already a bare repository path.
        return pieces[0]
    last, prev = pieces[-1], pieces[-2]
    if len(prev) > 1:
        # The preceding component is host/user/method, not a drive
        # letter; a port number may have fused onto the front of the
        # path, so strip leading digits.  (A port immediately followed
        # by a drive letter would be ambiguous; assumed never to occur.)
        return last.lstrip('0123456789')
    # Single-character component: treat it as a Windows drive letter.
    return prev + ':' + last
82
83
83 def createlog(ui, directory=None, root="", rlog=True, cache=None):
84 def createlog(ui, directory=None, root="", rlog=True, cache=None):
84 '''Collect the CVS rlog'''
85 '''Collect the CVS rlog'''
85
86
86 # Because we store many duplicate commit log messages, reusing strings
87 # Because we store many duplicate commit log messages, reusing strings
87 # saves a lot of memory and pickle storage space.
88 # saves a lot of memory and pickle storage space.
88 _scache = {}
89 _scache = {}
89 def scache(s):
90 def scache(s):
90 "return a shared version of a string"
91 "return a shared version of a string"
91 return _scache.setdefault(s, s)
92 return _scache.setdefault(s, s)
92
93
93 ui.status(_('collecting CVS rlog\n'))
94 ui.status(_('collecting CVS rlog\n'))
94
95
95 log = [] # list of logentry objects containing the CVS state
96 log = [] # list of logentry objects containing the CVS state
96
97
97 # patterns to match in CVS (r)log output, by state of use
98 # patterns to match in CVS (r)log output, by state of use
98 re_00 = re.compile('RCS file: (.+)$')
99 re_00 = re.compile('RCS file: (.+)$')
99 re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
100 re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
100 re_02 = re.compile('cvs (r?log|server): (.+)\n$')
101 re_02 = re.compile('cvs (r?log|server): (.+)\n$')
101 re_03 = re.compile("(Cannot access.+CVSROOT)|(can't create temporary directory.+)$")
102 re_03 = re.compile("(Cannot access.+CVSROOT)|(can't create temporary directory.+)$")
102 re_10 = re.compile('Working file: (.+)$')
103 re_10 = re.compile('Working file: (.+)$')
103 re_20 = re.compile('symbolic names:')
104 re_20 = re.compile('symbolic names:')
104 re_30 = re.compile('\t(.+): ([\\d.]+)$')
105 re_30 = re.compile('\t(.+): ([\\d.]+)$')
105 re_31 = re.compile('----------------------------$')
106 re_31 = re.compile('----------------------------$')
106 re_32 = re.compile('=============================================================================$')
107 re_32 = re.compile('=============================================================================$')
107 re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
108 re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
108 re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?')
109 re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?(.*mergepoint:\s+([^;]+);)?')
109 re_70 = re.compile('branches: (.+);$')
110 re_70 = re.compile('branches: (.+);$')
110
111
111 file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')
112 file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')
112
113
113 prefix = '' # leading path to strip of what we get from CVS
114 prefix = '' # leading path to strip of what we get from CVS
114
115
115 if directory is None:
116 if directory is None:
116 # Current working directory
117 # Current working directory
117
118
118 # Get the real directory in the repository
119 # Get the real directory in the repository
119 try:
120 try:
120 prefix = file(os.path.join('CVS','Repository')).read().strip()
121 prefix = file(os.path.join('CVS','Repository')).read().strip()
121 if prefix == ".":
122 if prefix == ".":
122 prefix = ""
123 prefix = ""
123 directory = prefix
124 directory = prefix
124 except IOError:
125 except IOError:
125 raise logerror('Not a CVS sandbox')
126 raise logerror('Not a CVS sandbox')
126
127
127 if prefix and not prefix.endswith(os.sep):
128 if prefix and not prefix.endswith(os.sep):
128 prefix += os.sep
129 prefix += os.sep
129
130
130 # Use the Root file in the sandbox, if it exists
131 # Use the Root file in the sandbox, if it exists
131 try:
132 try:
132 root = file(os.path.join('CVS','Root')).read().strip()
133 root = file(os.path.join('CVS','Root')).read().strip()
133 except IOError:
134 except IOError:
134 pass
135 pass
135
136
136 if not root:
137 if not root:
137 root = os.environ.get('CVSROOT', '')
138 root = os.environ.get('CVSROOT', '')
138
139
139 # read log cache if one exists
140 # read log cache if one exists
140 oldlog = []
141 oldlog = []
141 date = None
142 date = None
142
143
143 if cache:
144 if cache:
144 cachedir = os.path.expanduser('~/.hg.cvsps')
145 cachedir = os.path.expanduser('~/.hg.cvsps')
145 if not os.path.exists(cachedir):
146 if not os.path.exists(cachedir):
146 os.mkdir(cachedir)
147 os.mkdir(cachedir)
147
148
148 # The cvsps cache pickle needs a uniquified name, based on the
149 # The cvsps cache pickle needs a uniquified name, based on the
149 # repository location. The address may have all sort of nasties
150 # repository location. The address may have all sort of nasties
150 # in it, slashes, colons and such. So here we take just the
151 # in it, slashes, colons and such. So here we take just the
151 # alphanumerics, concatenated in a way that does not mix up the
152 # alphanumerics, concatenated in a way that does not mix up the
152 # various components, so that
153 # various components, so that
153 # :pserver:user@server:/path
154 # :pserver:user@server:/path
154 # and
155 # and
155 # /pserver/user/server/path
156 # /pserver/user/server/path
156 # are mapped to different cache file names.
157 # are mapped to different cache file names.
157 cachefile = root.split(":") + [directory, "cache"]
158 cachefile = root.split(":") + [directory, "cache"]
158 cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
159 cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
159 cachefile = os.path.join(cachedir,
160 cachefile = os.path.join(cachedir,
160 '.'.join([s for s in cachefile if s]))
161 '.'.join([s for s in cachefile if s]))
161
162
162 if cache == 'update':
163 if cache == 'update':
163 try:
164 try:
164 ui.note(_('reading cvs log cache %s\n') % cachefile)
165 ui.note(_('reading cvs log cache %s\n') % cachefile)
165 oldlog = pickle.load(file(cachefile))
166 oldlog = pickle.load(file(cachefile))
166 ui.note(_('cache has %d log entries\n') % len(oldlog))
167 ui.note(_('cache has %d log entries\n') % len(oldlog))
167 except Exception, e:
168 except Exception, e:
168 ui.note(_('error reading cache: %r\n') % e)
169 ui.note(_('error reading cache: %r\n') % e)
169
170
170 if oldlog:
171 if oldlog:
171 date = oldlog[-1].date # last commit date as a (time,tz) tuple
172 date = oldlog[-1].date # last commit date as a (time,tz) tuple
172 date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
173 date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
173
174
174 # build the CVS commandline
175 # build the CVS commandline
175 cmd = ['cvs', '-q']
176 cmd = ['cvs', '-q']
176 if root:
177 if root:
177 cmd.append('-d%s' % root)
178 cmd.append('-d%s' % root)
178 p = util.normpath(getrepopath(root))
179 p = util.normpath(getrepopath(root))
179 if not p.endswith('/'):
180 if not p.endswith('/'):
180 p += '/'
181 p += '/'
181 prefix = p + util.normpath(prefix)
182 prefix = p + util.normpath(prefix)
182 cmd.append(['log', 'rlog'][rlog])
183 cmd.append(['log', 'rlog'][rlog])
183 if date:
184 if date:
184 # no space between option and date string
185 # no space between option and date string
185 cmd.append('-d>%s' % date)
186 cmd.append('-d>%s' % date)
186 cmd.append(directory)
187 cmd.append(directory)
187
188
188 # state machine begins here
189 # state machine begins here
189 tags = {} # dictionary of revisions on current file with their tags
190 tags = {} # dictionary of revisions on current file with their tags
191 branchmap = {} # mapping between branch names and revision numbers
190 state = 0
192 state = 0
191 store = False # set when a new record can be appended
193 store = False # set when a new record can be appended
192
194
193 cmd = [util.shellquote(arg) for arg in cmd]
195 cmd = [util.shellquote(arg) for arg in cmd]
194 ui.note(_("running %s\n") % (' '.join(cmd)))
196 ui.note(_("running %s\n") % (' '.join(cmd)))
195 ui.debug(_("prefix=%r directory=%r root=%r\n") % (prefix, directory, root))
197 ui.debug(_("prefix=%r directory=%r root=%r\n") % (prefix, directory, root))
196
198
197 pfp = util.popen(' '.join(cmd))
199 pfp = util.popen(' '.join(cmd))
198 peek = pfp.readline()
200 peek = pfp.readline()
199 while True:
201 while True:
200 line = peek
202 line = peek
201 if line == '':
203 if line == '':
202 break
204 break
203 peek = pfp.readline()
205 peek = pfp.readline()
204 if line.endswith('\n'):
206 if line.endswith('\n'):
205 line = line[:-1]
207 line = line[:-1]
206 #ui.debug('state=%d line=%r\n' % (state, line))
208 #ui.debug('state=%d line=%r\n' % (state, line))
207
209
208 if state == 0:
210 if state == 0:
209 # initial state, consume input until we see 'RCS file'
211 # initial state, consume input until we see 'RCS file'
210 match = re_00.match(line)
212 match = re_00.match(line)
211 if match:
213 if match:
212 rcs = match.group(1)
214 rcs = match.group(1)
213 tags = {}
215 tags = {}
214 if rlog:
216 if rlog:
215 filename = util.normpath(rcs[:-2])
217 filename = util.normpath(rcs[:-2])
216 if filename.startswith(prefix):
218 if filename.startswith(prefix):
217 filename = filename[len(prefix):]
219 filename = filename[len(prefix):]
218 if filename.startswith('/'):
220 if filename.startswith('/'):
219 filename = filename[1:]
221 filename = filename[1:]
220 if filename.startswith('Attic/'):
222 if filename.startswith('Attic/'):
221 filename = filename[6:]
223 filename = filename[6:]
222 else:
224 else:
223 filename = filename.replace('/Attic/', '/')
225 filename = filename.replace('/Attic/', '/')
224 state = 2
226 state = 2
225 continue
227 continue
226 state = 1
228 state = 1
227 continue
229 continue
228 match = re_01.match(line)
230 match = re_01.match(line)
229 if match:
231 if match:
230 raise Exception(match.group(1))
232 raise Exception(match.group(1))
231 match = re_02.match(line)
233 match = re_02.match(line)
232 if match:
234 if match:
233 raise Exception(match.group(2))
235 raise Exception(match.group(2))
234 if re_03.match(line):
236 if re_03.match(line):
235 raise Exception(line)
237 raise Exception(line)
236
238
237 elif state == 1:
239 elif state == 1:
238 # expect 'Working file' (only when using log instead of rlog)
240 # expect 'Working file' (only when using log instead of rlog)
239 match = re_10.match(line)
241 match = re_10.match(line)
240 assert match, _('RCS file must be followed by working file')
242 assert match, _('RCS file must be followed by working file')
241 filename = util.normpath(match.group(1))
243 filename = util.normpath(match.group(1))
242 state = 2
244 state = 2
243
245
244 elif state == 2:
246 elif state == 2:
245 # expect 'symbolic names'
247 # expect 'symbolic names'
246 if re_20.match(line):
248 if re_20.match(line):
249 branchmap = {}
247 state = 3
250 state = 3
248
251
249 elif state == 3:
252 elif state == 3:
250 # read the symbolic names and store as tags
253 # read the symbolic names and store as tags
251 match = re_30.match(line)
254 match = re_30.match(line)
252 if match:
255 if match:
253 rev = [int(x) for x in match.group(2).split('.')]
256 rev = [int(x) for x in match.group(2).split('.')]
254
257
255 # Convert magic branch number to an odd-numbered one
258 # Convert magic branch number to an odd-numbered one
256 revn = len(rev)
259 revn = len(rev)
257 if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
260 if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
258 rev = rev[:-2] + rev[-1:]
261 rev = rev[:-2] + rev[-1:]
259 rev = tuple(rev)
262 rev = tuple(rev)
260
263
261 if rev not in tags:
264 if rev not in tags:
262 tags[rev] = []
265 tags[rev] = []
263 tags[rev].append(match.group(1))
266 tags[rev].append(match.group(1))
267 branchmap[match.group(1)] = match.group(2)
264
268
265 elif re_31.match(line):
269 elif re_31.match(line):
266 state = 5
270 state = 5
267 elif re_32.match(line):
271 elif re_32.match(line):
268 state = 0
272 state = 0
269
273
270 elif state == 4:
274 elif state == 4:
271 # expecting '------' separator before first revision
275 # expecting '------' separator before first revision
272 if re_31.match(line):
276 if re_31.match(line):
273 state = 5
277 state = 5
274 else:
278 else:
275 assert not re_32.match(line), _('must have at least some revisions')
279 assert not re_32.match(line), _('must have at least some revisions')
276
280
277 elif state == 5:
281 elif state == 5:
278 # expecting revision number and possibly (ignored) lock indication
282 # expecting revision number and possibly (ignored) lock indication
279 # we create the logentry here from values stored in states 0 to 4,
283 # we create the logentry here from values stored in states 0 to 4,
280 # as this state is re-entered for subsequent revisions of a file.
284 # as this state is re-entered for subsequent revisions of a file.
281 match = re_50.match(line)
285 match = re_50.match(line)
282 assert match, _('expected revision number')
286 assert match, _('expected revision number')
283 e = logentry(rcs=scache(rcs), file=scache(filename),
287 e = logentry(rcs=scache(rcs), file=scache(filename),
284 revision=tuple([int(x) for x in match.group(1).split('.')]),
288 revision=tuple([int(x) for x in match.group(1).split('.')]),
285 branches=[], parent=None,
289 branches=[], parent=None,
286 synthetic=False)
290 synthetic=False)
287 state = 6
291 state = 6
288
292
289 elif state == 6:
293 elif state == 6:
290 # expecting date, author, state, lines changed
294 # expecting date, author, state, lines changed
291 match = re_60.match(line)
295 match = re_60.match(line)
292 assert match, _('revision must be followed by date line')
296 assert match, _('revision must be followed by date line')
293 d = match.group(1)
297 d = match.group(1)
294 if d[2] == '/':
298 if d[2] == '/':
295 # Y2K
299 # Y2K
296 d = '19' + d
300 d = '19' + d
297
301
298 if len(d.split()) != 3:
302 if len(d.split()) != 3:
299 # cvs log dates always in GMT
303 # cvs log dates always in GMT
300 d = d + ' UTC'
304 d = d + ' UTC'
301 e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S'])
305 e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S'])
302 e.author = scache(match.group(2))
306 e.author = scache(match.group(2))
303 e.dead = match.group(3).lower() == 'dead'
307 e.dead = match.group(3).lower() == 'dead'
304
308
305 if match.group(5):
309 if match.group(5):
306 if match.group(6):
310 if match.group(6):
307 e.lines = (int(match.group(5)), int(match.group(6)))
311 e.lines = (int(match.group(5)), int(match.group(6)))
308 else:
312 else:
309 e.lines = (int(match.group(5)), 0)
313 e.lines = (int(match.group(5)), 0)
310 elif match.group(6):
314 elif match.group(6):
311 e.lines = (0, int(match.group(6)))
315 e.lines = (0, int(match.group(6)))
312 else:
316 else:
313 e.lines = None
317 e.lines = None
318
319 if match.group(7): # cvsnt mergepoint
320 myrev = match.group(8).split('.')
321 if len(myrev) == 2: # head
322 e.mergepoint = 'HEAD'
323 else:
324 myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
325 branches = [b for b in branchmap if branchmap[b] == myrev]
326 assert len(branches) == 1, 'unknown branch: %s' % e.mergepoint
327 e.mergepoint = branches[0]
328 else:
329 e.mergepoint = None
314 e.comment = []
330 e.comment = []
315 state = 7
331 state = 7
316
332
317 elif state == 7:
333 elif state == 7:
318 # read the revision numbers of branches that start at this revision
334 # read the revision numbers of branches that start at this revision
319 # or store the commit log message otherwise
335 # or store the commit log message otherwise
320 m = re_70.match(line)
336 m = re_70.match(line)
321 if m:
337 if m:
322 e.branches = [tuple([int(y) for y in x.strip().split('.')])
338 e.branches = [tuple([int(y) for y in x.strip().split('.')])
323 for x in m.group(1).split(';')]
339 for x in m.group(1).split(';')]
324 state = 8
340 state = 8
325 elif re_31.match(line) and re_50.match(peek):
341 elif re_31.match(line) and re_50.match(peek):
326 state = 5
342 state = 5
327 store = True
343 store = True
328 elif re_32.match(line):
344 elif re_32.match(line):
329 state = 0
345 state = 0
330 store = True
346 store = True
331 else:
347 else:
332 e.comment.append(line)
348 e.comment.append(line)
333
349
334 elif state == 8:
350 elif state == 8:
335 # store commit log message
351 # store commit log message
336 if re_31.match(line):
352 if re_31.match(line):
337 state = 5
353 state = 5
338 store = True
354 store = True
339 elif re_32.match(line):
355 elif re_32.match(line):
340 state = 0
356 state = 0
341 store = True
357 store = True
342 else:
358 else:
343 e.comment.append(line)
359 e.comment.append(line)
344
360
345 # When a file is added on a branch B1, CVS creates a synthetic
361 # When a file is added on a branch B1, CVS creates a synthetic
346 # dead trunk revision 1.1 so that the branch has a root.
362 # dead trunk revision 1.1 so that the branch has a root.
347 # Likewise, if you merge such a file to a later branch B2 (one
363 # Likewise, if you merge such a file to a later branch B2 (one
348 # that already existed when the file was added on B1), CVS
364 # that already existed when the file was added on B1), CVS
349 # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
365 # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
350 # these revisions now, but mark them synthetic so
366 # these revisions now, but mark them synthetic so
351 # createchangeset() can take care of them.
367 # createchangeset() can take care of them.
352 if (store and
368 if (store and
353 e.dead and
369 e.dead and
354 e.revision[-1] == 1 and # 1.1 or 1.1.x.1
370 e.revision[-1] == 1 and # 1.1 or 1.1.x.1
355 len(e.comment) == 1 and
371 len(e.comment) == 1 and
356 file_added_re.match(e.comment[0])):
372 file_added_re.match(e.comment[0])):
357 ui.debug(_('found synthetic rev in %s: %r\n')
373 ui.debug(_('found synthetic rev in %s: %r\n')
358 % (e.rcs, e.comment[0]))
374 % (e.rcs, e.comment[0]))
359 e.synthetic = True
375 e.synthetic = True
360
376
361 if store:
377 if store:
362 # clean up the results and save in the log.
378 # clean up the results and save in the log.
363 store = False
379 store = False
364 e.tags = util.sort([scache(x) for x in tags.get(e.revision, [])])
380 e.tags = util.sort([scache(x) for x in tags.get(e.revision, [])])
365 e.comment = scache('\n'.join(e.comment))
381 e.comment = scache('\n'.join(e.comment))
366
382
367 revn = len(e.revision)
383 revn = len(e.revision)
368 if revn > 3 and (revn % 2) == 0:
384 if revn > 3 and (revn % 2) == 0:
369 e.branch = tags.get(e.revision[:-1], [None])[0]
385 e.branch = tags.get(e.revision[:-1], [None])[0]
370 else:
386 else:
371 e.branch = None
387 e.branch = None
372
388
373 log.append(e)
389 log.append(e)
374
390
375 if len(log) % 100 == 0:
391 if len(log) % 100 == 0:
376 ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')
392 ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')
377
393
378 listsort(log, key=lambda x:(x.rcs, x.revision))
394 listsort(log, key=lambda x:(x.rcs, x.revision))
379
395
380 # find parent revisions of individual files
396 # find parent revisions of individual files
381 versions = {}
397 versions = {}
382 for e in log:
398 for e in log:
383 branch = e.revision[:-1]
399 branch = e.revision[:-1]
384 p = versions.get((e.rcs, branch), None)
400 p = versions.get((e.rcs, branch), None)
385 if p is None:
401 if p is None:
386 p = e.revision[:-2]
402 p = e.revision[:-2]
387 e.parent = p
403 e.parent = p
388 versions[(e.rcs, branch)] = e.revision
404 versions[(e.rcs, branch)] = e.revision
389
405
390 # update the log cache
406 # update the log cache
391 if cache:
407 if cache:
392 if log:
408 if log:
393 # join up the old and new logs
409 # join up the old and new logs
394 listsort(log, key=lambda x:x.date)
410 listsort(log, key=lambda x:x.date)
395
411
396 if oldlog and oldlog[-1].date >= log[0].date:
412 if oldlog and oldlog[-1].date >= log[0].date:
397 raise logerror('Log cache overlaps with new log entries,'
413 raise logerror('Log cache overlaps with new log entries,'
398 ' re-run without cache.')
414 ' re-run without cache.')
399
415
400 log = oldlog + log
416 log = oldlog + log
401
417
402 # write the new cachefile
418 # write the new cachefile
403 ui.note(_('writing cvs log cache %s\n') % cachefile)
419 ui.note(_('writing cvs log cache %s\n') % cachefile)
404 pickle.dump(log, file(cachefile, 'w'))
420 pickle.dump(log, file(cachefile, 'w'))
405 else:
421 else:
406 log = oldlog
422 log = oldlog
407
423
408 ui.status(_('%d log entries\n') % len(log))
424 ui.status(_('%d log entries\n') % len(log))
409
425
410 return log
426 return log
411
427
412
428
413 class changeset(object):
429 class changeset(object):
414 '''Class changeset has the following attributes:
430 '''Class changeset has the following attributes:
415 .author - author name as CVS knows it
431 .author - author name as CVS knows it
416 .branch - name of branch this changeset is on, or None
432 .branch - name of branch this changeset is on, or None
417 .comment - commit message
433 .comment - commit message
418 .date - the commit date as a (time,tz) tuple
434 .date - the commit date as a (time,tz) tuple
419 .entries - list of logentry objects in this changeset
435 .entries - list of logentry objects in this changeset
420 .parents - list of one or two parent changesets
436 .parents - list of one or two parent changesets
421 .tags - list of tags on this changeset
437 .tags - list of tags on this changeset
422 .synthetic - from synthetic revision "file ... added on branch ..."
438 .synthetic - from synthetic revision "file ... added on branch ..."
439 .mergepoint- the branch that has been merged from (if present in rlog output)
423 '''
440 '''
424 def __init__(self, **entries):
441 def __init__(self, **entries):
425 self.__dict__.update(entries)
442 self.__dict__.update(entries)
426
443
427 def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
444 def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
428 '''Convert log into changesets.'''
445 '''Convert log into changesets.'''
429
446
430 ui.status(_('creating changesets\n'))
447 ui.status(_('creating changesets\n'))
431
448
432 # Merge changesets
449 # Merge changesets
433
450
434 listsort(log, key=lambda x:(x.comment, x.author, x.branch, x.date))
451 listsort(log, key=lambda x:(x.comment, x.author, x.branch, x.date))
435
452
436 changesets = []
453 changesets = []
437 files = {}
454 files = {}
438 c = None
455 c = None
439 for i, e in enumerate(log):
456 for i, e in enumerate(log):
440
457
441 # Check if log entry belongs to the current changeset or not.
458 # Check if log entry belongs to the current changeset or not.
442 if not (c and
459 if not (c and
443 e.comment == c.comment and
460 e.comment == c.comment and
444 e.author == c.author and
461 e.author == c.author and
445 e.branch == c.branch and
462 e.branch == c.branch and
446 ((c.date[0] + c.date[1]) <=
463 ((c.date[0] + c.date[1]) <=
447 (e.date[0] + e.date[1]) <=
464 (e.date[0] + e.date[1]) <=
448 (c.date[0] + c.date[1]) + fuzz) and
465 (c.date[0] + c.date[1]) + fuzz) and
449 e.file not in files):
466 e.file not in files):
450 c = changeset(comment=e.comment, author=e.author,
467 c = changeset(comment=e.comment, author=e.author,
451 branch=e.branch, date=e.date, entries=[])
468 branch=e.branch, date=e.date, entries=[],
469 mergepoint=e.mergepoint)
452 changesets.append(c)
470 changesets.append(c)
453 files = {}
471 files = {}
454 if len(changesets) % 100 == 0:
472 if len(changesets) % 100 == 0:
455 t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
473 t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
456 ui.status(util.ellipsis(t, 80) + '\n')
474 ui.status(util.ellipsis(t, 80) + '\n')
457
475
458 c.entries.append(e)
476 c.entries.append(e)
459 files[e.file] = True
477 files[e.file] = True
460 c.date = e.date # changeset date is date of latest commit in it
478 c.date = e.date # changeset date is date of latest commit in it
461
479
462 # Mark synthetic changesets
480 # Mark synthetic changesets
463
481
464 for c in changesets:
482 for c in changesets:
465 # Synthetic revisions always get their own changeset, because
483 # Synthetic revisions always get their own changeset, because
466 # the log message includes the filename. E.g. if you add file3
484 # the log message includes the filename. E.g. if you add file3
467 # and file4 on a branch, you get four log entries and three
485 # and file4 on a branch, you get four log entries and three
468 # changesets:
486 # changesets:
469 # "File file3 was added on branch ..." (synthetic, 1 entry)
487 # "File file3 was added on branch ..." (synthetic, 1 entry)
470 # "File file4 was added on branch ..." (synthetic, 1 entry)
488 # "File file4 was added on branch ..." (synthetic, 1 entry)
471 # "Add file3 and file4 to fix ..." (real, 2 entries)
489 # "Add file3 and file4 to fix ..." (real, 2 entries)
472 # Hence the check for 1 entry here.
490 # Hence the check for 1 entry here.
473 c.synthetic = (len(c.entries) == 1 and c.entries[0].synthetic)
491 c.synthetic = (len(c.entries) == 1 and c.entries[0].synthetic)
474
492
475 # Sort files in each changeset
493 # Sort files in each changeset
476
494
477 for c in changesets:
495 for c in changesets:
478 def pathcompare(l, r):
496 def pathcompare(l, r):
479 'Mimic cvsps sorting order'
497 'Mimic cvsps sorting order'
480 l = l.split('/')
498 l = l.split('/')
481 r = r.split('/')
499 r = r.split('/')
482 nl = len(l)
500 nl = len(l)
483 nr = len(r)
501 nr = len(r)
484 n = min(nl, nr)
502 n = min(nl, nr)
485 for i in range(n):
503 for i in range(n):
486 if i + 1 == nl and nl < nr:
504 if i + 1 == nl and nl < nr:
487 return -1
505 return -1
488 elif i + 1 == nr and nl > nr:
506 elif i + 1 == nr and nl > nr:
489 return +1
507 return +1
490 elif l[i] < r[i]:
508 elif l[i] < r[i]:
491 return -1
509 return -1
492 elif l[i] > r[i]:
510 elif l[i] > r[i]:
493 return +1
511 return +1
494 return 0
512 return 0
495 def entitycompare(l, r):
513 def entitycompare(l, r):
496 return pathcompare(l.file, r.file)
514 return pathcompare(l.file, r.file)
497
515
498 c.entries.sort(entitycompare)
516 c.entries.sort(entitycompare)
499
517
500 # Sort changesets by date
518 # Sort changesets by date
501
519
502 def cscmp(l, r):
520 def cscmp(l, r):
503 d = sum(l.date) - sum(r.date)
521 d = sum(l.date) - sum(r.date)
504 if d:
522 if d:
505 return d
523 return d
506
524
507 # detect vendor branches and initial commits on a branch
525 # detect vendor branches and initial commits on a branch
508 le = {}
526 le = {}
509 for e in l.entries:
527 for e in l.entries:
510 le[e.rcs] = e.revision
528 le[e.rcs] = e.revision
511 re = {}
529 re = {}
512 for e in r.entries:
530 for e in r.entries:
513 re[e.rcs] = e.revision
531 re[e.rcs] = e.revision
514
532
515 d = 0
533 d = 0
516 for e in l.entries:
534 for e in l.entries:
517 if re.get(e.rcs, None) == e.parent:
535 if re.get(e.rcs, None) == e.parent:
518 assert not d
536 assert not d
519 d = 1
537 d = 1
520 break
538 break
521
539
522 for e in r.entries:
540 for e in r.entries:
523 if le.get(e.rcs, None) == e.parent:
541 if le.get(e.rcs, None) == e.parent:
524 assert not d
542 assert not d
525 d = -1
543 d = -1
526 break
544 break
527
545
528 return d
546 return d
529
547
530 changesets.sort(cscmp)
548 changesets.sort(cscmp)
531
549
532 # Collect tags
550 # Collect tags
533
551
534 globaltags = {}
552 globaltags = {}
535 for c in changesets:
553 for c in changesets:
536 tags = {}
554 tags = {}
537 for e in c.entries:
555 for e in c.entries:
538 for tag in e.tags:
556 for tag in e.tags:
539 # remember which is the latest changeset to have this tag
557 # remember which is the latest changeset to have this tag
540 globaltags[tag] = c
558 globaltags[tag] = c
541
559
542 for c in changesets:
560 for c in changesets:
543 tags = {}
561 tags = {}
544 for e in c.entries:
562 for e in c.entries:
545 for tag in e.tags:
563 for tag in e.tags:
546 tags[tag] = True
564 tags[tag] = True
547 # remember tags only if this is the latest changeset to have it
565 # remember tags only if this is the latest changeset to have it
548 c.tags = util.sort([tag for tag in tags if globaltags[tag] is c])
566 c.tags = util.sort([tag for tag in tags if globaltags[tag] is c])
549
567
550 # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
568 # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
551 # by inserting dummy changesets with two parents, and handle
569 # by inserting dummy changesets with two parents, and handle
552 # {{mergefrombranch BRANCHNAME}} by setting two parents.
570 # {{mergefrombranch BRANCHNAME}} by setting two parents.
553
571
554 if mergeto is None:
572 if mergeto is None:
555 mergeto = r'{{mergetobranch ([-\w]+)}}'
573 mergeto = r'{{mergetobranch ([-\w]+)}}'
556 if mergeto:
574 if mergeto:
557 mergeto = re.compile(mergeto)
575 mergeto = re.compile(mergeto)
558
576
559 if mergefrom is None:
577 if mergefrom is None:
560 mergefrom = r'{{mergefrombranch ([-\w]+)}}'
578 mergefrom = r'{{mergefrombranch ([-\w]+)}}'
561 if mergefrom:
579 if mergefrom:
562 mergefrom = re.compile(mergefrom)
580 mergefrom = re.compile(mergefrom)
563
581
564 versions = {} # changeset index where we saw any particular file version
582 versions = {} # changeset index where we saw any particular file version
565 branches = {} # changeset index where we saw a branch
583 branches = {} # changeset index where we saw a branch
566 n = len(changesets)
584 n = len(changesets)
567 i = 0
585 i = 0
568 while i<n:
586 while i<n:
569 c = changesets[i]
587 c = changesets[i]
570
588
571 for f in c.entries:
589 for f in c.entries:
572 versions[(f.rcs, f.revision)] = i
590 versions[(f.rcs, f.revision)] = i
573
591
574 p = None
592 p = None
575 if c.branch in branches:
593 if c.branch in branches:
576 p = branches[c.branch]
594 p = branches[c.branch]
577 else:
595 else:
578 for f in c.entries:
596 for f in c.entries:
579 p = max(p, versions.get((f.rcs, f.parent), None))
597 p = max(p, versions.get((f.rcs, f.parent), None))
580
598
581 c.parents = []
599 c.parents = []
582 if p is not None:
600 if p is not None:
583 p = changesets[p]
601 p = changesets[p]
584
602
585 # Ensure no changeset has a synthetic changeset as a parent.
603 # Ensure no changeset has a synthetic changeset as a parent.
586 while p.synthetic:
604 while p.synthetic:
587 assert len(p.parents) <= 1, \
605 assert len(p.parents) <= 1, \
588 _('synthetic changeset cannot have multiple parents')
606 _('synthetic changeset cannot have multiple parents')
589 if p.parents:
607 if p.parents:
590 p = p.parents[0]
608 p = p.parents[0]
591 else:
609 else:
592 p = None
610 p = None
593 break
611 break
594
612
595 if p is not None:
613 if p is not None:
596 c.parents.append(p)
614 c.parents.append(p)
597
615
616 if c.mergepoint:
617 if c.mergepoint == 'HEAD':
618 c.mergepoint = None
619 c.parents.append(changesets[branches[c.mergepoint]])
620
598 if mergefrom:
621 if mergefrom:
599 m = mergefrom.search(c.comment)
622 m = mergefrom.search(c.comment)
600 if m:
623 if m:
601 m = m.group(1)
624 m = m.group(1)
602 if m == 'HEAD':
625 if m == 'HEAD':
603 m = None
626 m = None
604 candidate = changesets[branches[m]]
627 candidate = changesets[branches[m]]
605 if m in branches and c.branch != m and not candidate.synthetic:
628 if m in branches and c.branch != m and not candidate.synthetic:
606 c.parents.append(candidate)
629 c.parents.append(candidate)
607
630
608 if mergeto:
631 if mergeto:
609 m = mergeto.search(c.comment)
632 m = mergeto.search(c.comment)
610 if m:
633 if m:
611 try:
634 try:
612 m = m.group(1)
635 m = m.group(1)
613 if m == 'HEAD':
636 if m == 'HEAD':
614 m = None
637 m = None
615 except:
638 except:
616 m = None # if no group found then merge to HEAD
639 m = None # if no group found then merge to HEAD
617 if m in branches and c.branch != m:
640 if m in branches and c.branch != m:
618 # insert empty changeset for merge
641 # insert empty changeset for merge
619 cc = changeset(author=c.author, branch=m, date=c.date,
642 cc = changeset(author=c.author, branch=m, date=c.date,
620 comment='convert-repo: CVS merge from branch %s' % c.branch,
643 comment='convert-repo: CVS merge from branch %s' % c.branch,
621 entries=[], tags=[], parents=[changesets[branches[m]], c])
644 entries=[], tags=[], parents=[changesets[branches[m]], c])
622 changesets.insert(i + 1, cc)
645 changesets.insert(i + 1, cc)
623 branches[m] = i + 1
646 branches[m] = i + 1
624
647
625 # adjust our loop counters now we have inserted a new entry
648 # adjust our loop counters now we have inserted a new entry
626 n += 1
649 n += 1
627 i += 2
650 i += 2
628 continue
651 continue
629
652
630 branches[c.branch] = i
653 branches[c.branch] = i
631 i += 1
654 i += 1
632
655
633 # Drop synthetic changesets (safe now that we have ensured no other
656 # Drop synthetic changesets (safe now that we have ensured no other
634 # changesets can have them as parents).
657 # changesets can have them as parents).
635 i = 0
658 i = 0
636 while i < len(changesets):
659 while i < len(changesets):
637 if changesets[i].synthetic:
660 if changesets[i].synthetic:
638 del changesets[i]
661 del changesets[i]
639 else:
662 else:
640 i += 1
663 i += 1
641
664
642 # Number changesets
665 # Number changesets
643
666
644 for i, c in enumerate(changesets):
667 for i, c in enumerate(changesets):
645 c.id = i + 1
668 c.id = i + 1
646
669
647 ui.status(_('%d changeset entries\n') % len(changesets))
670 ui.status(_('%d changeset entries\n') % len(changesets))
648
671
649 return changesets
672 return changesets
650
673
651
674
652 def debugcvsps(ui, *args, **opts):
675 def debugcvsps(ui, *args, **opts):
653 '''Read CVS rlog for current directory or named path in repository, and
676 '''Read CVS rlog for current directory or named path in repository, and
654 convert the log to changesets based on matching commit log entries and dates.'''
677 convert the log to changesets based on matching commit log entries and dates.'''
655
678
656 if opts["new_cache"]:
679 if opts["new_cache"]:
657 cache = "write"
680 cache = "write"
658 elif opts["update_cache"]:
681 elif opts["update_cache"]:
659 cache = "update"
682 cache = "update"
660 else:
683 else:
661 cache = None
684 cache = None
662
685
663 revisions = opts["revisions"]
686 revisions = opts["revisions"]
664
687
665 try:
688 try:
666 if args:
689 if args:
667 log = []
690 log = []
668 for d in args:
691 for d in args:
669 log += createlog(ui, d, root=opts["root"], cache=cache)
692 log += createlog(ui, d, root=opts["root"], cache=cache)
670 else:
693 else:
671 log = createlog(ui, root=opts["root"], cache=cache)
694 log = createlog(ui, root=opts["root"], cache=cache)
672 except logerror, e:
695 except logerror, e:
673 ui.write("%r\n"%e)
696 ui.write("%r\n"%e)
674 return
697 return
675
698
676 changesets = createchangeset(ui, log, opts["fuzz"])
699 changesets = createchangeset(ui, log, opts["fuzz"])
677 del log
700 del log
678
701
679 # Print changesets (optionally filtered)
702 # Print changesets (optionally filtered)
680
703
681 off = len(revisions)
704 off = len(revisions)
682 branches = {} # latest version number in each branch
705 branches = {} # latest version number in each branch
683 ancestors = {} # parent branch
706 ancestors = {} # parent branch
684 for cs in changesets:
707 for cs in changesets:
685
708
686 if opts["ancestors"]:
709 if opts["ancestors"]:
687 if cs.branch not in branches and cs.parents and cs.parents[0].id:
710 if cs.branch not in branches and cs.parents and cs.parents[0].id:
688 ancestors[cs.branch] = changesets[cs.parents[0].id-1].branch, cs.parents[0].id
711 ancestors[cs.branch] = changesets[cs.parents[0].id-1].branch, cs.parents[0].id
689 branches[cs.branch] = cs.id
712 branches[cs.branch] = cs.id
690
713
691 # limit by branches
714 # limit by branches
692 if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
715 if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
693 continue
716 continue
694
717
695 if not off:
718 if not off:
696 # Note: trailing spaces on several lines here are needed to have
719 # Note: trailing spaces on several lines here are needed to have
697 # bug-for-bug compatibility with cvsps.
720 # bug-for-bug compatibility with cvsps.
698 ui.write('---------------------\n')
721 ui.write('---------------------\n')
699 ui.write('PatchSet %d \n' % cs.id)
722 ui.write('PatchSet %d \n' % cs.id)
700 ui.write('Date: %s\n' % util.datestr(cs.date, '%Y/%m/%d %H:%M:%S %1%2'))
723 ui.write('Date: %s\n' % util.datestr(cs.date, '%Y/%m/%d %H:%M:%S %1%2'))
701 ui.write('Author: %s\n' % cs.author)
724 ui.write('Author: %s\n' % cs.author)
702 ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
725 ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
703 ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags)>1],
726 ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags)>1],
704 ','.join(cs.tags) or '(none)'))
727 ','.join(cs.tags) or '(none)'))
705 if opts["parents"] and cs.parents:
728 if opts["parents"] and cs.parents:
706 if len(cs.parents)>1:
729 if len(cs.parents)>1:
707 ui.write('Parents: %s\n' % (','.join([str(p.id) for p in cs.parents])))
730 ui.write('Parents: %s\n' % (','.join([str(p.id) for p in cs.parents])))
708 else:
731 else:
709 ui.write('Parent: %d\n' % cs.parents[0].id)
732 ui.write('Parent: %d\n' % cs.parents[0].id)
710
733
711 if opts["ancestors"]:
734 if opts["ancestors"]:
712 b = cs.branch
735 b = cs.branch
713 r = []
736 r = []
714 while b:
737 while b:
715 b, c = ancestors[b]
738 b, c = ancestors[b]
716 r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
739 r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
717 if r:
740 if r:
718 ui.write('Ancestors: %s\n' % (','.join(r)))
741 ui.write('Ancestors: %s\n' % (','.join(r)))
719
742
720 ui.write('Log:\n')
743 ui.write('Log:\n')
721 ui.write('%s\n\n' % cs.comment)
744 ui.write('%s\n\n' % cs.comment)
722 ui.write('Members: \n')
745 ui.write('Members: \n')
723 for f in cs.entries:
746 for f in cs.entries:
724 fn = f.file
747 fn = f.file
725 if fn.startswith(opts["prefix"]):
748 if fn.startswith(opts["prefix"]):
726 fn = fn[len(opts["prefix"]):]
749 fn = fn[len(opts["prefix"]):]
727 ui.write('\t%s:%s->%s%s \n' % (fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
750 ui.write('\t%s:%s->%s%s \n' % (fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
728 '.'.join([str(x) for x in f.revision]), ['', '(DEAD)'][f.dead]))
751 '.'.join([str(x) for x in f.revision]), ['', '(DEAD)'][f.dead]))
729 ui.write('\n')
752 ui.write('\n')
730
753
731 # have we seen the start tag?
754 # have we seen the start tag?
732 if revisions and off:
755 if revisions and off:
733 if revisions[0] == str(cs.id) or \
756 if revisions[0] == str(cs.id) or \
734 revisions[0] in cs.tags:
757 revisions[0] in cs.tags:
735 off = False
758 off = False
736
759
737 # see if we reached the end tag
760 # see if we reached the end tag
738 if len(revisions)>1 and not off:
761 if len(revisions)>1 and not off:
739 if revisions[1] == str(cs.id) or \
762 if revisions[1] == str(cs.id) or \
740 revisions[1] in cs.tags:
763 revisions[1] in cs.tags:
741 break
764 break
General Comments 0
You need to be logged in to leave comments. Login now