convert: added cvsnt mergepoint support
Henrik Stuart
r7956:3e7611a8 default
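cvsnt records the revision a change was merged from in an extra "mergepoint:" field on each rlog date line, and the builtin cvsps now picks that field up and turns it into a second changeset parent. As a minimal standalone sketch (not part of the change itself), the extended re_60 pattern from the cvsps.py hunk below applied to the sample date line from the new .rlog fixture:

import re

# re_60 as extended below; the trailing optional group captures the cvsnt
# "mergepoint:" field when it is present.
re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
                   r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?(.*mergepoint:\s+([^;]+);)?')

# sample line taken from the .rlog fixture added by this change
line = ('date: 2009/04/02 07:00:32; author: user; state: Exp; lines: +1 -1;'
        ' kopt: kv; commitid: 14d449d462903487; mergepoint: 1.1.2.2.2.1;'
        ' filename: foo.txt;')

m = re_60.match(line)
print(m.group(8))  # '1.1.2.2.2.1', the revision this commit was merged from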
@@ -0,0 +1,78 @@
1 #!/bin/bash
2
3 "$TESTDIR/hghave" cvs || exit 80
4
5 cvscall()
6 {
7 cvs -f "$@"
8 }
9
10 hgcat()
11 {
12 hg --cwd src-hg cat -r tip "$1"
13 }
14
15 echo "[extensions]" >> $HGRCPATH
16 echo "convert = " >> $HGRCPATH
17 echo "graphlog = " >> $HGRCPATH
18 echo "[convert]" >> $HGRCPATH
19 echo "cvsps=builtin" >> $HGRCPATH
20
21 echo % create cvs repository
22 mkdir cvsmaster
23 cd cvsmaster
24 export CVSROOT=`pwd`
25 export CVS_OPTIONS=-f
26 cd ..
27
28 cvscall -q -d "$CVSROOT" init
29
30 cvscall -q checkout -d cvsworktmp .
31 cd cvsworktmp
32 mkdir foo
33 cvscall -q add foo | sed -e 's/Directory .* added to the repository//g'
34 cd foo
35 echo foo > foo.txt
36 cvscall -q add foo.txt
37 cvscall -q ci -m "foo.txt" | sed 's/.*,v.*/checking in/g'
38
39 cd ../..
40 rm -rf cvsworktmp
41
42 cvscall -q checkout -d cvswork foo
43
44 cd cvswork
45
46 cvscall -q rtag -b -R MYBRANCH1 foo
47 cvscall -q up -P -r MYBRANCH1
48 echo bar > foo.txt
49 cvscall -q ci -m "bar" | sed 's/.*,v.*/checking in/g'
50 echo baz > foo.txt
51 cvscall -q ci -m "baz" | sed 's/.*,v.*/checking in/g'
52
53 cvscall -q rtag -b -R -r MYBRANCH1 MYBRANCH1_2 foo
54 cvscall -q up -P -r MYBRANCH1_2
55
56 echo bazzie > foo.txt
57 cvscall -q ci -m "bazzie" | sed 's/.*,v.*/checking in/g'
58
59 cvscall -q rtag -b -R MYBRANCH1_1 foo
60 cvscall -q up -P -r MYBRANCH1_1
61
62 echo quux > foo.txt
63 cvscall -q ci -m "quux" | sed 's/.*,v.*/checking in/g'
64 cvscall -q up -P -jMYBRANCH1 | sed 's/RCS file: .*,v/merging MYBRANCH1/g'
65 echo xyzzy > foo.txt
66 cvscall -q ci -m "merge" | sed 's/.*,v.*/checking in/g'
67
68 cvscall -q up -P -A
69
70 cvscall -q up -P -jMYBRANCH1_2 | sed 's/RCS file: .*,v/merging MYBRANCH1_2/g'
71 cvscall -q ci -m "merge" | sed 's/.*,v.*/checking in/g'
72
73 REALCVS=`which cvs`
74 echo "for x in \$*; do if [ \"\$x\" = \"rlog\" ]; then echo \"RCS file: $CVSROOT/foo/foo.txt,v\"; cat $TESTDIR/test-convert-cvs-builtincvsps-cvsnt-mergepoints.rlog; exit 0; fi; done; $REALCVS \$*" > cvs
75 chmod +x cvs
76 PATH=.:${PATH} hg debugcvsps --parents foo | sed -e 's/Author:.*/Author:/' -e 's/Date:.*/Date:/'
77
78 cd ..
@@ -0,0 +1,138 @@
1 % create cvs repository
2 U cvsworktmp/CVSROOT/checkoutlist
3 U cvsworktmp/CVSROOT/commitinfo
4 U cvsworktmp/CVSROOT/config
5 U cvsworktmp/CVSROOT/cvswrappers
6 U cvsworktmp/CVSROOT/loginfo
7 U cvsworktmp/CVSROOT/modules
8 U cvsworktmp/CVSROOT/notify
9 U cvsworktmp/CVSROOT/postadmin
10 U cvsworktmp/CVSROOT/postproxy
11 U cvsworktmp/CVSROOT/posttag
12 U cvsworktmp/CVSROOT/postwatch
13 U cvsworktmp/CVSROOT/preproxy
14 U cvsworktmp/CVSROOT/rcsinfo
15 U cvsworktmp/CVSROOT/taginfo
16 U cvsworktmp/CVSROOT/verifymsg
17
18 cvs add: use `cvs commit' to add this file permanently
19 checking in
20 initial revision: 1.1
21 U cvswork/foo.txt
22 checking in
23 new revision: 1.1.2.1; previous revision: 1.1
24 checking in
25 new revision: 1.1.2.2; previous revision: 1.1.2.1
26 checking in
27 new revision: 1.1.2.2.2.1; previous revision: 1.1.2.2
28 U foo.txt
29 checking in
30 new revision: 1.1.4.1; previous revision: 1.1
31 rcsmerge: warning: conflicts during merge
32 merging MYBRANCH1
33 retrieving revision 1.1
34 retrieving revision 1.1.2.2
35 Merging differences between 1.1 and 1.1.2.2 into foo.txt
36 checking in
37 new revision: 1.1.4.2; previous revision: 1.1.4.1
38 U foo.txt
39 merging MYBRANCH1_2
40 retrieving revision 1.1
41 retrieving revision 1.1.2.2.2.1
42 Merging differences between 1.1 and 1.1.2.2.2.1 into foo.txt
43 checking in
44 new revision: 1.2; previous revision: 1.1
45 collecting CVS rlog
46 7 log entries
47 creating changesets
48 7 changeset entries
49 ---------------------
50 PatchSet 1
51 Date:
52 Author:
53 Branch: HEAD
54 Tag: (none)
55 Log:
56 foo.txt
57
58 Members:
59 foo.txt:INITIAL->1.1
60
61 ---------------------
62 PatchSet 2
63 Date:
64 Author:
65 Branch: MYBRANCH1
66 Tag: (none)
67 Parent: 1
68 Log:
69 bar
70
71 Members:
72 foo.txt:1.1->1.1.2.1
73
74 ---------------------
75 PatchSet 3
76 Date:
77 Author:
78 Branch: MYBRANCH1
79 Tag: (none)
80 Parent: 2
81 Log:
82 baz
83
84 Members:
85 foo.txt:1.1.2.1->1.1.2.2
86
87 ---------------------
88 PatchSet 4
89 Date:
90 Author:
91 Branch: MYBRANCH1_1
92 Tag: (none)
93 Parent: 1
94 Log:
95 quux
96
97 Members:
98 foo.txt:1.1->1.1.4.1
99
100 ---------------------
101 PatchSet 5
102 Date:
103 Author:
104 Branch: MYBRANCH1_2
105 Tag: (none)
106 Parent: 3
107 Log:
108 bazzie
109
110 Members:
111 foo.txt:1.1.2.2->1.1.2.2.2.1
112
113 ---------------------
114 PatchSet 6
115 Date:
116 Author:
117 Branch: HEAD
118 Tag: (none)
119 Parents: 1,5
120 Log:
121 merge
122
123 Members:
124 foo.txt:1.1->1.2
125
126 ---------------------
127 PatchSet 7
128 Date:
129 Author:
130 Branch: MYBRANCH1_1
131 Tag: (none)
132 Parents: 4,3
133 Log:
134 merge
135
136 Members:
137 foo.txt:1.1.4.1->1.1.4.2
138
@@ -0,0 +1,42 @@
1 head: 1.2
2 branch:
3 locks: strict
4 access list:
5 symbolic names:
6 MYBRANCH1_2: 1.1.2.2.0.2
7 MYBRANCH1_1: 1.1.0.4
8 MYBRANCH1: 1.1.0.2
9 keyword substitution: kv
10 total revisions: 8; selected revisions: 8
11 description:
12 ----------------------------
13 revision 1.2
14 date: 2009/04/02 07:00:32; author: user; state: Exp; lines: +1 -1; kopt: kv; commitid: 14d449d462903487; mergepoint: 1.1.2.2.2.1; filename: foo.txt;
15 merge
16 ----------------------------
17 revision 1.1
18 date: 2009/04/02 06:50:43; author: user; state: Exp; kopt: kv; commitid: 17ac49d460432d04; filename: foo.txt;
19 branches: 1.1.2; 1.1.4;
20 foo.txt
21 ----------------------------
22 revision 1.1.4.2
23 date: 2009/04/02 07:02:51; author: user; state: Exp; lines: +1 -0; kopt: kv; commitid: 170049d4631b364d; mergepoint: 1.1.2.2; filename: foo.txt;
24 merge
25 ----------------------------
26 revision 1.1.4.1
27 date: 2009/04/02 06:53:42; author: user; state: Exp; lines: +1 -1; kopt: kv; commitid: dc849d460f52f49; filename: foo.txt;
28 quux
29 ----------------------------
30 revision 1.1.2.2
31 date: 2009/04/02 06:53:20; author: user; state: Exp; lines: +1 -1; kopt: kv; commitid: 8ec49d460e02f04; filename: foo.txt;
32 branches: 1.1.2.2.2;
33 baz
34 ----------------------------
35 revision 1.1.2.1
36 date: 2009/04/02 06:52:38; author: user; state: Exp; lines: +1 -1; kopt: kv; commitid: d5049d460b62e7b; filename: foo.txt;
37 bar
38 ----------------------------
39 revision 1.1.2.2.2.1
40 date: 2009/04/02 06:55:42; author: user; state: Exp; lines: +1 -1; kopt: kv; commitid: 11c849d4616d30d1; filename: foo.txt;
41 bazzie
42 =============================================================================
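The mergepoint revision recorded on revision 1.2 above (1.1.2.2.2.1) is resolved to a branch name through the magic branch numbers listed under "symbolic names:". A minimal standalone sketch of that lookup, mirroring the logic added to state 6 of createlog() below (the branchmap dict is hypothetical here, with its values copied from the fixture):

# branch name -> magic branch number, as read from the "symbolic names:" block
branchmap = {
    'MYBRANCH1_2': '1.1.2.2.0.2',
    'MYBRANCH1_1': '1.1.0.4',
    'MYBRANCH1': '1.1.0.2',
}

mergepoint = '1.1.2.2.2.1'      # from the "mergepoint:" field on revision 1.2
myrev = mergepoint.split('.')
if len(myrev) == 2:
    branch = 'HEAD'             # a two-part revision lives on the trunk
else:
    # rebuild the magic branch number: 1.1.2.2.2.1 -> 1.1.2.2.0.2
    magic = '.'.join(myrev[:-2] + ['0', myrev[-2]])
    branch = [b for b in branchmap if branchmap[b] == magic][0]

print(branch)                   # MYBRANCH1_2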
@@ -1,741 +1,764 @@
1 1 #
2 2 # Mercurial built-in replacement for cvsps.
3 3 #
4 4 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
5 5 #
6 6 # This software may be used and distributed according to the terms
7 7 # of the GNU General Public License, incorporated herein by reference.
8 8
9 9 import os
10 10 import re
11 11 import cPickle as pickle
12 12 from mercurial import util
13 13 from mercurial.i18n import _
14 14
15 15 def listsort(list, key):
16 16 "helper to sort by key in Python 2.3"
17 17 try:
18 18 list.sort(key=key)
19 19 except TypeError:
20 20 list.sort(lambda l, r: cmp(key(l), key(r)))
21 21
22 22 class logentry(object):
23 23 '''Class logentry has the following attributes:
24 24 .author - author name as CVS knows it
25 25 .branch - name of branch this revision is on
26 26 .branches - revision tuple of branches starting at this revision
27 27 .comment - commit message
28 28 .date - the commit date as a (time, tz) tuple
29 29 .dead - true if file revision is dead
30 30 .file - Name of file
31 31 .lines - a tuple (+lines, -lines) or None
32 32 .parent - Previous revision of this entry
33 33 .rcs - name of file as returned from CVS
34 34 .revision - revision number as tuple
35 35 .tags - list of tags on the file
36 36 .synthetic - is this a synthetic "file ... added on ..." revision?
37 .mergepoint - the branch that has been merged from (if present in rlog output)
37 38 '''
38 39 def __init__(self, **entries):
39 40 self.__dict__.update(entries)
40 41
41 42 class logerror(Exception):
42 43 pass
43 44
44 45 def getrepopath(cvspath):
45 46 """Return the repository path from a CVS path.
46 47
47 48 >>> getrepopath('/foo/bar')
48 49 '/foo/bar'
49 50 >>> getrepopath('c:/foo/bar')
50 51 'c:/foo/bar'
51 52 >>> getrepopath(':pserver:10/foo/bar')
52 53 '/foo/bar'
53 54 >>> getrepopath(':pserver:10c:/foo/bar')
54 55 '/foo/bar'
55 56 >>> getrepopath(':pserver:/foo/bar')
56 57 '/foo/bar'
57 58 >>> getrepopath(':pserver:c:/foo/bar')
58 59 'c:/foo/bar'
59 60 >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
60 61 '/foo/bar'
61 62 >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
62 63 'c:/foo/bar'
63 64 """
64 65 # According to CVS manual, CVS paths are expressed like:
65 66 # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
66 67 #
67 68 # Unfortunately, Windows absolute paths start with a drive letter
68 69 # like 'c:' making it harder to parse. Here we assume that drive
69 70 # letters are only one character long and any CVS component before
70 71 # the repository path is at least 2 characters long, and use this
71 72 # to disambiguate.
72 73 parts = cvspath.split(':')
73 74 if len(parts) == 1:
74 75 return parts[0]
75 76 # Here there is an ambiguous case if we have a port number
76 77 # immediately followed by a Windows drive letter. We assume this
77 78 # never happens and decide it must be a CVS path component,
78 79 # therefore ignoring it.
79 80 if len(parts[-2]) > 1:
80 81 return parts[-1].lstrip('0123456789')
81 82 return parts[-2] + ':' + parts[-1]
82 83
83 84 def createlog(ui, directory=None, root="", rlog=True, cache=None):
84 85 '''Collect the CVS rlog'''
85 86
86 87 # Because we store many duplicate commit log messages, reusing strings
87 88 # saves a lot of memory and pickle storage space.
88 89 _scache = {}
89 90 def scache(s):
90 91 "return a shared version of a string"
91 92 return _scache.setdefault(s, s)
92 93
93 94 ui.status(_('collecting CVS rlog\n'))
94 95
95 96 log = [] # list of logentry objects containing the CVS state
96 97
97 98 # patterns to match in CVS (r)log output, by state of use
98 99 re_00 = re.compile('RCS file: (.+)$')
99 100 re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
100 101 re_02 = re.compile('cvs (r?log|server): (.+)\n$')
101 102 re_03 = re.compile("(Cannot access.+CVSROOT)|(can't create temporary directory.+)$")
102 103 re_10 = re.compile('Working file: (.+)$')
103 104 re_20 = re.compile('symbolic names:')
104 105 re_30 = re.compile('\t(.+): ([\\d.]+)$')
105 106 re_31 = re.compile('----------------------------$')
106 107 re_32 = re.compile('=============================================================================$')
107 108 re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
108 re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?')
109 re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?(.*mergepoint:\s+([^;]+);)?')
109 110 re_70 = re.compile('branches: (.+);$')
110 111
111 112 file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')
112 113
113 114 prefix = '' # leading path to strip off what we get from CVS
114 115
115 116 if directory is None:
116 117 # Current working directory
117 118
118 119 # Get the real directory in the repository
119 120 try:
120 121 prefix = file(os.path.join('CVS','Repository')).read().strip()
121 122 if prefix == ".":
122 123 prefix = ""
123 124 directory = prefix
124 125 except IOError:
125 126 raise logerror('Not a CVS sandbox')
126 127
127 128 if prefix and not prefix.endswith(os.sep):
128 129 prefix += os.sep
129 130
130 131 # Use the Root file in the sandbox, if it exists
131 132 try:
132 133 root = file(os.path.join('CVS','Root')).read().strip()
133 134 except IOError:
134 135 pass
135 136
136 137 if not root:
137 138 root = os.environ.get('CVSROOT', '')
138 139
139 140 # read log cache if one exists
140 141 oldlog = []
141 142 date = None
142 143
143 144 if cache:
144 145 cachedir = os.path.expanduser('~/.hg.cvsps')
145 146 if not os.path.exists(cachedir):
146 147 os.mkdir(cachedir)
147 148
148 149 # The cvsps cache pickle needs a uniquified name, based on the
149 150 # repository location. The address may have all sorts of nasties
150 151 # in it, slashes, colons and such. So here we take just the
151 152 # alphanumerics, concatenated in a way that does not mix up the
152 153 # various components, so that
153 154 # :pserver:user@server:/path
154 155 # and
155 156 # /pserver/user/server/path
156 157 # are mapped to different cache file names.
157 158 cachefile = root.split(":") + [directory, "cache"]
158 159 cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
159 160 cachefile = os.path.join(cachedir,
160 161 '.'.join([s for s in cachefile if s]))
161 162
162 163 if cache == 'update':
163 164 try:
164 165 ui.note(_('reading cvs log cache %s\n') % cachefile)
165 166 oldlog = pickle.load(file(cachefile))
166 167 ui.note(_('cache has %d log entries\n') % len(oldlog))
167 168 except Exception, e:
168 169 ui.note(_('error reading cache: %r\n') % e)
169 170
170 171 if oldlog:
171 172 date = oldlog[-1].date # last commit date as a (time,tz) tuple
172 173 date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
173 174
174 175 # build the CVS commandline
175 176 cmd = ['cvs', '-q']
176 177 if root:
177 178 cmd.append('-d%s' % root)
178 179 p = util.normpath(getrepopath(root))
179 180 if not p.endswith('/'):
180 181 p += '/'
181 182 prefix = p + util.normpath(prefix)
182 183 cmd.append(['log', 'rlog'][rlog])
183 184 if date:
184 185 # no space between option and date string
185 186 cmd.append('-d>%s' % date)
186 187 cmd.append(directory)
187 188
188 189 # state machine begins here
189 190 tags = {} # dictionary of revisions on current file with their tags
191 branchmap = {} # mapping between branch names and revision numbers
190 192 state = 0
191 193 store = False # set when a new record can be appended
192 194
193 195 cmd = [util.shellquote(arg) for arg in cmd]
194 196 ui.note(_("running %s\n") % (' '.join(cmd)))
195 197 ui.debug(_("prefix=%r directory=%r root=%r\n") % (prefix, directory, root))
196 198
197 199 pfp = util.popen(' '.join(cmd))
198 200 peek = pfp.readline()
199 201 while True:
200 202 line = peek
201 203 if line == '':
202 204 break
203 205 peek = pfp.readline()
204 206 if line.endswith('\n'):
205 207 line = line[:-1]
206 208 #ui.debug('state=%d line=%r\n' % (state, line))
207 209
208 210 if state == 0:
209 211 # initial state, consume input until we see 'RCS file'
210 212 match = re_00.match(line)
211 213 if match:
212 214 rcs = match.group(1)
213 215 tags = {}
214 216 if rlog:
215 217 filename = util.normpath(rcs[:-2])
216 218 if filename.startswith(prefix):
217 219 filename = filename[len(prefix):]
218 220 if filename.startswith('/'):
219 221 filename = filename[1:]
220 222 if filename.startswith('Attic/'):
221 223 filename = filename[6:]
222 224 else:
223 225 filename = filename.replace('/Attic/', '/')
224 226 state = 2
225 227 continue
226 228 state = 1
227 229 continue
228 230 match = re_01.match(line)
229 231 if match:
230 232 raise Exception(match.group(1))
231 233 match = re_02.match(line)
232 234 if match:
233 235 raise Exception(match.group(2))
234 236 if re_03.match(line):
235 237 raise Exception(line)
236 238
237 239 elif state == 1:
238 240 # expect 'Working file' (only when using log instead of rlog)
239 241 match = re_10.match(line)
240 242 assert match, _('RCS file must be followed by working file')
241 243 filename = util.normpath(match.group(1))
242 244 state = 2
243 245
244 246 elif state == 2:
245 247 # expect 'symbolic names'
246 248 if re_20.match(line):
249 branchmap = {}
247 250 state = 3
248 251
249 252 elif state == 3:
250 253 # read the symbolic names and store as tags
251 254 match = re_30.match(line)
252 255 if match:
253 256 rev = [int(x) for x in match.group(2).split('.')]
254 257
255 258 # Convert magic branch number to an odd-numbered one
256 259 revn = len(rev)
257 260 if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
258 261 rev = rev[:-2] + rev[-1:]
259 262 rev = tuple(rev)
260 263
261 264 if rev not in tags:
262 265 tags[rev] = []
263 266 tags[rev].append(match.group(1))
267 branchmap[match.group(1)] = match.group(2)
264 268
265 269 elif re_31.match(line):
266 270 state = 5
267 271 elif re_32.match(line):
268 272 state = 0
269 273
270 274 elif state == 4:
271 275 # expecting '------' separator before first revision
272 276 if re_31.match(line):
273 277 state = 5
274 278 else:
275 279 assert not re_32.match(line), _('must have at least some revisions')
276 280
277 281 elif state == 5:
278 282 # expecting revision number and possibly (ignored) lock indication
279 283 # we create the logentry here from values stored in states 0 to 4,
280 284 # as this state is re-entered for subsequent revisions of a file.
281 285 match = re_50.match(line)
282 286 assert match, _('expected revision number')
283 287 e = logentry(rcs=scache(rcs), file=scache(filename),
284 288 revision=tuple([int(x) for x in match.group(1).split('.')]),
285 289 branches=[], parent=None,
286 290 synthetic=False)
287 291 state = 6
288 292
289 293 elif state == 6:
290 294 # expecting date, author, state, lines changed
291 295 match = re_60.match(line)
292 296 assert match, _('revision must be followed by date line')
293 297 d = match.group(1)
294 298 if d[2] == '/':
295 299 # Y2K
296 300 d = '19' + d
297 301
298 302 if len(d.split()) != 3:
299 303 # cvs log dates always in GMT
300 304 d = d + ' UTC'
301 305 e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S'])
302 306 e.author = scache(match.group(2))
303 307 e.dead = match.group(3).lower() == 'dead'
304 308
305 309 if match.group(5):
306 310 if match.group(6):
307 311 e.lines = (int(match.group(5)), int(match.group(6)))
308 312 else:
309 313 e.lines = (int(match.group(5)), 0)
310 314 elif match.group(6):
311 315 e.lines = (0, int(match.group(6)))
312 316 else:
313 317 e.lines = None
318
319 if match.group(7): # cvsnt mergepoint
320 myrev = match.group(8).split('.')
321 if len(myrev) == 2: # head
322 e.mergepoint = 'HEAD'
323 else:
324 myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
325 branches = [b for b in branchmap if branchmap[b] == myrev]
326 assert len(branches) == 1, 'unknown branch: %s' % match.group(8)
327 e.mergepoint = branches[0]
328 else:
329 e.mergepoint = None
314 330 e.comment = []
315 331 state = 7
316 332
317 333 elif state == 7:
318 334 # read the revision numbers of branches that start at this revision
319 335 # or store the commit log message otherwise
320 336 m = re_70.match(line)
321 337 if m:
322 338 e.branches = [tuple([int(y) for y in x.strip().split('.')])
323 339 for x in m.group(1).split(';')]
324 340 state = 8
325 341 elif re_31.match(line) and re_50.match(peek):
326 342 state = 5
327 343 store = True
328 344 elif re_32.match(line):
329 345 state = 0
330 346 store = True
331 347 else:
332 348 e.comment.append(line)
333 349
334 350 elif state == 8:
335 351 # store commit log message
336 352 if re_31.match(line):
337 353 state = 5
338 354 store = True
339 355 elif re_32.match(line):
340 356 state = 0
341 357 store = True
342 358 else:
343 359 e.comment.append(line)
344 360
345 361 # When a file is added on a branch B1, CVS creates a synthetic
346 362 # dead trunk revision 1.1 so that the branch has a root.
347 363 # Likewise, if you merge such a file to a later branch B2 (one
348 364 # that already existed when the file was added on B1), CVS
349 365 # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
350 366 # these revisions now, but mark them synthetic so
351 367 # createchangeset() can take care of them.
352 368 if (store and
353 369 e.dead and
354 370 e.revision[-1] == 1 and # 1.1 or 1.1.x.1
355 371 len(e.comment) == 1 and
356 372 file_added_re.match(e.comment[0])):
357 373 ui.debug(_('found synthetic rev in %s: %r\n')
358 374 % (e.rcs, e.comment[0]))
359 375 e.synthetic = True
360 376
361 377 if store:
362 378 # clean up the results and save in the log.
363 379 store = False
364 380 e.tags = util.sort([scache(x) for x in tags.get(e.revision, [])])
365 381 e.comment = scache('\n'.join(e.comment))
366 382
367 383 revn = len(e.revision)
368 384 if revn > 3 and (revn % 2) == 0:
369 385 e.branch = tags.get(e.revision[:-1], [None])[0]
370 386 else:
371 387 e.branch = None
372 388
373 389 log.append(e)
374 390
375 391 if len(log) % 100 == 0:
376 392 ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')
377 393
378 394 listsort(log, key=lambda x:(x.rcs, x.revision))
379 395
380 396 # find parent revisions of individual files
381 397 versions = {}
382 398 for e in log:
383 399 branch = e.revision[:-1]
384 400 p = versions.get((e.rcs, branch), None)
385 401 if p is None:
386 402 p = e.revision[:-2]
387 403 e.parent = p
388 404 versions[(e.rcs, branch)] = e.revision
389 405
390 406 # update the log cache
391 407 if cache:
392 408 if log:
393 409 # join up the old and new logs
394 410 listsort(log, key=lambda x:x.date)
395 411
396 412 if oldlog and oldlog[-1].date >= log[0].date:
397 413 raise logerror('Log cache overlaps with new log entries,'
398 414 ' re-run without cache.')
399 415
400 416 log = oldlog + log
401 417
402 418 # write the new cachefile
403 419 ui.note(_('writing cvs log cache %s\n') % cachefile)
404 420 pickle.dump(log, file(cachefile, 'w'))
405 421 else:
406 422 log = oldlog
407 423
408 424 ui.status(_('%d log entries\n') % len(log))
409 425
410 426 return log
411 427
412 428
413 429 class changeset(object):
414 430 '''Class changeset has the following attributes:
415 431 .author - author name as CVS knows it
416 432 .branch - name of branch this changeset is on, or None
417 433 .comment - commit message
418 434 .date - the commit date as a (time,tz) tuple
419 435 .entries - list of logentry objects in this changeset
420 436 .parents - list of one or two parent changesets
421 437 .tags - list of tags on this changeset
422 438 .synthetic - from synthetic revision "file ... added on branch ..."
439 .mergepoint - the branch that has been merged from (if present in rlog output)
423 440 '''
424 441 def __init__(self, **entries):
425 442 self.__dict__.update(entries)
426 443
427 444 def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
428 445 '''Convert log into changesets.'''
429 446
430 447 ui.status(_('creating changesets\n'))
431 448
432 449 # Merge changesets
433 450
434 451 listsort(log, key=lambda x:(x.comment, x.author, x.branch, x.date))
435 452
436 453 changesets = []
437 454 files = {}
438 455 c = None
439 456 for i, e in enumerate(log):
440 457
441 458 # Check if log entry belongs to the current changeset or not.
442 459 if not (c and
443 460 e.comment == c.comment and
444 461 e.author == c.author and
445 462 e.branch == c.branch and
446 463 ((c.date[0] + c.date[1]) <=
447 464 (e.date[0] + e.date[1]) <=
448 465 (c.date[0] + c.date[1]) + fuzz) and
449 466 e.file not in files):
450 467 c = changeset(comment=e.comment, author=e.author,
451 branch=e.branch, date=e.date, entries=[])
468 branch=e.branch, date=e.date, entries=[],
469 mergepoint=e.mergepoint)
452 470 changesets.append(c)
453 471 files = {}
454 472 if len(changesets) % 100 == 0:
455 473 t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
456 474 ui.status(util.ellipsis(t, 80) + '\n')
457 475
458 476 c.entries.append(e)
459 477 files[e.file] = True
460 478 c.date = e.date # changeset date is date of latest commit in it
461 479
462 480 # Mark synthetic changesets
463 481
464 482 for c in changesets:
465 483 # Synthetic revisions always get their own changeset, because
466 484 # the log message includes the filename. E.g. if you add file3
467 485 # and file4 on a branch, you get four log entries and three
468 486 # changesets:
469 487 # "File file3 was added on branch ..." (synthetic, 1 entry)
470 488 # "File file4 was added on branch ..." (synthetic, 1 entry)
471 489 # "Add file3 and file4 to fix ..." (real, 2 entries)
472 490 # Hence the check for 1 entry here.
473 491 c.synthetic = (len(c.entries) == 1 and c.entries[0].synthetic)
474 492
475 493 # Sort files in each changeset
476 494
477 495 for c in changesets:
478 496 def pathcompare(l, r):
479 497 'Mimic cvsps sorting order'
480 498 l = l.split('/')
481 499 r = r.split('/')
482 500 nl = len(l)
483 501 nr = len(r)
484 502 n = min(nl, nr)
485 503 for i in range(n):
486 504 if i + 1 == nl and nl < nr:
487 505 return -1
488 506 elif i + 1 == nr and nl > nr:
489 507 return +1
490 508 elif l[i] < r[i]:
491 509 return -1
492 510 elif l[i] > r[i]:
493 511 return +1
494 512 return 0
495 513 def entitycompare(l, r):
496 514 return pathcompare(l.file, r.file)
497 515
498 516 c.entries.sort(entitycompare)
499 517
500 518 # Sort changesets by date
501 519
502 520 def cscmp(l, r):
503 521 d = sum(l.date) - sum(r.date)
504 522 if d:
505 523 return d
506 524
507 525 # detect vendor branches and initial commits on a branch
508 526 le = {}
509 527 for e in l.entries:
510 528 le[e.rcs] = e.revision
511 529 re = {}
512 530 for e in r.entries:
513 531 re[e.rcs] = e.revision
514 532
515 533 d = 0
516 534 for e in l.entries:
517 535 if re.get(e.rcs, None) == e.parent:
518 536 assert not d
519 537 d = 1
520 538 break
521 539
522 540 for e in r.entries:
523 541 if le.get(e.rcs, None) == e.parent:
524 542 assert not d
525 543 d = -1
526 544 break
527 545
528 546 return d
529 547
530 548 changesets.sort(cscmp)
531 549
532 550 # Collect tags
533 551
534 552 globaltags = {}
535 553 for c in changesets:
536 554 tags = {}
537 555 for e in c.entries:
538 556 for tag in e.tags:
539 557 # remember which is the latest changeset to have this tag
540 558 globaltags[tag] = c
541 559
542 560 for c in changesets:
543 561 tags = {}
544 562 for e in c.entries:
545 563 for tag in e.tags:
546 564 tags[tag] = True
547 565 # remember tags only if this is the latest changeset to have it
548 566 c.tags = util.sort([tag for tag in tags if globaltags[tag] is c])
549 567
550 568 # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
551 569 # by inserting dummy changesets with two parents, and handle
552 570 # {{mergefrombranch BRANCHNAME}} by setting two parents.
553 571
554 572 if mergeto is None:
555 573 mergeto = r'{{mergetobranch ([-\w]+)}}'
556 574 if mergeto:
557 575 mergeto = re.compile(mergeto)
558 576
559 577 if mergefrom is None:
560 578 mergefrom = r'{{mergefrombranch ([-\w]+)}}'
561 579 if mergefrom:
562 580 mergefrom = re.compile(mergefrom)
563 581
564 582 versions = {} # changeset index where we saw any particular file version
565 583 branches = {} # changeset index where we saw a branch
566 584 n = len(changesets)
567 585 i = 0
568 586 while i<n:
569 587 c = changesets[i]
570 588
571 589 for f in c.entries:
572 590 versions[(f.rcs, f.revision)] = i
573 591
574 592 p = None
575 593 if c.branch in branches:
576 594 p = branches[c.branch]
577 595 else:
578 596 for f in c.entries:
579 597 p = max(p, versions.get((f.rcs, f.parent), None))
580 598
581 599 c.parents = []
582 600 if p is not None:
583 601 p = changesets[p]
584 602
585 603 # Ensure no changeset has a synthetic changeset as a parent.
586 604 while p.synthetic:
587 605 assert len(p.parents) <= 1, \
588 606 _('synthetic changeset cannot have multiple parents')
589 607 if p.parents:
590 608 p = p.parents[0]
591 609 else:
592 610 p = None
593 611 break
594 612
595 613 if p is not None:
596 614 c.parents.append(p)
597 615
616 if c.mergepoint:
617 if c.mergepoint == 'HEAD':
618 c.mergepoint = None
619 c.parents.append(changesets[branches[c.mergepoint]])
620
598 621 if mergefrom:
599 622 m = mergefrom.search(c.comment)
600 623 if m:
601 624 m = m.group(1)
602 625 if m == 'HEAD':
603 626 m = None
604 627 candidate = changesets[branches[m]]
605 628 if m in branches and c.branch != m and not candidate.synthetic:
606 629 c.parents.append(candidate)
607 630
608 631 if mergeto:
609 632 m = mergeto.search(c.comment)
610 633 if m:
611 634 try:
612 635 m = m.group(1)
613 636 if m == 'HEAD':
614 637 m = None
615 638 except:
616 639 m = None # if no group found then merge to HEAD
617 640 if m in branches and c.branch != m:
618 641 # insert empty changeset for merge
619 642 cc = changeset(author=c.author, branch=m, date=c.date,
620 643 comment='convert-repo: CVS merge from branch %s' % c.branch,
621 644 entries=[], tags=[], parents=[changesets[branches[m]], c])
622 645 changesets.insert(i + 1, cc)
623 646 branches[m] = i + 1
624 647
625 648 # adjust our loop counters now we have inserted a new entry
626 649 n += 1
627 650 i += 2
628 651 continue
629 652
630 653 branches[c.branch] = i
631 654 i += 1
632 655
633 656 # Drop synthetic changesets (safe now that we have ensured no other
634 657 # changesets can have them as parents).
635 658 i = 0
636 659 while i < len(changesets):
637 660 if changesets[i].synthetic:
638 661 del changesets[i]
639 662 else:
640 663 i += 1
641 664
642 665 # Number changesets
643 666
644 667 for i, c in enumerate(changesets):
645 668 c.id = i + 1
646 669
647 670 ui.status(_('%d changeset entries\n') % len(changesets))
648 671
649 672 return changesets
650 673
651 674
652 675 def debugcvsps(ui, *args, **opts):
653 676 '''Read CVS rlog for current directory or named path in repository, and
654 677 convert the log to changesets based on matching commit log entries and dates.'''
655 678
656 679 if opts["new_cache"]:
657 680 cache = "write"
658 681 elif opts["update_cache"]:
659 682 cache = "update"
660 683 else:
661 684 cache = None
662 685
663 686 revisions = opts["revisions"]
664 687
665 688 try:
666 689 if args:
667 690 log = []
668 691 for d in args:
669 692 log += createlog(ui, d, root=opts["root"], cache=cache)
670 693 else:
671 694 log = createlog(ui, root=opts["root"], cache=cache)
672 695 except logerror, e:
673 696 ui.write("%r\n"%e)
674 697 return
675 698
676 699 changesets = createchangeset(ui, log, opts["fuzz"])
677 700 del log
678 701
679 702 # Print changesets (optionally filtered)
680 703
681 704 off = len(revisions)
682 705 branches = {} # latest version number in each branch
683 706 ancestors = {} # parent branch
684 707 for cs in changesets:
685 708
686 709 if opts["ancestors"]:
687 710 if cs.branch not in branches and cs.parents and cs.parents[0].id:
688 711 ancestors[cs.branch] = changesets[cs.parents[0].id-1].branch, cs.parents[0].id
689 712 branches[cs.branch] = cs.id
690 713
691 714 # limit by branches
692 715 if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
693 716 continue
694 717
695 718 if not off:
696 719 # Note: trailing spaces on several lines here are needed to have
697 720 # bug-for-bug compatibility with cvsps.
698 721 ui.write('---------------------\n')
699 722 ui.write('PatchSet %d \n' % cs.id)
700 723 ui.write('Date: %s\n' % util.datestr(cs.date, '%Y/%m/%d %H:%M:%S %1%2'))
701 724 ui.write('Author: %s\n' % cs.author)
702 725 ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
703 726 ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags)>1],
704 727 ','.join(cs.tags) or '(none)'))
705 728 if opts["parents"] and cs.parents:
706 729 if len(cs.parents)>1:
707 730 ui.write('Parents: %s\n' % (','.join([str(p.id) for p in cs.parents])))
708 731 else:
709 732 ui.write('Parent: %d\n' % cs.parents[0].id)
710 733
711 734 if opts["ancestors"]:
712 735 b = cs.branch
713 736 r = []
714 737 while b:
715 738 b, c = ancestors[b]
716 739 r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
717 740 if r:
718 741 ui.write('Ancestors: %s\n' % (','.join(r)))
719 742
720 743 ui.write('Log:\n')
721 744 ui.write('%s\n\n' % cs.comment)
722 745 ui.write('Members: \n')
723 746 for f in cs.entries:
724 747 fn = f.file
725 748 if fn.startswith(opts["prefix"]):
726 749 fn = fn[len(opts["prefix"]):]
727 750 ui.write('\t%s:%s->%s%s \n' % (fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
728 751 '.'.join([str(x) for x in f.revision]), ['', '(DEAD)'][f.dead]))
729 752 ui.write('\n')
730 753
731 754 # have we seen the start tag?
732 755 if revisions and off:
733 756 if revisions[0] == str(cs.id) or \
734 757 revisions[0] in cs.tags:
735 758 off = False
736 759
737 760 # see if we reached the end tag
738 761 if len(revisions)>1 and not off:
739 762 if revisions[1] == str(cs.id) or \
740 763 revisions[1] in cs.tags:
741 764 break
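A non-HEAD mergepoint on a changeset is turned into a second parent by looking up the most recent changeset seen on that branch, which is what yields "Parents: 1,5" and "Parents: 4,3" in the expected test output above. A minimal standalone sketch of that wiring (the Cset class and the literal branch names are hypothetical stand-ins for the real changeset objects, and the first parents from the file graph are omitted):

class Cset(object):
    def __init__(self, id, branch, mergepoint=None):
        self.id, self.branch, self.mergepoint = id, branch, mergepoint
        self.parents = []

csets = [Cset(1, 'HEAD'), Cset(2, 'MYBRANCH1'), Cset(3, 'MYBRANCH1'),
         Cset(4, 'MYBRANCH1_1'), Cset(5, 'MYBRANCH1_2'),
         Cset(6, 'HEAD', mergepoint='MYBRANCH1_2'),
         Cset(7, 'MYBRANCH1_1', mergepoint='MYBRANCH1')]

branches = {}                   # index of the latest changeset seen per branch
for i, c in enumerate(csets):
    if c.mergepoint and c.mergepoint != 'HEAD':
        c.parents.append(csets[branches[c.mergepoint]])
    branches[c.branch] = i

print([(c.id, [p.id for p in c.parents]) for c in csets if c.parents])
# [(6, [5]), (7, [3])] -> second parents of PatchSets 6 and 7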