##// END OF EJS Templates
merge with crew
Benoit Boissinot -
r7100:baf12d52 merge default
parent child Browse files
Show More
@@ -1,548 +1,587 b''
1 #
1 #
2 # Mercurial built-in replacement for cvsps.
2 # Mercurial built-in replacement for cvsps.
3 #
3 #
4 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
4 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
5 #
5 #
6 # This software may be used and distributed according to the terms
6 # This software may be used and distributed according to the terms
7 # of the GNU General Public License, incorporated herein by reference.
7 # of the GNU General Public License, incorporated herein by reference.
8
8
9 import os
9 import os
10 import re
10 import re
11 import sys
11 import sys
12 import cPickle as pickle
12 import cPickle as pickle
13 from mercurial import util
13 from mercurial import util
14 from mercurial.i18n import _
14 from mercurial.i18n import _
15
15
def listsort(list, key):
    """Sort *list* in place by *key*, tolerating Python 2.3.

    Python 2.3's list.sort() does not accept a key= argument; when that
    raises TypeError, fall back to an equivalent comparison function
    built from the key.
    """
    try:
        list.sort(key=key)
    except TypeError:
        # Emulate key-based sorting on Python 2.3.
        def compare(l, r):
            return cmp(key(l), key(r))
        list.sort(compare)
22
22
class logentry(object):
    '''A single CVS file revision as parsed from "cvs (r)log" output.

    Attributes (all supplied as keyword arguments to the constructor):
    .author   - author name as CVS knows it
    .branch   - name of branch this revision is on
    .branches - revision tuple of branches starting at this revision
    .comment  - commit message
    .date     - the commit date as a (time, tz) tuple
    .dead     - true if file revision is dead
    .file     - name of file
    .lines    - a tuple (+lines, -lines) or None
    .parent   - previous revision of this entry
    .rcs      - name of file as returned from CVS
    .revision - revision number as tuple
    .tags     - list of tags on the file
    '''
    def __init__(self, **entries):
        # Attribute-bag style: store every keyword argument directly
        # as an instance attribute.
        self.__dict__.update(entries)
40
40
class logerror(Exception):
    '''Raised when the CVS log cannot be read or is inconsistent.'''
    pass
43
43
def getrepopath(cvspath):
    """Return the repository path from a CVS path.

    >>> getrepopath('/foo/bar')
    '/foo/bar'
    >>> getrepopath('c:/foo/bar')
    'c:/foo/bar'
    >>> getrepopath(':pserver:10/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:10c:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:c:/foo/bar')
    'c:/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
    'c:/foo/bar'
    """
    # Per the CVS manual, CVS paths have the shape:
    #   [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
    # Windows absolute paths start with a drive letter like 'c:', which
    # complicates the parse.  Assume drive letters are a single character
    # and every other CVS component before the repository path is at least
    # two characters long, and use that to disambiguate.
    parts = cvspath.split(':')
    if len(parts) == 1:
        return parts[0]
    penultimate, last = parts[-2], parts[-1]
    # Ambiguity: a port number could immediately precede a drive letter.
    # Assume that never happens, treat the previous component as a CVS
    # component, and strip any leading port digits from the path.
    if len(penultimate) > 1:
        return last.lstrip('0123456789')
    # Single-character component: a Windows drive letter; rejoin it.
    return penultimate + ':' + last
82
def createlog(ui, directory=None, root="", rlog=True, cache=None):
    '''Collect the CVS rlog.

    Runs "cvs rlog" (or "cvs log" when rlog is false) and parses its
    output with a state machine, returning a list of logentry objects.

    directory - directory to log; None means the current CVS sandbox
    root      - CVSROOT; falls back to the sandbox Root file or $CVSROOT
    rlog      - use "cvs rlog" (server-side) instead of "cvs log"
    cache     - if set, cache parsed entries under ~/.hg.cvsps;
                'update' additionally reads the existing cache and only
                fetches log entries newer than its last commit date
    '''

    # Because we store many duplicate commit log messages, reusing strings
    # saves a lot of memory and pickle storage space.
    _scache = {}
    def scache(s):
        "return a shared version of a string"
        return _scache.setdefault(s, s)

    ui.status(_('collecting CVS rlog\n'))

    log = []      # list of logentry objects containing the CVS state

    # patterns to match in CVS (r)log output, by state of use
    re_00 = re.compile('RCS file: (.+)$')
    re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
    re_02 = re.compile('cvs (r?log|server): (.+)\n$')
    re_03 = re.compile("(Cannot access.+CVSROOT)|(can't create temporary directory.+)$")
    re_10 = re.compile('Working file: (.+)$')
    re_20 = re.compile('symbolic names:')
    re_30 = re.compile('\t(.+): ([\\d.]+)$')
    re_31 = re.compile('----------------------------$')
    re_32 = re.compile('=============================================================================$')
    re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
    re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?')
    re_70 = re.compile('branches: (.+);$')

    prefix = ''   # leading path to strip of what we get from CVS

    if directory is None:
        # Current working directory

        # Get the real directory in the repository
        try:
            prefix = file(os.path.join('CVS','Repository')).read().strip()
            if prefix == ".":
                prefix = ""
            directory = prefix
        except IOError:
            raise logerror('Not a CVS sandbox')

        if prefix and not prefix.endswith(os.sep):
            prefix += os.sep

        # Use the Root file in the sandbox, if it exists
        try:
            root = file(os.path.join('CVS','Root')).read().strip()
        except IOError:
            pass

    if not root:
        root = os.environ.get('CVSROOT', '')

    # read log cache if one exists
    oldlog = []
    date = None

    if cache:
        cachedir = os.path.expanduser('~/.hg.cvsps')
        if not os.path.exists(cachedir):
            os.mkdir(cachedir)

        # The cvsps cache pickle needs a uniquified name, based on the
        # repository location. The address may have all sort of nasties
        # in it, slashes, colons and such. So here we take just the
        # alphanumerics, concatenated in a way that does not mix up the
        # various components, so that
        #    :pserver:user@server:/path
        # and
        #    /pserver/user/server/path
        # are mapped to different cache file names.
        cachefile = root.split(":") + [directory, "cache"]
        cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
        cachefile = os.path.join(cachedir,
                                 '.'.join([s for s in cachefile if s]))

        if cache == 'update':
            try:
                ui.note(_('reading cvs log cache %s\n') % cachefile)
                oldlog = pickle.load(file(cachefile))
                ui.note(_('cache has %d log entries\n') % len(oldlog))
            except Exception, e:
                # best-effort: a missing or corrupt cache just means a
                # full (uncached) log run
                ui.note(_('error reading cache: %r\n') % e)

            if oldlog:
                date = oldlog[-1].date    # last commit date as a (time,tz) tuple
                date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')

    # build the CVS commandline
    cmd = ['cvs', '-q']
    if root:
        cmd.append('-d%s' % root)
        p = util.normpath(getrepopath(root))
        if not p.endswith('/'):
            p += '/'
        # strip both the repository path and any sandbox prefix from
        # filenames reported by CVS
        prefix = p + util.normpath(prefix)
    cmd.append(['log', 'rlog'][rlog])
    if date:
        # no space between option and date string
        cmd.append('-d>%s' % date)
    cmd.append(directory)

    # state machine begins here
    tags = {}     # dictionary of revisions on current file with their tags
    state = 0
    store = False # set when a new record can be appended

    cmd = [util.shellquote(arg) for arg in cmd]
    ui.note(_("running %s\n") % (' '.join(cmd)))
    ui.debug(_("prefix=%r directory=%r root=%r\n") % (prefix, directory, root))

    for line in util.popen(' '.join(cmd)):
        if line.endswith('\n'):
            line = line[:-1]
        #ui.debug('state=%d line=%r\n' % (state, line))

        if state == 0:
            # initial state, consume input until we see 'RCS file'
            match = re_00.match(line)
            if match:
                rcs = match.group(1)
                tags = {}
                if rlog:
                    # rlog reports RCS paths (",v" suffix); normalize and
                    # strip the repository prefix and any Attic component
                    filename = util.normpath(rcs[:-2])
                    if filename.startswith(prefix):
                        filename = filename[len(prefix):]
                    if filename.startswith('/'):
                        filename = filename[1:]
                    if filename.startswith('Attic/'):
                        filename = filename[6:]
                    else:
                        filename = filename.replace('/Attic/', '/')
                    state = 2
                    continue
                state = 1
                continue
            match = re_01.match(line)
            if match:
                raise Exception(match.group(1))
            match = re_02.match(line)
            if match:
                raise Exception(match.group(2))
            if re_03.match(line):
                raise Exception(line)

        elif state == 1:
            # expect 'Working file' (only when using log instead of rlog)
            match = re_10.match(line)
            assert match, _('RCS file must be followed by working file')
            filename = util.normpath(match.group(1))
            state = 2

        elif state == 2:
            # expect 'symbolic names'
            if re_20.match(line):
                state = 3

        elif state == 3:
            # read the symbolic names and store as tags
            match = re_30.match(line)
            if match:
                rev = [int(x) for x in match.group(2).split('.')]

                # Convert magic branch number to an odd-numbered one
                revn = len(rev)
                if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
                    rev = rev[:-2] + rev[-1:]
                rev = tuple(rev)

                if rev not in tags:
                    tags[rev] = []
                tags[rev].append(match.group(1))

            elif re_31.match(line):
                state = 5
            elif re_32.match(line):
                state = 0

        elif state == 4:
            # expecting '------' separator before first revision
            if re_31.match(line):
                state = 5
            else:
                assert not re_32.match(line), _('Must have at least some revisions')

        elif state == 5:
            # expecting revision number and possibly (ignored) lock indication
            # we create the logentry here from values stored in states 0 to 4,
            # as this state is re-entered for subsequent revisions of a file.
            match = re_50.match(line)
            assert match, _('expected revision number')
            e = logentry(rcs=scache(rcs), file=scache(filename),
                    revision=tuple([int(x) for x in match.group(1).split('.')]),
                    branches=[], parent=None)
            state = 6

        elif state == 6:
            # expecting date, author, state, lines changed
            match = re_60.match(line)
            assert match, _('revision must be followed by date line')
            d = match.group(1)
            if d[2] == '/':
                # Y2K: two-digit year in the yy/mm/dd form
                d = '19' + d

            if len(d.split()) != 3:
                # cvs log dates always in GMT
                d = d + ' UTC'
            e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S'])
            e.author = scache(match.group(2))
            e.dead = match.group(3).lower() == 'dead'

            # groups 5/6 are the optional "+N"/"-M" of "lines: +N -M;"
            if match.group(5):
                if match.group(6):
                    e.lines = (int(match.group(5)), int(match.group(6)))
                else:
                    e.lines = (int(match.group(5)), 0)
            elif match.group(6):
                e.lines = (0, int(match.group(6)))
            else:
                e.lines = None
            e.comment = []
            state = 7

        elif state == 7:
            # read the revision numbers of branches that start at this revision
            # or store the commit log message otherwise
            m = re_70.match(line)
            if m:
                e.branches = [tuple([int(y) for y in x.strip().split('.')])
                              for x in m.group(1).split(';')]
                state = 8
            elif re_31.match(line):
                state = 5
                store = True
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        elif state == 8:
            # store commit log message
            if re_31.match(line):
                state = 5
                store = True
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        if store:
            # clean up the results and save in the log.
            store = False
            e.tags = util.sort([scache(x) for x in tags.get(e.revision, [])])
            e.comment = scache('\n'.join(e.comment))

            revn = len(e.revision)
            if revn > 3 and (revn % 2) == 0:
                # even-length revision with >3 components: branch revision;
                # its branch name is the tag on the branch number
                e.branch = tags.get(e.revision[:-1], [None])[0]
            else:
                e.branch = None

            log.append(e)

            if len(log) % 100 == 0:
                ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')

    listsort(log, key=lambda x:(x.rcs, x.revision))

    # find parent revisions of individual files
    versions = {}
    for e in log:
        branch = e.revision[:-1]
        p = versions.get((e.rcs, branch), None)
        if p is None:
            p = e.revision[:-2]
        e.parent = p
        versions[(e.rcs, branch)] = e.revision

    # update the log cache
    if cache:
        if log:
            # join up the old and new logs
            listsort(log, key=lambda x:x.date)

            if oldlog and oldlog[-1].date >= log[0].date:
                raise logerror('Log cache overlaps with new log entries,'
                               ' re-run without cache.')

            log = oldlog + log

            # write the new cachefile
            ui.note(_('writing cvs log cache %s\n') % cachefile)
            pickle.dump(log, file(cachefile, 'w'))
        else:
            log = oldlog

    ui.status(_('%d log entries\n') % len(log))

    return log
347
386
348
387
class changeset(object):
    '''A group of logentry objects that form one converted changeset.

    Attributes (all supplied as keyword arguments to the constructor):
    .author  - author name as CVS knows it
    .branch  - name of branch this changeset is on, or None
    .comment - commit message
    .date    - the commit date as a (time, tz) tuple
    .entries - list of logentry objects in this changeset
    .parents - list of one or two parent changesets
    .tags    - list of tags on this changeset
    '''
    def __init__(self, **entries):
        # Attribute-bag style: store every keyword argument directly
        # as an instance attribute.
        self.__dict__.update(entries)
361
400
def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
    '''Convert log into changesets.

    Groups logentry objects that share comment, author and branch, and
    whose dates lie within *fuzz* seconds of each other, into changeset
    objects; sorts them, collects tags, and links parents (including
    synthetic merge changesets driven by {{mergetobranch NAME}} /
    {{mergefrombranch NAME}} markers in commit messages).  Returns the
    ordered list of changesets, each numbered via its .id attribute.
    '''

    ui.status(_('creating changesets\n'))

    # Merge changesets

    listsort(log, key=lambda x:(x.comment, x.author, x.branch, x.date))

    changesets = []
    files = {}
    c = None
    for i, e in enumerate(log):

        # Check if log entry belongs to the current changeset or not.
        # A file may appear only once per changeset; a second revision of
        # the same file forces a new changeset even if everything matches.
        if not (c and
                  e.comment == c.comment and
                  e.author == c.author and
                  e.branch == c.branch and
                  ((c.date[0] + c.date[1]) <=
                   (e.date[0] + e.date[1]) <=
                   (c.date[0] + c.date[1]) + fuzz) and
                  e.file not in files):
            c = changeset(comment=e.comment, author=e.author,
                          branch=e.branch, date=e.date, entries=[])
            changesets.append(c)
            files = {}
            if len(changesets) % 100 == 0:
                t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
                ui.status(util.ellipsis(t, 80) + '\n')

        c.entries.append(e)
        files[e.file] = True
        c.date = e.date       # changeset date is date of latest commit in it

    # Sort files in each changeset

    for c in changesets:
        def pathcompare(l, r):
            'Mimic cvsps sorting order'
            l = l.split('/')
            r = r.split('/')
            nl = len(l)
            nr = len(r)
            n = min(nl, nr)
            for i in range(n):
                if i + 1 == nl and nl < nr:
                    return -1
                elif i + 1 == nr and nl > nr:
                    return +1
                elif l[i] < r[i]:
                    return -1
                elif l[i] > r[i]:
                    return +1
            return 0
        def entitycompare(l, r):
            return pathcompare(l.file, r.file)

        c.entries.sort(entitycompare)

    # Sort changesets by date

    def cscmp(l, r):
        # primary order: total commit time (time + tz offset)
        d = sum(l.date) - sum(r.date)
        if d:
            return d

        # detect vendor branches and initial commits on a branch
        le = {}
        for e in l.entries:
            le[e.rcs] = e.revision
        re = {}
        for e in r.entries:
            re[e.rcs] = e.revision

        d = 0
        # if r contains the parent revision of a file in l, l comes after r
        for e in l.entries:
            if re.get(e.rcs, None) == e.parent:
                assert not d
                d = 1
                break

        for e in r.entries:
            if le.get(e.rcs, None) == e.parent:
                assert not d
                d = -1
                break

        return d

    changesets.sort(cscmp)

    # Collect tags

    globaltags = {}
    for c in changesets:
        tags = {}
        for e in c.entries:
            for tag in e.tags:
                # remember which is the latest changeset to have this tag
                globaltags[tag] = c

    for c in changesets:
        tags = {}
        for e in c.entries:
            for tag in e.tags:
                tags[tag] = True
        # remember tags only if this is the latest changeset to have it
        c.tags = util.sort([tag for tag in tags if globaltags[tag] is c])

    # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
    # by inserting dummy changesets with two parents, and handle
    # {{mergefrombranch BRANCHNAME}} by setting two parents.

    if mergeto is None:
        mergeto = r'{{mergetobranch ([-\w]+)}}'
    if mergeto:
        mergeto = re.compile(mergeto)

    if mergefrom is None:
        mergefrom = r'{{mergefrombranch ([-\w]+)}}'
    if mergefrom:
        mergefrom = re.compile(mergefrom)

    versions = {}    # changeset index where we saw any particular file version
    branches = {}    # changeset index where we saw a branch
    n = len(changesets)
    i = 0
    while i<n:
        c = changesets[i]

        for f in c.entries:
            versions[(f.rcs, f.revision)] = i

        p = None
        if c.branch in branches:
            p = branches[c.branch]
        else:
            # first changeset on this branch: parent is the most recent
            # changeset containing any file-parent revision
            for f in c.entries:
                p = max(p, versions.get((f.rcs, f.parent), None))

        c.parents = []
        if p is not None:
            c.parents.append(changesets[p])

        if mergefrom:
            m = mergefrom.search(c.comment)
            if m:
                m = m.group(1)
                if m == 'HEAD':
                    m = None
                if m in branches and c.branch != m:
                    c.parents.append(changesets[branches[m]])

        if mergeto:
            m = mergeto.search(c.comment)
            if m:
                try:
                    m = m.group(1)
                    if m == 'HEAD':
                        m = None
                except:
                    m = None # if no group found then merge to HEAD
                if m in branches and c.branch != m:
                    # insert empty changeset for merge
                    cc = changeset(author=c.author, branch=m, date=c.date,
                            comment='convert-repo: CVS merge from branch %s' % c.branch,
                            entries=[], tags=[], parents=[changesets[branches[m]], c])
                    changesets.insert(i + 1, cc)
                    branches[m] = i + 1

                    # adjust our loop counters now we have inserted a new entry
                    n += 1
                    i += 2
                    continue

        branches[c.branch] = i
        i += 1

    # Number changesets

    for i, c in enumerate(changesets):
        c.id = i + 1

    ui.status(_('%d changeset entries\n') % len(changesets))

    return changesets
@@ -1,16 +1,19 b''
# Run the doctests embedded in selected Mercurial modules.
# this is hack to make sure no escape characters are inserted into the output
import os;
if 'TERM' in os.environ:
    del os.environ['TERM']
import doctest

import mercurial.changelog
# test doctest from changelog

doctest.testmod(mercurial.changelog)

import mercurial.httprepo
doctest.testmod(mercurial.httprepo)

import mercurial.util
doctest.testmod(mercurial.util)

# also exercise the doctests in the convert extension's cvsps module
# (e.g. getrepopath)
import hgext.convert.cvsps
doctest.testmod(hgext.convert.cvsps)
General Comments 0
You need to be logged in to leave comments. Login now