cvsps: fix computation of parent revisions when log caching is on...

Author: Emanuele Giaquinta
Changeset: r26593:c60dfcc0 (branch: default)
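The heart of the change is in createlog(): a new rcsmap dictionary records, for every file seen in the fresh "cvs rlog" run, the mapping from its Attic-normalized RCS path to the path actually reported, and the versions table is now seeded from the cached oldlog before parents of the new entries are computed. Below is a minimal, self-contained sketch of that behaviour; the E class, the file paths and the revision numbers are invented stand-ins, not the Mercurial code itself.

# Sketch only: shows how seeding `versions` from the cached log changes the
# parent computed for the first new revision of a file.
class E(object):
    def __init__(self, rcs, revision):
        self.rcs = rcs
        self.revision = revision
        self.parent = None

# Cached entries from a previous run (the on-disk log cache) and fresh
# entries returned by the new, date-limited rlog call.
oldlog = [E('/repo/a,v', (1, 1)), E('/repo/a,v', (1, 2))]
newlog = [E('/repo/a,v', (1, 3))]

# rcsmap is filled while parsing the new rlog output; it maps the
# Attic-normalized RCS path to the path actually seen in this run.
rcsmap = {e.rcs.replace('/Attic/', '/'): e.rcs for e in newlog}

versions = {}
# The fix: walk the cached entries first so that (rcs, branch) -> revision
# already reflects the cached history when parents of new entries are set.
for e in sorted(oldlog, key=lambda x: (x.rcs, x.revision)):
    rcs = e.rcs.replace('/Attic/', '/')
    if rcs in rcsmap:
        e.rcs = rcsmap[rcs]
    versions[(e.rcs, e.revision[:-1])] = e.revision

for e in newlog:
    branch = e.revision[:-1]
    p = versions.get((e.rcs, branch), None)
    if p is None:
        p = e.revision[:-2]   # fallback used when nothing is known yet
    e.parent = p
    versions[(e.rcs, branch)] = e.revision

# With the seeding, 1.3's parent is 1.2 (taken from the cache); without it,
# the fallback would have yielded the empty tuple () for this trunk revision.
print(newlog[0].parent)   # (1, 2)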
@@ -1,904 +1,914 @@
 # Mercurial built-in replacement for cvsps.
 #
 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 import os
 import re
 import cPickle as pickle
 from mercurial.i18n import _
 from mercurial import hook
 from mercurial import util
 
 class logentry(object):
     '''Class logentry has the following attributes:
         .author - author name as CVS knows it
         .branch - name of branch this revision is on
         .branches - revision tuple of branches starting at this revision
         .comment - commit message
         .commitid - CVS commitid or None
         .date - the commit date as a (time, tz) tuple
         .dead - true if file revision is dead
         .file - Name of file
         .lines - a tuple (+lines, -lines) or None
         .parent - Previous revision of this entry
         .rcs - name of file as returned from CVS
         .revision - revision number as tuple
         .tags - list of tags on the file
         .synthetic - is this a synthetic "file ... added on ..." revision?
         .mergepoint - the branch that has been merged from (if present in
                       rlog output) or None
         .branchpoints - the branches that start at the current entry or empty
     '''
     def __init__(self, **entries):
         self.synthetic = False
         self.__dict__.update(entries)
 
     def __repr__(self):
         items = ("%s=%r"%(k, self.__dict__[k]) for k in sorted(self.__dict__))
         return "%s(%s)"%(type(self).__name__, ", ".join(items))
 
 class logerror(Exception):
     pass
 
 def getrepopath(cvspath):
     """Return the repository path from a CVS path.
 
     >>> getrepopath('/foo/bar')
     '/foo/bar'
     >>> getrepopath('c:/foo/bar')
     '/foo/bar'
     >>> getrepopath(':pserver:10/foo/bar')
     '/foo/bar'
     >>> getrepopath(':pserver:10c:/foo/bar')
     '/foo/bar'
     >>> getrepopath(':pserver:/foo/bar')
     '/foo/bar'
     >>> getrepopath(':pserver:c:/foo/bar')
     '/foo/bar'
     >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
     '/foo/bar'
     >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
     '/foo/bar'
     >>> getrepopath('user@server/path/to/repository')
     '/path/to/repository'
     """
     # According to CVS manual, CVS paths are expressed like:
     # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
     #
     # CVSpath is splitted into parts and then position of the first occurrence
     # of the '/' char after the '@' is located. The solution is the rest of the
     # string after that '/' sign including it
 
     parts = cvspath.split(':')
     atposition = parts[-1].find('@')
     start = 0
 
     if atposition != -1:
         start = atposition
 
     repopath = parts[-1][parts[-1].find('/', start):]
     return repopath
 
 def createlog(ui, directory=None, root="", rlog=True, cache=None):
     '''Collect the CVS rlog'''
 
     # Because we store many duplicate commit log messages, reusing strings
     # saves a lot of memory and pickle storage space.
     _scache = {}
     def scache(s):
         "return a shared version of a string"
         return _scache.setdefault(s, s)
 
     ui.status(_('collecting CVS rlog\n'))
 
     log = [] # list of logentry objects containing the CVS state
 
     # patterns to match in CVS (r)log output, by state of use
     re_00 = re.compile('RCS file: (.+)$')
     re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
     re_02 = re.compile('cvs (r?log|server): (.+)\n$')
     re_03 = re.compile("(Cannot access.+CVSROOT)|"
                        "(can't create temporary directory.+)$")
     re_10 = re.compile('Working file: (.+)$')
     re_20 = re.compile('symbolic names:')
     re_30 = re.compile('\t(.+): ([\\d.]+)$')
     re_31 = re.compile('----------------------------$')
     re_32 = re.compile('======================================='
                        '======================================$')
     re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
     re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
                        r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
                        r'(\s+commitid:\s+([^;]+);)?'
                        r'(.*mergepoint:\s+([^;]+);)?')
     re_70 = re.compile('branches: (.+);$')
 
     file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')
 
     prefix = '' # leading path to strip of what we get from CVS
 
     if directory is None:
         # Current working directory
 
         # Get the real directory in the repository
         try:
             prefix = open(os.path.join('CVS','Repository')).read().strip()
             directory = prefix
             if prefix == ".":
                 prefix = ""
         except IOError:
             raise logerror(_('not a CVS sandbox'))
 
         if prefix and not prefix.endswith(os.sep):
             prefix += os.sep
 
         # Use the Root file in the sandbox, if it exists
         try:
             root = open(os.path.join('CVS','Root')).read().strip()
         except IOError:
             pass
 
     if not root:
         root = os.environ.get('CVSROOT', '')
 
     # read log cache if one exists
     oldlog = []
     date = None
 
     if cache:
         cachedir = os.path.expanduser('~/.hg.cvsps')
         if not os.path.exists(cachedir):
             os.mkdir(cachedir)
 
         # The cvsps cache pickle needs a uniquified name, based on the
         # repository location. The address may have all sort of nasties
         # in it, slashes, colons and such. So here we take just the
         # alphanumeric characters, concatenated in a way that does not
         # mix up the various components, so that
         # :pserver:user@server:/path
         # and
         # /pserver/user/server/path
         # are mapped to different cache file names.
         cachefile = root.split(":") + [directory, "cache"]
         cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
         cachefile = os.path.join(cachedir,
                                  '.'.join([s for s in cachefile if s]))
 
     if cache == 'update':
         try:
             ui.note(_('reading cvs log cache %s\n') % cachefile)
             oldlog = pickle.load(open(cachefile))
             for e in oldlog:
                 if not (util.safehasattr(e, 'branchpoints') and
                         util.safehasattr(e, 'commitid') and
                         util.safehasattr(e, 'mergepoint')):
                     ui.status(_('ignoring old cache\n'))
                     oldlog = []
                     break
 
             ui.note(_('cache has %d log entries\n') % len(oldlog))
         except Exception as e:
             ui.note(_('error reading cache: %r\n') % e)
 
         if oldlog:
             date = oldlog[-1].date # last commit date as a (time,tz) tuple
             date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
 
     # build the CVS commandline
     cmd = ['cvs', '-q']
     if root:
         cmd.append('-d%s' % root)
         p = util.normpath(getrepopath(root))
         if not p.endswith('/'):
             p += '/'
         if prefix:
             # looks like normpath replaces "" by "."
             prefix = p + util.normpath(prefix)
         else:
             prefix = p
     cmd.append(['log', 'rlog'][rlog])
     if date:
         # no space between option and date string
         cmd.append('-d>%s' % date)
     cmd.append(directory)
 
     # state machine begins here
     tags = {} # dictionary of revisions on current file with their tags
     branchmap = {} # mapping between branch names and revision numbers
+    rcsmap = {}
     state = 0
     store = False # set when a new record can be appended
 
     cmd = [util.shellquote(arg) for arg in cmd]
     ui.note(_("running %s\n") % (' '.join(cmd)))
     ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root))
 
     pfp = util.popen(' '.join(cmd))
     peek = pfp.readline()
     while True:
         line = peek
         if line == '':
             break
         peek = pfp.readline()
         if line.endswith('\n'):
             line = line[:-1]
         #ui.debug('state=%d line=%r\n' % (state, line))
 
         if state == 0:
             # initial state, consume input until we see 'RCS file'
             match = re_00.match(line)
             if match:
                 rcs = match.group(1)
                 tags = {}
                 if rlog:
                     filename = util.normpath(rcs[:-2])
                     if filename.startswith(prefix):
                         filename = filename[len(prefix):]
                     if filename.startswith('/'):
                         filename = filename[1:]
                     if filename.startswith('Attic/'):
                         filename = filename[6:]
                     else:
                         filename = filename.replace('/Attic/', '/')
                     state = 2
                     continue
                 state = 1
                 continue
             match = re_01.match(line)
             if match:
                 raise logerror(match.group(1))
             match = re_02.match(line)
             if match:
                 raise logerror(match.group(2))
             if re_03.match(line):
                 raise logerror(line)
 
         elif state == 1:
             # expect 'Working file' (only when using log instead of rlog)
             match = re_10.match(line)
             assert match, _('RCS file must be followed by working file')
             filename = util.normpath(match.group(1))
             state = 2
 
         elif state == 2:
             # expect 'symbolic names'
             if re_20.match(line):
                 branchmap = {}
                 state = 3
 
         elif state == 3:
             # read the symbolic names and store as tags
             match = re_30.match(line)
             if match:
                 rev = [int(x) for x in match.group(2).split('.')]
 
                 # Convert magic branch number to an odd-numbered one
                 revn = len(rev)
                 if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
                     rev = rev[:-2] + rev[-1:]
                 rev = tuple(rev)
 
                 if rev not in tags:
                     tags[rev] = []
                 tags[rev].append(match.group(1))
                 branchmap[match.group(1)] = match.group(2)
 
             elif re_31.match(line):
                 state = 5
             elif re_32.match(line):
                 state = 0
 
         elif state == 4:
             # expecting '------' separator before first revision
             if re_31.match(line):
                 state = 5
             else:
                 assert not re_32.match(line), _('must have at least '
                                                 'some revisions')
 
         elif state == 5:
             # expecting revision number and possibly (ignored) lock indication
             # we create the logentry here from values stored in states 0 to 4,
             # as this state is re-entered for subsequent revisions of a file.
             match = re_50.match(line)
             assert match, _('expected revision number')
             e = logentry(rcs=scache(rcs),
                          file=scache(filename),
                          revision=tuple([int(x) for x in
                                          match.group(1).split('.')]),
                          branches=[],
                          parent=None,
                          commitid=None,
                          mergepoint=None,
                          branchpoints=set())
 
             state = 6
 
         elif state == 6:
             # expecting date, author, state, lines changed
             match = re_60.match(line)
             assert match, _('revision must be followed by date line')
             d = match.group(1)
             if d[2] == '/':
                 # Y2K
                 d = '19' + d
 
             if len(d.split()) != 3:
                 # cvs log dates always in GMT
                 d = d + ' UTC'
             e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
                                         '%Y/%m/%d %H:%M:%S',
                                         '%Y-%m-%d %H:%M:%S'])
             e.author = scache(match.group(2))
             e.dead = match.group(3).lower() == 'dead'
 
             if match.group(5):
                 if match.group(6):
                     e.lines = (int(match.group(5)), int(match.group(6)))
                 else:
                     e.lines = (int(match.group(5)), 0)
             elif match.group(6):
                 e.lines = (0, int(match.group(6)))
             else:
                 e.lines = None
 
             if match.group(7): # cvs 1.12 commitid
                 e.commitid = match.group(8)
 
             if match.group(9): # cvsnt mergepoint
                 myrev = match.group(10).split('.')
                 if len(myrev) == 2: # head
                     e.mergepoint = 'HEAD'
                 else:
                     myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
                     branches = [b for b in branchmap if branchmap[b] == myrev]
                     assert len(branches) == 1, ('unknown branch: %s'
                                                 % e.mergepoint)
                     e.mergepoint = branches[0]
 
             e.comment = []
             state = 7
 
         elif state == 7:
             # read the revision numbers of branches that start at this revision
             # or store the commit log message otherwise
             m = re_70.match(line)
             if m:
                 e.branches = [tuple([int(y) for y in x.strip().split('.')])
                               for x in m.group(1).split(';')]
                 state = 8
             elif re_31.match(line) and re_50.match(peek):
                 state = 5
                 store = True
             elif re_32.match(line):
                 state = 0
                 store = True
             else:
                 e.comment.append(line)
 
         elif state == 8:
             # store commit log message
             if re_31.match(line):
                 cpeek = peek
                 if cpeek.endswith('\n'):
                     cpeek = cpeek[:-1]
                 if re_50.match(cpeek):
                     state = 5
                     store = True
                 else:
                     e.comment.append(line)
             elif re_32.match(line):
                 state = 0
                 store = True
             else:
                 e.comment.append(line)
 
         # When a file is added on a branch B1, CVS creates a synthetic
         # dead trunk revision 1.1 so that the branch has a root.
         # Likewise, if you merge such a file to a later branch B2 (one
         # that already existed when the file was added on B1), CVS
         # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
         # these revisions now, but mark them synthetic so
         # createchangeset() can take care of them.
         if (store and
               e.dead and
               e.revision[-1] == 1 and # 1.1 or 1.1.x.1
               len(e.comment) == 1 and
               file_added_re.match(e.comment[0])):
             ui.debug('found synthetic revision in %s: %r\n'
                      % (e.rcs, e.comment[0]))
             e.synthetic = True
 
         if store:
             # clean up the results and save in the log.
             store = False
             e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
             e.comment = scache('\n'.join(e.comment))
 
             revn = len(e.revision)
             if revn > 3 and (revn % 2) == 0:
                 e.branch = tags.get(e.revision[:-1], [None])[0]
             else:
                 e.branch = None
 
             # find the branches starting from this revision
             branchpoints = set()
             for branch, revision in branchmap.iteritems():
                 revparts = tuple([int(i) for i in revision.split('.')])
                 if len(revparts) < 2: # bad tags
                     continue
                 if revparts[-2] == 0 and revparts[-1] % 2 == 0:
                     # normal branch
                     if revparts[:-2] == e.revision:
                         branchpoints.add(branch)
                 elif revparts == (1, 1, 1): # vendor branch
                     if revparts in e.branches:
                         branchpoints.add(branch)
             e.branchpoints = branchpoints
 
             log.append(e)
 
+            rcsmap[e.rcs.replace('/Attic/', '/')] = e.rcs
+
             if len(log) % 100 == 0:
                 ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')
 
     log.sort(key=lambda x: (x.rcs, x.revision))
 
     # find parent revisions of individual files
     versions = {}
+    for e in sorted(oldlog, key=lambda x: (x.rcs, x.revision)):
+        rcs = e.rcs.replace('/Attic/', '/')
+        if rcs in rcsmap:
+            e.rcs = rcsmap[rcs]
+        branch = e.revision[:-1]
+        versions[(e.rcs, branch)] = e.revision
+
     for e in log:
         branch = e.revision[:-1]
         p = versions.get((e.rcs, branch), None)
         if p is None:
             p = e.revision[:-2]
         e.parent = p
         versions[(e.rcs, branch)] = e.revision
 
     # update the log cache
     if cache:
         if log:
             # join up the old and new logs
             log.sort(key=lambda x: x.date)
 
             if oldlog and oldlog[-1].date >= log[0].date:
                 raise logerror(_('log cache overlaps with new log entries,'
                                  ' re-run without cache.'))
 
             log = oldlog + log
 
             # write the new cachefile
             ui.note(_('writing cvs log cache %s\n') % cachefile)
             pickle.dump(log, open(cachefile, 'w'))
         else:
             log = oldlog
 
     ui.status(_('%d log entries\n') % len(log))
 
     hook.hook(ui, None, "cvslog", True, log=log)
 
     return log
 
 
 class changeset(object):
     '''Class changeset has the following attributes:
         .id - integer identifying this changeset (list index)
         .author - author name as CVS knows it
         .branch - name of branch this changeset is on, or None
         .comment - commit message
         .commitid - CVS commitid or None
         .date - the commit date as a (time,tz) tuple
         .entries - list of logentry objects in this changeset
         .parents - list of one or two parent changesets
         .tags - list of tags on this changeset
         .synthetic - from synthetic revision "file ... added on branch ..."
         .mergepoint- the branch that has been merged from or None
         .branchpoints- the branches that start at the current entry or empty
     '''
     def __init__(self, **entries):
         self.id = None
         self.synthetic = False
         self.__dict__.update(entries)
 
     def __repr__(self):
         items = ("%s=%r"%(k, self.__dict__[k]) for k in sorted(self.__dict__))
         return "%s(%s)"%(type(self).__name__, ", ".join(items))
 
 def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
     '''Convert log into changesets.'''
 
     ui.status(_('creating changesets\n'))
 
     # try to order commitids by date
     mindate = {}
     for e in log:
         if e.commitid:
             mindate[e.commitid] = min(e.date, mindate.get(e.commitid))
 
     # Merge changesets
     log.sort(key=lambda x: (mindate.get(x.commitid), x.commitid, x.comment,
                             x.author, x.branch, x.date, x.branchpoints))
 
     changesets = []
     files = set()
     c = None
     for i, e in enumerate(log):
 
         # Check if log entry belongs to the current changeset or not.
 
         # Since CVS is file-centric, two different file revisions with
         # different branchpoints should be treated as belonging to two
         # different changesets (and the ordering is important and not
         # honoured by cvsps at this point).
         #
         # Consider the following case:
         # foo 1.1 branchpoints: [MYBRANCH]
         # bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2]
         #
         # Here foo is part only of MYBRANCH, but not MYBRANCH2, e.g. a
         # later version of foo may be in MYBRANCH2, so foo should be the
         # first changeset and bar the next and MYBRANCH and MYBRANCH2
         # should both start off of the bar changeset. No provisions are
         # made to ensure that this is, in fact, what happens.
         if not (c and e.branchpoints == c.branchpoints and
                 (# cvs commitids
                  (e.commitid is not None and e.commitid == c.commitid) or
                  (# no commitids, use fuzzy commit detection
                   (e.commitid is None or c.commitid is None) and
                    e.comment == c.comment and
                    e.author == c.author and
                    e.branch == c.branch and
                    ((c.date[0] + c.date[1]) <=
                     (e.date[0] + e.date[1]) <=
                     (c.date[0] + c.date[1]) + fuzz) and
                    e.file not in files))):
             c = changeset(comment=e.comment, author=e.author,
                           branch=e.branch, date=e.date,
                           entries=[], mergepoint=e.mergepoint,
                           branchpoints=e.branchpoints, commitid=e.commitid)
             changesets.append(c)
 
             files = set()
             if len(changesets) % 100 == 0:
                 t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
                 ui.status(util.ellipsis(t, 80) + '\n')
 
         c.entries.append(e)
         files.add(e.file)
         c.date = e.date # changeset date is date of latest commit in it
 
     # Mark synthetic changesets
 
     for c in changesets:
         # Synthetic revisions always get their own changeset, because
         # the log message includes the filename. E.g. if you add file3
         # and file4 on a branch, you get four log entries and three
         # changesets:
         # "File file3 was added on branch ..." (synthetic, 1 entry)
         # "File file4 was added on branch ..." (synthetic, 1 entry)
         # "Add file3 and file4 to fix ..." (real, 2 entries)
         # Hence the check for 1 entry here.
         c.synthetic = len(c.entries) == 1 and c.entries[0].synthetic
 
     # Sort files in each changeset
 
     def entitycompare(l, r):
         'Mimic cvsps sorting order'
         l = l.file.split('/')
         r = r.file.split('/')
         nl = len(l)
         nr = len(r)
         n = min(nl, nr)
         for i in range(n):
             if i + 1 == nl and nl < nr:
                 return -1
             elif i + 1 == nr and nl > nr:
                 return +1
             elif l[i] < r[i]:
                 return -1
             elif l[i] > r[i]:
                 return +1
         return 0
 
     for c in changesets:
         c.entries.sort(entitycompare)
 
     # Sort changesets by date
 
     odd = set()
     def cscmp(l, r, odd=odd):
         d = sum(l.date) - sum(r.date)
         if d:
             return d
 
         # detect vendor branches and initial commits on a branch
         le = {}
         for e in l.entries:
             le[e.rcs] = e.revision
         re = {}
         for e in r.entries:
             re[e.rcs] = e.revision
 
         d = 0
         for e in l.entries:
             if re.get(e.rcs, None) == e.parent:
                 assert not d
                 d = 1
                 break
 
         for e in r.entries:
             if le.get(e.rcs, None) == e.parent:
                 if d:
                     odd.add((l, r))
                 d = -1
                 break
         # By this point, the changesets are sufficiently compared that
         # we don't really care about ordering. However, this leaves
         # some race conditions in the tests, so we compare on the
         # number of files modified, the files contained in each
         # changeset, and the branchpoints in the change to ensure test
         # output remains stable.
 
         # recommended replacement for cmp from
         # https://docs.python.org/3.0/whatsnew/3.0.html
         c = lambda x, y: (x > y) - (x < y)
         # Sort bigger changes first.
         if not d:
             d = c(len(l.entries), len(r.entries))
         # Try sorting by filename in the change.
         if not d:
             d = c([e.file for e in l.entries], [e.file for e in r.entries])
         # Try and put changes without a branch point before ones with
         # a branch point.
         if not d:
             d = c(len(l.branchpoints), len(r.branchpoints))
         return d
 
     changesets.sort(cscmp)
 
     # Collect tags
 
     globaltags = {}
     for c in changesets:
         for e in c.entries:
             for tag in e.tags:
                 # remember which is the latest changeset to have this tag
                 globaltags[tag] = c
 
     for c in changesets:
         tags = set()
         for e in c.entries:
             tags.update(e.tags)
         # remember tags only if this is the latest changeset to have it
         c.tags = sorted(tag for tag in tags if globaltags[tag] is c)
 
     # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
     # by inserting dummy changesets with two parents, and handle
     # {{mergefrombranch BRANCHNAME}} by setting two parents.
 
     if mergeto is None:
         mergeto = r'{{mergetobranch ([-\w]+)}}'
     if mergeto:
         mergeto = re.compile(mergeto)
 
     if mergefrom is None:
         mergefrom = r'{{mergefrombranch ([-\w]+)}}'
     if mergefrom:
         mergefrom = re.compile(mergefrom)
 
     versions = {} # changeset index where we saw any particular file version
     branches = {} # changeset index where we saw a branch
     n = len(changesets)
     i = 0
     while i < n:
         c = changesets[i]
 
         for f in c.entries:
             versions[(f.rcs, f.revision)] = i
 
         p = None
         if c.branch in branches:
             p = branches[c.branch]
         else:
             # first changeset on a new branch
             # the parent is a changeset with the branch in its
             # branchpoints such that it is the latest possible
             # commit without any intervening, unrelated commits.
 
             for candidate in xrange(i):
                 if c.branch not in changesets[candidate].branchpoints:
                     if p is not None:
                         break
                     continue
                 p = candidate
 
         c.parents = []
         if p is not None:
             p = changesets[p]
 
             # Ensure no changeset has a synthetic changeset as a parent.
             while p.synthetic:
                 assert len(p.parents) <= 1, \
                        _('synthetic changeset cannot have multiple parents')
                 if p.parents:
                     p = p.parents[0]
                 else:
                     p = None
                     break
 
             if p is not None:
                 c.parents.append(p)
 
         if c.mergepoint:
             if c.mergepoint == 'HEAD':
                 c.mergepoint = None
             c.parents.append(changesets[branches[c.mergepoint]])
 
         if mergefrom:
             m = mergefrom.search(c.comment)
             if m:
                 m = m.group(1)
                 if m == 'HEAD':
                     m = None
                 try:
                     candidate = changesets[branches[m]]
                 except KeyError:
                     ui.warn(_("warning: CVS commit message references "
                               "non-existent branch %r:\n%s\n")
                             % (m, c.comment))
                 if m in branches and c.branch != m and not candidate.synthetic:
                     c.parents.append(candidate)
 
         if mergeto:
             m = mergeto.search(c.comment)
             if m:
                 if m.groups():
                     m = m.group(1)
                     if m == 'HEAD':
                         m = None
                 else:
                     m = None # if no group found then merge to HEAD
                 if m in branches and c.branch != m:
                     # insert empty changeset for merge
                     cc = changeset(
                         author=c.author, branch=m, date=c.date,
                         comment='convert-repo: CVS merge from branch %s'
                         % c.branch,
                         entries=[], tags=[],
                         parents=[changesets[branches[m]], c])
                     changesets.insert(i + 1, cc)
                     branches[m] = i + 1
 
                     # adjust our loop counters now we have inserted a new entry
                     n += 1
                     i += 2
                     continue
 
         branches[c.branch] = i
         i += 1
 
     # Drop synthetic changesets (safe now that we have ensured no other
     # changesets can have them as parents).
     i = 0
     while i < len(changesets):
         if changesets[i].synthetic:
             del changesets[i]
         else:
             i += 1
 
     # Number changesets
 
     for i, c in enumerate(changesets):
         c.id = i + 1
 
     if odd:
         for l, r in odd:
             if l.id is not None and r.id is not None:
                 ui.warn(_('changeset %d is both before and after %d\n')
                         % (l.id, r.id))
 
     ui.status(_('%d changeset entries\n') % len(changesets))
 
     hook.hook(ui, None, "cvschangesets", True, changesets=changesets)
 
     return changesets
 
 
 def debugcvsps(ui, *args, **opts):
     '''Read CVS rlog for current directory or named path in
     repository, and convert the log to changesets based on matching
     commit log entries and dates.
     '''
     if opts["new_cache"]:
         cache = "write"
     elif opts["update_cache"]:
         cache = "update"
     else:
         cache = None
 
     revisions = opts["revisions"]
 
     try:
         if args:
             log = []
             for d in args:
                 log += createlog(ui, d, root=opts["root"], cache=cache)
         else:
             log = createlog(ui, root=opts["root"], cache=cache)
     except logerror as e:
         ui.write("%r\n"%e)
         return
 
     changesets = createchangeset(ui, log, opts["fuzz"])
     del log
 
     # Print changesets (optionally filtered)
 
     off = len(revisions)
     branches = {} # latest version number in each branch
     ancestors = {} # parent branch
     for cs in changesets:
 
         if opts["ancestors"]:
             if cs.branch not in branches and cs.parents and cs.parents[0].id:
                 ancestors[cs.branch] = (changesets[cs.parents[0].id - 1].branch,
                                         cs.parents[0].id)
             branches[cs.branch] = cs.id
 
         # limit by branches
         if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
             continue
 
         if not off:
             # Note: trailing spaces on several lines here are needed to have
             # bug-for-bug compatibility with cvsps.
             ui.write('---------------------\n')
             ui.write(('PatchSet %d \n' % cs.id))
             ui.write(('Date: %s\n' % util.datestr(cs.date,
                                                   '%Y/%m/%d %H:%M:%S %1%2')))
             ui.write(('Author: %s\n' % cs.author))
             ui.write(('Branch: %s\n' % (cs.branch or 'HEAD')))
             ui.write(('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
                                         ','.join(cs.tags) or '(none)')))
             if cs.branchpoints:
                 ui.write(('Branchpoints: %s \n') %
                          ', '.join(sorted(cs.branchpoints)))
             if opts["parents"] and cs.parents:
                 if len(cs.parents) > 1:
                     ui.write(('Parents: %s\n' %
                              (','.join([str(p.id) for p in cs.parents]))))
                 else:
                     ui.write(('Parent: %d\n' % cs.parents[0].id))
 
             if opts["ancestors"]:
                 b = cs.branch
                 r = []
                 while b:
                     b, c = ancestors[b]
                     r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
                 if r:
                     ui.write(('Ancestors: %s\n' % (','.join(r))))
 
             ui.write(('Log:\n'))
             ui.write('%s\n\n' % cs.comment)
             ui.write(('Members: \n'))
             for f in cs.entries:
                 fn = f.file
                 if fn.startswith(opts["prefix"]):
                     fn = fn[len(opts["prefix"]):]
                 ui.write('\t%s:%s->%s%s \n' % (
                         fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
                         '.'.join([str(x) for x in f.revision]),
                         ['', '(DEAD)'][f.dead]))
             ui.write('\n')
 
         # have we seen the start tag?
         if revisions and off:
             if revisions[0] == str(cs.id) or \
                 revisions[0] in cs.tags:
                 off = False
 
         # see if we reached the end tag
         if len(revisions) > 1 and not off:
             if revisions[1] == str(cs.id) or \
                 revisions[1] in cs.tags:
                 break
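Before the test changes, it is worth spelling out the grouping rule that createchangeset() above applies when it fuses file-level log entries into changesets. The sketch below is illustrative only, not the Mercurial implementation: entries are plain dicts, and the (time, tz) date tuples are simplified to a single number.

# Sketch of the grouping rule: two consecutive log entries belong to the same
# changeset when their commitids match, or, failing that, when the fuzzy
# criteria (same comment/author/branch, dates within `fuzz` seconds, file not
# already present) all hold.
def same_changeset(c, e, files, fuzz=60):
    """Return True if entry `e` belongs to the changeset started by `c`."""
    if e['branchpoints'] != c['branchpoints']:
        return False
    if e['commitid'] is not None and e['commitid'] == c['commitid']:
        return True
    return ((e['commitid'] is None or c['commitid'] is None) and
            e['comment'] == c['comment'] and
            e['author'] == c['author'] and
            e['branch'] == c['branch'] and
            c['date'] <= e['date'] <= c['date'] + fuzz and
            e['file'] not in files)

first = dict(comment='ci0', author='alice', branch=None, commitid=None,
             branchpoints=frozenset(), date=100, file='a')
second = dict(first, file='b/c', date=130)   # 30s later, different file
third = dict(first, file='a', date=500)      # same file, outside the fuzz window

print(same_changeset(first, second, {'a'}))        # True  -> same changeset
print(same_changeset(first, third, {'a', 'b/c'}))  # False -> starts a new one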
@@ -1,470 +1,499 @@
1 #require cvs
1 #require cvs
2
2
3 $ cvscall()
3 $ cvscall()
4 > {
4 > {
5 > cvs -f "$@"
5 > cvs -f "$@"
6 > }
6 > }
7 $ hgcat()
7 $ hgcat()
8 > {
8 > {
9 > hg --cwd src-hg cat -r tip "$1"
9 > hg --cwd src-hg cat -r tip "$1"
10 > }
10 > }
11 $ echo "[extensions]" >> $HGRCPATH
11 $ echo "[extensions]" >> $HGRCPATH
12 $ echo "convert = " >> $HGRCPATH
12 $ echo "convert = " >> $HGRCPATH
13 $ cat > cvshooks.py <<EOF
13 $ cat > cvshooks.py <<EOF
14 > def cvslog(ui,repo,hooktype,log):
14 > def cvslog(ui,repo,hooktype,log):
15 > print "%s hook: %d entries"%(hooktype,len(log))
15 > print "%s hook: %d entries"%(hooktype,len(log))
16 >
16 >
17 > def cvschangesets(ui,repo,hooktype,changesets):
17 > def cvschangesets(ui,repo,hooktype,changesets):
18 > print "%s hook: %d changesets"%(hooktype,len(changesets))
18 > print "%s hook: %d changesets"%(hooktype,len(changesets))
19 > EOF
19 > EOF
20 $ hookpath=`pwd`
20 $ hookpath=`pwd`
21 $ cat <<EOF >> $HGRCPATH
21 $ cat <<EOF >> $HGRCPATH
22 > [hooks]
22 > [hooks]
23 > cvslog = python:$hookpath/cvshooks.py:cvslog
23 > cvslog = python:$hookpath/cvshooks.py:cvslog
24 > cvschangesets = python:$hookpath/cvshooks.py:cvschangesets
24 > cvschangesets = python:$hookpath/cvshooks.py:cvschangesets
25 > EOF
25 > EOF
26
26
27 create cvs repository
27 create cvs repository
28
28
29 $ mkdir cvsrepo
29 $ mkdir cvsrepo
30 $ cd cvsrepo
30 $ cd cvsrepo
31 $ CVSROOT=`pwd`
31 $ CVSROOT=`pwd`
32 $ export CVSROOT
32 $ export CVSROOT
33 $ CVS_OPTIONS=-f
33 $ CVS_OPTIONS=-f
34 $ export CVS_OPTIONS
34 $ export CVS_OPTIONS
35 $ cd ..
35 $ cd ..
36 $ rmdir cvsrepo
36 $ rmdir cvsrepo
37 $ cvscall -q -d "$CVSROOT" init
37 $ cvscall -q -d "$CVSROOT" init
38
38
39 create source directory
39 create source directory
40
40
41 $ mkdir src-temp
41 $ mkdir src-temp
42 $ cd src-temp
42 $ cd src-temp
43 $ echo a > a
43 $ echo a > a
44 $ mkdir b
44 $ mkdir b
45 $ cd b
45 $ cd b
46 $ echo c > c
46 $ echo c > c
47 $ cd ..
47 $ cd ..
48
48
49 import source directory
49 import source directory
50
50
51 $ cvscall -q import -m import src INITIAL start
51 $ cvscall -q import -m import src INITIAL start
52 N src/a
52 N src/a
53 N src/b/c
53 N src/b/c
54
54
55 No conflicts created by this import
55 No conflicts created by this import
56
56
57 $ cd ..
57 $ cd ..
58
58
59 checkout source directory
59 checkout source directory
60
60
61 $ cvscall -q checkout src
61 $ cvscall -q checkout src
62 U src/a
62 U src/a
63 U src/b/c
63 U src/b/c
64
64
65 commit a new revision changing b/c
65 commit a new revision changing b/c
66
66
67 $ cd src
67 $ cd src
68 $ sleep 1
68 $ sleep 1
69 $ echo c >> b/c
69 $ echo c >> b/c
70 $ cvscall -q commit -mci0 . | grep '<--'
70 $ cvscall -q commit -mci0 . | grep '<--'
71 $TESTTMP/cvsrepo/src/b/c,v <-- *c (glob)
71 $TESTTMP/cvsrepo/src/b/c,v <-- *c (glob)
72 $ cd ..
72 $ cd ..
73
73
74 convert fresh repo and also check localtimezone option
74 convert fresh repo and also check localtimezone option
75
75
76 NOTE: This doesn't check all time zones -- it merely determines that
76 NOTE: This doesn't check all time zones -- it merely determines that
77 the configuration option is taking effect.
77 the configuration option is taking effect.
78
78
79 An arbitrary (U.S.) time zone is used here. TZ=US/Hawaii is selected
79 An arbitrary (U.S.) time zone is used here. TZ=US/Hawaii is selected
80 since it does not use DST (unlike other U.S. time zones) and is always
80 since it does not use DST (unlike other U.S. time zones) and is always
81 a fixed difference from UTC.
81 a fixed difference from UTC.
82
82
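The fixed offset is what lets the transcript assert a literal `-1000` in the converted dates later in this test: `convert.localtimezone=True` makes the converted changesets carry the offset of the time zone the conversion runs under, and US/Hawaii never shifts for DST. Below is a small, hypothetical check of that property; the helper name is mine, and `time.tzset()` limits it to Unix platforms.

    import os
    import time

    def utc_offset(timestamp, tz):
        # UTC offset in seconds that time zone `tz` applies at `timestamp`.
        # Leaves TZ set afterwards; fine for a throwaway check.
        os.environ['TZ'] = tz
        time.tzset()
        lt = time.localtime(timestamp)
        return -(time.altzone if lt.tm_isdst else time.timezone)

    # A winter and a summer instant: US/Hawaii stays at -36000 (-10:00),
    # while a DST-observing zone such as US/Eastern changes.
    for ts in (0, 180 * 86400):
        print(utc_offset(ts, 'US/Hawaii'), utc_offset(ts, 'US/Eastern'))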
83 $ TZ=US/Hawaii hg convert --config convert.localtimezone=True src src-hg
83 $ TZ=US/Hawaii hg convert --config convert.localtimezone=True src src-hg
84 initializing destination src-hg repository
84 initializing destination src-hg repository
85 connecting to $TESTTMP/cvsrepo
85 connecting to $TESTTMP/cvsrepo
86 scanning source...
86 scanning source...
87 collecting CVS rlog
87 collecting CVS rlog
88 5 log entries
88 5 log entries
89 cvslog hook: 5 entries
89 cvslog hook: 5 entries
90 creating changesets
90 creating changesets
91 3 changeset entries
91 3 changeset entries
92 cvschangesets hook: 3 changesets
92 cvschangesets hook: 3 changesets
93 sorting...
93 sorting...
94 converting...
94 converting...
95 2 Initial revision
95 2 Initial revision
96 1 ci0
96 1 ci0
97 0 import
97 0 import
98 updating tags
98 updating tags
99 $ hgcat a
99 $ hgcat a
100 a
100 a
101 $ hgcat b/c
101 $ hgcat b/c
102 c
102 c
103 c
103 c
104
104
105 convert fresh repo with --filemap
105 convert fresh repo with --filemap
106
106
107 $ echo include b/c > filemap
107 $ echo include b/c > filemap
108 $ hg convert --filemap filemap src src-filemap
108 $ hg convert --filemap filemap src src-filemap
109 initializing destination src-filemap repository
109 initializing destination src-filemap repository
110 connecting to $TESTTMP/cvsrepo
110 connecting to $TESTTMP/cvsrepo
111 scanning source...
111 scanning source...
112 collecting CVS rlog
112 collecting CVS rlog
113 5 log entries
113 5 log entries
114 cvslog hook: 5 entries
114 cvslog hook: 5 entries
115 creating changesets
115 creating changesets
116 3 changeset entries
116 3 changeset entries
117 cvschangesets hook: 3 changesets
117 cvschangesets hook: 3 changesets
118 sorting...
118 sorting...
119 converting...
119 converting...
120 2 Initial revision
120 2 Initial revision
121 1 ci0
121 1 ci0
122 0 import
122 0 import
123 filtering out empty revision
123 filtering out empty revision
124 repository tip rolled back to revision 1 (undo convert)
124 repository tip rolled back to revision 1 (undo convert)
125 updating tags
125 updating tags
126 $ hgcat b/c
126 $ hgcat b/c
127 c
127 c
128 c
128 c
129 $ hg -R src-filemap log --template '{rev} {desc} files: {files}\n'
129 $ hg -R src-filemap log --template '{rev} {desc} files: {files}\n'
130 2 update tags files: .hgtags
130 2 update tags files: .hgtags
131 1 ci0 files: b/c
131 1 ci0 files: b/c
132 0 Initial revision files: b/c
132 0 Initial revision files: b/c
133
133
134 convert full repository (issue1649)
134 convert full repository (issue1649)
135
135
136 $ cvscall -q -d "$CVSROOT" checkout -d srcfull "." | grep -v CVSROOT
136 $ cvscall -q -d "$CVSROOT" checkout -d srcfull "." | grep -v CVSROOT
137 U srcfull/src/a
137 U srcfull/src/a
138 U srcfull/src/b/c
138 U srcfull/src/b/c
139 $ ls srcfull
139 $ ls srcfull
140 CVS
140 CVS
141 CVSROOT
141 CVSROOT
142 src
142 src
143 $ hg convert srcfull srcfull-hg \
143 $ hg convert srcfull srcfull-hg \
144 > | grep -v 'log entries' | grep -v 'hook:' \
144 > | grep -v 'log entries' | grep -v 'hook:' \
145 > | grep -v '^[0-3] .*' # filter unstable changeset order
145 > | grep -v '^[0-3] .*' # filter unstable changeset order
146 initializing destination srcfull-hg repository
146 initializing destination srcfull-hg repository
147 connecting to $TESTTMP/cvsrepo
147 connecting to $TESTTMP/cvsrepo
148 scanning source...
148 scanning source...
149 collecting CVS rlog
149 collecting CVS rlog
150 creating changesets
150 creating changesets
151 4 changeset entries
151 4 changeset entries
152 sorting...
152 sorting...
153 converting...
153 converting...
154 updating tags
154 updating tags
155 $ hg cat -r tip --cwd srcfull-hg src/a
155 $ hg cat -r tip --cwd srcfull-hg src/a
156 a
156 a
157 $ hg cat -r tip --cwd srcfull-hg src/b/c
157 $ hg cat -r tip --cwd srcfull-hg src/b/c
158 c
158 c
159 c
159 c
160
160
161 commit new file revisions
161 commit new file revisions
162
162
163 $ cd src
163 $ cd src
164 $ echo a >> a
164 $ echo a >> a
165 $ echo c >> b/c
165 $ echo c >> b/c
166 $ cvscall -q commit -mci1 . | grep '<--'
166 $ cvscall -q commit -mci1 . | grep '<--'
167 $TESTTMP/cvsrepo/src/a,v <-- a
167 $TESTTMP/cvsrepo/src/a,v <-- a
168 $TESTTMP/cvsrepo/src/b/c,v <-- *c (glob)
168 $TESTTMP/cvsrepo/src/b/c,v <-- *c (glob)
169 $ cd ..
169 $ cd ..
170
170
171 convert again
171 convert again
172
172
173 $ TZ=US/Hawaii hg convert --config convert.localtimezone=True src src-hg
173 $ TZ=US/Hawaii hg convert --config convert.localtimezone=True src src-hg
174 connecting to $TESTTMP/cvsrepo
174 connecting to $TESTTMP/cvsrepo
175 scanning source...
175 scanning source...
176 collecting CVS rlog
176 collecting CVS rlog
177 7 log entries
177 7 log entries
178 cvslog hook: 7 entries
178 cvslog hook: 7 entries
179 creating changesets
179 creating changesets
180 4 changeset entries
180 4 changeset entries
181 cvschangesets hook: 4 changesets
181 cvschangesets hook: 4 changesets
182 sorting...
182 sorting...
183 converting...
183 converting...
184 0 ci1
184 0 ci1
185 $ hgcat a
185 $ hgcat a
186 a
186 a
187 a
187 a
188 $ hgcat b/c
188 $ hgcat b/c
189 c
189 c
190 c
190 c
191 c
191 c
192
192
193 convert again with --filemap
193 convert again with --filemap
194
194
195 $ hg convert --filemap filemap src src-filemap
195 $ hg convert --filemap filemap src src-filemap
196 connecting to $TESTTMP/cvsrepo
196 connecting to $TESTTMP/cvsrepo
197 scanning source...
197 scanning source...
198 collecting CVS rlog
198 collecting CVS rlog
199 7 log entries
199 7 log entries
200 cvslog hook: 7 entries
200 cvslog hook: 7 entries
201 creating changesets
201 creating changesets
202 4 changeset entries
202 4 changeset entries
203 cvschangesets hook: 4 changesets
203 cvschangesets hook: 4 changesets
204 sorting...
204 sorting...
205 converting...
205 converting...
206 0 ci1
206 0 ci1
207 $ hgcat b/c
207 $ hgcat b/c
208 c
208 c
209 c
209 c
210 c
210 c
211 $ hg -R src-filemap log --template '{rev} {desc} files: {files}\n'
211 $ hg -R src-filemap log --template '{rev} {desc} files: {files}\n'
212 3 ci1 files: b/c
212 3 ci1 files: b/c
213 2 update tags files: .hgtags
213 2 update tags files: .hgtags
214 1 ci0 files: b/c
214 1 ci0 files: b/c
215 0 Initial revision files: b/c
215 0 Initial revision files: b/c
216
216
217 commit branch
217 commit branch
218
218
219 $ cd src
219 $ cd src
220 $ cvs -q update -r1.1 b/c
220 $ cvs -q update -r1.1 b/c
221 U b/c
221 U b/c
222 $ cvs -q tag -b branch
222 $ cvs -q tag -b branch
223 T a
223 T a
224 T b/c
224 T b/c
225 $ cvs -q update -r branch > /dev/null
225 $ cvs -q update -r branch > /dev/null
226 $ echo d >> b/c
226 $ echo d >> b/c
227 $ cvs -q commit -mci2 . | grep '<--'
227 $ cvs -q commit -mci2 . | grep '<--'
228 $TESTTMP/cvsrepo/src/b/c,v <-- *c (glob)
228 $TESTTMP/cvsrepo/src/b/c,v <-- *c (glob)
229 $ cd ..
229 $ cd ..
230
230
231 convert again
231 convert again
232
232
233 $ TZ=US/Hawaii hg convert --config convert.localtimezone=True src src-hg
233 $ TZ=US/Hawaii hg convert --config convert.localtimezone=True src src-hg
234 connecting to $TESTTMP/cvsrepo
234 connecting to $TESTTMP/cvsrepo
235 scanning source...
235 scanning source...
236 collecting CVS rlog
236 collecting CVS rlog
237 8 log entries
237 8 log entries
238 cvslog hook: 8 entries
238 cvslog hook: 8 entries
239 creating changesets
239 creating changesets
240 5 changeset entries
240 5 changeset entries
241 cvschangesets hook: 5 changesets
241 cvschangesets hook: 5 changesets
242 sorting...
242 sorting...
243 converting...
243 converting...
244 0 ci2
244 0 ci2
245 $ hgcat b/c
245 $ hgcat b/c
246 c
246 c
247 d
247 d
248
248
249 convert again with --filemap
249 convert again with --filemap
250
250
251 $ TZ=US/Hawaii hg convert --config convert.localtimezone=True --filemap filemap src src-filemap
251 $ TZ=US/Hawaii hg convert --config convert.localtimezone=True --filemap filemap src src-filemap
252 connecting to $TESTTMP/cvsrepo
252 connecting to $TESTTMP/cvsrepo
253 scanning source...
253 scanning source...
254 collecting CVS rlog
254 collecting CVS rlog
255 8 log entries
255 8 log entries
256 cvslog hook: 8 entries
256 cvslog hook: 8 entries
257 creating changesets
257 creating changesets
258 5 changeset entries
258 5 changeset entries
259 cvschangesets hook: 5 changesets
259 cvschangesets hook: 5 changesets
260 sorting...
260 sorting...
261 converting...
261 converting...
262 0 ci2
262 0 ci2
263 $ hgcat b/c
263 $ hgcat b/c
264 c
264 c
265 d
265 d
266 $ hg -R src-filemap log --template '{rev} {desc} files: {files}\n'
266 $ hg -R src-filemap log --template '{rev} {desc} files: {files}\n'
267 4 ci2 files: b/c
267 4 ci2 files: b/c
268 3 ci1 files: b/c
268 3 ci1 files: b/c
269 2 update tags files: .hgtags
269 2 update tags files: .hgtags
270 1 ci0 files: b/c
270 1 ci0 files: b/c
271 0 Initial revision files: b/c
271 0 Initial revision files: b/c
272
272
273 commit a new revision with funny log message
273 commit a new revision with funny log message
274
274
275 $ cd src
275 $ cd src
276 $ sleep 1
276 $ sleep 1
277 $ echo e >> a
277 $ echo e >> a
278 $ cvscall -q commit -m'funny
278 $ cvscall -q commit -m'funny
279 > ----------------------------
279 > ----------------------------
280 > log message' . | grep '<--' |\
280 > log message' . | grep '<--' |\
281 > sed -e 's:.*src/\(.*\),v.*:checking in src/\1,v:g'
281 > sed -e 's:.*src/\(.*\),v.*:checking in src/\1,v:g'
282 checking in src/a,v
282 checking in src/a,v
283
283
284 commit new file revisions with some fuzz
284 commit new file revisions with some fuzz
285
285
286 $ sleep 1
286 $ sleep 1
287 $ echo f >> a
287 $ echo f >> a
288 $ cvscall -q commit -mfuzzy . | grep '<--'
288 $ cvscall -q commit -mfuzzy . | grep '<--'
289 $TESTTMP/cvsrepo/src/a,v <-- a
289 $TESTTMP/cvsrepo/src/a,v <-- a
290 $ sleep 4 # the two changes will be split if fuzz < 4
290 $ sleep 4 # the two changes will be split if fuzz < 4
291 $ echo g >> b/c
291 $ echo g >> b/c
292 $ cvscall -q commit -mfuzzy . | grep '<--'
292 $ cvscall -q commit -mfuzzy . | grep '<--'
293 $TESTTMP/cvsrepo/src/b/c,v <-- *c (glob)
293 $TESTTMP/cvsrepo/src/b/c,v <-- *c (glob)
294 $ cd ..
294 $ cd ..
295
295
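The `sleep 4` in the previous block is deliberate: file revisions with the same author, branch, and log message are folded into one changeset only when their commit times fall within the fuzz window, so with `--fuzz=2` a 4-second gap yields two separate `fuzzy` changesets. The grouper below is a deliberately simplified, hypothetical illustration of that rule, not the extension's algorithm (which applies additional constraints).

    import collections

    LogEntry = collections.namedtuple('LogEntry', 'file date author branch comment')

    def group_by_fuzz(entries, fuzz=60):
        # Hypothetical, simplified grouping: entries sharing author, branch
        # and comment stay in one changeset while consecutive commit times
        # are at most `fuzz` seconds apart; otherwise a new changeset starts.
        changesets = []
        current, last_date = None, None
        for e in sorted(entries, key=lambda e: e.date):
            key = (e.author, e.branch, e.comment)
            if current is not None and current['key'] == key and e.date - last_date <= fuzz:
                current['files'].append(e.file)
            else:
                current = {'key': key, 'files': [e.file]}
                changesets.append(current)
            last_date = e.date
        return changesets

    # The two "fuzzy" commits above are about 4 seconds apart:
    entries = [LogEntry('a', 100, 'user', 'branch', 'fuzzy'),
               LogEntry('b/c', 104, 'user', 'branch', 'fuzzy')]
    print(len(group_by_fuzz(entries, fuzz=2)))   # 2: split, as in this test
    print(len(group_by_fuzz(entries, fuzz=60)))  # 1: merged with a larger fuzz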
296 convert again
296 convert again
297
297
298 $ TZ=US/Hawaii hg convert --config convert.cvsps.fuzz=2 --config convert.localtimezone=True src src-hg
298 $ TZ=US/Hawaii hg convert --config convert.cvsps.fuzz=2 --config convert.localtimezone=True src src-hg
299 connecting to $TESTTMP/cvsrepo
299 connecting to $TESTTMP/cvsrepo
300 scanning source...
300 scanning source...
301 collecting CVS rlog
301 collecting CVS rlog
302 11 log entries
302 11 log entries
303 cvslog hook: 11 entries
303 cvslog hook: 11 entries
304 creating changesets
304 creating changesets
305 8 changeset entries
305 8 changeset entries
306 cvschangesets hook: 8 changesets
306 cvschangesets hook: 8 changesets
307 sorting...
307 sorting...
308 converting...
308 converting...
309 2 funny
309 2 funny
310 1 fuzzy
310 1 fuzzy
311 0 fuzzy
311 0 fuzzy
312 $ hg -R src-hg log -G --template '{rev} ({branches}) {desc} date: {date|date} files: {files}\n'
312 $ hg -R src-hg log -G --template '{rev} ({branches}) {desc} date: {date|date} files: {files}\n'
313 o 8 (branch) fuzzy date: * -1000 files: b/c (glob)
313 o 8 (branch) fuzzy date: * -1000 files: b/c (glob)
314 |
314 |
315 o 7 (branch) fuzzy date: * -1000 files: a (glob)
315 o 7 (branch) fuzzy date: * -1000 files: a (glob)
316 |
316 |
317 o 6 (branch) funny
317 o 6 (branch) funny
318 | ----------------------------
318 | ----------------------------
319 | log message date: * -1000 files: a (glob)
319 | log message date: * -1000 files: a (glob)
320 o 5 (branch) ci2 date: * -1000 files: b/c (glob)
320 o 5 (branch) ci2 date: * -1000 files: b/c (glob)
321
321
322 o 4 () ci1 date: * -1000 files: a b/c (glob)
322 o 4 () ci1 date: * -1000 files: a b/c (glob)
323 |
323 |
324 o 3 () update tags date: * +0000 files: .hgtags (glob)
324 o 3 () update tags date: * +0000 files: .hgtags (glob)
325 |
325 |
326 | o 2 (INITIAL) import date: * -1000 files: (glob)
326 | o 2 (INITIAL) import date: * -1000 files: (glob)
327 | |
327 | |
328 o | 1 () ci0 date: * -1000 files: b/c (glob)
328 o | 1 () ci0 date: * -1000 files: b/c (glob)
329 |/
329 |/
330 o 0 () Initial revision date: * -1000 files: a b/c (glob)
330 o 0 () Initial revision date: * -1000 files: a b/c (glob)
331
331
332
332
333 testing debugcvsps
333 testing debugcvsps
334
334
335 $ cd src
335 $ cd src
336 $ hg debugcvsps --fuzz=2
336 $ hg debugcvsps --fuzz=2 -x >/dev/null
337
338 commit a new revision changing a and removing b/c
339
340 $ cvscall -q update -A
341 U a
342 U b/c
343 $ sleep 1
344 $ echo h >> a
345 $ cvscall -Q remove -f b/c
346 $ cvscall -q commit -mci | grep '<--'
347 $TESTTMP/cvsrepo/src/a,v <-- a
348 $TESTTMP/cvsrepo/src/b/c,v <-- *c (glob)
349
350 update and verify the cvsps cache
351
352 $ hg debugcvsps --fuzz=2 -u
337 collecting CVS rlog
353 collecting CVS rlog
338 11 log entries
354 13 log entries
339 cvslog hook: 11 entries
355 cvslog hook: 13 entries
340 creating changesets
356 creating changesets
341 10 changeset entries
357 11 changeset entries
342 cvschangesets hook: 10 changesets
358 cvschangesets hook: 11 changesets
343 ---------------------
359 ---------------------
344 PatchSet 1
360 PatchSet 1
345 Date: * (glob)
361 Date: * (glob)
346 Author: * (glob)
362 Author: * (glob)
347 Branch: HEAD
363 Branch: HEAD
348 Tag: (none)
364 Tag: (none)
349 Branchpoints: INITIAL
365 Branchpoints: INITIAL
350 Log:
366 Log:
351 Initial revision
367 Initial revision
352
368
353 Members:
369 Members:
354 a:INITIAL->1.1
370 a:INITIAL->1.1
355
371
356 ---------------------
372 ---------------------
357 PatchSet 2
373 PatchSet 2
358 Date: * (glob)
374 Date: * (glob)
359 Author: * (glob)
375 Author: * (glob)
360 Branch: HEAD
376 Branch: HEAD
361 Tag: (none)
377 Tag: (none)
362 Branchpoints: INITIAL, branch
378 Branchpoints: INITIAL, branch
363 Log:
379 Log:
364 Initial revision
380 Initial revision
365
381
366 Members:
382 Members:
367 b/c:INITIAL->1.1
383 b/c:INITIAL->1.1
368
384
369 ---------------------
385 ---------------------
370 PatchSet 3
386 PatchSet 3
371 Date: * (glob)
387 Date: * (glob)
372 Author: * (glob)
388 Author: * (glob)
373 Branch: INITIAL
389 Branch: INITIAL
374 Tag: start
390 Tag: start
375 Log:
391 Log:
376 import
392 import
377
393
378 Members:
394 Members:
379 a:1.1->1.1.1.1
395 a:1.1->1.1.1.1
380 b/c:1.1->1.1.1.1
396 b/c:1.1->1.1.1.1
381
397
382 ---------------------
398 ---------------------
383 PatchSet 4
399 PatchSet 4
384 Date: * (glob)
400 Date: * (glob)
385 Author: * (glob)
401 Author: * (glob)
386 Branch: HEAD
402 Branch: HEAD
387 Tag: (none)
403 Tag: (none)
388 Log:
404 Log:
389 ci0
405 ci0
390
406
391 Members:
407 Members:
392 b/c:1.1->1.2
408 b/c:1.1->1.2
393
409
394 ---------------------
410 ---------------------
395 PatchSet 5
411 PatchSet 5
396 Date: * (glob)
412 Date: * (glob)
397 Author: * (glob)
413 Author: * (glob)
398 Branch: HEAD
414 Branch: HEAD
399 Tag: (none)
415 Tag: (none)
400 Branchpoints: branch
416 Branchpoints: branch
401 Log:
417 Log:
402 ci1
418 ci1
403
419
404 Members:
420 Members:
405 a:1.1->1.2
421 a:1.1->1.2
406
422
407 ---------------------
423 ---------------------
408 PatchSet 6
424 PatchSet 6
409 Date: * (glob)
425 Date: * (glob)
410 Author: * (glob)
426 Author: * (glob)
411 Branch: HEAD
427 Branch: HEAD
412 Tag: (none)
428 Tag: (none)
413 Log:
429 Log:
414 ci1
430 ci1
415
431
416 Members:
432 Members:
417 b/c:1.2->1.3
433 b/c:1.2->1.3
418
434
419 ---------------------
435 ---------------------
420 PatchSet 7
436 PatchSet 7
421 Date: * (glob)
437 Date: * (glob)
422 Author: * (glob)
438 Author: * (glob)
423 Branch: branch
439 Branch: branch
424 Tag: (none)
440 Tag: (none)
425 Log:
441 Log:
426 ci2
442 ci2
427
443
428 Members:
444 Members:
429 b/c:1.1->1.1.2.1
445 b/c:1.1->1.1.2.1
430
446
431 ---------------------
447 ---------------------
432 PatchSet 8
448 PatchSet 8
433 Date: * (glob)
449 Date: * (glob)
434 Author: * (glob)
450 Author: * (glob)
435 Branch: branch
451 Branch: branch
436 Tag: (none)
452 Tag: (none)
437 Log:
453 Log:
438 funny
454 funny
439 ----------------------------
455 ----------------------------
440 log message
456 log message
441
457
442 Members:
458 Members:
443 a:1.2->1.2.2.1
459 a:1.2->1.2.2.1
444
460
445 ---------------------
461 ---------------------
446 PatchSet 9
462 PatchSet 9
447 Date: * (glob)
463 Date: * (glob)
448 Author: * (glob)
464 Author: * (glob)
449 Branch: branch
465 Branch: branch
450 Tag: (none)
466 Tag: (none)
451 Log:
467 Log:
452 fuzzy
468 fuzzy
453
469
454 Members:
470 Members:
455 a:1.2.2.1->1.2.2.2
471 a:1.2.2.1->1.2.2.2
456
472
457 ---------------------
473 ---------------------
458 PatchSet 10
474 PatchSet 10
459 Date: * (glob)
475 Date: * (glob)
460 Author: * (glob)
476 Author: * (glob)
461 Branch: branch
477 Branch: branch
462 Tag: (none)
478 Tag: (none)
463 Log:
479 Log:
464 fuzzy
480 fuzzy
465
481
466 Members:
482 Members:
467 b/c:1.1.2.1->1.1.2.2
483 b/c:1.1.2.1->1.1.2.2
468
484
485 ---------------------
486 PatchSet 11
487 Date: * (glob)
488 Author: * (glob)
489 Branch: HEAD
490 Tag: (none)
491 Log:
492 ci
493
494 Members:
495 a:1.2->1.3
496 b/c:1.3->1.4(DEAD)
497
469
498
470 $ cd ..
499 $ cd ..
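The block appended at the end of this test is what exercises log caching: `-x` rebuilds the cvs log cache from scratch (its output is discarded), the follow-up commit removes `b/c`, and `-u` then reuses and updates that cache, so the member revisions reported for the new PatchSet 11 must line up with each file's earlier history (`a` moves 1.2->1.3, `b/c` moves 1.3->1.4 and is marked DEAD because it was removed). For readers less familiar with CVS numbering, the helper below is a hypothetical illustration, not the extension's code, of how a member's parent revision relates to its revision; it reproduces the arrows printed for PatchSet 11.

    def cvs_parent(revision):
        # Hypothetical helper: the parent of a CVS revision tuple under plain
        # CVS numbering rules.
        #   (1, 3)       -> (1, 2)   previous revision on the same branch
        #   (1, 1, 2, 1) -> (1, 1)   first revision on a branch: its branchpoint
        #   (1, 1)       -> None     the initial revision has no parent
        if revision[-1] > 1:
            return revision[:-1] + (revision[-1] - 1,)
        if len(revision) > 2:
            return revision[:-2]
        return None

    # Matches the Members arrows printed in the output above:
    assert cvs_parent((1, 3)) == (1, 2)        # a:1.2->1.3
    assert cvs_parent((1, 4)) == (1, 3)        # b/c:1.3->1.4(DEAD)
    assert cvs_parent((1, 1, 2, 1)) == (1, 1)  # b/c:1.1->1.1.2.1 (ci2)

The interesting part of the `-u` run is that PatchSet 11's parents are derived from entries coming out of the cache rather than from a fresh rlog, which is exactly the path this appended block covers.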