##// END OF EJS Templates
convert: cvsps use absolute_import
timeless -
r28369:71176606 default
parent child Browse files
Show More
@@ -1,914 +1,918 b''
1 # Mercurial built-in replacement for cvsps.
1 # Mercurial built-in replacement for cvsps.
2 #
2 #
3 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
3 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from __future__ import absolute_import
7
8
9 import cPickle as pickle
8 import os
10 import os
9 import re
11 import re
10 import cPickle as pickle
12
13 from mercurial import (
14 hook,
15 util,
16 )
11 from mercurial.i18n import _
17 from mercurial.i18n import _
12 from mercurial import hook
13 from mercurial import util
14
18
class logentry(object):
    '''One file revision parsed from CVS (r)log output.

    Instances carry the following attributes:
    .author - author name as CVS knows it
    .branch - name of branch this revision is on
    .branches - revision tuple of branches starting at this revision
    .comment - commit message
    .commitid - CVS commitid or None
    .date - the commit date as a (time, tz) tuple
    .dead - true if file revision is dead
    .file - Name of file
    .lines - a tuple (+lines, -lines) or None
    .parent - Previous revision of this entry
    .rcs - name of file as returned from CVS
    .revision - revision number as tuple
    .tags - list of tags on the file
    .synthetic - is this a synthetic "file ... added on ..." revision?
    .mergepoint - the branch that has been merged from (if present in
                  rlog output) or None
    .branchpoints - the branches that start at the current entry or empty
    '''
    def __init__(self, **entries):
        # Revisions are real unless explicitly flagged otherwise; a
        # caller-supplied 'synthetic' keyword overrides this default.
        self.synthetic = False
        for name, value in entries.items():
            setattr(self, name, value)

    def __repr__(self):
        # Render attributes in sorted order so the repr is deterministic.
        pairs = []
        for name in sorted(self.__dict__):
            pairs.append("%s=%r" % (name, self.__dict__[name]))
        return "%s(%s)" % (type(self).__name__, ", ".join(pairs))
42
46
class logerror(Exception):
    '''Raised when CVS reports an error or its (r)log output / the local
    sandbox or log cache is in a state this module cannot handle.'''
    pass
45
49
def getrepopath(cvspath):
    """Return the repository path from a CVS path.

    >>> getrepopath('/foo/bar')
    '/foo/bar'
    >>> getrepopath('c:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:10/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:10c:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:c:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
    '/foo/bar'
    >>> getrepopath('user@server/path/to/repository')
    '/path/to/repository'
    """
    # According to the CVS manual, CVS paths are expressed like:
    #   [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
    #
    # Only the last colon-separated component can contain the repository
    # path.  Within it, the answer is everything from the first '/' that
    # follows the '@' sign (or from the first '/' at all when there is
    # no '@'), including that '/'.
    tail = cvspath.split(':')[-1]
    at = tail.find('@')
    searchfrom = at if at != -1 else 0
    return tail[tail.find('/', searchfrom):]
84
88
def createlog(ui, directory=None, root="", rlog=True, cache=None):
    '''Collect the CVS rlog.

    Runs 'cvs rlog' (or 'cvs log' when rlog is False) over `directory`,
    parses the output with a line-oriented state machine, and returns a
    list of logentry objects, one per file revision, sorted by commit
    date when a cache is in use.

    ui        - Mercurial ui object, used for status/note/debug output
    directory - path to log; None means use the current CVS sandbox
                (reads CVS/Repository and CVS/Root)
    root      - CVSROOT to use; falls back to CVS/Root, then $CVSROOT
    rlog      - run 'rlog' instead of 'log'
    cache     - if truthy, use the pickle cache under ~/.hg.cvsps;
                the value 'update' additionally reads the existing cache

    Raises logerror when CVS reports an error, when not in a sandbox,
    or when the cache overlaps with new log entries.

    Fires the "cvslog" hook with the resulting log before returning.
    '''

    # Because we store many duplicate commit log messages, reusing strings
    # saves a lot of memory and pickle storage space.
    _scache = {}
    def scache(s):
        "return a shared version of a string"
        return _scache.setdefault(s, s)

    ui.status(_('collecting CVS rlog\n'))

    log = []      # list of logentry objects containing the CVS state

    # patterns to match in CVS (r)log output, by state of use
    re_00 = re.compile('RCS file: (.+)$')
    re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
    re_02 = re.compile('cvs (r?log|server): (.+)\n$')
    re_03 = re.compile("(Cannot access.+CVSROOT)|"
                       "(can't create temporary directory.+)$")
    re_10 = re.compile('Working file: (.+)$')
    re_20 = re.compile('symbolic names:')
    re_30 = re.compile('\t(.+): ([\\d.]+)$')
    re_31 = re.compile('----------------------------$')
    re_32 = re.compile('======================================='
                       '======================================$')
    re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
    re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
                       r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
                       r'(\s+commitid:\s+([^;]+);)?'
                       r'(.*mergepoint:\s+([^;]+);)?')
    re_70 = re.compile('branches: (.+);$')

    file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')

    prefix = ''   # leading path to strip of what we get from CVS

    if directory is None:
        # Current working directory

        # Get the real directory in the repository
        try:
            prefix = open(os.path.join('CVS','Repository')).read().strip()
            directory = prefix
            if prefix == ".":
                prefix = ""
        except IOError:
            raise logerror(_('not a CVS sandbox'))

        if prefix and not prefix.endswith(os.sep):
            prefix += os.sep

        # Use the Root file in the sandbox, if it exists
        try:
            root = open(os.path.join('CVS','Root')).read().strip()
        except IOError:
            pass

    if not root:
        root = os.environ.get('CVSROOT', '')

    # read log cache if one exists
    oldlog = []
    date = None

    if cache:
        cachedir = os.path.expanduser('~/.hg.cvsps')
        if not os.path.exists(cachedir):
            os.mkdir(cachedir)

        # The cvsps cache pickle needs a uniquified name, based on the
        # repository location. The address may have all sort of nasties
        # in it, slashes, colons and such. So here we take just the
        # alphanumeric characters, concatenated in a way that does not
        # mix up the various components, so that
        #    :pserver:user@server:/path
        # and
        #    /pserver/user/server/path
        # are mapped to different cache file names.
        cachefile = root.split(":") + [directory, "cache"]
        cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
        cachefile = os.path.join(cachedir,
                                 '.'.join([s for s in cachefile if s]))

        if cache == 'update':
            try:
                ui.note(_('reading cvs log cache %s\n') % cachefile)
                # NOTE(review): the file handle from open() is never
                # explicitly closed; relies on refcounting/GC.
                oldlog = pickle.load(open(cachefile))
                # A cache written by an older hg lacks newer logentry
                # attributes; discard it rather than crash later.
                for e in oldlog:
                    if not (util.safehasattr(e, 'branchpoints') and
                            util.safehasattr(e, 'commitid') and
                            util.safehasattr(e, 'mergepoint')):
                        ui.status(_('ignoring old cache\n'))
                        oldlog = []
                        break

                ui.note(_('cache has %d log entries\n') % len(oldlog))
            except Exception as e:
                # Best-effort: any cache read failure just means a full
                # rlog run, so only note the error.
                ui.note(_('error reading cache: %r\n') % e)

        if oldlog:
            date = oldlog[-1].date    # last commit date as a (time,tz) tuple
            date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')

    # build the CVS commandline
    cmd = ['cvs', '-q']
    if root:
        cmd.append('-d%s' % root)
        p = util.normpath(getrepopath(root))
        if not p.endswith('/'):
            p += '/'
        if prefix:
            # looks like normpath replaces "" by "."
            prefix = p + util.normpath(prefix)
        else:
            prefix = p
    cmd.append(['log', 'rlog'][rlog])
    if date:
        # no space between option and date string
        cmd.append('-d>%s' % date)
    cmd.append(directory)

    # state machine begins here
    # states: 0 = wait for 'RCS file', 1 = wait for 'Working file',
    # 2 = wait for 'symbolic names:', 3 = read tags, 4 = wait for the
    # first '-----' revision separator, 5 = read revision number,
    # 6 = read date/author/state line, 7/8 = accumulate commit message
    tags = {}     # dictionary of revisions on current file with their tags
    branchmap = {} # mapping between branch names and revision numbers
    rcsmap = {}
    state = 0
    store = False # set when a new record can be appended

    cmd = [util.shellquote(arg) for arg in cmd]
    ui.note(_("running %s\n") % (' '.join(cmd)))
    ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root))

    pfp = util.popen(' '.join(cmd))
    # One line of lookahead ('peek') is needed to disambiguate the
    # '-----' separator inside a commit message from a real revision
    # separator (states 7 and 8 below).
    peek = pfp.readline()
    while True:
        line = peek
        if line == '':
            break
        peek = pfp.readline()
        if line.endswith('\n'):
            line = line[:-1]
        #ui.debug('state=%d line=%r\n' % (state, line))

        if state == 0:
            # initial state, consume input until we see 'RCS file'
            match = re_00.match(line)
            if match:
                rcs = match.group(1)
                tags = {}
                if rlog:
                    # rlog prints repository paths; strip the ',v'
                    # suffix and the repository prefix to recover the
                    # working-file name.
                    filename = util.normpath(rcs[:-2])
                    if filename.startswith(prefix):
                        filename = filename[len(prefix):]
                    if filename.startswith('/'):
                        filename = filename[1:]
                    if filename.startswith('Attic/'):
                        filename = filename[6:]
                    else:
                        filename = filename.replace('/Attic/', '/')
                    state = 2
                    continue
                state = 1
                continue
            match = re_01.match(line)
            if match:
                raise logerror(match.group(1))
            match = re_02.match(line)
            if match:
                raise logerror(match.group(2))
            if re_03.match(line):
                raise logerror(line)

        elif state == 1:
            # expect 'Working file' (only when using log instead of rlog)
            match = re_10.match(line)
            assert match, _('RCS file must be followed by working file')
            filename = util.normpath(match.group(1))
            state = 2

        elif state == 2:
            # expect 'symbolic names'
            if re_20.match(line):
                branchmap = {}
                state = 3

        elif state == 3:
            # read the symbolic names and store as tags
            match = re_30.match(line)
            if match:
                rev = [int(x) for x in match.group(2).split('.')]

                # Convert magic branch number to an odd-numbered one
                revn = len(rev)
                if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
                    rev = rev[:-2] + rev[-1:]
                rev = tuple(rev)

                if rev not in tags:
                    tags[rev] = []
                tags[rev].append(match.group(1))
                branchmap[match.group(1)] = match.group(2)

            elif re_31.match(line):
                state = 5
            elif re_32.match(line):
                state = 0

        elif state == 4:
            # expecting '------' separator before first revision
            if re_31.match(line):
                state = 5
            else:
                assert not re_32.match(line), _('must have at least '
                                                'some revisions')

        elif state == 5:
            # expecting revision number and possibly (ignored) lock indication
            # we create the logentry here from values stored in states 0 to 4,
            # as this state is re-entered for subsequent revisions of a file.
            match = re_50.match(line)
            assert match, _('expected revision number')
            e = logentry(rcs=scache(rcs),
                         file=scache(filename),
                         revision=tuple([int(x) for x in
                                         match.group(1).split('.')]),
                         branches=[],
                         parent=None,
                         commitid=None,
                         mergepoint=None,
                         branchpoints=set())

            state = 6

        elif state == 6:
            # expecting date, author, state, lines changed
            match = re_60.match(line)
            assert match, _('revision must be followed by date line')
            d = match.group(1)
            if d[2] == '/':
                # Y2K
                d = '19' + d

            if len(d.split()) != 3:
                # cvs log dates always in GMT
                d = d + ' UTC'
            e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
                                        '%Y/%m/%d %H:%M:%S',
                                        '%Y-%m-%d %H:%M:%S'])
            e.author = scache(match.group(2))
            e.dead = match.group(3).lower() == 'dead'

            # groups 5/6 are the optional '+added'/'-removed' line counts
            if match.group(5):
                if match.group(6):
                    e.lines = (int(match.group(5)), int(match.group(6)))
                else:
                    e.lines = (int(match.group(5)), 0)
            elif match.group(6):
                e.lines = (0, int(match.group(6)))
            else:
                e.lines = None

            if match.group(7): # cvs 1.12 commitid
                e.commitid = match.group(8)

            if match.group(9): # cvsnt mergepoint
                myrev = match.group(10).split('.')
                if len(myrev) == 2: # head
                    e.mergepoint = 'HEAD'
                else:
                    # convert the revision to a magic branch number and
                    # look it up in the symbolic names read in state 3
                    myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
                    branches = [b for b in branchmap if branchmap[b] == myrev]
                    # NOTE(review): e.mergepoint is still None here, so
                    # the assertion message cannot name the branch.
                    assert len(branches) == 1, ('unknown branch: %s'
                                                % e.mergepoint)
                    e.mergepoint = branches[0]

            e.comment = []
            state = 7

        elif state == 7:
            # read the revision numbers of branches that start at this revision
            # or store the commit log message otherwise
            m = re_70.match(line)
            if m:
                e.branches = [tuple([int(y) for y in x.strip().split('.')])
                              for x in m.group(1).split(';')]
                state = 8
            elif re_31.match(line) and re_50.match(peek):
                state = 5
                store = True
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        elif state == 8:
            # store commit log message
            if re_31.match(line):
                # a '-----' line only ends this revision when the next
                # line really is a revision header; otherwise it is part
                # of the commit message itself
                cpeek = peek
                if cpeek.endswith('\n'):
                    cpeek = cpeek[:-1]
                if re_50.match(cpeek):
                    state = 5
                    store = True
                else:
                    e.comment.append(line)
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        # When a file is added on a branch B1, CVS creates a synthetic
        # dead trunk revision 1.1 so that the branch has a root.
        # Likewise, if you merge such a file to a later branch B2 (one
        # that already existed when the file was added on B1), CVS
        # creates a synthetic dead revision 1.1.x.1 on B2.  Don't drop
        # these revisions now, but mark them synthetic so
        # createchangeset() can take care of them.
        if (store and
              e.dead and
              e.revision[-1] == 1 and      # 1.1 or 1.1.x.1
              len(e.comment) == 1 and
              file_added_re.match(e.comment[0])):
            ui.debug('found synthetic revision in %s: %r\n'
                     % (e.rcs, e.comment[0]))
            e.synthetic = True

        if store:
            # clean up the results and save in the log.
            store = False
            e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
            e.comment = scache('\n'.join(e.comment))

            revn = len(e.revision)
            if revn > 3 and (revn % 2) == 0:
                e.branch = tags.get(e.revision[:-1], [None])[0]
            else:
                e.branch = None

            # find the branches starting from this revision
            branchpoints = set()
            for branch, revision in branchmap.iteritems():
                revparts = tuple([int(i) for i in revision.split('.')])
                if len(revparts) < 2: # bad tags
                    continue
                if revparts[-2] == 0 and revparts[-1] % 2 == 0:
                    # normal branch
                    if revparts[:-2] == e.revision:
                        branchpoints.add(branch)
                elif revparts == (1, 1, 1): # vendor branch
                    if revparts in e.branches:
                        branchpoints.add(branch)
            e.branchpoints = branchpoints

            log.append(e)

            # remember the non-Attic name so cached entries for files
            # later moved to the Attic can be matched up below
            rcsmap[e.rcs.replace('/Attic/', '/')] = e.rcs

            if len(log) % 100 == 0:
                ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')

    log.sort(key=lambda x: (x.rcs, x.revision))

    # find parent revisions of individual files
    versions = {}
    for e in sorted(oldlog, key=lambda x: (x.rcs, x.revision)):
        rcs = e.rcs.replace('/Attic/', '/')
        if rcs in rcsmap:
            e.rcs = rcsmap[rcs]
        branch = e.revision[:-1]
        versions[(e.rcs, branch)] = e.revision

    for e in log:
        branch = e.revision[:-1]
        p = versions.get((e.rcs, branch), None)
        if p is None:
            p = e.revision[:-2]
        e.parent = p
        versions[(e.rcs, branch)] = e.revision

    # update the log cache
    if cache:
        if log:
            # join up the old and new logs
            log.sort(key=lambda x: x.date)

            if oldlog and oldlog[-1].date >= log[0].date:
                raise logerror(_('log cache overlaps with new log entries,'
                                 ' re-run without cache.'))

            log = oldlog + log

            # write the new cachefile
            ui.note(_('writing cvs log cache %s\n') % cachefile)
            # NOTE(review): text-mode 'w' and no explicit close; assumes
            # pickle protocol 0 — TODO confirm before changing.
            pickle.dump(log, open(cachefile, 'w'))
        else:
            log = oldlog

    ui.status(_('%d log entries\n') % len(log))

    hook.hook(ui, None, "cvslog", True, log=log)

    return log
490
494
491
495
class changeset(object):
    '''A set of logentry objects grouped into one changeset.

    Instances carry the following attributes:
    .id        - integer identifying this changeset (list index)
    .author    - author name as CVS knows it
    .branch    - name of branch this changeset is on, or None
    .comment   - commit message
    .commitid  - CVS commitid or None
    .date      - the commit date as a (time,tz) tuple
    .entries   - list of logentry objects in this changeset
    .parents   - list of one or two parent changesets
    .tags      - list of tags on this changeset
    .synthetic - from synthetic revision "file ... added on branch ..."
    .mergepoint - the branch that has been merged from or None
    .branchpoints - the branches that start at the current entry or empty
    '''
    def __init__(self, **entries):
        # The list index is assigned later; changesets start out as
        # real (non-synthetic) unless a keyword overrides either field.
        self.id = None
        self.synthetic = False
        for name, value in entries.items():
            setattr(self, name, value)

    def __repr__(self):
        # Render attributes in sorted order so the repr is deterministic.
        pairs = []
        for name in sorted(self.__dict__):
            pairs.append("%s=%r" % (name, self.__dict__[name]))
        return "%s(%s)" % (type(self).__name__, ", ".join(pairs))
515
519
516 def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
520 def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
517 '''Convert log into changesets.'''
521 '''Convert log into changesets.'''
518
522
519 ui.status(_('creating changesets\n'))
523 ui.status(_('creating changesets\n'))
520
524
521 # try to order commitids by date
525 # try to order commitids by date
522 mindate = {}
526 mindate = {}
523 for e in log:
527 for e in log:
524 if e.commitid:
528 if e.commitid:
525 mindate[e.commitid] = min(e.date, mindate.get(e.commitid))
529 mindate[e.commitid] = min(e.date, mindate.get(e.commitid))
526
530
527 # Merge changesets
531 # Merge changesets
528 log.sort(key=lambda x: (mindate.get(x.commitid), x.commitid, x.comment,
532 log.sort(key=lambda x: (mindate.get(x.commitid), x.commitid, x.comment,
529 x.author, x.branch, x.date, x.branchpoints))
533 x.author, x.branch, x.date, x.branchpoints))
530
534
531 changesets = []
535 changesets = []
532 files = set()
536 files = set()
533 c = None
537 c = None
534 for i, e in enumerate(log):
538 for i, e in enumerate(log):
535
539
536 # Check if log entry belongs to the current changeset or not.
540 # Check if log entry belongs to the current changeset or not.
537
541
538 # Since CVS is file-centric, two different file revisions with
542 # Since CVS is file-centric, two different file revisions with
539 # different branchpoints should be treated as belonging to two
543 # different branchpoints should be treated as belonging to two
540 # different changesets (and the ordering is important and not
544 # different changesets (and the ordering is important and not
541 # honoured by cvsps at this point).
545 # honoured by cvsps at this point).
542 #
546 #
543 # Consider the following case:
547 # Consider the following case:
544 # foo 1.1 branchpoints: [MYBRANCH]
548 # foo 1.1 branchpoints: [MYBRANCH]
545 # bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2]
549 # bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2]
546 #
550 #
547 # Here foo is part only of MYBRANCH, but not MYBRANCH2, e.g. a
551 # Here foo is part only of MYBRANCH, but not MYBRANCH2, e.g. a
548 # later version of foo may be in MYBRANCH2, so foo should be the
552 # later version of foo may be in MYBRANCH2, so foo should be the
549 # first changeset and bar the next and MYBRANCH and MYBRANCH2
553 # first changeset and bar the next and MYBRANCH and MYBRANCH2
550 # should both start off of the bar changeset. No provisions are
554 # should both start off of the bar changeset. No provisions are
551 # made to ensure that this is, in fact, what happens.
555 # made to ensure that this is, in fact, what happens.
552 if not (c and e.branchpoints == c.branchpoints and
556 if not (c and e.branchpoints == c.branchpoints and
553 (# cvs commitids
557 (# cvs commitids
554 (e.commitid is not None and e.commitid == c.commitid) or
558 (e.commitid is not None and e.commitid == c.commitid) or
555 (# no commitids, use fuzzy commit detection
559 (# no commitids, use fuzzy commit detection
556 (e.commitid is None or c.commitid is None) and
560 (e.commitid is None or c.commitid is None) and
557 e.comment == c.comment and
561 e.comment == c.comment and
558 e.author == c.author and
562 e.author == c.author and
559 e.branch == c.branch and
563 e.branch == c.branch and
560 ((c.date[0] + c.date[1]) <=
564 ((c.date[0] + c.date[1]) <=
561 (e.date[0] + e.date[1]) <=
565 (e.date[0] + e.date[1]) <=
562 (c.date[0] + c.date[1]) + fuzz) and
566 (c.date[0] + c.date[1]) + fuzz) and
563 e.file not in files))):
567 e.file not in files))):
564 c = changeset(comment=e.comment, author=e.author,
568 c = changeset(comment=e.comment, author=e.author,
565 branch=e.branch, date=e.date,
569 branch=e.branch, date=e.date,
566 entries=[], mergepoint=e.mergepoint,
570 entries=[], mergepoint=e.mergepoint,
567 branchpoints=e.branchpoints, commitid=e.commitid)
571 branchpoints=e.branchpoints, commitid=e.commitid)
568 changesets.append(c)
572 changesets.append(c)
569
573
570 files = set()
574 files = set()
571 if len(changesets) % 100 == 0:
575 if len(changesets) % 100 == 0:
572 t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
576 t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
573 ui.status(util.ellipsis(t, 80) + '\n')
577 ui.status(util.ellipsis(t, 80) + '\n')
574
578
575 c.entries.append(e)
579 c.entries.append(e)
576 files.add(e.file)
580 files.add(e.file)
577 c.date = e.date # changeset date is date of latest commit in it
581 c.date = e.date # changeset date is date of latest commit in it
578
582
579 # Mark synthetic changesets
583 # Mark synthetic changesets
580
584
581 for c in changesets:
585 for c in changesets:
582 # Synthetic revisions always get their own changeset, because
586 # Synthetic revisions always get their own changeset, because
583 # the log message includes the filename. E.g. if you add file3
587 # the log message includes the filename. E.g. if you add file3
584 # and file4 on a branch, you get four log entries and three
588 # and file4 on a branch, you get four log entries and three
585 # changesets:
589 # changesets:
586 # "File file3 was added on branch ..." (synthetic, 1 entry)
590 # "File file3 was added on branch ..." (synthetic, 1 entry)
587 # "File file4 was added on branch ..." (synthetic, 1 entry)
591 # "File file4 was added on branch ..." (synthetic, 1 entry)
588 # "Add file3 and file4 to fix ..." (real, 2 entries)
592 # "Add file3 and file4 to fix ..." (real, 2 entries)
589 # Hence the check for 1 entry here.
593 # Hence the check for 1 entry here.
590 c.synthetic = len(c.entries) == 1 and c.entries[0].synthetic
594 c.synthetic = len(c.entries) == 1 and c.entries[0].synthetic
591
595
592 # Sort files in each changeset
596 # Sort files in each changeset
593
597
594 def entitycompare(l, r):
598 def entitycompare(l, r):
595 'Mimic cvsps sorting order'
599 'Mimic cvsps sorting order'
596 l = l.file.split('/')
600 l = l.file.split('/')
597 r = r.file.split('/')
601 r = r.file.split('/')
598 nl = len(l)
602 nl = len(l)
599 nr = len(r)
603 nr = len(r)
600 n = min(nl, nr)
604 n = min(nl, nr)
601 for i in range(n):
605 for i in range(n):
602 if i + 1 == nl and nl < nr:
606 if i + 1 == nl and nl < nr:
603 return -1
607 return -1
604 elif i + 1 == nr and nl > nr:
608 elif i + 1 == nr and nl > nr:
605 return +1
609 return +1
606 elif l[i] < r[i]:
610 elif l[i] < r[i]:
607 return -1
611 return -1
608 elif l[i] > r[i]:
612 elif l[i] > r[i]:
609 return +1
613 return +1
610 return 0
614 return 0
611
615
612 for c in changesets:
616 for c in changesets:
613 c.entries.sort(entitycompare)
617 c.entries.sort(entitycompare)
614
618
615 # Sort changesets by date
619 # Sort changesets by date
616
620
617 odd = set()
621 odd = set()
618 def cscmp(l, r, odd=odd):
622 def cscmp(l, r, odd=odd):
619 d = sum(l.date) - sum(r.date)
623 d = sum(l.date) - sum(r.date)
620 if d:
624 if d:
621 return d
625 return d
622
626
623 # detect vendor branches and initial commits on a branch
627 # detect vendor branches and initial commits on a branch
624 le = {}
628 le = {}
625 for e in l.entries:
629 for e in l.entries:
626 le[e.rcs] = e.revision
630 le[e.rcs] = e.revision
627 re = {}
631 re = {}
628 for e in r.entries:
632 for e in r.entries:
629 re[e.rcs] = e.revision
633 re[e.rcs] = e.revision
630
634
631 d = 0
635 d = 0
632 for e in l.entries:
636 for e in l.entries:
633 if re.get(e.rcs, None) == e.parent:
637 if re.get(e.rcs, None) == e.parent:
634 assert not d
638 assert not d
635 d = 1
639 d = 1
636 break
640 break
637
641
638 for e in r.entries:
642 for e in r.entries:
639 if le.get(e.rcs, None) == e.parent:
643 if le.get(e.rcs, None) == e.parent:
640 if d:
644 if d:
641 odd.add((l, r))
645 odd.add((l, r))
642 d = -1
646 d = -1
643 break
647 break
644 # By this point, the changesets are sufficiently compared that
648 # By this point, the changesets are sufficiently compared that
645 # we don't really care about ordering. However, this leaves
649 # we don't really care about ordering. However, this leaves
646 # some race conditions in the tests, so we compare on the
650 # some race conditions in the tests, so we compare on the
647 # number of files modified, the files contained in each
651 # number of files modified, the files contained in each
648 # changeset, and the branchpoints in the change to ensure test
652 # changeset, and the branchpoints in the change to ensure test
649 # output remains stable.
653 # output remains stable.
650
654
651 # recommended replacement for cmp from
655 # recommended replacement for cmp from
652 # https://docs.python.org/3.0/whatsnew/3.0.html
656 # https://docs.python.org/3.0/whatsnew/3.0.html
653 c = lambda x, y: (x > y) - (x < y)
657 c = lambda x, y: (x > y) - (x < y)
654 # Sort bigger changes first.
658 # Sort bigger changes first.
655 if not d:
659 if not d:
656 d = c(len(l.entries), len(r.entries))
660 d = c(len(l.entries), len(r.entries))
657 # Try sorting by filename in the change.
661 # Try sorting by filename in the change.
658 if not d:
662 if not d:
659 d = c([e.file for e in l.entries], [e.file for e in r.entries])
663 d = c([e.file for e in l.entries], [e.file for e in r.entries])
660 # Try and put changes without a branch point before ones with
664 # Try and put changes without a branch point before ones with
661 # a branch point.
665 # a branch point.
662 if not d:
666 if not d:
663 d = c(len(l.branchpoints), len(r.branchpoints))
667 d = c(len(l.branchpoints), len(r.branchpoints))
664 return d
668 return d
665
669
666 changesets.sort(cscmp)
670 changesets.sort(cscmp)
667
671
668 # Collect tags
672 # Collect tags
669
673
670 globaltags = {}
674 globaltags = {}
671 for c in changesets:
675 for c in changesets:
672 for e in c.entries:
676 for e in c.entries:
673 for tag in e.tags:
677 for tag in e.tags:
674 # remember which is the latest changeset to have this tag
678 # remember which is the latest changeset to have this tag
675 globaltags[tag] = c
679 globaltags[tag] = c
676
680
677 for c in changesets:
681 for c in changesets:
678 tags = set()
682 tags = set()
679 for e in c.entries:
683 for e in c.entries:
680 tags.update(e.tags)
684 tags.update(e.tags)
681 # remember tags only if this is the latest changeset to have it
685 # remember tags only if this is the latest changeset to have it
682 c.tags = sorted(tag for tag in tags if globaltags[tag] is c)
686 c.tags = sorted(tag for tag in tags if globaltags[tag] is c)
683
687
684 # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
688 # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
685 # by inserting dummy changesets with two parents, and handle
689 # by inserting dummy changesets with two parents, and handle
686 # {{mergefrombranch BRANCHNAME}} by setting two parents.
690 # {{mergefrombranch BRANCHNAME}} by setting two parents.
687
691
688 if mergeto is None:
692 if mergeto is None:
689 mergeto = r'{{mergetobranch ([-\w]+)}}'
693 mergeto = r'{{mergetobranch ([-\w]+)}}'
690 if mergeto:
694 if mergeto:
691 mergeto = re.compile(mergeto)
695 mergeto = re.compile(mergeto)
692
696
693 if mergefrom is None:
697 if mergefrom is None:
694 mergefrom = r'{{mergefrombranch ([-\w]+)}}'
698 mergefrom = r'{{mergefrombranch ([-\w]+)}}'
695 if mergefrom:
699 if mergefrom:
696 mergefrom = re.compile(mergefrom)
700 mergefrom = re.compile(mergefrom)
697
701
698 versions = {} # changeset index where we saw any particular file version
702 versions = {} # changeset index where we saw any particular file version
699 branches = {} # changeset index where we saw a branch
703 branches = {} # changeset index where we saw a branch
700 n = len(changesets)
704 n = len(changesets)
701 i = 0
705 i = 0
702 while i < n:
706 while i < n:
703 c = changesets[i]
707 c = changesets[i]
704
708
705 for f in c.entries:
709 for f in c.entries:
706 versions[(f.rcs, f.revision)] = i
710 versions[(f.rcs, f.revision)] = i
707
711
708 p = None
712 p = None
709 if c.branch in branches:
713 if c.branch in branches:
710 p = branches[c.branch]
714 p = branches[c.branch]
711 else:
715 else:
712 # first changeset on a new branch
716 # first changeset on a new branch
713 # the parent is a changeset with the branch in its
717 # the parent is a changeset with the branch in its
714 # branchpoints such that it is the latest possible
718 # branchpoints such that it is the latest possible
715 # commit without any intervening, unrelated commits.
719 # commit without any intervening, unrelated commits.
716
720
717 for candidate in xrange(i):
721 for candidate in xrange(i):
718 if c.branch not in changesets[candidate].branchpoints:
722 if c.branch not in changesets[candidate].branchpoints:
719 if p is not None:
723 if p is not None:
720 break
724 break
721 continue
725 continue
722 p = candidate
726 p = candidate
723
727
724 c.parents = []
728 c.parents = []
725 if p is not None:
729 if p is not None:
726 p = changesets[p]
730 p = changesets[p]
727
731
728 # Ensure no changeset has a synthetic changeset as a parent.
732 # Ensure no changeset has a synthetic changeset as a parent.
729 while p.synthetic:
733 while p.synthetic:
730 assert len(p.parents) <= 1, \
734 assert len(p.parents) <= 1, \
731 _('synthetic changeset cannot have multiple parents')
735 _('synthetic changeset cannot have multiple parents')
732 if p.parents:
736 if p.parents:
733 p = p.parents[0]
737 p = p.parents[0]
734 else:
738 else:
735 p = None
739 p = None
736 break
740 break
737
741
738 if p is not None:
742 if p is not None:
739 c.parents.append(p)
743 c.parents.append(p)
740
744
741 if c.mergepoint:
745 if c.mergepoint:
742 if c.mergepoint == 'HEAD':
746 if c.mergepoint == 'HEAD':
743 c.mergepoint = None
747 c.mergepoint = None
744 c.parents.append(changesets[branches[c.mergepoint]])
748 c.parents.append(changesets[branches[c.mergepoint]])
745
749
746 if mergefrom:
750 if mergefrom:
747 m = mergefrom.search(c.comment)
751 m = mergefrom.search(c.comment)
748 if m:
752 if m:
749 m = m.group(1)
753 m = m.group(1)
750 if m == 'HEAD':
754 if m == 'HEAD':
751 m = None
755 m = None
752 try:
756 try:
753 candidate = changesets[branches[m]]
757 candidate = changesets[branches[m]]
754 except KeyError:
758 except KeyError:
755 ui.warn(_("warning: CVS commit message references "
759 ui.warn(_("warning: CVS commit message references "
756 "non-existent branch %r:\n%s\n")
760 "non-existent branch %r:\n%s\n")
757 % (m, c.comment))
761 % (m, c.comment))
758 if m in branches and c.branch != m and not candidate.synthetic:
762 if m in branches and c.branch != m and not candidate.synthetic:
759 c.parents.append(candidate)
763 c.parents.append(candidate)
760
764
761 if mergeto:
765 if mergeto:
762 m = mergeto.search(c.comment)
766 m = mergeto.search(c.comment)
763 if m:
767 if m:
764 if m.groups():
768 if m.groups():
765 m = m.group(1)
769 m = m.group(1)
766 if m == 'HEAD':
770 if m == 'HEAD':
767 m = None
771 m = None
768 else:
772 else:
769 m = None # if no group found then merge to HEAD
773 m = None # if no group found then merge to HEAD
770 if m in branches and c.branch != m:
774 if m in branches and c.branch != m:
771 # insert empty changeset for merge
775 # insert empty changeset for merge
772 cc = changeset(
776 cc = changeset(
773 author=c.author, branch=m, date=c.date,
777 author=c.author, branch=m, date=c.date,
774 comment='convert-repo: CVS merge from branch %s'
778 comment='convert-repo: CVS merge from branch %s'
775 % c.branch,
779 % c.branch,
776 entries=[], tags=[],
780 entries=[], tags=[],
777 parents=[changesets[branches[m]], c])
781 parents=[changesets[branches[m]], c])
778 changesets.insert(i + 1, cc)
782 changesets.insert(i + 1, cc)
779 branches[m] = i + 1
783 branches[m] = i + 1
780
784
781 # adjust our loop counters now we have inserted a new entry
785 # adjust our loop counters now we have inserted a new entry
782 n += 1
786 n += 1
783 i += 2
787 i += 2
784 continue
788 continue
785
789
786 branches[c.branch] = i
790 branches[c.branch] = i
787 i += 1
791 i += 1
788
792
789 # Drop synthetic changesets (safe now that we have ensured no other
793 # Drop synthetic changesets (safe now that we have ensured no other
790 # changesets can have them as parents).
794 # changesets can have them as parents).
791 i = 0
795 i = 0
792 while i < len(changesets):
796 while i < len(changesets):
793 if changesets[i].synthetic:
797 if changesets[i].synthetic:
794 del changesets[i]
798 del changesets[i]
795 else:
799 else:
796 i += 1
800 i += 1
797
801
798 # Number changesets
802 # Number changesets
799
803
800 for i, c in enumerate(changesets):
804 for i, c in enumerate(changesets):
801 c.id = i + 1
805 c.id = i + 1
802
806
803 if odd:
807 if odd:
804 for l, r in odd:
808 for l, r in odd:
805 if l.id is not None and r.id is not None:
809 if l.id is not None and r.id is not None:
806 ui.warn(_('changeset %d is both before and after %d\n')
810 ui.warn(_('changeset %d is both before and after %d\n')
807 % (l.id, r.id))
811 % (l.id, r.id))
808
812
809 ui.status(_('%d changeset entries\n') % len(changesets))
813 ui.status(_('%d changeset entries\n') % len(changesets))
810
814
811 hook.hook(ui, None, "cvschangesets", True, changesets=changesets)
815 hook.hook(ui, None, "cvschangesets", True, changesets=changesets)
812
816
813 return changesets
817 return changesets
814
818
815
819
816 def debugcvsps(ui, *args, **opts):
820 def debugcvsps(ui, *args, **opts):
817 '''Read CVS rlog for current directory or named path in
821 '''Read CVS rlog for current directory or named path in
818 repository, and convert the log to changesets based on matching
822 repository, and convert the log to changesets based on matching
819 commit log entries and dates.
823 commit log entries and dates.
820 '''
824 '''
821 if opts["new_cache"]:
825 if opts["new_cache"]:
822 cache = "write"
826 cache = "write"
823 elif opts["update_cache"]:
827 elif opts["update_cache"]:
824 cache = "update"
828 cache = "update"
825 else:
829 else:
826 cache = None
830 cache = None
827
831
828 revisions = opts["revisions"]
832 revisions = opts["revisions"]
829
833
830 try:
834 try:
831 if args:
835 if args:
832 log = []
836 log = []
833 for d in args:
837 for d in args:
834 log += createlog(ui, d, root=opts["root"], cache=cache)
838 log += createlog(ui, d, root=opts["root"], cache=cache)
835 else:
839 else:
836 log = createlog(ui, root=opts["root"], cache=cache)
840 log = createlog(ui, root=opts["root"], cache=cache)
837 except logerror as e:
841 except logerror as e:
838 ui.write("%r\n"%e)
842 ui.write("%r\n"%e)
839 return
843 return
840
844
841 changesets = createchangeset(ui, log, opts["fuzz"])
845 changesets = createchangeset(ui, log, opts["fuzz"])
842 del log
846 del log
843
847
844 # Print changesets (optionally filtered)
848 # Print changesets (optionally filtered)
845
849
846 off = len(revisions)
850 off = len(revisions)
847 branches = {} # latest version number in each branch
851 branches = {} # latest version number in each branch
848 ancestors = {} # parent branch
852 ancestors = {} # parent branch
849 for cs in changesets:
853 for cs in changesets:
850
854
851 if opts["ancestors"]:
855 if opts["ancestors"]:
852 if cs.branch not in branches and cs.parents and cs.parents[0].id:
856 if cs.branch not in branches and cs.parents and cs.parents[0].id:
853 ancestors[cs.branch] = (changesets[cs.parents[0].id - 1].branch,
857 ancestors[cs.branch] = (changesets[cs.parents[0].id - 1].branch,
854 cs.parents[0].id)
858 cs.parents[0].id)
855 branches[cs.branch] = cs.id
859 branches[cs.branch] = cs.id
856
860
857 # limit by branches
861 # limit by branches
858 if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
862 if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
859 continue
863 continue
860
864
861 if not off:
865 if not off:
862 # Note: trailing spaces on several lines here are needed to have
866 # Note: trailing spaces on several lines here are needed to have
863 # bug-for-bug compatibility with cvsps.
867 # bug-for-bug compatibility with cvsps.
864 ui.write('---------------------\n')
868 ui.write('---------------------\n')
865 ui.write(('PatchSet %d \n' % cs.id))
869 ui.write(('PatchSet %d \n' % cs.id))
866 ui.write(('Date: %s\n' % util.datestr(cs.date,
870 ui.write(('Date: %s\n' % util.datestr(cs.date,
867 '%Y/%m/%d %H:%M:%S %1%2')))
871 '%Y/%m/%d %H:%M:%S %1%2')))
868 ui.write(('Author: %s\n' % cs.author))
872 ui.write(('Author: %s\n' % cs.author))
869 ui.write(('Branch: %s\n' % (cs.branch or 'HEAD')))
873 ui.write(('Branch: %s\n' % (cs.branch or 'HEAD')))
870 ui.write(('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
874 ui.write(('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
871 ','.join(cs.tags) or '(none)')))
875 ','.join(cs.tags) or '(none)')))
872 if cs.branchpoints:
876 if cs.branchpoints:
873 ui.write(('Branchpoints: %s \n') %
877 ui.write(('Branchpoints: %s \n') %
874 ', '.join(sorted(cs.branchpoints)))
878 ', '.join(sorted(cs.branchpoints)))
875 if opts["parents"] and cs.parents:
879 if opts["parents"] and cs.parents:
876 if len(cs.parents) > 1:
880 if len(cs.parents) > 1:
877 ui.write(('Parents: %s\n' %
881 ui.write(('Parents: %s\n' %
878 (','.join([str(p.id) for p in cs.parents]))))
882 (','.join([str(p.id) for p in cs.parents]))))
879 else:
883 else:
880 ui.write(('Parent: %d\n' % cs.parents[0].id))
884 ui.write(('Parent: %d\n' % cs.parents[0].id))
881
885
882 if opts["ancestors"]:
886 if opts["ancestors"]:
883 b = cs.branch
887 b = cs.branch
884 r = []
888 r = []
885 while b:
889 while b:
886 b, c = ancestors[b]
890 b, c = ancestors[b]
887 r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
891 r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
888 if r:
892 if r:
889 ui.write(('Ancestors: %s\n' % (','.join(r))))
893 ui.write(('Ancestors: %s\n' % (','.join(r))))
890
894
891 ui.write(('Log:\n'))
895 ui.write(('Log:\n'))
892 ui.write('%s\n\n' % cs.comment)
896 ui.write('%s\n\n' % cs.comment)
893 ui.write(('Members: \n'))
897 ui.write(('Members: \n'))
894 for f in cs.entries:
898 for f in cs.entries:
895 fn = f.file
899 fn = f.file
896 if fn.startswith(opts["prefix"]):
900 if fn.startswith(opts["prefix"]):
897 fn = fn[len(opts["prefix"]):]
901 fn = fn[len(opts["prefix"]):]
898 ui.write('\t%s:%s->%s%s \n' % (
902 ui.write('\t%s:%s->%s%s \n' % (
899 fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
903 fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
900 '.'.join([str(x) for x in f.revision]),
904 '.'.join([str(x) for x in f.revision]),
901 ['', '(DEAD)'][f.dead]))
905 ['', '(DEAD)'][f.dead]))
902 ui.write('\n')
906 ui.write('\n')
903
907
904 # have we seen the start tag?
908 # have we seen the start tag?
905 if revisions and off:
909 if revisions and off:
906 if revisions[0] == str(cs.id) or \
910 if revisions[0] == str(cs.id) or \
907 revisions[0] in cs.tags:
911 revisions[0] in cs.tags:
908 off = False
912 off = False
909
913
910 # see if we reached the end tag
914 # see if we reached the end tag
911 if len(revisions) > 1 and not off:
915 if len(revisions) > 1 and not off:
912 if revisions[1] == str(cs.id) or \
916 if revisions[1] == str(cs.id) or \
913 revisions[1] in cs.tags:
917 revisions[1] in cs.tags:
914 break
918 break
@@ -1,154 +1,153 b''
1 #require test-repo
1 #require test-repo
2
2
3 $ cd "$TESTDIR"/..
3 $ cd "$TESTDIR"/..
4
4
5 $ hg files 'set:(**.py)' | sed 's|\\|/|g' | xargs python contrib/check-py3-compat.py
5 $ hg files 'set:(**.py)' | sed 's|\\|/|g' | xargs python contrib/check-py3-compat.py
6 contrib/check-code.py not using absolute_import
6 contrib/check-code.py not using absolute_import
7 contrib/check-code.py requires print_function
7 contrib/check-code.py requires print_function
8 contrib/debugshell.py not using absolute_import
8 contrib/debugshell.py not using absolute_import
9 contrib/hgfixes/fix_bytes.py not using absolute_import
9 contrib/hgfixes/fix_bytes.py not using absolute_import
10 contrib/hgfixes/fix_bytesmod.py not using absolute_import
10 contrib/hgfixes/fix_bytesmod.py not using absolute_import
11 contrib/hgfixes/fix_leftover_imports.py not using absolute_import
11 contrib/hgfixes/fix_leftover_imports.py not using absolute_import
12 contrib/import-checker.py not using absolute_import
12 contrib/import-checker.py not using absolute_import
13 contrib/import-checker.py requires print_function
13 contrib/import-checker.py requires print_function
14 contrib/memory.py not using absolute_import
14 contrib/memory.py not using absolute_import
15 contrib/perf.py not using absolute_import
15 contrib/perf.py not using absolute_import
16 contrib/python-hook-examples.py not using absolute_import
16 contrib/python-hook-examples.py not using absolute_import
17 contrib/revsetbenchmarks.py not using absolute_import
17 contrib/revsetbenchmarks.py not using absolute_import
18 contrib/revsetbenchmarks.py requires print_function
18 contrib/revsetbenchmarks.py requires print_function
19 contrib/showstack.py not using absolute_import
19 contrib/showstack.py not using absolute_import
20 contrib/synthrepo.py not using absolute_import
20 contrib/synthrepo.py not using absolute_import
21 contrib/win32/hgwebdir_wsgi.py not using absolute_import
21 contrib/win32/hgwebdir_wsgi.py not using absolute_import
22 doc/check-seclevel.py not using absolute_import
22 doc/check-seclevel.py not using absolute_import
23 doc/gendoc.py not using absolute_import
23 doc/gendoc.py not using absolute_import
24 doc/hgmanpage.py not using absolute_import
24 doc/hgmanpage.py not using absolute_import
25 hgext/__init__.py not using absolute_import
25 hgext/__init__.py not using absolute_import
26 hgext/color.py not using absolute_import
26 hgext/color.py not using absolute_import
27 hgext/convert/__init__.py not using absolute_import
27 hgext/convert/__init__.py not using absolute_import
28 hgext/convert/bzr.py not using absolute_import
28 hgext/convert/bzr.py not using absolute_import
29 hgext/convert/common.py not using absolute_import
29 hgext/convert/common.py not using absolute_import
30 hgext/convert/convcmd.py not using absolute_import
30 hgext/convert/convcmd.py not using absolute_import
31 hgext/convert/cvs.py not using absolute_import
31 hgext/convert/cvs.py not using absolute_import
32 hgext/convert/cvsps.py not using absolute_import
33 hgext/convert/hg.py not using absolute_import
32 hgext/convert/hg.py not using absolute_import
34 hgext/convert/monotone.py not using absolute_import
33 hgext/convert/monotone.py not using absolute_import
35 hgext/convert/p4.py not using absolute_import
34 hgext/convert/p4.py not using absolute_import
36 hgext/convert/subversion.py not using absolute_import
35 hgext/convert/subversion.py not using absolute_import
37 hgext/convert/transport.py not using absolute_import
36 hgext/convert/transport.py not using absolute_import
38 hgext/eol.py not using absolute_import
37 hgext/eol.py not using absolute_import
39 hgext/extdiff.py not using absolute_import
38 hgext/extdiff.py not using absolute_import
40 hgext/factotum.py not using absolute_import
39 hgext/factotum.py not using absolute_import
41 hgext/fetch.py not using absolute_import
40 hgext/fetch.py not using absolute_import
42 hgext/gpg.py not using absolute_import
41 hgext/gpg.py not using absolute_import
43 hgext/graphlog.py not using absolute_import
42 hgext/graphlog.py not using absolute_import
44 hgext/hgcia.py not using absolute_import
43 hgext/hgcia.py not using absolute_import
45 hgext/hgk.py not using absolute_import
44 hgext/hgk.py not using absolute_import
46 hgext/highlight/__init__.py not using absolute_import
45 hgext/highlight/__init__.py not using absolute_import
47 hgext/highlight/highlight.py not using absolute_import
46 hgext/highlight/highlight.py not using absolute_import
48 hgext/histedit.py not using absolute_import
47 hgext/histedit.py not using absolute_import
49 hgext/largefiles/__init__.py not using absolute_import
48 hgext/largefiles/__init__.py not using absolute_import
50 hgext/largefiles/basestore.py not using absolute_import
49 hgext/largefiles/basestore.py not using absolute_import
51 hgext/largefiles/lfcommands.py not using absolute_import
50 hgext/largefiles/lfcommands.py not using absolute_import
52 hgext/largefiles/lfutil.py not using absolute_import
51 hgext/largefiles/lfutil.py not using absolute_import
53 hgext/largefiles/localstore.py not using absolute_import
52 hgext/largefiles/localstore.py not using absolute_import
54 hgext/largefiles/overrides.py not using absolute_import
53 hgext/largefiles/overrides.py not using absolute_import
55 hgext/largefiles/proto.py not using absolute_import
54 hgext/largefiles/proto.py not using absolute_import
56 hgext/largefiles/remotestore.py not using absolute_import
55 hgext/largefiles/remotestore.py not using absolute_import
57 hgext/largefiles/reposetup.py not using absolute_import
56 hgext/largefiles/reposetup.py not using absolute_import
58 hgext/largefiles/uisetup.py not using absolute_import
57 hgext/largefiles/uisetup.py not using absolute_import
59 hgext/largefiles/wirestore.py not using absolute_import
58 hgext/largefiles/wirestore.py not using absolute_import
60 hgext/mq.py not using absolute_import
59 hgext/mq.py not using absolute_import
61 hgext/notify.py not using absolute_import
60 hgext/notify.py not using absolute_import
62 hgext/patchbomb.py not using absolute_import
61 hgext/patchbomb.py not using absolute_import
63 hgext/purge.py not using absolute_import
62 hgext/purge.py not using absolute_import
64 hgext/rebase.py not using absolute_import
63 hgext/rebase.py not using absolute_import
65 hgext/record.py not using absolute_import
64 hgext/record.py not using absolute_import
66 hgext/relink.py not using absolute_import
65 hgext/relink.py not using absolute_import
67 hgext/schemes.py not using absolute_import
66 hgext/schemes.py not using absolute_import
68 hgext/share.py not using absolute_import
67 hgext/share.py not using absolute_import
69 hgext/shelve.py not using absolute_import
68 hgext/shelve.py not using absolute_import
70 hgext/strip.py not using absolute_import
69 hgext/strip.py not using absolute_import
71 hgext/transplant.py not using absolute_import
70 hgext/transplant.py not using absolute_import
72 hgext/win32mbcs.py not using absolute_import
71 hgext/win32mbcs.py not using absolute_import
73 hgext/win32text.py not using absolute_import
72 hgext/win32text.py not using absolute_import
74 i18n/check-translation.py not using absolute_import
73 i18n/check-translation.py not using absolute_import
75 i18n/polib.py not using absolute_import
74 i18n/polib.py not using absolute_import
76 setup.py not using absolute_import
75 setup.py not using absolute_import
77 tests/filterpyflakes.py requires print_function
76 tests/filterpyflakes.py requires print_function
78 tests/generate-working-copy-states.py requires print_function
77 tests/generate-working-copy-states.py requires print_function
79 tests/get-with-headers.py requires print_function
78 tests/get-with-headers.py requires print_function
80 tests/heredoctest.py requires print_function
79 tests/heredoctest.py requires print_function
81 tests/hypothesishelpers.py not using absolute_import
80 tests/hypothesishelpers.py not using absolute_import
82 tests/hypothesishelpers.py requires print_function
81 tests/hypothesishelpers.py requires print_function
83 tests/killdaemons.py not using absolute_import
82 tests/killdaemons.py not using absolute_import
84 tests/md5sum.py not using absolute_import
83 tests/md5sum.py not using absolute_import
85 tests/mockblackbox.py not using absolute_import
84 tests/mockblackbox.py not using absolute_import
86 tests/printenv.py not using absolute_import
85 tests/printenv.py not using absolute_import
87 tests/readlink.py not using absolute_import
86 tests/readlink.py not using absolute_import
88 tests/readlink.py requires print_function
87 tests/readlink.py requires print_function
89 tests/revlog-formatv0.py not using absolute_import
88 tests/revlog-formatv0.py not using absolute_import
90 tests/run-tests.py not using absolute_import
89 tests/run-tests.py not using absolute_import
91 tests/seq.py not using absolute_import
90 tests/seq.py not using absolute_import
92 tests/seq.py requires print_function
91 tests/seq.py requires print_function
93 tests/silenttestrunner.py not using absolute_import
92 tests/silenttestrunner.py not using absolute_import
94 tests/silenttestrunner.py requires print_function
93 tests/silenttestrunner.py requires print_function
95 tests/sitecustomize.py not using absolute_import
94 tests/sitecustomize.py not using absolute_import
96 tests/svn-safe-append.py not using absolute_import
95 tests/svn-safe-append.py not using absolute_import
97 tests/svnxml.py not using absolute_import
96 tests/svnxml.py not using absolute_import
98 tests/test-ancestor.py requires print_function
97 tests/test-ancestor.py requires print_function
99 tests/test-atomictempfile.py not using absolute_import
98 tests/test-atomictempfile.py not using absolute_import
100 tests/test-batching.py not using absolute_import
99 tests/test-batching.py not using absolute_import
101 tests/test-batching.py requires print_function
100 tests/test-batching.py requires print_function
102 tests/test-bdiff.py not using absolute_import
101 tests/test-bdiff.py not using absolute_import
103 tests/test-bdiff.py requires print_function
102 tests/test-bdiff.py requires print_function
104 tests/test-context.py not using absolute_import
103 tests/test-context.py not using absolute_import
105 tests/test-context.py requires print_function
104 tests/test-context.py requires print_function
106 tests/test-demandimport.py not using absolute_import
105 tests/test-demandimport.py not using absolute_import
107 tests/test-demandimport.py requires print_function
106 tests/test-demandimport.py requires print_function
108 tests/test-dispatch.py not using absolute_import
107 tests/test-dispatch.py not using absolute_import
109 tests/test-dispatch.py requires print_function
108 tests/test-dispatch.py requires print_function
110 tests/test-doctest.py not using absolute_import
109 tests/test-doctest.py not using absolute_import
111 tests/test-duplicateoptions.py not using absolute_import
110 tests/test-duplicateoptions.py not using absolute_import
112 tests/test-duplicateoptions.py requires print_function
111 tests/test-duplicateoptions.py requires print_function
113 tests/test-filecache.py not using absolute_import
112 tests/test-filecache.py not using absolute_import
114 tests/test-filecache.py requires print_function
113 tests/test-filecache.py requires print_function
115 tests/test-filelog.py not using absolute_import
114 tests/test-filelog.py not using absolute_import
116 tests/test-filelog.py requires print_function
115 tests/test-filelog.py requires print_function
117 tests/test-hg-parseurl.py not using absolute_import
116 tests/test-hg-parseurl.py not using absolute_import
118 tests/test-hg-parseurl.py requires print_function
117 tests/test-hg-parseurl.py requires print_function
119 tests/test-hgweb-auth.py not using absolute_import
118 tests/test-hgweb-auth.py not using absolute_import
120 tests/test-hgweb-auth.py requires print_function
119 tests/test-hgweb-auth.py requires print_function
121 tests/test-hgwebdir-paths.py not using absolute_import
120 tests/test-hgwebdir-paths.py not using absolute_import
122 tests/test-hybridencode.py not using absolute_import
121 tests/test-hybridencode.py not using absolute_import
123 tests/test-hybridencode.py requires print_function
122 tests/test-hybridencode.py requires print_function
124 tests/test-lrucachedict.py not using absolute_import
123 tests/test-lrucachedict.py not using absolute_import
125 tests/test-lrucachedict.py requires print_function
124 tests/test-lrucachedict.py requires print_function
126 tests/test-manifest.py not using absolute_import
125 tests/test-manifest.py not using absolute_import
127 tests/test-minirst.py not using absolute_import
126 tests/test-minirst.py not using absolute_import
128 tests/test-minirst.py requires print_function
127 tests/test-minirst.py requires print_function
129 tests/test-parseindex2.py not using absolute_import
128 tests/test-parseindex2.py not using absolute_import
130 tests/test-parseindex2.py requires print_function
129 tests/test-parseindex2.py requires print_function
131 tests/test-pathencode.py not using absolute_import
130 tests/test-pathencode.py not using absolute_import
132 tests/test-pathencode.py requires print_function
131 tests/test-pathencode.py requires print_function
133 tests/test-propertycache.py not using absolute_import
132 tests/test-propertycache.py not using absolute_import
134 tests/test-propertycache.py requires print_function
133 tests/test-propertycache.py requires print_function
135 tests/test-revlog-ancestry.py not using absolute_import
134 tests/test-revlog-ancestry.py not using absolute_import
136 tests/test-revlog-ancestry.py requires print_function
135 tests/test-revlog-ancestry.py requires print_function
137 tests/test-run-tests.py not using absolute_import
136 tests/test-run-tests.py not using absolute_import
138 tests/test-simplemerge.py not using absolute_import
137 tests/test-simplemerge.py not using absolute_import
139 tests/test-status-inprocess.py not using absolute_import
138 tests/test-status-inprocess.py not using absolute_import
140 tests/test-status-inprocess.py requires print_function
139 tests/test-status-inprocess.py requires print_function
141 tests/test-symlink-os-yes-fs-no.py not using absolute_import
140 tests/test-symlink-os-yes-fs-no.py not using absolute_import
142 tests/test-trusted.py not using absolute_import
141 tests/test-trusted.py not using absolute_import
143 tests/test-trusted.py requires print_function
142 tests/test-trusted.py requires print_function
144 tests/test-ui-color.py not using absolute_import
143 tests/test-ui-color.py not using absolute_import
145 tests/test-ui-color.py requires print_function
144 tests/test-ui-color.py requires print_function
146 tests/test-ui-config.py not using absolute_import
145 tests/test-ui-config.py not using absolute_import
147 tests/test-ui-config.py requires print_function
146 tests/test-ui-config.py requires print_function
148 tests/test-ui-verbosity.py not using absolute_import
147 tests/test-ui-verbosity.py not using absolute_import
149 tests/test-ui-verbosity.py requires print_function
148 tests/test-ui-verbosity.py requires print_function
150 tests/test-url.py not using absolute_import
149 tests/test-url.py not using absolute_import
151 tests/test-url.py requires print_function
150 tests/test-url.py requires print_function
152 tests/test-walkrepo.py requires print_function
151 tests/test-walkrepo.py requires print_function
153 tests/test-wireproto.py requires print_function
152 tests/test-wireproto.py requires print_function
154 tests/tinyproxy.py requires print_function
153 tests/tinyproxy.py requires print_function
General Comments 0
You need to be logged in to leave comments. Login now