run-tests: stop writing a `python3` symlink pointing to python2...
Author: marmoute
Changeset: r48294:23f5ed6d (default branch)
@@ -1,113 +1,113 @@
1 - #!/usr/bin/env python3
1 + #!/usr/bin/env python
2
2
3 from __future__ import absolute_import
3 from __future__ import absolute_import
4
4
5 """
5 """
6 Small and dumb HTTP server for use in tests.
6 Small and dumb HTTP server for use in tests.
7 """
7 """
8
8
9 import optparse
9 import optparse
10 import os
10 import os
11 import signal
11 import signal
12 import socket
12 import socket
13 import sys
13 import sys
14
14
15 from mercurial import (
15 from mercurial import (
16 encoding,
16 encoding,
17 pycompat,
17 pycompat,
18 server,
18 server,
19 util,
19 util,
20 )
20 )
21
21
22 httpserver = util.httpserver
22 httpserver = util.httpserver
23 OptionParser = optparse.OptionParser
23 OptionParser = optparse.OptionParser
24
24
25 if os.environ.get('HGIPV6', '0') == '1':
25 if os.environ.get('HGIPV6', '0') == '1':
26
26
27 class simplehttpserver(httpserver.httpserver):
27 class simplehttpserver(httpserver.httpserver):
28 address_family = socket.AF_INET6
28 address_family = socket.AF_INET6
29
29
30
30
31 else:
31 else:
32 simplehttpserver = httpserver.httpserver
32 simplehttpserver = httpserver.httpserver
33
33
34
34
35 class _httprequesthandler(httpserver.simplehttprequesthandler):
35 class _httprequesthandler(httpserver.simplehttprequesthandler):
36 def log_message(self, format, *args):
36 def log_message(self, format, *args):
37 httpserver.simplehttprequesthandler.log_message(self, format, *args)
37 httpserver.simplehttprequesthandler.log_message(self, format, *args)
38 sys.stderr.flush()
38 sys.stderr.flush()
39
39
40
40
41 class simplehttpservice(object):
41 class simplehttpservice(object):
42 def __init__(self, host, port):
42 def __init__(self, host, port):
43 self.address = (host, port)
43 self.address = (host, port)
44
44
45 def init(self):
45 def init(self):
46 self.httpd = simplehttpserver(self.address, _httprequesthandler)
46 self.httpd = simplehttpserver(self.address, _httprequesthandler)
47
47
48 def run(self):
48 def run(self):
49 self.httpd.serve_forever()
49 self.httpd.serve_forever()
50
50
51
51
52 if __name__ == '__main__':
52 if __name__ == '__main__':
53 parser = OptionParser()
53 parser = OptionParser()
54 parser.add_option(
54 parser.add_option(
55 '-p',
55 '-p',
56 '--port',
56 '--port',
57 dest='port',
57 dest='port',
58 type='int',
58 type='int',
59 default=8000,
59 default=8000,
60 help='TCP port to listen on',
60 help='TCP port to listen on',
61 metavar='PORT',
61 metavar='PORT',
62 )
62 )
63 parser.add_option(
63 parser.add_option(
64 '-H',
64 '-H',
65 '--host',
65 '--host',
66 dest='host',
66 dest='host',
67 default='localhost',
67 default='localhost',
68 help='hostname or IP to listen on',
68 help='hostname or IP to listen on',
69 metavar='HOST',
69 metavar='HOST',
70 )
70 )
71 parser.add_option('--logfile', help='file name of access/error log')
71 parser.add_option('--logfile', help='file name of access/error log')
72 parser.add_option(
72 parser.add_option(
73 '--pid',
73 '--pid',
74 dest='pid',
74 dest='pid',
75 help='file name where the PID of the server is stored',
75 help='file name where the PID of the server is stored',
76 )
76 )
77 parser.add_option(
77 parser.add_option(
78 '-f',
78 '-f',
79 '--foreground',
79 '--foreground',
80 dest='foreground',
80 dest='foreground',
81 action='store_true',
81 action='store_true',
82 help='do not start the HTTP server in the background',
82 help='do not start the HTTP server in the background',
83 )
83 )
84 parser.add_option('--daemon-postexec', action='append')
84 parser.add_option('--daemon-postexec', action='append')
85
85
86 (options, args) = parser.parse_args()
86 (options, args) = parser.parse_args()
87
87
88 signal.signal(signal.SIGTERM, lambda x, y: sys.exit(0))
88 signal.signal(signal.SIGTERM, lambda x, y: sys.exit(0))
89
89
90 if options.foreground and options.logfile:
90 if options.foreground and options.logfile:
91 parser.error(
91 parser.error(
92 "options --logfile and --foreground are mutually " "exclusive"
92 "options --logfile and --foreground are mutually " "exclusive"
93 )
93 )
94 if options.foreground and options.pid:
94 if options.foreground and options.pid:
95 parser.error("options --pid and --foreground are mutually exclusive")
95 parser.error("options --pid and --foreground are mutually exclusive")
96
96
97 opts = {
97 opts = {
98 b'pid_file': options.pid,
98 b'pid_file': options.pid,
99 b'daemon': not options.foreground,
99 b'daemon': not options.foreground,
100 b'daemon_postexec': pycompat.rapply(
100 b'daemon_postexec': pycompat.rapply(
101 encoding.strtolocal, options.daemon_postexec
101 encoding.strtolocal, options.daemon_postexec
102 ),
102 ),
103 }
103 }
104 service = simplehttpservice(options.host, options.port)
104 service = simplehttpservice(options.host, options.port)
105 runargs = [sys.executable, __file__] + sys.argv[1:]
105 runargs = [sys.executable, __file__] + sys.argv[1:]
106 runargs = [pycompat.fsencode(a) for a in runargs]
106 runargs = [pycompat.fsencode(a) for a in runargs]
107 server.runservice(
107 server.runservice(
108 opts,
108 opts,
109 initfn=service.init,
109 initfn=service.init,
110 runfn=service.run,
110 runfn=service.run,
111 logfile=options.logfile,
111 logfile=options.logfile,
112 runargs=runargs,
112 runargs=runargs,
113 )
113 )
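The helper above is normally started from shell-based tests; purely as context, here is a minimal Python sketch of an equivalent invocation. The script path, the chosen port, the one-second startup wait, and an importable `mercurial` package are assumptions made for illustration, not part of the patch.

```python
# Sketch (assumptions, not part of the patch): launch dumbhttp.py in the
# foreground and fetch one page from it.
import subprocess
import sys
import time
import urllib.request

proc = subprocess.Popen(
    [sys.executable, 'dumbhttp.py', '--foreground', '--port', '18000']
)
try:
    time.sleep(1)  # crude wait for the server to start listening
    with urllib.request.urlopen('http://localhost:18000/') as resp:
        print(resp.status)  # the dumb server serves the current directory
finally:
    proc.terminate()
    proc.wait()
```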
@@ -1,121 +1,121 @@
1 - #!/usr/bin/env python3
1 + #!/usr/bin/env python
2
2
3 """dummy SMTP server for use in tests"""
3 """dummy SMTP server for use in tests"""
4
4
5 from __future__ import absolute_import
5 from __future__ import absolute_import
6
6
7 import asyncore
7 import asyncore
8 import optparse
8 import optparse
9 import smtpd
9 import smtpd
10 import ssl
10 import ssl
11 import sys
11 import sys
12 import traceback
12 import traceback
13
13
14 from mercurial import (
14 from mercurial import (
15 pycompat,
15 pycompat,
16 server,
16 server,
17 sslutil,
17 sslutil,
18 ui as uimod,
18 ui as uimod,
19 )
19 )
20
20
21
21
22 def log(msg):
22 def log(msg):
23 sys.stdout.write(msg)
23 sys.stdout.write(msg)
24 sys.stdout.flush()
24 sys.stdout.flush()
25
25
26
26
27 class dummysmtpserver(smtpd.SMTPServer):
27 class dummysmtpserver(smtpd.SMTPServer):
28 def __init__(self, localaddr):
28 def __init__(self, localaddr):
29 smtpd.SMTPServer.__init__(self, localaddr, remoteaddr=None)
29 smtpd.SMTPServer.__init__(self, localaddr, remoteaddr=None)
30
30
31 def process_message(self, peer, mailfrom, rcpttos, data, **kwargs):
31 def process_message(self, peer, mailfrom, rcpttos, data, **kwargs):
32 log('%s from=%s to=%s\n' % (peer[0], mailfrom, ', '.join(rcpttos)))
32 log('%s from=%s to=%s\n' % (peer[0], mailfrom, ', '.join(rcpttos)))
33
33
34 def handle_error(self):
34 def handle_error(self):
35 # On Windows, a bad SSL connection sometimes generates a WSAECONNRESET.
35 # On Windows, a bad SSL connection sometimes generates a WSAECONNRESET.
36 # The default handler will shutdown this server, and then both the
36 # The default handler will shutdown this server, and then both the
37 # current connection and subsequent ones fail on the client side with
37 # current connection and subsequent ones fail on the client side with
38 # "No connection could be made because the target machine actively
38 # "No connection could be made because the target machine actively
39 # refused it". If we eat the error, then the client properly aborts in
39 # refused it". If we eat the error, then the client properly aborts in
40 # the expected way, and the server is available for subsequent requests.
40 # the expected way, and the server is available for subsequent requests.
41 traceback.print_exc()
41 traceback.print_exc()
42
42
43
43
44 class dummysmtpsecureserver(dummysmtpserver):
44 class dummysmtpsecureserver(dummysmtpserver):
45 def __init__(self, localaddr, certfile):
45 def __init__(self, localaddr, certfile):
46 dummysmtpserver.__init__(self, localaddr)
46 dummysmtpserver.__init__(self, localaddr)
47 self._certfile = certfile
47 self._certfile = certfile
48
48
49 def handle_accept(self):
49 def handle_accept(self):
50 pair = self.accept()
50 pair = self.accept()
51 if not pair:
51 if not pair:
52 return
52 return
53 conn, addr = pair
53 conn, addr = pair
54 ui = uimod.ui.load()
54 ui = uimod.ui.load()
55 try:
55 try:
56 # wrap_socket() would block, but we don't care
56 # wrap_socket() would block, but we don't care
57 conn = sslutil.wrapserversocket(conn, ui, certfile=self._certfile)
57 conn = sslutil.wrapserversocket(conn, ui, certfile=self._certfile)
58 except ssl.SSLError:
58 except ssl.SSLError:
59 log('%s ssl error\n' % addr[0])
59 log('%s ssl error\n' % addr[0])
60 conn.close()
60 conn.close()
61 return
61 return
62 smtpd.SMTPChannel(self, conn, addr)
62 smtpd.SMTPChannel(self, conn, addr)
63
63
64
64
65 def run():
65 def run():
66 try:
66 try:
67 asyncore.loop()
67 asyncore.loop()
68 except KeyboardInterrupt:
68 except KeyboardInterrupt:
69 pass
69 pass
70
70
71
71
72 def _encodestrsonly(v):
72 def _encodestrsonly(v):
73 if isinstance(v, type(u'')):
73 if isinstance(v, type(u'')):
74 return v.encode('ascii')
74 return v.encode('ascii')
75 return v
75 return v
76
76
77
77
78 def bytesvars(obj):
78 def bytesvars(obj):
79 unidict = vars(obj)
79 unidict = vars(obj)
80 bd = {k.encode('ascii'): _encodestrsonly(v) for k, v in unidict.items()}
80 bd = {k.encode('ascii'): _encodestrsonly(v) for k, v in unidict.items()}
81 if bd[b'daemon_postexec'] is not None:
81 if bd[b'daemon_postexec'] is not None:
82 bd[b'daemon_postexec'] = [
82 bd[b'daemon_postexec'] = [
83 _encodestrsonly(v) for v in bd[b'daemon_postexec']
83 _encodestrsonly(v) for v in bd[b'daemon_postexec']
84 ]
84 ]
85 return bd
85 return bd
86
86
87
87
88 def main():
88 def main():
89 op = optparse.OptionParser()
89 op = optparse.OptionParser()
90 op.add_option('-d', '--daemon', action='store_true')
90 op.add_option('-d', '--daemon', action='store_true')
91 op.add_option('--daemon-postexec', action='append')
91 op.add_option('--daemon-postexec', action='append')
92 op.add_option('-p', '--port', type=int, default=8025)
92 op.add_option('-p', '--port', type=int, default=8025)
93 op.add_option('-a', '--address', default='localhost')
93 op.add_option('-a', '--address', default='localhost')
94 op.add_option('--pid-file', metavar='FILE')
94 op.add_option('--pid-file', metavar='FILE')
95 op.add_option('--tls', choices=['none', 'smtps'], default='none')
95 op.add_option('--tls', choices=['none', 'smtps'], default='none')
96 op.add_option('--certificate', metavar='FILE')
96 op.add_option('--certificate', metavar='FILE')
97
97
98 opts, args = op.parse_args()
98 opts, args = op.parse_args()
99 if opts.tls == 'smtps' and not opts.certificate:
99 if opts.tls == 'smtps' and not opts.certificate:
100 op.error('--certificate must be specified')
100 op.error('--certificate must be specified')
101
101
102 addr = (opts.address, opts.port)
102 addr = (opts.address, opts.port)
103
103
104 def init():
104 def init():
105 if opts.tls == 'none':
105 if opts.tls == 'none':
106 dummysmtpserver(addr)
106 dummysmtpserver(addr)
107 else:
107 else:
108 dummysmtpsecureserver(addr, opts.certificate)
108 dummysmtpsecureserver(addr, opts.certificate)
109 log('listening at %s:%d\n' % addr)
109 log('listening at %s:%d\n' % addr)
110
110
111 server.runservice(
111 server.runservice(
112 bytesvars(opts),
112 bytesvars(opts),
113 initfn=init,
113 initfn=init,
114 runfn=run,
114 runfn=run,
115 runargs=[pycompat.sysexecutable, pycompat.fsencode(__file__)]
115 runargs=[pycompat.sysexecutable, pycompat.fsencode(__file__)]
116 + pycompat.sysargv[1:],
116 + pycompat.sysargv[1:],
117 )
117 )
118
118
119
119
120 if __name__ == '__main__':
120 if __name__ == '__main__':
121 main()
121 main()
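For context only, a hedged sketch of exercising the dummy SMTP server above with the standard-library client. The port matches the script's default of 8025; the addresses, payload, and the assumption that the server was started separately with `--tls none` are illustrative.

```python
# Sketch (illustrative values): send one message to a running dumbsmtpd.py.
import smtplib

with smtplib.SMTP('localhost', 8025) as client:
    client.sendmail(
        'sender@example.com',
        ['rcpt@example.com'],
        'Subject: test\r\n\r\nhello\r\n',
    )
# process_message() above should then log something like
# "127.0.0.1 from=sender@example.com to=rcpt@example.com".
```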
@@ -1,124 +1,124 @@
1 - #!/usr/bin/env python3
1 + #!/usr/bin/env python
2
2
3 """This does HTTP GET requests given a host:port and path and returns
3 """This does HTTP GET requests given a host:port and path and returns
4 a subset of the headers plus the body of the result."""
4 a subset of the headers plus the body of the result."""
5
5
6 from __future__ import absolute_import
6 from __future__ import absolute_import
7
7
8 import argparse
8 import argparse
9 import json
9 import json
10 import os
10 import os
11 import sys
11 import sys
12
12
13 from mercurial import (
13 from mercurial import (
14 pycompat,
14 pycompat,
15 util,
15 util,
16 )
16 )
17
17
18 httplib = util.httplib
18 httplib = util.httplib
19
19
20 try:
20 try:
21 import msvcrt
21 import msvcrt
22
22
23 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
23 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
24 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
24 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
25 except ImportError:
25 except ImportError:
26 pass
26 pass
27
27
28 stdout = getattr(sys.stdout, 'buffer', sys.stdout)
28 stdout = getattr(sys.stdout, 'buffer', sys.stdout)
29
29
30 parser = argparse.ArgumentParser()
30 parser = argparse.ArgumentParser()
31 parser.add_argument('--twice', action='store_true')
31 parser.add_argument('--twice', action='store_true')
32 parser.add_argument('--headeronly', action='store_true')
32 parser.add_argument('--headeronly', action='store_true')
33 parser.add_argument('--json', action='store_true')
33 parser.add_argument('--json', action='store_true')
34 parser.add_argument('--hgproto')
34 parser.add_argument('--hgproto')
35 parser.add_argument(
35 parser.add_argument(
36 '--requestheader',
36 '--requestheader',
37 nargs='*',
37 nargs='*',
38 default=[],
38 default=[],
39 help='Send an additional HTTP request header. Argument '
39 help='Send an additional HTTP request header. Argument '
40 'value is <header>=<value>',
40 'value is <header>=<value>',
41 )
41 )
42 parser.add_argument('--bodyfile', help='Write HTTP response body to a file')
42 parser.add_argument('--bodyfile', help='Write HTTP response body to a file')
43 parser.add_argument('host')
43 parser.add_argument('host')
44 parser.add_argument('path')
44 parser.add_argument('path')
45 parser.add_argument('show', nargs='*')
45 parser.add_argument('show', nargs='*')
46
46
47 args = parser.parse_args()
47 args = parser.parse_args()
48
48
49 twice = args.twice
49 twice = args.twice
50 headeronly = args.headeronly
50 headeronly = args.headeronly
51 formatjson = args.json
51 formatjson = args.json
52 hgproto = args.hgproto
52 hgproto = args.hgproto
53 requestheaders = args.requestheader
53 requestheaders = args.requestheader
54
54
55 tag = None
55 tag = None
56
56
57
57
58 def request(host, path, show):
58 def request(host, path, show):
59 assert not path.startswith('/'), path
59 assert not path.startswith('/'), path
60 global tag
60 global tag
61 headers = {}
61 headers = {}
62 if tag:
62 if tag:
63 headers['If-None-Match'] = tag
63 headers['If-None-Match'] = tag
64 if hgproto:
64 if hgproto:
65 headers['X-HgProto-1'] = hgproto
65 headers['X-HgProto-1'] = hgproto
66
66
67 for header in requestheaders:
67 for header in requestheaders:
68 key, value = header.split('=', 1)
68 key, value = header.split('=', 1)
69 headers[key] = value
69 headers[key] = value
70
70
71 conn = httplib.HTTPConnection(host)
71 conn = httplib.HTTPConnection(host)
72 conn.request("GET", '/' + path, None, headers)
72 conn.request("GET", '/' + path, None, headers)
73 response = conn.getresponse()
73 response = conn.getresponse()
74 stdout.write(
74 stdout.write(
75 b'%d %s\n' % (response.status, response.reason.encode('ascii'))
75 b'%d %s\n' % (response.status, response.reason.encode('ascii'))
76 )
76 )
77 if show[:1] == ['-']:
77 if show[:1] == ['-']:
78 show = sorted(
78 show = sorted(
79 h for h, v in response.getheaders() if h.lower() not in show
79 h for h, v in response.getheaders() if h.lower() not in show
80 )
80 )
81 for h in [h.lower() for h in show]:
81 for h in [h.lower() for h in show]:
82 if response.getheader(h, None) is not None:
82 if response.getheader(h, None) is not None:
83 stdout.write(
83 stdout.write(
84 b"%s: %s\n"
84 b"%s: %s\n"
85 % (h.encode('ascii'), response.getheader(h).encode('ascii'))
85 % (h.encode('ascii'), response.getheader(h).encode('ascii'))
86 )
86 )
87 if not headeronly:
87 if not headeronly:
88 stdout.write(b'\n')
88 stdout.write(b'\n')
89 data = response.read()
89 data = response.read()
90
90
91 if args.bodyfile:
91 if args.bodyfile:
92 bodyfh = open(args.bodyfile, 'wb')
92 bodyfh = open(args.bodyfile, 'wb')
93 else:
93 else:
94 bodyfh = stdout
94 bodyfh = stdout
95
95
96 # Pretty print JSON. This also has the beneficial side-effect
96 # Pretty print JSON. This also has the beneficial side-effect
97 # of verifying emitted JSON is well-formed.
97 # of verifying emitted JSON is well-formed.
98 if formatjson:
98 if formatjson:
99 # json.dumps() will print trailing newlines. Eliminate them
99 # json.dumps() will print trailing newlines. Eliminate them
100 # to make tests easier to write.
100 # to make tests easier to write.
101 data = pycompat.json_loads(data)
101 data = pycompat.json_loads(data)
102 lines = json.dumps(data, sort_keys=True, indent=2).splitlines()
102 lines = json.dumps(data, sort_keys=True, indent=2).splitlines()
103 for line in lines:
103 for line in lines:
104 bodyfh.write(pycompat.sysbytes(line.rstrip()))
104 bodyfh.write(pycompat.sysbytes(line.rstrip()))
105 bodyfh.write(b'\n')
105 bodyfh.write(b'\n')
106 else:
106 else:
107 bodyfh.write(data)
107 bodyfh.write(data)
108
108
109 if args.bodyfile:
109 if args.bodyfile:
110 bodyfh.close()
110 bodyfh.close()
111
111
112 if twice and response.getheader('ETag', None):
112 if twice and response.getheader('ETag', None):
113 tag = response.getheader('ETag')
113 tag = response.getheader('ETag')
114
114
115 return response.status
115 return response.status
116
116
117
117
118 status = request(args.host, args.path, args.show)
118 status = request(args.host, args.path, args.show)
119 if twice:
119 if twice:
120 status = request(args.host, args.path, args.show)
120 status = request(args.host, args.path, args.show)
121
121
122 if 200 <= status <= 305:
122 if 200 <= status <= 305:
123 sys.exit(0)
123 sys.exit(0)
124 sys.exit(1)
124 sys.exit(1)
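As context, a hedged sketch of invoking this script the way a test's shell line would, via subprocess. The port, repository path, and header name are assumptions, and a server is assumed to be listening already.

```python
# Sketch (assumed values): ask get-with-headers.py for the status line and
# one response header of a page served on localhost:18000.
import subprocess
import sys

subprocess.check_call(
    [
        sys.executable,
        'get-with-headers.py',
        '--headeronly',
        'localhost:18000',  # positional "host" argument
        'raw-file/tip/a',   # positional "path" (must not start with '/')
        'content-type',     # positional "show": header names to print
    ]
)
```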
@@ -1,78 +1,78 @@
1 - #!/usr/bin/env python3
1 + #!/usr/bin/env python
2 """Test the running system for features availability. Exit with zero
2 """Test the running system for features availability. Exit with zero
3 if all features are there, non-zero otherwise. If a feature name is
3 if all features are there, non-zero otherwise. If a feature name is
4 prefixed with "no-", the absence of feature is tested.
4 prefixed with "no-", the absence of feature is tested.
5 """
5 """
6
6
7 from __future__ import absolute_import, print_function
7 from __future__ import absolute_import, print_function
8
8
9 import hghave
9 import hghave
10 import optparse
10 import optparse
11 import os
11 import os
12 import sys
12 import sys
13
13
14 checks = hghave.checks
14 checks = hghave.checks
15
15
16
16
17 def list_features():
17 def list_features():
18 for name, feature in sorted(checks.items()):
18 for name, feature in sorted(checks.items()):
19 desc = feature[1]
19 desc = feature[1]
20 print(name + ':', desc)
20 print(name + ':', desc)
21
21
22
22
23 def test_features():
23 def test_features():
24 failed = 0
24 failed = 0
25 for name, feature in checks.items():
25 for name, feature in checks.items():
26 check, _ = feature
26 check, _ = feature
27 try:
27 try:
28 check()
28 check()
29 except Exception as e:
29 except Exception as e:
30 print("feature %s failed: %s" % (name, e))
30 print("feature %s failed: %s" % (name, e))
31 failed += 1
31 failed += 1
32 return failed
32 return failed
33
33
34
34
35 parser = optparse.OptionParser("%prog [options] [features]")
35 parser = optparse.OptionParser("%prog [options] [features]")
36 parser.add_option(
36 parser.add_option(
37 "--test-features", action="store_true", help="test available features"
37 "--test-features", action="store_true", help="test available features"
38 )
38 )
39 parser.add_option(
39 parser.add_option(
40 "--list-features", action="store_true", help="list available features"
40 "--list-features", action="store_true", help="list available features"
41 )
41 )
42
42
43
43
44 def _loadaddon():
44 def _loadaddon():
45 if 'TESTDIR' in os.environ:
45 if 'TESTDIR' in os.environ:
46 # loading from '.' isn't needed, because `hghave` should be
46 # loading from '.' isn't needed, because `hghave` should be
47 # running at TESTTMP in this case
47 # running at TESTTMP in this case
48 path = os.environ['TESTDIR']
48 path = os.environ['TESTDIR']
49 else:
49 else:
50 path = '.'
50 path = '.'
51
51
52 if not os.path.exists(os.path.join(path, 'hghaveaddon.py')):
52 if not os.path.exists(os.path.join(path, 'hghaveaddon.py')):
53 return
53 return
54
54
55 sys.path.insert(0, path)
55 sys.path.insert(0, path)
56 try:
56 try:
57 import hghaveaddon
57 import hghaveaddon
58
58
59 assert hghaveaddon # silence pyflakes
59 assert hghaveaddon # silence pyflakes
60 except BaseException as inst:
60 except BaseException as inst:
61 sys.stderr.write(
61 sys.stderr.write(
62 'failed to import hghaveaddon.py from %r: %s\n' % (path, inst)
62 'failed to import hghaveaddon.py from %r: %s\n' % (path, inst)
63 )
63 )
64 sys.exit(2)
64 sys.exit(2)
65 sys.path.pop(0)
65 sys.path.pop(0)
66
66
67
67
68 if __name__ == '__main__':
68 if __name__ == '__main__':
69 options, args = parser.parse_args()
69 options, args = parser.parse_args()
70 _loadaddon()
70 _loadaddon()
71 if options.list_features:
71 if options.list_features:
72 list_features()
72 list_features()
73 sys.exit(0)
73 sys.exit(0)
74
74
75 if options.test_features:
75 if options.test_features:
76 sys.exit(test_features())
76 sys.exit(test_features())
77
77
78 hghave.require(args)
78 hghave.require(args)
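Purely as context, a hedged sketch of the programmatic use that the final line above (`hghave.require(args)`) wraps. The feature names are examples, not taken from the patch, and the exact exit behavior of `require()` is an assumption based on its command-line use here.

```python
# Sketch: require features before a Python test does any real work.
# hghave.require() is expected to end the process when a check fails,
# mirroring the command-line invocation at the end of the script above.
import hghave

# 'serve' must be present and 'windows' absent, using the "no-" prefix
# described in this script's docstring (example feature names).
hghave.require(['serve', 'no-windows'])
```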
@@ -1,3937 +1,3945 @@
1 #!/usr/bin/env python3
1 #!/usr/bin/env python3
2 #
2 #
3 # run-tests.py - Run a set of tests on Mercurial
3 # run-tests.py - Run a set of tests on Mercurial
4 #
4 #
5 # Copyright 2006 Olivia Mackall <olivia@selenic.com>
5 # Copyright 2006 Olivia Mackall <olivia@selenic.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 # Modifying this script is tricky because it has many modes:
10 # Modifying this script is tricky because it has many modes:
11 # - serial (default) vs parallel (-jN, N > 1)
11 # - serial (default) vs parallel (-jN, N > 1)
12 # - no coverage (default) vs coverage (-c, -C, -s)
12 # - no coverage (default) vs coverage (-c, -C, -s)
13 # - temp install (default) vs specific hg script (--with-hg, --local)
13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 # - tests are a mix of shell scripts and Python scripts
14 # - tests are a mix of shell scripts and Python scripts
15 #
15 #
16 # If you change this script, it is recommended that you ensure you
16 # If you change this script, it is recommended that you ensure you
17 # haven't broken it by running it in various modes with a representative
17 # haven't broken it by running it in various modes with a representative
18 # sample of test scripts. For example:
18 # sample of test scripts. For example:
19 #
19 #
20 # 1) serial, no coverage, temp install:
20 # 1) serial, no coverage, temp install:
21 # ./run-tests.py test-s*
21 # ./run-tests.py test-s*
22 # 2) serial, no coverage, local hg:
22 # 2) serial, no coverage, local hg:
23 # ./run-tests.py --local test-s*
23 # ./run-tests.py --local test-s*
24 # 3) serial, coverage, temp install:
24 # 3) serial, coverage, temp install:
25 # ./run-tests.py -c test-s*
25 # ./run-tests.py -c test-s*
26 # 4) serial, coverage, local hg:
26 # 4) serial, coverage, local hg:
27 # ./run-tests.py -c --local test-s* # unsupported
27 # ./run-tests.py -c --local test-s* # unsupported
28 # 5) parallel, no coverage, temp install:
28 # 5) parallel, no coverage, temp install:
29 # ./run-tests.py -j2 test-s*
29 # ./run-tests.py -j2 test-s*
30 # 6) parallel, no coverage, local hg:
30 # 6) parallel, no coverage, local hg:
31 # ./run-tests.py -j2 --local test-s*
31 # ./run-tests.py -j2 --local test-s*
32 # 7) parallel, coverage, temp install:
32 # 7) parallel, coverage, temp install:
33 # ./run-tests.py -j2 -c test-s* # currently broken
33 # ./run-tests.py -j2 -c test-s* # currently broken
34 # 8) parallel, coverage, local install:
34 # 8) parallel, coverage, local install:
35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 # 9) parallel, custom tmp dir:
36 # 9) parallel, custom tmp dir:
37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 # 10) parallel, pure, tests that call run-tests:
38 # 10) parallel, pure, tests that call run-tests:
39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 #
40 #
41 # (You could use any subset of the tests: test-s* happens to match
41 # (You could use any subset of the tests: test-s* happens to match
42 # enough that it's worth doing parallel runs, few enough that it
42 # enough that it's worth doing parallel runs, few enough that it
43 # completes fairly quickly, includes both shell and Python scripts, and
43 # completes fairly quickly, includes both shell and Python scripts, and
44 # includes some scripts that run daemon processes.)
44 # includes some scripts that run daemon processes.)
45
45
46 from __future__ import absolute_import, print_function
46 from __future__ import absolute_import, print_function
47
47
48 import argparse
48 import argparse
49 import collections
49 import collections
50 import contextlib
50 import contextlib
51 import difflib
51 import difflib
52 import distutils.version as version
52 import distutils.version as version
53 import errno
53 import errno
54 import json
54 import json
55 import multiprocessing
55 import multiprocessing
56 import os
56 import os
57 import platform
57 import platform
58 import random
58 import random
59 import re
59 import re
60 import shutil
60 import shutil
61 import signal
61 import signal
62 import socket
62 import socket
63 import subprocess
63 import subprocess
64 import sys
64 import sys
65 import sysconfig
65 import sysconfig
66 import tempfile
66 import tempfile
67 import threading
67 import threading
68 import time
68 import time
69 import unittest
69 import unittest
70 import uuid
70 import uuid
71 import xml.dom.minidom as minidom
71 import xml.dom.minidom as minidom
72
72
73 try:
73 try:
74 import Queue as queue
74 import Queue as queue
75 except ImportError:
75 except ImportError:
76 import queue
76 import queue
77
77
78 try:
78 try:
79 import shlex
79 import shlex
80
80
81 shellquote = shlex.quote
81 shellquote = shlex.quote
82 except (ImportError, AttributeError):
82 except (ImportError, AttributeError):
83 import pipes
83 import pipes
84
84
85 shellquote = pipes.quote
85 shellquote = pipes.quote
86
86
87 processlock = threading.Lock()
87 processlock = threading.Lock()
88
88
89 pygmentspresent = False
89 pygmentspresent = False
90 try: # is pygments installed
90 try: # is pygments installed
91 import pygments
91 import pygments
92 import pygments.lexers as lexers
92 import pygments.lexers as lexers
93 import pygments.lexer as lexer
93 import pygments.lexer as lexer
94 import pygments.formatters as formatters
94 import pygments.formatters as formatters
95 import pygments.token as token
95 import pygments.token as token
96 import pygments.style as style
96 import pygments.style as style
97
97
98 if os.name == 'nt':
98 if os.name == 'nt':
99 hgpath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
99 hgpath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
100 sys.path.append(hgpath)
100 sys.path.append(hgpath)
101 try:
101 try:
102 from mercurial import win32 # pytype: disable=import-error
102 from mercurial import win32 # pytype: disable=import-error
103
103
104 # Don't check the result code because it fails on heptapod, but
104 # Don't check the result code because it fails on heptapod, but
105 # something is able to convert to color anyway.
105 # something is able to convert to color anyway.
106 win32.enablevtmode()
106 win32.enablevtmode()
107 finally:
107 finally:
108 sys.path = sys.path[:-1]
108 sys.path = sys.path[:-1]
109
109
110 pygmentspresent = True
110 pygmentspresent = True
111 difflexer = lexers.DiffLexer()
111 difflexer = lexers.DiffLexer()
112 terminal256formatter = formatters.Terminal256Formatter()
112 terminal256formatter = formatters.Terminal256Formatter()
113 except ImportError:
113 except ImportError:
114 pass
114 pass
115
115
116 if pygmentspresent:
116 if pygmentspresent:
117
117
118 class TestRunnerStyle(style.Style):
118 class TestRunnerStyle(style.Style):
119 default_style = ""
119 default_style = ""
120 skipped = token.string_to_tokentype("Token.Generic.Skipped")
120 skipped = token.string_to_tokentype("Token.Generic.Skipped")
121 failed = token.string_to_tokentype("Token.Generic.Failed")
121 failed = token.string_to_tokentype("Token.Generic.Failed")
122 skippedname = token.string_to_tokentype("Token.Generic.SName")
122 skippedname = token.string_to_tokentype("Token.Generic.SName")
123 failedname = token.string_to_tokentype("Token.Generic.FName")
123 failedname = token.string_to_tokentype("Token.Generic.FName")
124 styles = {
124 styles = {
125 skipped: '#e5e5e5',
125 skipped: '#e5e5e5',
126 skippedname: '#00ffff',
126 skippedname: '#00ffff',
127 failed: '#7f0000',
127 failed: '#7f0000',
128 failedname: '#ff0000',
128 failedname: '#ff0000',
129 }
129 }
130
130
131 class TestRunnerLexer(lexer.RegexLexer):
131 class TestRunnerLexer(lexer.RegexLexer):
132 testpattern = r'[\w-]+\.(t|py)(#[a-zA-Z0-9_\-\.]+)?'
132 testpattern = r'[\w-]+\.(t|py)(#[a-zA-Z0-9_\-\.]+)?'
133 tokens = {
133 tokens = {
134 'root': [
134 'root': [
135 (r'^Skipped', token.Generic.Skipped, 'skipped'),
135 (r'^Skipped', token.Generic.Skipped, 'skipped'),
136 (r'^Failed ', token.Generic.Failed, 'failed'),
136 (r'^Failed ', token.Generic.Failed, 'failed'),
137 (r'^ERROR: ', token.Generic.Failed, 'failed'),
137 (r'^ERROR: ', token.Generic.Failed, 'failed'),
138 ],
138 ],
139 'skipped': [
139 'skipped': [
140 (testpattern, token.Generic.SName),
140 (testpattern, token.Generic.SName),
141 (r':.*', token.Generic.Skipped),
141 (r':.*', token.Generic.Skipped),
142 ],
142 ],
143 'failed': [
143 'failed': [
144 (testpattern, token.Generic.FName),
144 (testpattern, token.Generic.FName),
145 (r'(:| ).*', token.Generic.Failed),
145 (r'(:| ).*', token.Generic.Failed),
146 ],
146 ],
147 }
147 }
148
148
149 runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
149 runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
150 runnerlexer = TestRunnerLexer()
150 runnerlexer = TestRunnerLexer()
151
151
152 origenviron = os.environ.copy()
152 origenviron = os.environ.copy()
153
153
154 if sys.version_info > (3, 5, 0):
154 if sys.version_info > (3, 5, 0):
155 PYTHON3 = True
155 PYTHON3 = True
156 xrange = range # we use xrange in one place, and we'd rather not use range
156 xrange = range # we use xrange in one place, and we'd rather not use range
157
157
158 def _sys2bytes(p):
158 def _sys2bytes(p):
159 if p is None:
159 if p is None:
160 return p
160 return p
161 return p.encode('utf-8')
161 return p.encode('utf-8')
162
162
163 def _bytes2sys(p):
163 def _bytes2sys(p):
164 if p is None:
164 if p is None:
165 return p
165 return p
166 return p.decode('utf-8')
166 return p.decode('utf-8')
167
167
168 osenvironb = getattr(os, 'environb', None)
168 osenvironb = getattr(os, 'environb', None)
169 if osenvironb is None:
169 if osenvironb is None:
170 # Windows lacks os.environb, for instance. A proxy over the real thing
170 # Windows lacks os.environb, for instance. A proxy over the real thing
171 # instead of a copy allows the environment to be updated via bytes on
171 # instead of a copy allows the environment to be updated via bytes on
172 # all platforms.
172 # all platforms.
173 class environbytes(object):
173 class environbytes(object):
174 def __init__(self, strenv):
174 def __init__(self, strenv):
175 self.__len__ = strenv.__len__
175 self.__len__ = strenv.__len__
176 self.clear = strenv.clear
176 self.clear = strenv.clear
177 self._strenv = strenv
177 self._strenv = strenv
178
178
179 def __getitem__(self, k):
179 def __getitem__(self, k):
180 v = self._strenv.__getitem__(_bytes2sys(k))
180 v = self._strenv.__getitem__(_bytes2sys(k))
181 return _sys2bytes(v)
181 return _sys2bytes(v)
182
182
183 def __setitem__(self, k, v):
183 def __setitem__(self, k, v):
184 self._strenv.__setitem__(_bytes2sys(k), _bytes2sys(v))
184 self._strenv.__setitem__(_bytes2sys(k), _bytes2sys(v))
185
185
186 def __delitem__(self, k):
186 def __delitem__(self, k):
187 self._strenv.__delitem__(_bytes2sys(k))
187 self._strenv.__delitem__(_bytes2sys(k))
188
188
189 def __contains__(self, k):
189 def __contains__(self, k):
190 return self._strenv.__contains__(_bytes2sys(k))
190 return self._strenv.__contains__(_bytes2sys(k))
191
191
192 def __iter__(self):
192 def __iter__(self):
193 return iter([_sys2bytes(k) for k in iter(self._strenv)])
193 return iter([_sys2bytes(k) for k in iter(self._strenv)])
194
194
195 def get(self, k, default=None):
195 def get(self, k, default=None):
196 v = self._strenv.get(_bytes2sys(k), _bytes2sys(default))
196 v = self._strenv.get(_bytes2sys(k), _bytes2sys(default))
197 return _sys2bytes(v)
197 return _sys2bytes(v)
198
198
199 def pop(self, k, default=None):
199 def pop(self, k, default=None):
200 v = self._strenv.pop(_bytes2sys(k), _bytes2sys(default))
200 v = self._strenv.pop(_bytes2sys(k), _bytes2sys(default))
201 return _sys2bytes(v)
201 return _sys2bytes(v)
202
202
203 osenvironb = environbytes(os.environ)
203 osenvironb = environbytes(os.environ)
204
204
205 getcwdb = getattr(os, 'getcwdb')
205 getcwdb = getattr(os, 'getcwdb')
206 if not getcwdb or os.name == 'nt':
206 if not getcwdb or os.name == 'nt':
207 getcwdb = lambda: _sys2bytes(os.getcwd())
207 getcwdb = lambda: _sys2bytes(os.getcwd())
208
208
209 elif sys.version_info >= (3, 0, 0):
209 elif sys.version_info >= (3, 0, 0):
210 print(
210 print(
211 '%s is only supported on Python 3.5+ and 2.7, not %s'
211 '%s is only supported on Python 3.5+ and 2.7, not %s'
212 % (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3]))
212 % (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3]))
213 )
213 )
214 sys.exit(70) # EX_SOFTWARE from `man 3 sysexits`
214 sys.exit(70) # EX_SOFTWARE from `man 3 sysexits`
215 else:
215 else:
216 PYTHON3 = False
216 PYTHON3 = False
217
217
218 # In python 2.x, path operations are generally done using
218 # In python 2.x, path operations are generally done using
219 # bytestrings by default, so we don't have to do any extra
219 # bytestrings by default, so we don't have to do any extra
220 # fiddling there. We define the wrapper functions anyway just to
220 # fiddling there. We define the wrapper functions anyway just to
221 # help keep code consistent between platforms.
221 # help keep code consistent between platforms.
222 def _sys2bytes(p):
222 def _sys2bytes(p):
223 return p
223 return p
224
224
225 _bytes2sys = _sys2bytes
225 _bytes2sys = _sys2bytes
226 osenvironb = os.environ
226 osenvironb = os.environ
227 getcwdb = os.getcwd
227 getcwdb = os.getcwd
228
228
229 # For Windows support
229 # For Windows support
230 wifexited = getattr(os, "WIFEXITED", lambda x: False)
230 wifexited = getattr(os, "WIFEXITED", lambda x: False)
231
231
232 # Whether to use IPv6
232 # Whether to use IPv6
233 def checksocketfamily(name, port=20058):
233 def checksocketfamily(name, port=20058):
234 """return true if we can listen on localhost using family=name
234 """return true if we can listen on localhost using family=name
235
235
236 name should be either 'AF_INET', or 'AF_INET6'.
236 name should be either 'AF_INET', or 'AF_INET6'.
237 port being used is okay - EADDRINUSE is considered as successful.
237 port being used is okay - EADDRINUSE is considered as successful.
238 """
238 """
239 family = getattr(socket, name, None)
239 family = getattr(socket, name, None)
240 if family is None:
240 if family is None:
241 return False
241 return False
242 try:
242 try:
243 s = socket.socket(family, socket.SOCK_STREAM)
243 s = socket.socket(family, socket.SOCK_STREAM)
244 s.bind(('localhost', port))
244 s.bind(('localhost', port))
245 s.close()
245 s.close()
246 return True
246 return True
247 except socket.error as exc:
247 except socket.error as exc:
248 if exc.errno == errno.EADDRINUSE:
248 if exc.errno == errno.EADDRINUSE:
249 return True
249 return True
250 elif exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
250 elif exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
251 return False
251 return False
252 else:
252 else:
253 raise
253 raise
254 else:
254 else:
255 return False
255 return False
256
256
257
257
258 # useipv6 will be set by parseargs
258 # useipv6 will be set by parseargs
259 useipv6 = None
259 useipv6 = None
260
260
261
261
262 def checkportisavailable(port):
262 def checkportisavailable(port):
263 """return true if a port seems free to bind on localhost"""
263 """return true if a port seems free to bind on localhost"""
264 if useipv6:
264 if useipv6:
265 family = socket.AF_INET6
265 family = socket.AF_INET6
266 else:
266 else:
267 family = socket.AF_INET
267 family = socket.AF_INET
268 try:
268 try:
269 with contextlib.closing(socket.socket(family, socket.SOCK_STREAM)) as s:
269 with contextlib.closing(socket.socket(family, socket.SOCK_STREAM)) as s:
270 s.bind(('localhost', port))
270 s.bind(('localhost', port))
271 return True
271 return True
272 except socket.error as exc:
272 except socket.error as exc:
273 if os.name == 'nt' and exc.errno == errno.WSAEACCES:
273 if os.name == 'nt' and exc.errno == errno.WSAEACCES:
274 return False
274 return False
275 elif PYTHON3:
275 elif PYTHON3:
276 # TODO: make a proper exception handler after dropping py2. This
276 # TODO: make a proper exception handler after dropping py2. This
277 # works because socket.error is an alias for OSError on py3,
277 # works because socket.error is an alias for OSError on py3,
278 # which is also the baseclass of PermissionError.
278 # which is also the baseclass of PermissionError.
279 if isinstance(exc, PermissionError):
279 if isinstance(exc, PermissionError):
280 return False
280 return False
281 if exc.errno not in (
281 if exc.errno not in (
282 errno.EADDRINUSE,
282 errno.EADDRINUSE,
283 errno.EADDRNOTAVAIL,
283 errno.EADDRNOTAVAIL,
284 errno.EPROTONOSUPPORT,
284 errno.EPROTONOSUPPORT,
285 ):
285 ):
286 raise
286 raise
287 return False
287 return False
288
288
289
289
290 closefds = os.name == 'posix'
290 closefds = os.name == 'posix'
291
291
292
292
293 def Popen4(cmd, wd, timeout, env=None):
293 def Popen4(cmd, wd, timeout, env=None):
294 processlock.acquire()
294 processlock.acquire()
295 p = subprocess.Popen(
295 p = subprocess.Popen(
296 _bytes2sys(cmd),
296 _bytes2sys(cmd),
297 shell=True,
297 shell=True,
298 bufsize=-1,
298 bufsize=-1,
299 cwd=_bytes2sys(wd),
299 cwd=_bytes2sys(wd),
300 env=env,
300 env=env,
301 close_fds=closefds,
301 close_fds=closefds,
302 stdin=subprocess.PIPE,
302 stdin=subprocess.PIPE,
303 stdout=subprocess.PIPE,
303 stdout=subprocess.PIPE,
304 stderr=subprocess.STDOUT,
304 stderr=subprocess.STDOUT,
305 )
305 )
306 processlock.release()
306 processlock.release()
307
307
308 p.fromchild = p.stdout
308 p.fromchild = p.stdout
309 p.tochild = p.stdin
309 p.tochild = p.stdin
310 p.childerr = p.stderr
310 p.childerr = p.stderr
311
311
312 p.timeout = False
312 p.timeout = False
313 if timeout:
313 if timeout:
314
314
315 def t():
315 def t():
316 start = time.time()
316 start = time.time()
317 while time.time() - start < timeout and p.returncode is None:
317 while time.time() - start < timeout and p.returncode is None:
318 time.sleep(0.1)
318 time.sleep(0.1)
319 p.timeout = True
319 p.timeout = True
320 vlog('# Timeout reached for process %d' % p.pid)
320 vlog('# Timeout reached for process %d' % p.pid)
321 if p.returncode is None:
321 if p.returncode is None:
322 terminate(p)
322 terminate(p)
323
323
324 threading.Thread(target=t).start()
324 threading.Thread(target=t).start()
325
325
326 return p
326 return p
327
327
328
328
329 if sys.executable:
329 if sys.executable:
330 sysexecutable = sys.executable
330 sysexecutable = sys.executable
331 elif os.environ.get('PYTHONEXECUTABLE'):
331 elif os.environ.get('PYTHONEXECUTABLE'):
332 sysexecutable = os.environ['PYTHONEXECUTABLE']
332 sysexecutable = os.environ['PYTHONEXECUTABLE']
333 elif os.environ.get('PYTHON'):
333 elif os.environ.get('PYTHON'):
334 sysexecutable = os.environ['PYTHON']
334 sysexecutable = os.environ['PYTHON']
335 else:
335 else:
336 raise AssertionError('Could not find Python interpreter')
336 raise AssertionError('Could not find Python interpreter')
337
337
338 PYTHON = _sys2bytes(sysexecutable.replace('\\', '/'))
338 PYTHON = _sys2bytes(sysexecutable.replace('\\', '/'))
339 IMPL_PATH = b'PYTHONPATH'
339 IMPL_PATH = b'PYTHONPATH'
340 if 'java' in sys.platform:
340 if 'java' in sys.platform:
341 IMPL_PATH = b'JYTHONPATH'
341 IMPL_PATH = b'JYTHONPATH'
342
342
343 default_defaults = {
343 default_defaults = {
344 'jobs': ('HGTEST_JOBS', multiprocessing.cpu_count()),
344 'jobs': ('HGTEST_JOBS', multiprocessing.cpu_count()),
345 'timeout': ('HGTEST_TIMEOUT', 360),
345 'timeout': ('HGTEST_TIMEOUT', 360),
346 'slowtimeout': ('HGTEST_SLOWTIMEOUT', 1500),
346 'slowtimeout': ('HGTEST_SLOWTIMEOUT', 1500),
347 'port': ('HGTEST_PORT', 20059),
347 'port': ('HGTEST_PORT', 20059),
348 'shell': ('HGTEST_SHELL', 'sh'),
348 'shell': ('HGTEST_SHELL', 'sh'),
349 }
349 }
350
350
351 defaults = default_defaults.copy()
351 defaults = default_defaults.copy()
352
352
353
353
354 def canonpath(path):
354 def canonpath(path):
355 return os.path.realpath(os.path.expanduser(path))
355 return os.path.realpath(os.path.expanduser(path))
356
356
357
357
358 def parselistfiles(files, listtype, warn=True):
358 def parselistfiles(files, listtype, warn=True):
359 entries = dict()
359 entries = dict()
360 for filename in files:
360 for filename in files:
361 try:
361 try:
362 path = os.path.expanduser(os.path.expandvars(filename))
362 path = os.path.expanduser(os.path.expandvars(filename))
363 f = open(path, "rb")
363 f = open(path, "rb")
364 except IOError as err:
364 except IOError as err:
365 if err.errno != errno.ENOENT:
365 if err.errno != errno.ENOENT:
366 raise
366 raise
367 if warn:
367 if warn:
368 print("warning: no such %s file: %s" % (listtype, filename))
368 print("warning: no such %s file: %s" % (listtype, filename))
369 continue
369 continue
370
370
371 for line in f.readlines():
371 for line in f.readlines():
372 line = line.split(b'#', 1)[0].strip()
372 line = line.split(b'#', 1)[0].strip()
373 if line:
373 if line:
374 # Ensure path entries are compatible with os.path.relpath()
374 # Ensure path entries are compatible with os.path.relpath()
375 entries[os.path.normpath(line)] = filename
375 entries[os.path.normpath(line)] = filename
376
376
377 f.close()
377 f.close()
378 return entries
378 return entries
379
379
380
380
381 def parsettestcases(path):
381 def parsettestcases(path):
382 """read a .t test file, return a set of test case names
382 """read a .t test file, return a set of test case names
383
383
384 If path does not exist, return an empty set.
384 If path does not exist, return an empty set.
385 """
385 """
386 cases = []
386 cases = []
387 try:
387 try:
388 with open(path, 'rb') as f:
388 with open(path, 'rb') as f:
389 for l in f:
389 for l in f:
390 if l.startswith(b'#testcases '):
390 if l.startswith(b'#testcases '):
391 cases.append(sorted(l[11:].split()))
391 cases.append(sorted(l[11:].split()))
392 except IOError as ex:
392 except IOError as ex:
393 if ex.errno != errno.ENOENT:
393 if ex.errno != errno.ENOENT:
394 raise
394 raise
395 return cases
395 return cases
396
396
397
397
398 def getparser():
398 def getparser():
399 """Obtain the OptionParser used by the CLI."""
399 """Obtain the OptionParser used by the CLI."""
400 parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]')
400 parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]')
401
401
402 selection = parser.add_argument_group('Test Selection')
402 selection = parser.add_argument_group('Test Selection')
403 selection.add_argument(
403 selection.add_argument(
404 '--allow-slow-tests',
404 '--allow-slow-tests',
405 action='store_true',
405 action='store_true',
406 help='allow extremely slow tests',
406 help='allow extremely slow tests',
407 )
407 )
408 selection.add_argument(
408 selection.add_argument(
409 "--blacklist",
409 "--blacklist",
410 action="append",
410 action="append",
411 help="skip tests listed in the specified blacklist file",
411 help="skip tests listed in the specified blacklist file",
412 )
412 )
413 selection.add_argument(
413 selection.add_argument(
414 "--changed",
414 "--changed",
415 help="run tests that are changed in parent rev or working directory",
415 help="run tests that are changed in parent rev or working directory",
416 )
416 )
417 selection.add_argument(
417 selection.add_argument(
418 "-k", "--keywords", help="run tests matching keywords"
418 "-k", "--keywords", help="run tests matching keywords"
419 )
419 )
420 selection.add_argument(
420 selection.add_argument(
421 "-r", "--retest", action="store_true", help="retest failed tests"
421 "-r", "--retest", action="store_true", help="retest failed tests"
422 )
422 )
423 selection.add_argument(
423 selection.add_argument(
424 "--test-list",
424 "--test-list",
425 action="append",
425 action="append",
426 help="read tests to run from the specified file",
426 help="read tests to run from the specified file",
427 )
427 )
428 selection.add_argument(
428 selection.add_argument(
429 "--whitelist",
429 "--whitelist",
430 action="append",
430 action="append",
431 help="always run tests listed in the specified whitelist file",
431 help="always run tests listed in the specified whitelist file",
432 )
432 )
433 selection.add_argument(
433 selection.add_argument(
434 'tests', metavar='TESTS', nargs='*', help='Tests to run'
434 'tests', metavar='TESTS', nargs='*', help='Tests to run'
435 )
435 )
436
436
437 harness = parser.add_argument_group('Test Harness Behavior')
437 harness = parser.add_argument_group('Test Harness Behavior')
438 harness.add_argument(
438 harness.add_argument(
439 '--bisect-repo',
439 '--bisect-repo',
440 metavar='bisect_repo',
440 metavar='bisect_repo',
441 help=(
441 help=(
442 "Path of a repo to bisect. Use together with " "--known-good-rev"
442 "Path of a repo to bisect. Use together with " "--known-good-rev"
443 ),
443 ),
444 )
444 )
445 harness.add_argument(
445 harness.add_argument(
446 "-d",
446 "-d",
447 "--debug",
447 "--debug",
448 action="store_true",
448 action="store_true",
449 help="debug mode: write output of test scripts to console"
449 help="debug mode: write output of test scripts to console"
450 " rather than capturing and diffing it (disables timeout)",
450 " rather than capturing and diffing it (disables timeout)",
451 )
451 )
452 harness.add_argument(
452 harness.add_argument(
453 "-f",
453 "-f",
454 "--first",
454 "--first",
455 action="store_true",
455 action="store_true",
456 help="exit on the first test failure",
456 help="exit on the first test failure",
457 )
457 )
458 harness.add_argument(
458 harness.add_argument(
459 "-i",
459 "-i",
460 "--interactive",
460 "--interactive",
461 action="store_true",
461 action="store_true",
462 help="prompt to accept changed output",
462 help="prompt to accept changed output",
463 )
463 )
464 harness.add_argument(
464 harness.add_argument(
465 "-j",
465 "-j",
466 "--jobs",
466 "--jobs",
467 type=int,
467 type=int,
468 help="number of jobs to run in parallel"
468 help="number of jobs to run in parallel"
469 " (default: $%s or %d)" % defaults['jobs'],
469 " (default: $%s or %d)" % defaults['jobs'],
470 )
470 )
471 harness.add_argument(
471 harness.add_argument(
472 "--keep-tmpdir",
472 "--keep-tmpdir",
473 action="store_true",
473 action="store_true",
474 help="keep temporary directory after running tests",
474 help="keep temporary directory after running tests",
475 )
475 )
476 harness.add_argument(
476 harness.add_argument(
477 '--known-good-rev',
477 '--known-good-rev',
478 metavar="known_good_rev",
478 metavar="known_good_rev",
479 help=(
479 help=(
480 "Automatically bisect any failures using this "
480 "Automatically bisect any failures using this "
481 "revision as a known-good revision."
481 "revision as a known-good revision."
482 ),
482 ),
483 )
483 )
484 harness.add_argument(
484 harness.add_argument(
485 "--list-tests",
485 "--list-tests",
486 action="store_true",
486 action="store_true",
487 help="list tests instead of running them",
487 help="list tests instead of running them",
488 )
488 )
489 harness.add_argument(
489 harness.add_argument(
490 "--loop", action="store_true", help="loop tests repeatedly"
490 "--loop", action="store_true", help="loop tests repeatedly"
491 )
491 )
492 harness.add_argument(
492 harness.add_argument(
493 '--random', action="store_true", help='run tests in random order'
493 '--random', action="store_true", help='run tests in random order'
494 )
494 )
495 harness.add_argument(
495 harness.add_argument(
496 '--order-by-runtime',
496 '--order-by-runtime',
497 action="store_true",
497 action="store_true",
498 help='run slowest tests first, according to .testtimes',
498 help='run slowest tests first, according to .testtimes',
499 )
499 )
500 harness.add_argument(
500 harness.add_argument(
501 "-p",
501 "-p",
502 "--port",
502 "--port",
503 type=int,
503 type=int,
504 help="port on which servers should listen"
504 help="port on which servers should listen"
505 " (default: $%s or %d)" % defaults['port'],
505 " (default: $%s or %d)" % defaults['port'],
506 )
506 )
507 harness.add_argument(
507 harness.add_argument(
508 '--profile-runner',
508 '--profile-runner',
509 action='store_true',
509 action='store_true',
510 help='run statprof on run-tests',
510 help='run statprof on run-tests',
511 )
511 )
512 harness.add_argument(
512 harness.add_argument(
513 "-R", "--restart", action="store_true", help="restart at last error"
513 "-R", "--restart", action="store_true", help="restart at last error"
514 )
514 )
515 harness.add_argument(
515 harness.add_argument(
516 "--runs-per-test",
516 "--runs-per-test",
517 type=int,
517 type=int,
518 dest="runs_per_test",
518 dest="runs_per_test",
519 help="run each test N times (default=1)",
519 help="run each test N times (default=1)",
520 default=1,
520 default=1,
521 )
521 )
522 harness.add_argument(
522 harness.add_argument(
523 "--shell", help="shell to use (default: $%s or %s)" % defaults['shell']
523 "--shell", help="shell to use (default: $%s or %s)" % defaults['shell']
524 )
524 )
525 harness.add_argument(
525 harness.add_argument(
526 '--showchannels', action='store_true', help='show scheduling channels'
526 '--showchannels', action='store_true', help='show scheduling channels'
527 )
527 )
528 harness.add_argument(
528 harness.add_argument(
529 "--slowtimeout",
529 "--slowtimeout",
530 type=int,
530 type=int,
531 help="kill errant slow tests after SLOWTIMEOUT seconds"
531 help="kill errant slow tests after SLOWTIMEOUT seconds"
532 " (default: $%s or %d)" % defaults['slowtimeout'],
532 " (default: $%s or %d)" % defaults['slowtimeout'],
533 )
533 )
534 harness.add_argument(
534 harness.add_argument(
535 "-t",
535 "-t",
536 "--timeout",
536 "--timeout",
537 type=int,
537 type=int,
538 help="kill errant tests after TIMEOUT seconds"
538 help="kill errant tests after TIMEOUT seconds"
539 " (default: $%s or %d)" % defaults['timeout'],
539 " (default: $%s or %d)" % defaults['timeout'],
540 )
540 )
541 harness.add_argument(
541 harness.add_argument(
542 "--tmpdir",
542 "--tmpdir",
543 help="run tests in the given temporary directory"
543 help="run tests in the given temporary directory"
544 " (implies --keep-tmpdir)",
544 " (implies --keep-tmpdir)",
545 )
545 )
546 harness.add_argument(
546 harness.add_argument(
547 "-v", "--verbose", action="store_true", help="output verbose messages"
547 "-v", "--verbose", action="store_true", help="output verbose messages"
548 )
548 )
549
549
550 hgconf = parser.add_argument_group('Mercurial Configuration')
550 hgconf = parser.add_argument_group('Mercurial Configuration')
551 hgconf.add_argument(
551 hgconf.add_argument(
552 "--chg",
552 "--chg",
553 action="store_true",
553 action="store_true",
554 help="install and use chg wrapper in place of hg",
554 help="install and use chg wrapper in place of hg",
555 )
555 )
556 hgconf.add_argument(
556 hgconf.add_argument(
557 "--chg-debug",
557 "--chg-debug",
558 action="store_true",
558 action="store_true",
559 help="show chg debug logs",
559 help="show chg debug logs",
560 )
560 )
561 hgconf.add_argument(
561 hgconf.add_argument(
562 "--rhg",
562 "--rhg",
563 action="store_true",
563 action="store_true",
564 help="install and use rhg Rust implementation in place of hg",
564 help="install and use rhg Rust implementation in place of hg",
565 )
565 )
566 hgconf.add_argument("--compiler", help="compiler to build with")
566 hgconf.add_argument("--compiler", help="compiler to build with")
567 hgconf.add_argument(
567 hgconf.add_argument(
568 '--extra-config-opt',
568 '--extra-config-opt',
569 action="append",
569 action="append",
570 default=[],
570 default=[],
571 help='set the given config opt in the test hgrc',
571 help='set the given config opt in the test hgrc',
572 )
572 )
573 hgconf.add_argument(
573 hgconf.add_argument(
574 "-l",
574 "-l",
575 "--local",
575 "--local",
576 action="store_true",
576 action="store_true",
577 help="shortcut for --with-hg=<testdir>/../hg, "
577 help="shortcut for --with-hg=<testdir>/../hg, "
578 "--with-rhg=<testdir>/../rust/target/release/rhg if --rhg is set, "
578 "--with-rhg=<testdir>/../rust/target/release/rhg if --rhg is set, "
579 "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set",
579 "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set",
580 )
580 )
581 hgconf.add_argument(
581 hgconf.add_argument(
582 "--ipv6",
582 "--ipv6",
583 action="store_true",
583 action="store_true",
584 help="prefer IPv6 to IPv4 for network related tests",
584 help="prefer IPv6 to IPv4 for network related tests",
585 )
585 )
586 hgconf.add_argument(
586 hgconf.add_argument(
587 "--pure",
587 "--pure",
588 action="store_true",
588 action="store_true",
589 help="use pure Python code instead of C extensions",
589 help="use pure Python code instead of C extensions",
590 )
590 )
591 hgconf.add_argument(
591 hgconf.add_argument(
592 "--rust",
592 "--rust",
593 action="store_true",
593 action="store_true",
594 help="use Rust code alongside C extensions",
594 help="use Rust code alongside C extensions",
595 )
595 )
596 hgconf.add_argument(
596 hgconf.add_argument(
597 "--no-rust",
597 "--no-rust",
598 action="store_true",
598 action="store_true",
599 help="do not use Rust code even if compiled",
599 help="do not use Rust code even if compiled",
600 )
600 )
601 hgconf.add_argument(
601 hgconf.add_argument(
602 "--with-chg",
602 "--with-chg",
603 metavar="CHG",
603 metavar="CHG",
604 help="use specified chg wrapper in place of hg",
604 help="use specified chg wrapper in place of hg",
605 )
605 )
606 hgconf.add_argument(
606 hgconf.add_argument(
607 "--with-rhg",
607 "--with-rhg",
608 metavar="RHG",
608 metavar="RHG",
609 help="use specified rhg Rust implementation in place of hg",
609 help="use specified rhg Rust implementation in place of hg",
610 )
610 )
611 hgconf.add_argument(
611 hgconf.add_argument(
612 "--with-hg",
612 "--with-hg",
613 metavar="HG",
613 metavar="HG",
614 help="test using specified hg script rather than a "
614 help="test using specified hg script rather than a "
615 "temporary installation",
615 "temporary installation",
616 )
616 )
617
617
618 reporting = parser.add_argument_group('Results Reporting')
618 reporting = parser.add_argument_group('Results Reporting')
619 reporting.add_argument(
619 reporting.add_argument(
620 "-C",
620 "-C",
621 "--annotate",
621 "--annotate",
622 action="store_true",
622 action="store_true",
623 help="output files annotated with coverage",
623 help="output files annotated with coverage",
624 )
624 )
625 reporting.add_argument(
625 reporting.add_argument(
626 "--color",
626 "--color",
627 choices=["always", "auto", "never"],
627 choices=["always", "auto", "never"],
628 default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
628 default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
629 help="colorisation: always|auto|never (default: auto)",
629 help="colorisation: always|auto|never (default: auto)",
630 )
630 )
631 reporting.add_argument(
631 reporting.add_argument(
632 "-c",
632 "-c",
633 "--cover",
633 "--cover",
634 action="store_true",
634 action="store_true",
635 help="print a test coverage report",
635 help="print a test coverage report",
636 )
636 )
637 reporting.add_argument(
637 reporting.add_argument(
638 '--exceptions',
638 '--exceptions',
639 action='store_true',
639 action='store_true',
640 help='log all exceptions and generate an exception report',
640 help='log all exceptions and generate an exception report',
641 )
641 )
642 reporting.add_argument(
642 reporting.add_argument(
643 "-H",
643 "-H",
644 "--htmlcov",
644 "--htmlcov",
645 action="store_true",
645 action="store_true",
646 help="create an HTML report of the coverage of the files",
646 help="create an HTML report of the coverage of the files",
647 )
647 )
648 reporting.add_argument(
648 reporting.add_argument(
649 "--json",
649 "--json",
650 action="store_true",
650 action="store_true",
651 help="store test result data in 'report.json' file",
651 help="store test result data in 'report.json' file",
652 )
652 )
653 reporting.add_argument(
653 reporting.add_argument(
654 "--outputdir",
654 "--outputdir",
655 help="directory to write error logs to (default=test directory)",
655 help="directory to write error logs to (default=test directory)",
656 )
656 )
657 reporting.add_argument(
657 reporting.add_argument(
658 "-n", "--nodiff", action="store_true", help="skip showing test changes"
658 "-n", "--nodiff", action="store_true", help="skip showing test changes"
659 )
659 )
660 reporting.add_argument(
660 reporting.add_argument(
661 "-S",
661 "-S",
662 "--noskips",
662 "--noskips",
663 action="store_true",
663 action="store_true",
664 help="don't report skip tests verbosely",
664 help="don't report skip tests verbosely",
665 )
665 )
666 reporting.add_argument(
666 reporting.add_argument(
667 "--time", action="store_true", help="time how long each test takes"
667 "--time", action="store_true", help="time how long each test takes"
668 )
668 )
669 reporting.add_argument("--view", help="external diff viewer")
669 reporting.add_argument("--view", help="external diff viewer")
670 reporting.add_argument(
670 reporting.add_argument(
671 "--xunit", help="record xunit results at specified path"
671 "--xunit", help="record xunit results at specified path"
672 )
672 )
673
673
674 for option, (envvar, default) in defaults.items():
674 for option, (envvar, default) in defaults.items():
675 defaults[option] = type(default)(os.environ.get(envvar, default))
675 defaults[option] = type(default)(os.environ.get(envvar, default))
676 parser.set_defaults(**defaults)
676 parser.set_defaults(**defaults)
677
677
678 return parser
678 return parser
679
679
680
680
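# Editorial sketch (not part of run-tests.py): with the parser assembled
# above, a typical invocation could combine options from several of the
# groups, for example
#
#     ./run-tests.py --local --color always --time tests/test-foo.t
#
# where tests/test-foo.t is a hypothetical test name; --local expands to the
# --with-hg/--with-rhg/--with-chg shortcuts described in its help text.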
681 def parseargs(args, parser):
681 def parseargs(args, parser):
682 """Parse arguments with our OptionParser and validate results."""
682 """Parse arguments with our OptionParser and validate results."""
683 options = parser.parse_args(args)
683 options = parser.parse_args(args)
684
684
685 # jython is always pure
685 # jython is always pure
686 if 'java' in sys.platform or '__pypy__' in sys.modules:
686 if 'java' in sys.platform or '__pypy__' in sys.modules:
687 options.pure = True
687 options.pure = True
688
688
689 if platform.python_implementation() != 'CPython' and options.rust:
689 if platform.python_implementation() != 'CPython' and options.rust:
690 parser.error('Rust extensions are only available with CPython')
690 parser.error('Rust extensions are only available with CPython')
691
691
692 if options.pure and options.rust:
692 if options.pure and options.rust:
693 parser.error('--rust cannot be used with --pure')
693 parser.error('--rust cannot be used with --pure')
694
694
695 if options.rust and options.no_rust:
695 if options.rust and options.no_rust:
696 parser.error('--rust cannot be used with --no-rust')
696 parser.error('--rust cannot be used with --no-rust')
697
697
698 if options.local:
698 if options.local:
699 if options.with_hg or options.with_rhg or options.with_chg:
699 if options.with_hg or options.with_rhg or options.with_chg:
700 parser.error(
700 parser.error(
701 '--local cannot be used with --with-hg or --with-rhg or --with-chg'
701 '--local cannot be used with --with-hg or --with-rhg or --with-chg'
702 )
702 )
703 testdir = os.path.dirname(_sys2bytes(canonpath(sys.argv[0])))
703 testdir = os.path.dirname(_sys2bytes(canonpath(sys.argv[0])))
704 reporootdir = os.path.dirname(testdir)
704 reporootdir = os.path.dirname(testdir)
705 pathandattrs = [(b'hg', 'with_hg')]
705 pathandattrs = [(b'hg', 'with_hg')]
706 if options.chg:
706 if options.chg:
707 pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
707 pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
708 if options.rhg:
708 if options.rhg:
709 pathandattrs.append((b'rust/target/release/rhg', 'with_rhg'))
709 pathandattrs.append((b'rust/target/release/rhg', 'with_rhg'))
710 for relpath, attr in pathandattrs:
710 for relpath, attr in pathandattrs:
711 binpath = os.path.join(reporootdir, relpath)
711 binpath = os.path.join(reporootdir, relpath)
712 if os.name != 'nt' and not os.access(binpath, os.X_OK):
712 if os.name != 'nt' and not os.access(binpath, os.X_OK):
713 parser.error(
713 parser.error(
714 '--local specified, but %r not found or '
714 '--local specified, but %r not found or '
715 'not executable' % binpath
715 'not executable' % binpath
716 )
716 )
717 setattr(options, attr, _bytes2sys(binpath))
717 setattr(options, attr, _bytes2sys(binpath))
718
718
719 if options.with_hg:
719 if options.with_hg:
720 options.with_hg = canonpath(_sys2bytes(options.with_hg))
720 options.with_hg = canonpath(_sys2bytes(options.with_hg))
721 if not (
721 if not (
722 os.path.isfile(options.with_hg)
722 os.path.isfile(options.with_hg)
723 and os.access(options.with_hg, os.X_OK)
723 and os.access(options.with_hg, os.X_OK)
724 ):
724 ):
725 parser.error('--with-hg must specify an executable hg script')
725 parser.error('--with-hg must specify an executable hg script')
726 if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
726 if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
727 sys.stderr.write('warning: --with-hg should specify an hg script\n')
727 sys.stderr.write('warning: --with-hg should specify an hg script\n')
728 sys.stderr.flush()
728 sys.stderr.flush()
729
729
730 if (options.chg or options.with_chg) and os.name == 'nt':
730 if (options.chg or options.with_chg) and os.name == 'nt':
731 parser.error('chg does not work on %s' % os.name)
731 parser.error('chg does not work on %s' % os.name)
732 if (options.rhg or options.with_rhg) and os.name == 'nt':
732 if (options.rhg or options.with_rhg) and os.name == 'nt':
733 parser.error('rhg does not work on %s' % os.name)
733 parser.error('rhg does not work on %s' % os.name)
734 if options.with_chg:
734 if options.with_chg:
735 options.chg = False # no installation to temporary location
735 options.chg = False # no installation to temporary location
736 options.with_chg = canonpath(_sys2bytes(options.with_chg))
736 options.with_chg = canonpath(_sys2bytes(options.with_chg))
737 if not (
737 if not (
738 os.path.isfile(options.with_chg)
738 os.path.isfile(options.with_chg)
739 and os.access(options.with_chg, os.X_OK)
739 and os.access(options.with_chg, os.X_OK)
740 ):
740 ):
741 parser.error('--with-chg must specify a chg executable')
741 parser.error('--with-chg must specify a chg executable')
742 if options.with_rhg:
742 if options.with_rhg:
743 options.rhg = False # no installation to temporary location
743 options.rhg = False # no installation to temporary location
744 options.with_rhg = canonpath(_sys2bytes(options.with_rhg))
744 options.with_rhg = canonpath(_sys2bytes(options.with_rhg))
745 if not (
745 if not (
746 os.path.isfile(options.with_rhg)
746 os.path.isfile(options.with_rhg)
747 and os.access(options.with_rhg, os.X_OK)
747 and os.access(options.with_rhg, os.X_OK)
748 ):
748 ):
749 parser.error('--with-rhg must specify a rhg executable')
749 parser.error('--with-rhg must specify a rhg executable')
750 if options.chg and options.with_hg:
750 if options.chg and options.with_hg:
751 # chg shares installation location with hg
751 # chg shares installation location with hg
752 parser.error(
752 parser.error(
753 '--chg does not work when --with-hg is specified '
753 '--chg does not work when --with-hg is specified '
754 '(use --with-chg instead)'
754 '(use --with-chg instead)'
755 )
755 )
756 if options.rhg and options.with_hg:
756 if options.rhg and options.with_hg:
757 # rhg shares installation location with hg
757 # rhg shares installation location with hg
758 parser.error(
758 parser.error(
759 '--rhg does not work when --with-hg is specified '
759 '--rhg does not work when --with-hg is specified '
760 '(use --with-rhg instead)'
760 '(use --with-rhg instead)'
761 )
761 )
762 if options.rhg and options.chg:
762 if options.rhg and options.chg:
763 parser.error('--rhg and --chg do not work together')
763 parser.error('--rhg and --chg do not work together')
764
764
765 if options.color == 'always' and not pygmentspresent:
765 if options.color == 'always' and not pygmentspresent:
766 sys.stderr.write(
766 sys.stderr.write(
767 'warning: --color=always ignored because '
767 'warning: --color=always ignored because '
768 'pygments is not installed\n'
768 'pygments is not installed\n'
769 )
769 )
770
770
771 if options.bisect_repo and not options.known_good_rev:
771 if options.bisect_repo and not options.known_good_rev:
772 parser.error("--bisect-repo cannot be used without --known-good-rev")
772 parser.error("--bisect-repo cannot be used without --known-good-rev")
773
773
774 global useipv6
774 global useipv6
775 if options.ipv6:
775 if options.ipv6:
776 useipv6 = checksocketfamily('AF_INET6')
776 useipv6 = checksocketfamily('AF_INET6')
777 else:
777 else:
778 # only use IPv6 if IPv4 is unavailable and IPv6 is available
778 # only use IPv6 if IPv4 is unavailable and IPv6 is available
779 useipv6 = (not checksocketfamily('AF_INET')) and checksocketfamily(
779 useipv6 = (not checksocketfamily('AF_INET')) and checksocketfamily(
780 'AF_INET6'
780 'AF_INET6'
781 )
781 )
782
782
783 options.anycoverage = options.cover or options.annotate or options.htmlcov
783 options.anycoverage = options.cover or options.annotate or options.htmlcov
784 if options.anycoverage:
784 if options.anycoverage:
785 try:
785 try:
786 import coverage
786 import coverage
787
787
788 covver = version.StrictVersion(coverage.__version__).version
788 covver = version.StrictVersion(coverage.__version__).version
789 if covver < (3, 3):
789 if covver < (3, 3):
790 parser.error('coverage options require coverage 3.3 or later')
790 parser.error('coverage options require coverage 3.3 or later')
791 except ImportError:
791 except ImportError:
792 parser.error('coverage options now require the coverage package')
792 parser.error('coverage options now require the coverage package')
793
793
794 if options.anycoverage and options.local:
794 if options.anycoverage and options.local:
795 # this needs some path mangling somewhere, I guess
795 # this needs some path mangling somewhere, I guess
796 parser.error(
796 parser.error(
797 "sorry, coverage options do not work when --local " "is specified"
797 "sorry, coverage options do not work when --local " "is specified"
798 )
798 )
799
799
800 if options.anycoverage and options.with_hg:
800 if options.anycoverage and options.with_hg:
801 parser.error(
801 parser.error(
802 "sorry, coverage options do not work when --with-hg " "is specified"
802 "sorry, coverage options do not work when --with-hg " "is specified"
803 )
803 )
804
804
805 global verbose
805 global verbose
806 if options.verbose:
806 if options.verbose:
807 verbose = ''
807 verbose = ''
808
808
809 if options.tmpdir:
809 if options.tmpdir:
810 options.tmpdir = canonpath(options.tmpdir)
810 options.tmpdir = canonpath(options.tmpdir)
811
811
812 if options.jobs < 1:
812 if options.jobs < 1:
813 parser.error('--jobs must be positive')
813 parser.error('--jobs must be positive')
814 if options.interactive and options.debug:
814 if options.interactive and options.debug:
815 parser.error("-i/--interactive and -d/--debug are incompatible")
815 parser.error("-i/--interactive and -d/--debug are incompatible")
816 if options.debug:
816 if options.debug:
817 if options.timeout != defaults['timeout']:
817 if options.timeout != defaults['timeout']:
818 sys.stderr.write('warning: --timeout option ignored with --debug\n')
818 sys.stderr.write('warning: --timeout option ignored with --debug\n')
819 if options.slowtimeout != defaults['slowtimeout']:
819 if options.slowtimeout != defaults['slowtimeout']:
820 sys.stderr.write(
820 sys.stderr.write(
821 'warning: --slowtimeout option ignored with --debug\n'
821 'warning: --slowtimeout option ignored with --debug\n'
822 )
822 )
823 options.timeout = 0
823 options.timeout = 0
824 options.slowtimeout = 0
824 options.slowtimeout = 0
825
825
826 if options.blacklist:
826 if options.blacklist:
827 options.blacklist = parselistfiles(options.blacklist, 'blacklist')
827 options.blacklist = parselistfiles(options.blacklist, 'blacklist')
828 if options.whitelist:
828 if options.whitelist:
829 options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
829 options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
830 else:
830 else:
831 options.whitelisted = {}
831 options.whitelisted = {}
832
832
833 if options.showchannels:
833 if options.showchannels:
834 options.nodiff = True
834 options.nodiff = True
835
835
836 return options
836 return options
837
837
838
838
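# Editorial sketch (not part of run-tests.py): to make the --local shortcut
# above concrete, if this script were invoked as (hypothetically)
# /src/mercurial/tests/run-tests.py, then testdir would be
# /src/mercurial/tests, reporootdir /src/mercurial, and --local would point
# with_hg at /src/mercurial/hg, adding contrib/chg/chg and
# rust/target/release/rhg only when --chg or --rhg is also given.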
839 def rename(src, dst):
839 def rename(src, dst):
840 """Like os.rename(), trade atomicity and opened files friendliness
840 """Like os.rename(), trade atomicity and opened files friendliness
841 for existing destination support.
841 for existing destination support.
842 """
842 """
843 shutil.copy(src, dst)
843 shutil.copy(src, dst)
844 os.remove(src)
844 os.remove(src)
845
845
846
846
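# Editorial note (not part of run-tests.py): because rename() is a copy
# followed by a remove, it succeeds even when the destination already
# exists, which plain os.rename() refuses to do on Windows, e.g.
#
#     rename(b'output.new', b'output.txt')  # overwrites output.txt if present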
847 def makecleanable(path):
847 def makecleanable(path):
848 """Try to fix directory permission recursively so that the entire tree
848 """Try to fix directory permission recursively so that the entire tree
849 can be deleted"""
849 can be deleted"""
850 for dirpath, dirnames, _filenames in os.walk(path, topdown=True):
850 for dirpath, dirnames, _filenames in os.walk(path, topdown=True):
851 for d in dirnames:
851 for d in dirnames:
852 p = os.path.join(dirpath, d)
852 p = os.path.join(dirpath, d)
853 try:
853 try:
854 os.chmod(p, os.stat(p).st_mode & 0o777 | 0o700) # chmod u+rwx
854 os.chmod(p, os.stat(p).st_mode & 0o777 | 0o700) # chmod u+rwx
855 except OSError:
855 except OSError:
856 pass
856 pass
857
857
858
858
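# Editorial sketch (not part of run-tests.py): the chmod expression in
# makecleanable() keeps the existing permission bits but forces owner
# read/write/execute, so for example a directory left at mode 0o555 becomes
#
#     >>> oct(0o555 & 0o777 | 0o700)
#     '0o755'
#
# which is enough for the later shutil.rmtree() call to descend and delete.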
859 _unified_diff = difflib.unified_diff
859 _unified_diff = difflib.unified_diff
860 if PYTHON3:
860 if PYTHON3:
861 import functools
861 import functools
862
862
863 _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
863 _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
864
864
865
865
866 def getdiff(expected, output, ref, err):
866 def getdiff(expected, output, ref, err):
867 servefail = False
867 servefail = False
868 lines = []
868 lines = []
869 for line in _unified_diff(expected, output, ref, err):
869 for line in _unified_diff(expected, output, ref, err):
870 if line.startswith(b'+++') or line.startswith(b'---'):
870 if line.startswith(b'+++') or line.startswith(b'---'):
871 line = line.replace(b'\\', b'/')
871 line = line.replace(b'\\', b'/')
872 if line.endswith(b' \n'):
872 if line.endswith(b' \n'):
873 line = line[:-2] + b'\n'
873 line = line[:-2] + b'\n'
874 lines.append(line)
874 lines.append(line)
875 if not servefail and line.startswith(
875 if not servefail and line.startswith(
876 b'+ abort: child process failed to start'
876 b'+ abort: child process failed to start'
877 ):
877 ):
878 servefail = True
878 servefail = True
879
879
880 return servefail, lines
880 return servefail, lines
881
881
882
882
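# Editorial sketch (not part of run-tests.py): a minimal use of getdiff(),
# with hypothetical reference/error file names, looks like
#
#     >>> servefail, lines = getdiff([b'ok\n'], [b'boom\n'], b'a.t', b'a.t.err')
#     >>> servefail
#     False
#     >>> lines[-1]
#     b'+boom\n'
#
# servefail only becomes True when the diff adds the "child process failed
# to start" abort line.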
883 verbose = False
883 verbose = False
884
884
885
885
886 def vlog(*msg):
886 def vlog(*msg):
887 """Log only when in verbose mode."""
887 """Log only when in verbose mode."""
888 if verbose is False:
888 if verbose is False:
889 return
889 return
890
890
891 return log(*msg)
891 return log(*msg)
892
892
893
893
894 # Bytes that break XML even in a CDATA block: control characters 0-31
894 # Bytes that break XML even in a CDATA block: control characters 0-31
895 # sans \t, \n and \r
895 # sans \t, \n and \r
896 CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")
896 CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")
897
897
898 # Match feature conditionalized output lines in the form, capturing the feature
898 # Match feature conditionalized output lines in the form, capturing the feature
899 # list in group 2, and the preceding line output in group 1:
899 # list in group 2, and the preceding line output in group 1:
900 #
900 #
901 # output..output (feature !)\n
901 # output..output (feature !)\n
902 optline = re.compile(br'(.*) \((.+?) !\)\n$')
902 optline = re.compile(br'(.*) \((.+?) !\)\n$')
903
903
904
904
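# Editorial sketch (not part of run-tests.py): optline splits a
# feature-conditional output line into its output and feature-list parts:
#
#     >>> optline.match(b'  remote: ok (windows !)\n').groups()
#     (b'  remote: ok', b'windows')
#     >>> optline.match(b'  unconditional output\n') is None
#     True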
905 def cdatasafe(data):
905 def cdatasafe(data):
906 """Make a string safe to include in a CDATA block.
906 """Make a string safe to include in a CDATA block.
907
907
908 Certain control characters are illegal in a CDATA block, and
908 Certain control characters are illegal in a CDATA block, and
909 there's no way to include a ]]> in a CDATA either. This function
909 there's no way to include a ]]> in a CDATA either. This function
910 replaces illegal bytes with ? and adds a space between ']]' and '>' so
910 replaces illegal bytes with ? and adds a space between ']]' and '>' so
911 that it won't break the CDATA block.
911 that it won't break the CDATA block.
912 """
912 """
913 return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>')
913 return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>')
914
914
915
915
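# Editorial sketch (not part of run-tests.py): cdatasafe() in action, with a
# stray control byte and an embedded CDATA terminator:
#
#     >>> cdatasafe(b'ok\x07 ]]> done')
#     b'ok? ] ]> done'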
916 def log(*msg):
916 def log(*msg):
917 """Log something to stdout.
917 """Log something to stdout.
918
918
919 Arguments are strings to print.
919 Arguments are strings to print.
920 """
920 """
921 with iolock:
921 with iolock:
922 if verbose:
922 if verbose:
923 print(verbose, end=' ')
923 print(verbose, end=' ')
924 for m in msg:
924 for m in msg:
925 print(m, end=' ')
925 print(m, end=' ')
926 print()
926 print()
927 sys.stdout.flush()
927 sys.stdout.flush()
928
928
929
929
930 def highlightdiff(line, color):
930 def highlightdiff(line, color):
931 if not color:
931 if not color:
932 return line
932 return line
933 assert pygmentspresent
933 assert pygmentspresent
934 return pygments.highlight(
934 return pygments.highlight(
935 line.decode('latin1'), difflexer, terminal256formatter
935 line.decode('latin1'), difflexer, terminal256formatter
936 ).encode('latin1')
936 ).encode('latin1')
937
937
938
938
939 def highlightmsg(msg, color):
939 def highlightmsg(msg, color):
940 if not color:
940 if not color:
941 return msg
941 return msg
942 assert pygmentspresent
942 assert pygmentspresent
943 return pygments.highlight(msg, runnerlexer, runnerformatter)
943 return pygments.highlight(msg, runnerlexer, runnerformatter)
944
944
945
945
946 def terminate(proc):
946 def terminate(proc):
947 """Terminate subprocess"""
947 """Terminate subprocess"""
948 vlog('# Terminating process %d' % proc.pid)
948 vlog('# Terminating process %d' % proc.pid)
949 try:
949 try:
950 proc.terminate()
950 proc.terminate()
951 except OSError:
951 except OSError:
952 pass
952 pass
953
953
954
954
955 def killdaemons(pidfile):
955 def killdaemons(pidfile):
956 import killdaemons as killmod
956 import killdaemons as killmod
957
957
958 return killmod.killdaemons(pidfile, tryhard=False, remove=True, logfn=vlog)
958 return killmod.killdaemons(pidfile, tryhard=False, remove=True, logfn=vlog)
959
959
960
960
961 class Test(unittest.TestCase):
961 class Test(unittest.TestCase):
962 """Encapsulates a single, runnable test.
962 """Encapsulates a single, runnable test.
963
963
964 While this class conforms to the unittest.TestCase API, it differs in that
964 While this class conforms to the unittest.TestCase API, it differs in that
965 instances need to be instantiated manually. (Typically, unittest.TestCase
965 instances need to be instantiated manually. (Typically, unittest.TestCase
966 classes are instantiated automatically by scanning modules.)
966 classes are instantiated automatically by scanning modules.)
967 """
967 """
968
968
969 # Status code reserved for skipped tests (used by hghave).
969 # Status code reserved for skipped tests (used by hghave).
970 SKIPPED_STATUS = 80
970 SKIPPED_STATUS = 80
971
971
972 def __init__(
972 def __init__(
973 self,
973 self,
974 path,
974 path,
975 outputdir,
975 outputdir,
976 tmpdir,
976 tmpdir,
977 keeptmpdir=False,
977 keeptmpdir=False,
978 debug=False,
978 debug=False,
979 first=False,
979 first=False,
980 timeout=None,
980 timeout=None,
981 startport=None,
981 startport=None,
982 extraconfigopts=None,
982 extraconfigopts=None,
983 shell=None,
983 shell=None,
984 hgcommand=None,
984 hgcommand=None,
985 slowtimeout=None,
985 slowtimeout=None,
986 usechg=False,
986 usechg=False,
987 chgdebug=False,
987 chgdebug=False,
988 useipv6=False,
988 useipv6=False,
989 ):
989 ):
990 """Create a test from parameters.
990 """Create a test from parameters.
991
991
992 path is the full path to the file defining the test.
992 path is the full path to the file defining the test.
993
993
994 tmpdir is the main temporary directory to use for this test.
994 tmpdir is the main temporary directory to use for this test.
995
995
996 keeptmpdir determines whether to keep the test's temporary directory
996 keeptmpdir determines whether to keep the test's temporary directory
997 after execution. It defaults to removal (False).
997 after execution. It defaults to removal (False).
998
998
999 debug mode will make the test execute verbosely, with unfiltered
999 debug mode will make the test execute verbosely, with unfiltered
1000 output.
1000 output.
1001
1001
1002 timeout controls the maximum run time of the test. It is ignored when
1002 timeout controls the maximum run time of the test. It is ignored when
1003 debug is True. See slowtimeout for tests with #require slow.
1003 debug is True. See slowtimeout for tests with #require slow.
1004
1004
1005 slowtimeout overrides timeout if the test has #require slow.
1005 slowtimeout overrides timeout if the test has #require slow.
1006
1006
1007 startport controls the starting port number to use for this test. Each
1007 startport controls the starting port number to use for this test. Each
1008 test will reserve 3 port numbers for execution. It is the caller's
1008 test will reserve 3 port numbers for execution. It is the caller's
1009 responsibility to allocate a non-overlapping port range to Test
1009 responsibility to allocate a non-overlapping port range to Test
1010 instances.
1010 instances.
1011
1011
1012 extraconfigopts is an iterable of extra hgrc config options. Values
1012 extraconfigopts is an iterable of extra hgrc config options. Values
1013 must have the form "section.key=value" (something understood by hgrc);
1013 must have the form "section.key=value" (something understood by hgrc);
1014 a value of the form "foo.key=value" will result in "[foo] key=value".
1014 a value of the form "foo.key=value" will result in "[foo] key=value".
1015
1015
1016 shell is the shell to execute tests in.
1016 shell is the shell to execute tests in.
1017 """
1017 """
1018 if timeout is None:
1018 if timeout is None:
1019 timeout = defaults['timeout']
1019 timeout = defaults['timeout']
1020 if startport is None:
1020 if startport is None:
1021 startport = defaults['port']
1021 startport = defaults['port']
1022 if slowtimeout is None:
1022 if slowtimeout is None:
1023 slowtimeout = defaults['slowtimeout']
1023 slowtimeout = defaults['slowtimeout']
1024 self.path = path
1024 self.path = path
1025 self.relpath = os.path.relpath(path)
1025 self.relpath = os.path.relpath(path)
1026 self.bname = os.path.basename(path)
1026 self.bname = os.path.basename(path)
1027 self.name = _bytes2sys(self.bname)
1027 self.name = _bytes2sys(self.bname)
1028 self._testdir = os.path.dirname(path)
1028 self._testdir = os.path.dirname(path)
1029 self._outputdir = outputdir
1029 self._outputdir = outputdir
1030 self._tmpname = os.path.basename(path)
1030 self._tmpname = os.path.basename(path)
1031 self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)
1031 self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)
1032
1032
1033 self._threadtmp = tmpdir
1033 self._threadtmp = tmpdir
1034 self._keeptmpdir = keeptmpdir
1034 self._keeptmpdir = keeptmpdir
1035 self._debug = debug
1035 self._debug = debug
1036 self._first = first
1036 self._first = first
1037 self._timeout = timeout
1037 self._timeout = timeout
1038 self._slowtimeout = slowtimeout
1038 self._slowtimeout = slowtimeout
1039 self._startport = startport
1039 self._startport = startport
1040 self._extraconfigopts = extraconfigopts or []
1040 self._extraconfigopts = extraconfigopts or []
1041 self._shell = _sys2bytes(shell)
1041 self._shell = _sys2bytes(shell)
1042 self._hgcommand = hgcommand or b'hg'
1042 self._hgcommand = hgcommand or b'hg'
1043 self._usechg = usechg
1043 self._usechg = usechg
1044 self._chgdebug = chgdebug
1044 self._chgdebug = chgdebug
1045 self._useipv6 = useipv6
1045 self._useipv6 = useipv6
1046
1046
1047 self._aborted = False
1047 self._aborted = False
1048 self._daemonpids = []
1048 self._daemonpids = []
1049 self._finished = None
1049 self._finished = None
1050 self._ret = None
1050 self._ret = None
1051 self._out = None
1051 self._out = None
1052 self._skipped = None
1052 self._skipped = None
1053 self._testtmp = None
1053 self._testtmp = None
1054 self._chgsockdir = None
1054 self._chgsockdir = None
1055
1055
1056 self._refout = self.readrefout()
1056 self._refout = self.readrefout()
1057
1057
1058 def readrefout(self):
1058 def readrefout(self):
1059 """read reference output"""
1059 """read reference output"""
1060 # If we're not in --debug mode and reference output file exists,
1060 # If we're not in --debug mode and reference output file exists,
1061 # check test output against it.
1061 # check test output against it.
1062 if self._debug:
1062 if self._debug:
1063 return None # to match "out is None"
1063 return None # to match "out is None"
1064 elif os.path.exists(self.refpath):
1064 elif os.path.exists(self.refpath):
1065 with open(self.refpath, 'rb') as f:
1065 with open(self.refpath, 'rb') as f:
1066 return f.read().splitlines(True)
1066 return f.read().splitlines(True)
1067 else:
1067 else:
1068 return []
1068 return []
1069
1069
1070 # needed to get base class __repr__ running
1070 # needed to get base class __repr__ running
1071 @property
1071 @property
1072 def _testMethodName(self):
1072 def _testMethodName(self):
1073 return self.name
1073 return self.name
1074
1074
1075 def __str__(self):
1075 def __str__(self):
1076 return self.name
1076 return self.name
1077
1077
1078 def shortDescription(self):
1078 def shortDescription(self):
1079 return self.name
1079 return self.name
1080
1080
1081 def setUp(self):
1081 def setUp(self):
1082 """Tasks to perform before run()."""
1082 """Tasks to perform before run()."""
1083 self._finished = False
1083 self._finished = False
1084 self._ret = None
1084 self._ret = None
1085 self._out = None
1085 self._out = None
1086 self._skipped = None
1086 self._skipped = None
1087
1087
1088 try:
1088 try:
1089 os.mkdir(self._threadtmp)
1089 os.mkdir(self._threadtmp)
1090 except OSError as e:
1090 except OSError as e:
1091 if e.errno != errno.EEXIST:
1091 if e.errno != errno.EEXIST:
1092 raise
1092 raise
1093
1093
1094 name = self._tmpname
1094 name = self._tmpname
1095 self._testtmp = os.path.join(self._threadtmp, name)
1095 self._testtmp = os.path.join(self._threadtmp, name)
1096 os.mkdir(self._testtmp)
1096 os.mkdir(self._testtmp)
1097
1097
1098 # Remove any previous output files.
1098 # Remove any previous output files.
1099 if os.path.exists(self.errpath):
1099 if os.path.exists(self.errpath):
1100 try:
1100 try:
1101 os.remove(self.errpath)
1101 os.remove(self.errpath)
1102 except OSError as e:
1102 except OSError as e:
1103 # We might have raced another test to clean up a .err
1103 # We might have raced another test to clean up a .err
1104 # file, so ignore ENOENT when removing a previous .err
1104 # file, so ignore ENOENT when removing a previous .err
1105 # file.
1105 # file.
1106 if e.errno != errno.ENOENT:
1106 if e.errno != errno.ENOENT:
1107 raise
1107 raise
1108
1108
1109 if self._usechg:
1109 if self._usechg:
1110 self._chgsockdir = os.path.join(
1110 self._chgsockdir = os.path.join(
1111 self._threadtmp, b'%s.chgsock' % name
1111 self._threadtmp, b'%s.chgsock' % name
1112 )
1112 )
1113 os.mkdir(self._chgsockdir)
1113 os.mkdir(self._chgsockdir)
1114
1114
1115 def run(self, result):
1115 def run(self, result):
1116 """Run this test and report results against a TestResult instance."""
1116 """Run this test and report results against a TestResult instance."""
1117 # This function is extremely similar to unittest.TestCase.run(). Once
1117 # This function is extremely similar to unittest.TestCase.run(). Once
1118 # we require Python 2.7 (or at least its version of unittest), this
1118 # we require Python 2.7 (or at least its version of unittest), this
1119 # function can largely go away.
1119 # function can largely go away.
1120 self._result = result
1120 self._result = result
1121 result.startTest(self)
1121 result.startTest(self)
1122 try:
1122 try:
1123 try:
1123 try:
1124 self.setUp()
1124 self.setUp()
1125 except (KeyboardInterrupt, SystemExit):
1125 except (KeyboardInterrupt, SystemExit):
1126 self._aborted = True
1126 self._aborted = True
1127 raise
1127 raise
1128 except Exception:
1128 except Exception:
1129 result.addError(self, sys.exc_info())
1129 result.addError(self, sys.exc_info())
1130 return
1130 return
1131
1131
1132 success = False
1132 success = False
1133 try:
1133 try:
1134 self.runTest()
1134 self.runTest()
1135 except KeyboardInterrupt:
1135 except KeyboardInterrupt:
1136 self._aborted = True
1136 self._aborted = True
1137 raise
1137 raise
1138 except unittest.SkipTest as e:
1138 except unittest.SkipTest as e:
1139 result.addSkip(self, str(e))
1139 result.addSkip(self, str(e))
1140 # The base class will have already counted this as a
1140 # The base class will have already counted this as a
1141 # test we "ran", but we want to exclude skipped tests
1141 # test we "ran", but we want to exclude skipped tests
1142 # from the count of tests we report as run.
1142 # from the count of tests we report as run.
1143 result.testsRun -= 1
1143 result.testsRun -= 1
1144 except self.failureException as e:
1144 except self.failureException as e:
1145 # This differs from unittest in that we don't capture
1145 # This differs from unittest in that we don't capture
1146 # the stack trace. This is for historical reasons and
1146 # the stack trace. This is for historical reasons and
1147 # this decision could be revisited in the future,
1147 # this decision could be revisited in the future,
1148 # especially for PythonTest instances.
1148 # especially for PythonTest instances.
1149 if result.addFailure(self, str(e)):
1149 if result.addFailure(self, str(e)):
1150 success = True
1150 success = True
1151 except Exception:
1151 except Exception:
1152 result.addError(self, sys.exc_info())
1152 result.addError(self, sys.exc_info())
1153 else:
1153 else:
1154 success = True
1154 success = True
1155
1155
1156 try:
1156 try:
1157 self.tearDown()
1157 self.tearDown()
1158 except (KeyboardInterrupt, SystemExit):
1158 except (KeyboardInterrupt, SystemExit):
1159 self._aborted = True
1159 self._aborted = True
1160 raise
1160 raise
1161 except Exception:
1161 except Exception:
1162 result.addError(self, sys.exc_info())
1162 result.addError(self, sys.exc_info())
1163 success = False
1163 success = False
1164
1164
1165 if success:
1165 if success:
1166 result.addSuccess(self)
1166 result.addSuccess(self)
1167 finally:
1167 finally:
1168 result.stopTest(self, interrupted=self._aborted)
1168 result.stopTest(self, interrupted=self._aborted)
1169
1169
1170 def runTest(self):
1170 def runTest(self):
1171 """Run this test instance.
1171 """Run this test instance.
1172
1172
1173 This will return a tuple describing the result of the test.
1173 This will return a tuple describing the result of the test.
1174 """
1174 """
1175 env = self._getenv()
1175 env = self._getenv()
1176 self._genrestoreenv(env)
1176 self._genrestoreenv(env)
1177 self._daemonpids.append(env['DAEMON_PIDS'])
1177 self._daemonpids.append(env['DAEMON_PIDS'])
1178 self._createhgrc(env['HGRCPATH'])
1178 self._createhgrc(env['HGRCPATH'])
1179
1179
1180 vlog('# Test', self.name)
1180 vlog('# Test', self.name)
1181
1181
1182 ret, out = self._run(env)
1182 ret, out = self._run(env)
1183 self._finished = True
1183 self._finished = True
1184 self._ret = ret
1184 self._ret = ret
1185 self._out = out
1185 self._out = out
1186
1186
1187 def describe(ret):
1187 def describe(ret):
1188 if ret < 0:
1188 if ret < 0:
1189 return 'killed by signal: %d' % -ret
1189 return 'killed by signal: %d' % -ret
1190 return 'returned error code %d' % ret
1190 return 'returned error code %d' % ret
1191
1191
1192 self._skipped = False
1192 self._skipped = False
1193
1193
1194 if ret == self.SKIPPED_STATUS:
1194 if ret == self.SKIPPED_STATUS:
1195 if out is None: # Debug mode, nothing to parse.
1195 if out is None: # Debug mode, nothing to parse.
1196 missing = ['unknown']
1196 missing = ['unknown']
1197 failed = None
1197 failed = None
1198 else:
1198 else:
1199 missing, failed = TTest.parsehghaveoutput(out)
1199 missing, failed = TTest.parsehghaveoutput(out)
1200
1200
1201 if not missing:
1201 if not missing:
1202 missing = ['skipped']
1202 missing = ['skipped']
1203
1203
1204 if failed:
1204 if failed:
1205 self.fail('hghave failed checking for %s' % failed[-1])
1205 self.fail('hghave failed checking for %s' % failed[-1])
1206 else:
1206 else:
1207 self._skipped = True
1207 self._skipped = True
1208 raise unittest.SkipTest(missing[-1])
1208 raise unittest.SkipTest(missing[-1])
1209 elif ret == 'timeout':
1209 elif ret == 'timeout':
1210 self.fail('timed out')
1210 self.fail('timed out')
1211 elif ret is False:
1211 elif ret is False:
1212 self.fail('no result code from test')
1212 self.fail('no result code from test')
1213 elif out != self._refout:
1213 elif out != self._refout:
1214 # Diff generation may rely on the written .err file.
1214 # Diff generation may rely on the written .err file.
1215 if (
1215 if (
1216 (ret != 0 or out != self._refout)
1216 (ret != 0 or out != self._refout)
1217 and not self._skipped
1217 and not self._skipped
1218 and not self._debug
1218 and not self._debug
1219 ):
1219 ):
1220 with open(self.errpath, 'wb') as f:
1220 with open(self.errpath, 'wb') as f:
1221 for line in out:
1221 for line in out:
1222 f.write(line)
1222 f.write(line)
1223
1223
1224 # The result object handles diff calculation for us.
1224 # The result object handles diff calculation for us.
1225 with firstlock:
1225 with firstlock:
1226 if self._result.addOutputMismatch(self, ret, out, self._refout):
1226 if self._result.addOutputMismatch(self, ret, out, self._refout):
1227 # change was accepted, skip failing
1227 # change was accepted, skip failing
1228 return
1228 return
1229 if self._first:
1229 if self._first:
1230 global firsterror
1230 global firsterror
1231 firsterror = True
1231 firsterror = True
1232
1232
1233 if ret:
1233 if ret:
1234 msg = 'output changed and ' + describe(ret)
1234 msg = 'output changed and ' + describe(ret)
1235 else:
1235 else:
1236 msg = 'output changed'
1236 msg = 'output changed'
1237
1237
1238 self.fail(msg)
1238 self.fail(msg)
1239 elif ret:
1239 elif ret:
1240 self.fail(describe(ret))
1240 self.fail(describe(ret))
1241
1241
1242 def tearDown(self):
1242 def tearDown(self):
1243 """Tasks to perform after run()."""
1243 """Tasks to perform after run()."""
1244 for entry in self._daemonpids:
1244 for entry in self._daemonpids:
1245 killdaemons(entry)
1245 killdaemons(entry)
1246 self._daemonpids = []
1246 self._daemonpids = []
1247
1247
1248 if self._keeptmpdir:
1248 if self._keeptmpdir:
1249 log(
1249 log(
1250 '\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s'
1250 '\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s'
1251 % (
1251 % (
1252 _bytes2sys(self._testtmp),
1252 _bytes2sys(self._testtmp),
1253 _bytes2sys(self._threadtmp),
1253 _bytes2sys(self._threadtmp),
1254 )
1254 )
1255 )
1255 )
1256 else:
1256 else:
1257 try:
1257 try:
1258 shutil.rmtree(self._testtmp)
1258 shutil.rmtree(self._testtmp)
1259 except OSError:
1259 except OSError:
1260 # unreadable directory may be left in $TESTTMP; fix permission
1260 # unreadable directory may be left in $TESTTMP; fix permission
1261 # and try again
1261 # and try again
1262 makecleanable(self._testtmp)
1262 makecleanable(self._testtmp)
1263 shutil.rmtree(self._testtmp, True)
1263 shutil.rmtree(self._testtmp, True)
1264 shutil.rmtree(self._threadtmp, True)
1264 shutil.rmtree(self._threadtmp, True)
1265
1265
1266 if self._usechg:
1266 if self._usechg:
1267 # chgservers will stop automatically once they find that their socket
1267 # chgservers will stop automatically once they find that their socket
1268 # files have been deleted
1268 # files have been deleted
1269 shutil.rmtree(self._chgsockdir, True)
1269 shutil.rmtree(self._chgsockdir, True)
1270
1270
1271 if (
1271 if (
1272 (self._ret != 0 or self._out != self._refout)
1272 (self._ret != 0 or self._out != self._refout)
1273 and not self._skipped
1273 and not self._skipped
1274 and not self._debug
1274 and not self._debug
1275 and self._out
1275 and self._out
1276 ):
1276 ):
1277 with open(self.errpath, 'wb') as f:
1277 with open(self.errpath, 'wb') as f:
1278 for line in self._out:
1278 for line in self._out:
1279 f.write(line)
1279 f.write(line)
1280
1280
1281 vlog("# Ret was:", self._ret, '(%s)' % self.name)
1281 vlog("# Ret was:", self._ret, '(%s)' % self.name)
1282
1282
1283 def _run(self, env):
1283 def _run(self, env):
1284 # This should be implemented in child classes to run tests.
1284 # This should be implemented in child classes to run tests.
1285 raise unittest.SkipTest('unknown test type')
1285 raise unittest.SkipTest('unknown test type')
1286
1286
1287 def abort(self):
1287 def abort(self):
1288 """Terminate execution of this test."""
1288 """Terminate execution of this test."""
1289 self._aborted = True
1289 self._aborted = True
1290
1290
1291 def _portmap(self, i):
1291 def _portmap(self, i):
1292 offset = b'' if i == 0 else b'%d' % i
1292 offset = b'' if i == 0 else b'%d' % i
1293 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
1293 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
1294
1294
1295 def _getreplacements(self):
1295 def _getreplacements(self):
1296 """Obtain a mapping of text replacements to apply to test output.
1296 """Obtain a mapping of text replacements to apply to test output.
1297
1297
1298 Test output needs to be normalized so it can be compared to expected
1298 Test output needs to be normalized so it can be compared to expected
1299 output. This function defines how some of that normalization will
1299 output. This function defines how some of that normalization will
1300 occur.
1300 occur.
1301 """
1301 """
1302 r = [
1302 r = [
1303 # This list should be parallel to defineport in _getenv
1303 # This list should be parallel to defineport in _getenv
1304 self._portmap(0),
1304 self._portmap(0),
1305 self._portmap(1),
1305 self._portmap(1),
1306 self._portmap(2),
1306 self._portmap(2),
1307 (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
1307 (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
1308 (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
1308 (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
1309 ]
1309 ]
1310 r.append((self._escapepath(self._testtmp), b'$TESTTMP'))
1310 r.append((self._escapepath(self._testtmp), b'$TESTTMP'))
1311
1311
1312 replacementfile = os.path.join(self._testdir, b'common-pattern.py')
1312 replacementfile = os.path.join(self._testdir, b'common-pattern.py')
1313
1313
1314 if os.path.exists(replacementfile):
1314 if os.path.exists(replacementfile):
1315 data = {}
1315 data = {}
1316 with open(replacementfile, mode='rb') as source:
1316 with open(replacementfile, mode='rb') as source:
1317 # the intermediate 'compile' step helps with debugging
1317 # the intermediate 'compile' step helps with debugging
1318 code = compile(source.read(), replacementfile, 'exec')
1318 code = compile(source.read(), replacementfile, 'exec')
1319 exec(code, data)
1319 exec(code, data)
1320 for value in data.get('substitutions', ()):
1320 for value in data.get('substitutions', ()):
1321 if len(value) != 2:
1321 if len(value) != 2:
1322 msg = 'malformed substitution in %s: %r'
1322 msg = 'malformed substitution in %s: %r'
1323 msg %= (replacementfile, value)
1323 msg %= (replacementfile, value)
1324 raise ValueError(msg)
1324 raise ValueError(msg)
1325 r.append(value)
1325 r.append(value)
1326 return r
1326 return r
1327
1327
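    # Editorial sketch (not part of run-tests.py): assuming a hypothetical
    # start port of 20059, the first replacement pair produced above rewrites
    # raw output back into the symbolic form used by .t files:
    #
    #     >>> re.sub(br':%d\b' % 20059, b':$HGPORT', b'listening at http://127.0.0.1:20059/')
    #     b'listening at http://127.0.0.1:$HGPORT/'
    #
    # the $LOCALIP and $TESTTMP pairs are applied the same way.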
1328 def _escapepath(self, p):
1328 def _escapepath(self, p):
1329 if os.name == 'nt':
1329 if os.name == 'nt':
1330 return b''.join(
1330 return b''.join(
1331 c.isalpha()
1331 c.isalpha()
1332 and b'[%s%s]' % (c.lower(), c.upper())
1332 and b'[%s%s]' % (c.lower(), c.upper())
1333 or c in b'/\\'
1333 or c in b'/\\'
1334 and br'[/\\]'
1334 and br'[/\\]'
1335 or c.isdigit()
1335 or c.isdigit()
1336 and c
1336 and c
1337 or b'\\' + c
1337 or b'\\' + c
1338 for c in [p[i : i + 1] for i in range(len(p))]
1338 for c in [p[i : i + 1] for i in range(len(p))]
1339 )
1339 )
1340 else:
1340 else:
1341 return re.escape(p)
1341 return re.escape(p)
1342
1342
1343 def _localip(self):
1343 def _localip(self):
1344 if self._useipv6:
1344 if self._useipv6:
1345 return b'::1'
1345 return b'::1'
1346 else:
1346 else:
1347 return b'127.0.0.1'
1347 return b'127.0.0.1'
1348
1348
1349 def _genrestoreenv(self, testenv):
1349 def _genrestoreenv(self, testenv):
1350 """Generate a script that can be used by tests to restore the original
1350 """Generate a script that can be used by tests to restore the original
1351 environment."""
1351 environment."""
1352 # Put the restoreenv script inside self._threadtmp
1352 # Put the restoreenv script inside self._threadtmp
1353 scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
1353 scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
1354 testenv['HGTEST_RESTOREENV'] = _bytes2sys(scriptpath)
1354 testenv['HGTEST_RESTOREENV'] = _bytes2sys(scriptpath)
1355
1355
1356 # Only restore environment variable names that the shell allows
1356 # Only restore environment variable names that the shell allows
1357 # us to export.
1357 # us to export.
1358 name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')
1358 name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')
1359
1359
1360 # Do not restore these variables; otherwise tests would fail.
1360 # Do not restore these variables; otherwise tests would fail.
1361 reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}
1361 reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}
1362
1362
1363 with open(scriptpath, 'w') as envf:
1363 with open(scriptpath, 'w') as envf:
1364 for name, value in origenviron.items():
1364 for name, value in origenviron.items():
1365 if not name_regex.match(name):
1365 if not name_regex.match(name):
1366 # Skip environment variables with unusual names not
1366 # Skip environment variables with unusual names not
1367 # allowed by most shells.
1367 # allowed by most shells.
1368 continue
1368 continue
1369 if name in reqnames:
1369 if name in reqnames:
1370 continue
1370 continue
1371 envf.write('%s=%s\n' % (name, shellquote(value)))
1371 envf.write('%s=%s\n' % (name, shellquote(value)))
1372
1372
1373 for name in testenv:
1373 for name in testenv:
1374 if name in origenviron or name in reqnames:
1374 if name in origenviron or name in reqnames:
1375 continue
1375 continue
1376 envf.write('unset %s\n' % (name,))
1376 envf.write('unset %s\n' % (name,))
1377
1377
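    # Editorial sketch (not part of run-tests.py): for an original environment
    # containing LANG and a test environment that additionally defines HGPORT,
    # the generated restoreenv.sh would contain lines roughly like
    #
    #     LANG='en_US.UTF-8'
    #     unset HGPORT
    #
    # i.e. shell-quoted assignments for variables to restore and `unset` for
    # variables that exist only inside the test run (the exact quoting comes
    # from shellquote(), defined elsewhere in this script).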
1378 def _getenv(self):
1378 def _getenv(self):
1379 """Obtain environment variables to use during test execution."""
1379 """Obtain environment variables to use during test execution."""
1380
1380
1381 def defineport(i):
1381 def defineport(i):
1382 offset = '' if i == 0 else '%s' % i
1382 offset = '' if i == 0 else '%s' % i
1383 env["HGPORT%s" % offset] = '%s' % (self._startport + i)
1383 env["HGPORT%s" % offset] = '%s' % (self._startport + i)
1384
1384
1385 env = os.environ.copy()
1385 env = os.environ.copy()
1386 env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
1386 env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
1387 env['HGEMITWARNINGS'] = '1'
1387 env['HGEMITWARNINGS'] = '1'
1388 env['TESTTMP'] = _bytes2sys(self._testtmp)
1388 env['TESTTMP'] = _bytes2sys(self._testtmp)
1389 uid_file = os.path.join(_bytes2sys(self._testtmp), 'UID')
1389 uid_file = os.path.join(_bytes2sys(self._testtmp), 'UID')
1390 env['HGTEST_UUIDFILE'] = uid_file
1390 env['HGTEST_UUIDFILE'] = uid_file
1391 env['TESTNAME'] = self.name
1391 env['TESTNAME'] = self.name
1392 env['HOME'] = _bytes2sys(self._testtmp)
1392 env['HOME'] = _bytes2sys(self._testtmp)
1393 if os.name == 'nt':
1393 if os.name == 'nt':
1394 env['REALUSERPROFILE'] = env['USERPROFILE']
1394 env['REALUSERPROFILE'] = env['USERPROFILE']
1395 # py3.8+ ignores HOME: https://bugs.python.org/issue36264
1395 # py3.8+ ignores HOME: https://bugs.python.org/issue36264
1396 env['USERPROFILE'] = env['HOME']
1396 env['USERPROFILE'] = env['HOME']
1397 formated_timeout = _bytes2sys(b"%d" % default_defaults['timeout'][1])
1397 formated_timeout = _bytes2sys(b"%d" % default_defaults['timeout'][1])
1398 env['HGTEST_TIMEOUT_DEFAULT'] = formated_timeout
1398 env['HGTEST_TIMEOUT_DEFAULT'] = formated_timeout
1399 env['HGTEST_TIMEOUT'] = _bytes2sys(b"%d" % self._timeout)
1399 env['HGTEST_TIMEOUT'] = _bytes2sys(b"%d" % self._timeout)
1400 # This number should match portneeded in _getport
1400 # This number should match portneeded in _getport
1401 for port in xrange(3):
1401 for port in xrange(3):
1402 # This list should be parallel to _portmap in _getreplacements
1402 # This list should be parallel to _portmap in _getreplacements
1403 defineport(port)
1403 defineport(port)
1404 env["HGRCPATH"] = _bytes2sys(os.path.join(self._threadtmp, b'.hgrc'))
1404 env["HGRCPATH"] = _bytes2sys(os.path.join(self._threadtmp, b'.hgrc'))
1405 env["DAEMON_PIDS"] = _bytes2sys(
1405 env["DAEMON_PIDS"] = _bytes2sys(
1406 os.path.join(self._threadtmp, b'daemon.pids')
1406 os.path.join(self._threadtmp, b'daemon.pids')
1407 )
1407 )
1408 env["HGEDITOR"] = (
1408 env["HGEDITOR"] = (
1409 '"' + sysexecutable + '"' + ' -c "import sys; sys.exit(0)"'
1409 '"' + sysexecutable + '"' + ' -c "import sys; sys.exit(0)"'
1410 )
1410 )
1411 env["HGUSER"] = "test"
1411 env["HGUSER"] = "test"
1412 env["HGENCODING"] = "ascii"
1412 env["HGENCODING"] = "ascii"
1413 env["HGENCODINGMODE"] = "strict"
1413 env["HGENCODINGMODE"] = "strict"
1414 env["HGHOSTNAME"] = "test-hostname"
1414 env["HGHOSTNAME"] = "test-hostname"
1415 env['HGIPV6'] = str(int(self._useipv6))
1415 env['HGIPV6'] = str(int(self._useipv6))
1416 # See contrib/catapipe.py for how to use this functionality.
1416 # See contrib/catapipe.py for how to use this functionality.
1417 if 'HGTESTCATAPULTSERVERPIPE' not in env:
1417 if 'HGTESTCATAPULTSERVERPIPE' not in env:
1418 # If we don't have HGTESTCATAPULTSERVERPIPE explicitly set, pull the
1418 # If we don't have HGTESTCATAPULTSERVERPIPE explicitly set, pull the
1419 # non-test one in as a default, otherwise set to devnull
1419 # non-test one in as a default, otherwise set to devnull
1420 env['HGTESTCATAPULTSERVERPIPE'] = env.get(
1420 env['HGTESTCATAPULTSERVERPIPE'] = env.get(
1421 'HGCATAPULTSERVERPIPE', os.devnull
1421 'HGCATAPULTSERVERPIPE', os.devnull
1422 )
1422 )
1423
1423
1424 extraextensions = []
1424 extraextensions = []
1425 for opt in self._extraconfigopts:
1425 for opt in self._extraconfigopts:
1426 section, key = opt.split('.', 1)
1426 section, key = opt.split('.', 1)
1427 if section != 'extensions':
1427 if section != 'extensions':
1428 continue
1428 continue
1429 name = key.split('=', 1)[0]
1429 name = key.split('=', 1)[0]
1430 extraextensions.append(name)
1430 extraextensions.append(name)
1431
1431
1432 if extraextensions:
1432 if extraextensions:
1433 env['HGTESTEXTRAEXTENSIONS'] = ' '.join(extraextensions)
1433 env['HGTESTEXTRAEXTENSIONS'] = ' '.join(extraextensions)
1434
1434
1435 # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
1435 # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
1436 # IP addresses.
1436 # IP addresses.
1437 env['LOCALIP'] = _bytes2sys(self._localip())
1437 env['LOCALIP'] = _bytes2sys(self._localip())
1438
1438
1439 # This has the same effect as Py_LegacyWindowsStdioFlag in exewrapper.c,
1439 # This has the same effect as Py_LegacyWindowsStdioFlag in exewrapper.c,
1440 # but this is needed for testing python instances like dummyssh,
1440 # but this is needed for testing python instances like dummyssh,
1441 # dummysmtpd.py, and dumbhttp.py.
1441 # dummysmtpd.py, and dumbhttp.py.
1442 if PYTHON3 and os.name == 'nt':
1442 if PYTHON3 and os.name == 'nt':
1443 env['PYTHONLEGACYWINDOWSSTDIO'] = '1'
1443 env['PYTHONLEGACYWINDOWSSTDIO'] = '1'
1444
1444
1445 # Modified HOME in test environment can confuse Rust tools. So set
1445 # Modified HOME in test environment can confuse Rust tools. So set
1446 # CARGO_HOME and RUSTUP_HOME automatically if a Rust toolchain is
1446 # CARGO_HOME and RUSTUP_HOME automatically if a Rust toolchain is
1447 # present and these variables aren't already defined.
1447 # present and these variables aren't already defined.
1448 cargo_home_path = os.path.expanduser('~/.cargo')
1448 cargo_home_path = os.path.expanduser('~/.cargo')
1449 rustup_home_path = os.path.expanduser('~/.rustup')
1449 rustup_home_path = os.path.expanduser('~/.rustup')
1450
1450
1451 if os.path.exists(cargo_home_path) and b'CARGO_HOME' not in osenvironb:
1451 if os.path.exists(cargo_home_path) and b'CARGO_HOME' not in osenvironb:
1452 env['CARGO_HOME'] = cargo_home_path
1452 env['CARGO_HOME'] = cargo_home_path
1453 if (
1453 if (
1454 os.path.exists(rustup_home_path)
1454 os.path.exists(rustup_home_path)
1455 and b'RUSTUP_HOME' not in osenvironb
1455 and b'RUSTUP_HOME' not in osenvironb
1456 ):
1456 ):
1457 env['RUSTUP_HOME'] = rustup_home_path
1457 env['RUSTUP_HOME'] = rustup_home_path
1458
1458
1459 # Reset some environment variables to well-known values so that
1459 # Reset some environment variables to well-known values so that
1460 # the tests produce repeatable output.
1460 # the tests produce repeatable output.
1461 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
1461 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
1462 env['TZ'] = 'GMT'
1462 env['TZ'] = 'GMT'
1463 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
1463 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
1464 env['COLUMNS'] = '80'
1464 env['COLUMNS'] = '80'
1465 env['TERM'] = 'xterm'
1465 env['TERM'] = 'xterm'
1466
1466
1467 dropped = [
1467 dropped = [
1468 'CDPATH',
1468 'CDPATH',
1469 'CHGDEBUG',
1469 'CHGDEBUG',
1470 'EDITOR',
1470 'EDITOR',
1471 'GREP_OPTIONS',
1471 'GREP_OPTIONS',
1472 'HG',
1472 'HG',
1473 'HGMERGE',
1473 'HGMERGE',
1474 'HGPLAIN',
1474 'HGPLAIN',
1475 'HGPLAINEXCEPT',
1475 'HGPLAINEXCEPT',
1476 'HGPROF',
1476 'HGPROF',
1477 'http_proxy',
1477 'http_proxy',
1478 'no_proxy',
1478 'no_proxy',
1479 'NO_PROXY',
1479 'NO_PROXY',
1480 'PAGER',
1480 'PAGER',
1481 'VISUAL',
1481 'VISUAL',
1482 ]
1482 ]
1483
1483
1484 for k in dropped:
1484 for k in dropped:
1485 if k in env:
1485 if k in env:
1486 del env[k]
1486 del env[k]
1487
1487
1488 # unset env related to hooks
1488 # unset env related to hooks
1489 for k in list(env):
1489 for k in list(env):
1490 if k.startswith('HG_'):
1490 if k.startswith('HG_'):
1491 del env[k]
1491 del env[k]
1492
1492
1493 if self._usechg:
1493 if self._usechg:
1494 env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
1494 env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
1495 if self._chgdebug:
1495 if self._chgdebug:
1496 env['CHGDEBUG'] = 'true'
1496 env['CHGDEBUG'] = 'true'
1497
1497
1498 return env
1498 return env
1499
1499
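    # Editorial sketch (not part of run-tests.py): with a hypothetical
    # startport of 20059, defineport() above exports
    #
    #     HGPORT=20059, HGPORT1=20060, HGPORT2=20061
    #
    # which are exactly the ports that _portmap()/_getreplacements() later
    # fold back into $HGPORT, $HGPORT1 and $HGPORT2 in the recorded output.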
1500 def _createhgrc(self, path):
1500 def _createhgrc(self, path):
1501 """Create an hgrc file for this test."""
1501 """Create an hgrc file for this test."""
1502 with open(path, 'wb') as hgrc:
1502 with open(path, 'wb') as hgrc:
1503 hgrc.write(b'[ui]\n')
1503 hgrc.write(b'[ui]\n')
1504 hgrc.write(b'slash = True\n')
1504 hgrc.write(b'slash = True\n')
1505 hgrc.write(b'interactive = False\n')
1505 hgrc.write(b'interactive = False\n')
1506 hgrc.write(b'detailed-exit-code = True\n')
1506 hgrc.write(b'detailed-exit-code = True\n')
1507 hgrc.write(b'merge = internal:merge\n')
1507 hgrc.write(b'merge = internal:merge\n')
1508 hgrc.write(b'mergemarkers = detailed\n')
1508 hgrc.write(b'mergemarkers = detailed\n')
1509 hgrc.write(b'promptecho = True\n')
1509 hgrc.write(b'promptecho = True\n')
1510 hgrc.write(b'timeout.warn=15\n')
1510 hgrc.write(b'timeout.warn=15\n')
1511 hgrc.write(b'[defaults]\n')
1511 hgrc.write(b'[defaults]\n')
1512 hgrc.write(b'[devel]\n')
1512 hgrc.write(b'[devel]\n')
1513 hgrc.write(b'all-warnings = true\n')
1513 hgrc.write(b'all-warnings = true\n')
1514 hgrc.write(b'default-date = 0 0\n')
1514 hgrc.write(b'default-date = 0 0\n')
1515 hgrc.write(b'[largefiles]\n')
1515 hgrc.write(b'[largefiles]\n')
1516 hgrc.write(
1516 hgrc.write(
1517 b'usercache = %s\n'
1517 b'usercache = %s\n'
1518 % (os.path.join(self._testtmp, b'.cache/largefiles'))
1518 % (os.path.join(self._testtmp, b'.cache/largefiles'))
1519 )
1519 )
1520 hgrc.write(b'[lfs]\n')
1520 hgrc.write(b'[lfs]\n')
1521 hgrc.write(
1521 hgrc.write(
1522 b'usercache = %s\n'
1522 b'usercache = %s\n'
1523 % (os.path.join(self._testtmp, b'.cache/lfs'))
1523 % (os.path.join(self._testtmp, b'.cache/lfs'))
1524 )
1524 )
1525 hgrc.write(b'[web]\n')
1525 hgrc.write(b'[web]\n')
1526 hgrc.write(b'address = localhost\n')
1526 hgrc.write(b'address = localhost\n')
1527 hgrc.write(b'ipv6 = %r\n' % self._useipv6)
1527 hgrc.write(b'ipv6 = %r\n' % self._useipv6)
1528 hgrc.write(b'server-header = testing stub value\n')
1528 hgrc.write(b'server-header = testing stub value\n')
1529
1529
1530 for opt in self._extraconfigopts:
1530 for opt in self._extraconfigopts:
1531 section, key = _sys2bytes(opt).split(b'.', 1)
1531 section, key = _sys2bytes(opt).split(b'.', 1)
1532 assert b'=' in key, (
1532 assert b'=' in key, (
1533 'extra config opt %s must ' 'have an = for assignment' % opt
1533 'extra config opt %s must ' 'have an = for assignment' % opt
1534 )
1534 )
1535 hgrc.write(b'[%s]\n%s\n' % (section, key))
1535 hgrc.write(b'[%s]\n%s\n' % (section, key))
1536
1536
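    # Editorial sketch (not part of run-tests.py): an option passed on the
    # command line as --extra-config-opt=phases.publish=False arrives here as
    # 'phases.publish=False'; the split on the first '.' then appends
    #
    #     [phases]
    #     publish=False
    #
    # to the generated hgrc.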
1537 def fail(self, msg):
1537 def fail(self, msg):
1538 # unittest differentiates between errored and failed.
1538 # unittest differentiates between errored and failed.
1539 # Failed is denoted by AssertionError (by default at least).
1539 # Failed is denoted by AssertionError (by default at least).
1540 raise AssertionError(msg)
1540 raise AssertionError(msg)
1541
1541
1542 def _runcommand(self, cmd, env, normalizenewlines=False):
1542 def _runcommand(self, cmd, env, normalizenewlines=False):
1543 """Run command in a sub-process, capturing the output (stdout and
1543 """Run command in a sub-process, capturing the output (stdout and
1544 stderr).
1544 stderr).
1545
1545
1546 Return a tuple (exitcode, output). output is None in debug mode.
1546 Return a tuple (exitcode, output). output is None in debug mode.
1547 """
1547 """
1548 if self._debug:
1548 if self._debug:
1549 proc = subprocess.Popen(
1549 proc = subprocess.Popen(
1550 _bytes2sys(cmd),
1550 _bytes2sys(cmd),
1551 shell=True,
1551 shell=True,
1552 cwd=_bytes2sys(self._testtmp),
1552 cwd=_bytes2sys(self._testtmp),
1553 env=env,
1553 env=env,
1554 )
1554 )
1555 ret = proc.wait()
1555 ret = proc.wait()
1556 return (ret, None)
1556 return (ret, None)
1557
1557
1558 proc = Popen4(cmd, self._testtmp, self._timeout, env)
1558 proc = Popen4(cmd, self._testtmp, self._timeout, env)
1559
1559
1560 def cleanup():
1560 def cleanup():
1561 terminate(proc)
1561 terminate(proc)
1562 ret = proc.wait()
1562 ret = proc.wait()
1563 if ret == 0:
1563 if ret == 0:
1564 ret = signal.SIGTERM << 8
1564 ret = signal.SIGTERM << 8
1565 killdaemons(env['DAEMON_PIDS'])
1565 killdaemons(env['DAEMON_PIDS'])
1566 return ret
1566 return ret
1567
1567
1568 proc.tochild.close()
1568 proc.tochild.close()
1569
1569
1570 try:
1570 try:
1571 output = proc.fromchild.read()
1571 output = proc.fromchild.read()
1572 except KeyboardInterrupt:
1572 except KeyboardInterrupt:
1573 vlog('# Handling keyboard interrupt')
1573 vlog('# Handling keyboard interrupt')
1574 cleanup()
1574 cleanup()
1575 raise
1575 raise
1576
1576
1577 ret = proc.wait()
1577 ret = proc.wait()
1578 if wifexited(ret):
1578 if wifexited(ret):
1579 ret = os.WEXITSTATUS(ret)
1579 ret = os.WEXITSTATUS(ret)
1580
1580
1581 if proc.timeout:
1581 if proc.timeout:
1582 ret = 'timeout'
1582 ret = 'timeout'
1583
1583
1584 if ret:
1584 if ret:
1585 killdaemons(env['DAEMON_PIDS'])
1585 killdaemons(env['DAEMON_PIDS'])
1586
1586
1587 for s, r in self._getreplacements():
1587 for s, r in self._getreplacements():
1588 output = re.sub(s, r, output)
1588 output = re.sub(s, r, output)
1589
1589
1590 if normalizenewlines:
1590 if normalizenewlines:
1591 output = output.replace(b'\r\n', b'\n')
1591 output = output.replace(b'\r\n', b'\n')
1592
1592
1593 return ret, output.splitlines(True)
1593 return ret, output.splitlines(True)
1594
1594
1595
1595
1596 class PythonTest(Test):
1596 class PythonTest(Test):
1597 """A Python-based test."""
1597 """A Python-based test."""
1598
1598
1599 @property
1599 @property
1600 def refpath(self):
1600 def refpath(self):
1601 return os.path.join(self._testdir, b'%s.out' % self.bname)
1601 return os.path.join(self._testdir, b'%s.out' % self.bname)
1602
1602
1603 def _run(self, env):
1603 def _run(self, env):
1604 # Quote the python(3) executable for Windows
1604 # Quote the python(3) executable for Windows
1605 cmd = b'"%s" "%s"' % (PYTHON, self.path)
1605 cmd = b'"%s" "%s"' % (PYTHON, self.path)
1606 vlog("# Running", cmd.decode("utf-8"))
1606 vlog("# Running", cmd.decode("utf-8"))
1607 normalizenewlines = os.name == 'nt'
1607 normalizenewlines = os.name == 'nt'
1608 result = self._runcommand(cmd, env, normalizenewlines=normalizenewlines)
1608 result = self._runcommand(cmd, env, normalizenewlines=normalizenewlines)
1609 if self._aborted:
1609 if self._aborted:
1610 raise KeyboardInterrupt()
1610 raise KeyboardInterrupt()
1611
1611
1612 return result
1612 return result
1613
1613
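
# Illustrative sketch, not part of the harness proper (Python 3 only; the
# helper and file names are hypothetical): a Python-based test is judged by
# running the script with the quoted interpreter, capturing stdout and
# stderr together, and comparing the result with the sibling ".out"
# reference file that refpath points at.
def _python_test_passes_sketch(script_path, out_path):
    import subprocess
    import sys

    # run the test script, folding stderr into stdout like the harness does
    res = subprocess.run(
        [sys.executable, script_path],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    # the test passes when its output matches the ".out" reference file
    with open(out_path, 'rb') as f:
        return res.stdout == f.read()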
1614
1614
1615 # Some glob patterns apply only in some circumstances, so the script
1615 # Some glob patterns apply only in some circumstances, so the script
1616 # might want to remove (glob) annotations that otherwise should be
1616 # might want to remove (glob) annotations that otherwise should be
1617 # retained.
1617 # retained.
1618 checkcodeglobpats = [
1618 checkcodeglobpats = [
1619 # On Windows it looks like \ doesn't require a (glob), but we know
1619 # On Windows it looks like \ doesn't require a (glob), but we know
1620 # better.
1620 # better.
1621 re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
1621 re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
1622 re.compile(br'^moving \S+/.*[^)]$'),
1622 re.compile(br'^moving \S+/.*[^)]$'),
1623 re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
1623 re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
1624 # Not all platforms have 127.0.0.1 as loopback (though most do),
1624 # Not all platforms have 127.0.0.1 as loopback (though most do),
1625 # so we always glob that too.
1625 # so we always glob that too.
1626 re.compile(br'.*\$LOCALIP.*$'),
1626 re.compile(br'.*\$LOCALIP.*$'),
1627 ]
1627 ]
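
# A minimal sketch of how the patterns above are consulted (the helper name
# is hypothetical): when a "(glob)" line happens to match the output
# literally on Windows, globmatch() would flag the annotation as removable,
# unless the line matches one of these patterns, in which case the "(glob)"
# is kept because other setups still need it.
def _must_keep_glob_sketch(expected_line):
    return any(pat.match(expected_line) for pat in checkcodeglobpats)

# any line mentioning $LOCALIP keeps its "(glob)", even if it matched as-is
assert _must_keep_glob_sketch(b'listening at http://$LOCALIP:20059/')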
1628
1628
1629 bchr = chr
1629 bchr = chr
1630 if PYTHON3:
1630 if PYTHON3:
1631 bchr = lambda x: bytes([x])
1631 bchr = lambda x: bytes([x])
1632
1632
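# Why bchr is defined this way (illustrative; Python 3 semantics): indexing
# a bytes object yields an int rather than a length-one bytes object, so a
# helper is needed to rebuild single-byte strings such as the ESCAPEMAP keys
# used by TTest further below.
if PYTHON3:
    assert b'abc'[0] == 97          # indexing gives an int on Python 3
    assert bchr(b'abc'[0]) == b'a'  # bchr() turns it back into bytes
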
1633 WARN_UNDEFINED = 1
1633 WARN_UNDEFINED = 1
1634 WARN_YES = 2
1634 WARN_YES = 2
1635 WARN_NO = 3
1635 WARN_NO = 3
1636
1636
1637 MARK_OPTIONAL = b" (?)\n"
1637 MARK_OPTIONAL = b" (?)\n"
1638
1638
1639
1639
1640 def isoptional(line):
1640 def isoptional(line):
1641 return line.endswith(MARK_OPTIONAL)
1641 return line.endswith(MARK_OPTIONAL)
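
# Illustrative example of the optional-output marker handled above: an
# expected line ending in " (?)" may be missing from the actual output
# without failing the test, which linematch() signals with its "retry" value.
assert isoptional(b'  re-merging some-file (?)\n')
assert not isoptional(b'  re-merging some-file\n')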
1642
1642
1643
1643
1644 class TTest(Test):
1644 class TTest(Test):
1645 """A "t test" is a test backed by a .t file."""
1645 """A "t test" is a test backed by a .t file."""
1646
1646
1647 SKIPPED_PREFIX = b'skipped: '
1647 SKIPPED_PREFIX = b'skipped: '
1648 FAILED_PREFIX = b'hghave check failed: '
1648 FAILED_PREFIX = b'hghave check failed: '
1649 NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search
1649 NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search
1650
1650
1651 ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
1651 ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
1652 ESCAPEMAP = {bchr(i): br'\x%02x' % i for i in range(256)}
1652 ESCAPEMAP = {bchr(i): br'\x%02x' % i for i in range(256)}
1653 ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
1653 ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
1654
1654
1655 def __init__(self, path, *args, **kwds):
1655 def __init__(self, path, *args, **kwds):
1656 # accept an extra "case" parameter
1656 # accept an extra "case" parameter
1657 case = kwds.pop('case', [])
1657 case = kwds.pop('case', [])
1658 self._case = case
1658 self._case = case
1659 self._allcases = {x for y in parsettestcases(path) for x in y}
1659 self._allcases = {x for y in parsettestcases(path) for x in y}
1660 super(TTest, self).__init__(path, *args, **kwds)
1660 super(TTest, self).__init__(path, *args, **kwds)
1661 if case:
1661 if case:
1662 casepath = b'#'.join(case)
1662 casepath = b'#'.join(case)
1663 self.name = '%s#%s' % (self.name, _bytes2sys(casepath))
1663 self.name = '%s#%s' % (self.name, _bytes2sys(casepath))
1664 self.errpath = b'%s#%s.err' % (self.errpath[:-4], casepath)
1664 self.errpath = b'%s#%s.err' % (self.errpath[:-4], casepath)
1665 self._tmpname += b'-%s' % casepath.replace(b'#', b'-')
1665 self._tmpname += b'-%s' % casepath.replace(b'#', b'-')
1666 self._have = {}
1666 self._have = {}
1667
1667
1668 @property
1668 @property
1669 def refpath(self):
1669 def refpath(self):
1670 return os.path.join(self._testdir, self.bname)
1670 return os.path.join(self._testdir, self.bname)
1671
1671
1672 def _run(self, env):
1672 def _run(self, env):
1673 with open(self.path, 'rb') as f:
1673 with open(self.path, 'rb') as f:
1674 lines = f.readlines()
1674 lines = f.readlines()
1675
1675
1676 # .t file is both reference output and the test input, keep reference
1676 # .t file is both reference output and the test input, keep reference
1677 # output updated with the test input. This avoids some race
1677 # output updated with the test input. This avoids some race
1678 # conditions where the reference output does not match the actual test.
1678 # conditions where the reference output does not match the actual test.
1679 if self._refout is not None:
1679 if self._refout is not None:
1680 self._refout = lines
1680 self._refout = lines
1681
1681
1682 salt, script, after, expected = self._parsetest(lines)
1682 salt, script, after, expected = self._parsetest(lines)
1683
1683
1684 # Write out the generated script.
1684 # Write out the generated script.
1685 fname = b'%s.sh' % self._testtmp
1685 fname = b'%s.sh' % self._testtmp
1686 with open(fname, 'wb') as f:
1686 with open(fname, 'wb') as f:
1687 for l in script:
1687 for l in script:
1688 f.write(l)
1688 f.write(l)
1689
1689
1690 cmd = b'%s "%s"' % (self._shell, fname)
1690 cmd = b'%s "%s"' % (self._shell, fname)
1691 vlog("# Running", cmd.decode("utf-8"))
1691 vlog("# Running", cmd.decode("utf-8"))
1692
1692
1693 exitcode, output = self._runcommand(cmd, env)
1693 exitcode, output = self._runcommand(cmd, env)
1694
1694
1695 if self._aborted:
1695 if self._aborted:
1696 raise KeyboardInterrupt()
1696 raise KeyboardInterrupt()
1697
1697
1698 # Do not merge output if skipped. Return hghave message instead.
1698 # Do not merge output if skipped. Return hghave message instead.
1699 # Similarly, with --debug, output is None.
1699 # Similarly, with --debug, output is None.
1700 if exitcode == self.SKIPPED_STATUS or output is None:
1700 if exitcode == self.SKIPPED_STATUS or output is None:
1701 return exitcode, output
1701 return exitcode, output
1702
1702
1703 return self._processoutput(exitcode, output, salt, after, expected)
1703 return self._processoutput(exitcode, output, salt, after, expected)
1704
1704
1705 def _hghave(self, reqs):
1705 def _hghave(self, reqs):
1706 allreqs = b' '.join(reqs)
1706 allreqs = b' '.join(reqs)
1707
1707
1708 self._detectslow(reqs)
1708 self._detectslow(reqs)
1709
1709
1710 if allreqs in self._have:
1710 if allreqs in self._have:
1711 return self._have.get(allreqs)
1711 return self._have.get(allreqs)
1712
1712
1713 # TODO do something smarter when all other uses of hghave are gone.
1713 # TODO do something smarter when all other uses of hghave are gone.
1714 runtestdir = osenvironb[b'RUNTESTDIR']
1714 runtestdir = osenvironb[b'RUNTESTDIR']
1715 tdir = runtestdir.replace(b'\\', b'/')
1715 tdir = runtestdir.replace(b'\\', b'/')
1716 proc = Popen4(
1716 proc = Popen4(
1717 b'%s -c "%s/hghave %s"' % (self._shell, tdir, allreqs),
1717 b'%s -c "%s/hghave %s"' % (self._shell, tdir, allreqs),
1718 self._testtmp,
1718 self._testtmp,
1719 0,
1719 0,
1720 self._getenv(),
1720 self._getenv(),
1721 )
1721 )
1722 stdout, stderr = proc.communicate()
1722 stdout, stderr = proc.communicate()
1723 ret = proc.wait()
1723 ret = proc.wait()
1724 if wifexited(ret):
1724 if wifexited(ret):
1725 ret = os.WEXITSTATUS(ret)
1725 ret = os.WEXITSTATUS(ret)
1726 if ret == 2:
1726 if ret == 2:
1727 print(stdout.decode('utf-8'))
1727 print(stdout.decode('utf-8'))
1728 sys.exit(1)
1728 sys.exit(1)
1729
1729
1730 if ret != 0:
1730 if ret != 0:
1731 self._have[allreqs] = (False, stdout)
1731 self._have[allreqs] = (False, stdout)
1732 return False, stdout
1732 return False, stdout
1733
1733
1734 self._have[allreqs] = (True, None)
1734 self._have[allreqs] = (True, None)
1735 return True, None
1735 return True, None
1736
1736
1737 def _detectslow(self, reqs):
1737 def _detectslow(self, reqs):
1738 """update the timeout of slow test when appropriate"""
1738 """update the timeout of slow test when appropriate"""
1739 if b'slow' in reqs:
1739 if b'slow' in reqs:
1740 self._timeout = self._slowtimeout
1740 self._timeout = self._slowtimeout
1741
1741
1742 def _iftest(self, args):
1742 def _iftest(self, args):
1743 # implements "#if"
1743 # implements "#if"
1744 reqs = []
1744 reqs = []
1745 for arg in args:
1745 for arg in args:
1746 if arg.startswith(b'no-') and arg[3:] in self._allcases:
1746 if arg.startswith(b'no-') and arg[3:] in self._allcases:
1747 if arg[3:] in self._case:
1747 if arg[3:] in self._case:
1748 return False
1748 return False
1749 elif arg in self._allcases:
1749 elif arg in self._allcases:
1750 if arg not in self._case:
1750 if arg not in self._case:
1751 return False
1751 return False
1752 else:
1752 else:
1753 reqs.append(arg)
1753 reqs.append(arg)
1754 self._detectslow(reqs)
1754 self._detectslow(reqs)
1755 return self._hghave(reqs)[0]
1755 return self._hghave(reqs)[0]
1756
1756
1757 def _parsetest(self, lines):
1757 def _parsetest(self, lines):
1758 # We generate a shell script which outputs unique markers to line
1758 # We generate a shell script which outputs unique markers to line
1759 # up script results with our source. These markers include the input
1759 # up script results with our source. These markers include the input
1760 # line number and the last return code.
1760 # line number and the last return code.
1761 salt = b"SALT%d" % time.time()
1761 salt = b"SALT%d" % time.time()
1762
1762
1763 def addsalt(line, inpython):
1763 def addsalt(line, inpython):
1764 if inpython:
1764 if inpython:
1765 script.append(b'%s %d 0\n' % (salt, line))
1765 script.append(b'%s %d 0\n' % (salt, line))
1766 else:
1766 else:
1767 script.append(b'echo %s %d $?\n' % (salt, line))
1767 script.append(b'echo %s %d $?\n' % (salt, line))
1768
1768
1769 activetrace = []
1769 activetrace = []
1770 session = str(uuid.uuid4())
1770 session = str(uuid.uuid4())
1771 if PYTHON3:
1771 if PYTHON3:
1772 session = session.encode('ascii')
1772 session = session.encode('ascii')
1773 hgcatapult = os.getenv('HGTESTCATAPULTSERVERPIPE') or os.getenv(
1773 hgcatapult = os.getenv('HGTESTCATAPULTSERVERPIPE') or os.getenv(
1774 'HGCATAPULTSERVERPIPE'
1774 'HGCATAPULTSERVERPIPE'
1775 )
1775 )
1776
1776
1777 def toggletrace(cmd=None):
1777 def toggletrace(cmd=None):
1778 if not hgcatapult or hgcatapult == os.devnull:
1778 if not hgcatapult or hgcatapult == os.devnull:
1779 return
1779 return
1780
1780
1781 if activetrace:
1781 if activetrace:
1782 script.append(
1782 script.append(
1783 b'echo END %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n'
1783 b'echo END %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n'
1784 % (session, activetrace[0])
1784 % (session, activetrace[0])
1785 )
1785 )
1786 if cmd is None:
1786 if cmd is None:
1787 return
1787 return
1788
1788
1789 if isinstance(cmd, str):
1789 if isinstance(cmd, str):
1790 quoted = shellquote(cmd.strip())
1790 quoted = shellquote(cmd.strip())
1791 else:
1791 else:
1792 quoted = shellquote(cmd.strip().decode('utf8')).encode('utf8')
1792 quoted = shellquote(cmd.strip().decode('utf8')).encode('utf8')
1793 quoted = quoted.replace(b'\\', b'\\\\')
1793 quoted = quoted.replace(b'\\', b'\\\\')
1794 script.append(
1794 script.append(
1795 b'echo START %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n'
1795 b'echo START %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n'
1796 % (session, quoted)
1796 % (session, quoted)
1797 )
1797 )
1798 activetrace[0:] = [quoted]
1798 activetrace[0:] = [quoted]
1799
1799
1800 script = []
1800 script = []
1801
1801
1802 # After we run the shell script, we re-unify the script output
1802 # After we run the shell script, we re-unify the script output
1803 # with non-active parts of the source, with synchronization by our
1803 # with non-active parts of the source, with synchronization by our
1804 # SALT line number markers. The after table contains the non-active
1804 # SALT line number markers. The after table contains the non-active
1805 # components, ordered by line number.
1805 # components, ordered by line number.
1806 after = {}
1806 after = {}
1807
1807
1808 # Expected shell script output.
1808 # Expected shell script output.
1809 expected = {}
1809 expected = {}
1810
1810
1811 pos = prepos = -1
1811 pos = prepos = -1
1812
1812
1813 # True or False when in a true or false conditional section
1813 # True or False when in a true or false conditional section
1814 skipping = None
1814 skipping = None
1815
1815
1816 # We keep track of whether or not we're in a Python block so we
1816 # We keep track of whether or not we're in a Python block so we
1817 # can generate the surrounding doctest magic.
1817 # can generate the surrounding doctest magic.
1818 inpython = False
1818 inpython = False
1819
1819
1820 if self._debug:
1820 if self._debug:
1821 script.append(b'set -x\n')
1821 script.append(b'set -x\n')
1822 if self._hgcommand != b'hg':
1822 if self._hgcommand != b'hg':
1823 script.append(b'alias hg="%s"\n' % self._hgcommand)
1823 script.append(b'alias hg="%s"\n' % self._hgcommand)
1824 if os.getenv('MSYSTEM'):
1824 if os.getenv('MSYSTEM'):
1825 script.append(b'alias pwd="pwd -W"\n')
1825 script.append(b'alias pwd="pwd -W"\n')
1826
1826
1827 if hgcatapult and hgcatapult != os.devnull:
1827 if hgcatapult and hgcatapult != os.devnull:
1828 if PYTHON3:
1828 if PYTHON3:
1829 hgcatapult = hgcatapult.encode('utf8')
1829 hgcatapult = hgcatapult.encode('utf8')
1830 cataname = self.name.encode('utf8')
1830 cataname = self.name.encode('utf8')
1831 else:
1831 else:
1832 cataname = self.name
1832 cataname = self.name
1833
1833
1834 # Kludge: use a while loop to keep the pipe from getting
1834 # Kludge: use a while loop to keep the pipe from getting
1835 # closed by our echo commands. The still-running file gets
1835 # closed by our echo commands. The still-running file gets
1836 # reaped at the end of the script, which causes the while
1836 # reaped at the end of the script, which causes the while
1837 # loop to exit and closes the pipe. Sigh.
1837 # loop to exit and closes the pipe. Sigh.
1838 script.append(
1838 script.append(
1839 b'rtendtracing() {\n'
1839 b'rtendtracing() {\n'
1840 b' echo END %(session)s %(name)s >> %(catapult)s\n'
1840 b' echo END %(session)s %(name)s >> %(catapult)s\n'
1841 b' rm -f "$TESTTMP/.still-running"\n'
1841 b' rm -f "$TESTTMP/.still-running"\n'
1842 b'}\n'
1842 b'}\n'
1843 b'trap "rtendtracing" 0\n'
1843 b'trap "rtendtracing" 0\n'
1844 b'touch "$TESTTMP/.still-running"\n'
1844 b'touch "$TESTTMP/.still-running"\n'
1845 b'while [ -f "$TESTTMP/.still-running" ]; do sleep 1; done '
1845 b'while [ -f "$TESTTMP/.still-running" ]; do sleep 1; done '
1846 b'> %(catapult)s &\n'
1846 b'> %(catapult)s &\n'
1847 b'HGCATAPULTSESSION=%(session)s ; export HGCATAPULTSESSION\n'
1847 b'HGCATAPULTSESSION=%(session)s ; export HGCATAPULTSESSION\n'
1848 b'echo START %(session)s %(name)s >> %(catapult)s\n'
1848 b'echo START %(session)s %(name)s >> %(catapult)s\n'
1849 % {
1849 % {
1850 b'name': cataname,
1850 b'name': cataname,
1851 b'session': session,
1851 b'session': session,
1852 b'catapult': hgcatapult,
1852 b'catapult': hgcatapult,
1853 }
1853 }
1854 )
1854 )
1855
1855
1856 if self._case:
1856 if self._case:
1857 casestr = b'#'.join(self._case)
1857 casestr = b'#'.join(self._case)
1858 if isinstance(casestr, str):
1858 if isinstance(casestr, str):
1859 quoted = shellquote(casestr)
1859 quoted = shellquote(casestr)
1860 else:
1860 else:
1861 quoted = shellquote(casestr.decode('utf8')).encode('utf8')
1861 quoted = shellquote(casestr.decode('utf8')).encode('utf8')
1862 script.append(b'TESTCASE=%s\n' % quoted)
1862 script.append(b'TESTCASE=%s\n' % quoted)
1863 script.append(b'export TESTCASE\n')
1863 script.append(b'export TESTCASE\n')
1864
1864
1865 n = 0
1865 n = 0
1866 for n, l in enumerate(lines):
1866 for n, l in enumerate(lines):
1867 if not l.endswith(b'\n'):
1867 if not l.endswith(b'\n'):
1868 l += b'\n'
1868 l += b'\n'
1869 if l.startswith(b'#require'):
1869 if l.startswith(b'#require'):
1870 lsplit = l.split()
1870 lsplit = l.split()
1871 if len(lsplit) < 2 or lsplit[0] != b'#require':
1871 if len(lsplit) < 2 or lsplit[0] != b'#require':
1872 after.setdefault(pos, []).append(
1872 after.setdefault(pos, []).append(
1873 b' !!! invalid #require\n'
1873 b' !!! invalid #require\n'
1874 )
1874 )
1875 if not skipping:
1875 if not skipping:
1876 haveresult, message = self._hghave(lsplit[1:])
1876 haveresult, message = self._hghave(lsplit[1:])
1877 if not haveresult:
1877 if not haveresult:
1878 script = [b'echo "%s"\nexit 80\n' % message]
1878 script = [b'echo "%s"\nexit 80\n' % message]
1879 break
1879 break
1880 after.setdefault(pos, []).append(l)
1880 after.setdefault(pos, []).append(l)
1881 elif l.startswith(b'#if'):
1881 elif l.startswith(b'#if'):
1882 lsplit = l.split()
1882 lsplit = l.split()
1883 if len(lsplit) < 2 or lsplit[0] != b'#if':
1883 if len(lsplit) < 2 or lsplit[0] != b'#if':
1884 after.setdefault(pos, []).append(b' !!! invalid #if\n')
1884 after.setdefault(pos, []).append(b' !!! invalid #if\n')
1885 if skipping is not None:
1885 if skipping is not None:
1886 after.setdefault(pos, []).append(b' !!! nested #if\n')
1886 after.setdefault(pos, []).append(b' !!! nested #if\n')
1887 skipping = not self._iftest(lsplit[1:])
1887 skipping = not self._iftest(lsplit[1:])
1888 after.setdefault(pos, []).append(l)
1888 after.setdefault(pos, []).append(l)
1889 elif l.startswith(b'#else'):
1889 elif l.startswith(b'#else'):
1890 if skipping is None:
1890 if skipping is None:
1891 after.setdefault(pos, []).append(b' !!! missing #if\n')
1891 after.setdefault(pos, []).append(b' !!! missing #if\n')
1892 skipping = not skipping
1892 skipping = not skipping
1893 after.setdefault(pos, []).append(l)
1893 after.setdefault(pos, []).append(l)
1894 elif l.startswith(b'#endif'):
1894 elif l.startswith(b'#endif'):
1895 if skipping is None:
1895 if skipping is None:
1896 after.setdefault(pos, []).append(b' !!! missing #if\n')
1896 after.setdefault(pos, []).append(b' !!! missing #if\n')
1897 skipping = None
1897 skipping = None
1898 after.setdefault(pos, []).append(l)
1898 after.setdefault(pos, []).append(l)
1899 elif skipping:
1899 elif skipping:
1900 after.setdefault(pos, []).append(l)
1900 after.setdefault(pos, []).append(l)
1901 elif l.startswith(b' >>> '): # python inlines
1901 elif l.startswith(b' >>> '): # python inlines
1902 after.setdefault(pos, []).append(l)
1902 after.setdefault(pos, []).append(l)
1903 prepos = pos
1903 prepos = pos
1904 pos = n
1904 pos = n
1905 if not inpython:
1905 if not inpython:
1906 # We've just entered a Python block. Add the header.
1906 # We've just entered a Python block. Add the header.
1907 inpython = True
1907 inpython = True
1908 addsalt(prepos, False) # Make sure we report the exit code.
1908 addsalt(prepos, False) # Make sure we report the exit code.
1909 script.append(b'"%s" -m heredoctest <<EOF\n' % PYTHON)
1909 script.append(b'"%s" -m heredoctest <<EOF\n' % PYTHON)
1910 addsalt(n, True)
1910 addsalt(n, True)
1911 script.append(l[2:])
1911 script.append(l[2:])
1912 elif l.startswith(b' ... '): # python inlines
1912 elif l.startswith(b' ... '): # python inlines
1913 after.setdefault(prepos, []).append(l)
1913 after.setdefault(prepos, []).append(l)
1914 script.append(l[2:])
1914 script.append(l[2:])
1915 elif l.startswith(b' $ '): # commands
1915 elif l.startswith(b' $ '): # commands
1916 if inpython:
1916 if inpython:
1917 script.append(b'EOF\n')
1917 script.append(b'EOF\n')
1918 inpython = False
1918 inpython = False
1919 after.setdefault(pos, []).append(l)
1919 after.setdefault(pos, []).append(l)
1920 prepos = pos
1920 prepos = pos
1921 pos = n
1921 pos = n
1922 addsalt(n, False)
1922 addsalt(n, False)
1923 rawcmd = l[4:]
1923 rawcmd = l[4:]
1924 cmd = rawcmd.split()
1924 cmd = rawcmd.split()
1925 toggletrace(rawcmd)
1925 toggletrace(rawcmd)
1926 if len(cmd) == 2 and cmd[0] == b'cd':
1926 if len(cmd) == 2 and cmd[0] == b'cd':
1927 rawcmd = b'cd %s || exit 1\n' % cmd[1]
1927 rawcmd = b'cd %s || exit 1\n' % cmd[1]
1928 script.append(rawcmd)
1928 script.append(rawcmd)
1929 elif l.startswith(b' > '): # continuations
1929 elif l.startswith(b' > '): # continuations
1930 after.setdefault(prepos, []).append(l)
1930 after.setdefault(prepos, []).append(l)
1931 script.append(l[4:])
1931 script.append(l[4:])
1932 elif l.startswith(b' '): # results
1932 elif l.startswith(b' '): # results
1933 # Queue up a list of expected results.
1933 # Queue up a list of expected results.
1934 expected.setdefault(pos, []).append(l[2:])
1934 expected.setdefault(pos, []).append(l[2:])
1935 else:
1935 else:
1936 if inpython:
1936 if inpython:
1937 script.append(b'EOF\n')
1937 script.append(b'EOF\n')
1938 inpython = False
1938 inpython = False
1939 # Non-command/result. Queue up for merged output.
1939 # Non-command/result. Queue up for merged output.
1940 after.setdefault(pos, []).append(l)
1940 after.setdefault(pos, []).append(l)
1941
1941
1942 if inpython:
1942 if inpython:
1943 script.append(b'EOF\n')
1943 script.append(b'EOF\n')
1944 if skipping is not None:
1944 if skipping is not None:
1945 after.setdefault(pos, []).append(b' !!! missing #endif\n')
1945 after.setdefault(pos, []).append(b' !!! missing #endif\n')
1946 addsalt(n + 1, False)
1946 addsalt(n + 1, False)
1947 # Need to end any current per-command trace
1947 # Need to end any current per-command trace
1948 if activetrace:
1948 if activetrace:
1949 toggletrace()
1949 toggletrace()
1950 return salt, script, after, expected
1950 return salt, script, after, expected
1951
1951
1952 def _processoutput(self, exitcode, output, salt, after, expected):
1952 def _processoutput(self, exitcode, output, salt, after, expected):
1953 # Merge the script output back into a unified test.
1953 # Merge the script output back into a unified test.
1954 warnonly = WARN_UNDEFINED # 1: not yet; 2: yes; 3: for sure not
1954 warnonly = WARN_UNDEFINED # 1: not yet; 2: yes; 3: for sure not
1955 if exitcode != 0:
1955 if exitcode != 0:
1956 warnonly = WARN_NO
1956 warnonly = WARN_NO
1957
1957
1958 pos = -1
1958 pos = -1
1959 postout = []
1959 postout = []
1960 for out_rawline in output:
1960 for out_rawline in output:
1961 out_line, cmd_line = out_rawline, None
1961 out_line, cmd_line = out_rawline, None
1962 if salt in out_rawline:
1962 if salt in out_rawline:
1963 out_line, cmd_line = out_rawline.split(salt, 1)
1963 out_line, cmd_line = out_rawline.split(salt, 1)
1964
1964
1965 pos, postout, warnonly = self._process_out_line(
1965 pos, postout, warnonly = self._process_out_line(
1966 out_line, pos, postout, expected, warnonly
1966 out_line, pos, postout, expected, warnonly
1967 )
1967 )
1968 pos, postout = self._process_cmd_line(cmd_line, pos, postout, after)
1968 pos, postout = self._process_cmd_line(cmd_line, pos, postout, after)
1969
1969
1970 if pos in after:
1970 if pos in after:
1971 postout += after.pop(pos)
1971 postout += after.pop(pos)
1972
1972
1973 if warnonly == WARN_YES:
1973 if warnonly == WARN_YES:
1974 exitcode = False # Set exitcode to warned.
1974 exitcode = False # Set exitcode to warned.
1975
1975
1976 return exitcode, postout
1976 return exitcode, postout
1977
1977
1978 def _process_out_line(self, out_line, pos, postout, expected, warnonly):
1978 def _process_out_line(self, out_line, pos, postout, expected, warnonly):
1979 while out_line:
1979 while out_line:
1980 if not out_line.endswith(b'\n'):
1980 if not out_line.endswith(b'\n'):
1981 out_line += b' (no-eol)\n'
1981 out_line += b' (no-eol)\n'
1982
1982
1983 # Find the expected output at the current position.
1983 # Find the expected output at the current position.
1984 els = [None]
1984 els = [None]
1985 if expected.get(pos, None):
1985 if expected.get(pos, None):
1986 els = expected[pos]
1986 els = expected[pos]
1987
1987
1988 optional = []
1988 optional = []
1989 for i, el in enumerate(els):
1989 for i, el in enumerate(els):
1990 r = False
1990 r = False
1991 if el:
1991 if el:
1992 r, exact = self.linematch(el, out_line)
1992 r, exact = self.linematch(el, out_line)
1993 if isinstance(r, str):
1993 if isinstance(r, str):
1994 if r == '-glob':
1994 if r == '-glob':
1995 out_line = ''.join(el.rsplit(' (glob)', 1))
1995 out_line = ''.join(el.rsplit(' (glob)', 1))
1996 r = '' # Warn only this line.
1996 r = '' # Warn only this line.
1997 elif r == "retry":
1997 elif r == "retry":
1998 postout.append(b' ' + el)
1998 postout.append(b' ' + el)
1999 else:
1999 else:
2000 log('\ninfo, unknown linematch result: %r\n' % r)
2000 log('\ninfo, unknown linematch result: %r\n' % r)
2001 r = False
2001 r = False
2002 if r:
2002 if r:
2003 els.pop(i)
2003 els.pop(i)
2004 break
2004 break
2005 if el:
2005 if el:
2006 if isoptional(el):
2006 if isoptional(el):
2007 optional.append(i)
2007 optional.append(i)
2008 else:
2008 else:
2009 m = optline.match(el)
2009 m = optline.match(el)
2010 if m:
2010 if m:
2011 conditions = [c for c in m.group(2).split(b' ')]
2011 conditions = [c for c in m.group(2).split(b' ')]
2012
2012
2013 if not self._iftest(conditions):
2013 if not self._iftest(conditions):
2014 optional.append(i)
2014 optional.append(i)
2015 if exact:
2015 if exact:
2016 # Don't allow the line to be matched against a later
2016 # Don't allow the line to be matched against a later
2017 # line in the output
2017 # line in the output
2018 els.pop(i)
2018 els.pop(i)
2019 break
2019 break
2020
2020
2021 if r:
2021 if r:
2022 if r == "retry":
2022 if r == "retry":
2023 continue
2023 continue
2024 # clean up any optional leftovers
2024 # clean up any optional leftovers
2025 for i in optional:
2025 for i in optional:
2026 postout.append(b' ' + els[i])
2026 postout.append(b' ' + els[i])
2027 for i in reversed(optional):
2027 for i in reversed(optional):
2028 del els[i]
2028 del els[i]
2029 postout.append(b' ' + el)
2029 postout.append(b' ' + el)
2030 else:
2030 else:
2031 if self.NEEDESCAPE(out_line):
2031 if self.NEEDESCAPE(out_line):
2032 out_line = TTest._stringescape(
2032 out_line = TTest._stringescape(
2033 b'%s (esc)\n' % out_line.rstrip(b'\n')
2033 b'%s (esc)\n' % out_line.rstrip(b'\n')
2034 )
2034 )
2035 postout.append(b' ' + out_line) # Let diff deal with it.
2035 postout.append(b' ' + out_line) # Let diff deal with it.
2036 if r != '': # If line failed.
2036 if r != '': # If line failed.
2037 warnonly = WARN_NO
2037 warnonly = WARN_NO
2038 elif warnonly == WARN_UNDEFINED:
2038 elif warnonly == WARN_UNDEFINED:
2039 warnonly = WARN_YES
2039 warnonly = WARN_YES
2040 break
2040 break
2041 else:
2041 else:
2042 # clean up any optional leftovers
2042 # clean up any optional leftovers
2043 while expected.get(pos, None):
2043 while expected.get(pos, None):
2044 el = expected[pos].pop(0)
2044 el = expected[pos].pop(0)
2045 if el:
2045 if el:
2046 if not isoptional(el):
2046 if not isoptional(el):
2047 m = optline.match(el)
2047 m = optline.match(el)
2048 if m:
2048 if m:
2049 conditions = [c for c in m.group(2).split(b' ')]
2049 conditions = [c for c in m.group(2).split(b' ')]
2050
2050
2051 if self._iftest(conditions):
2051 if self._iftest(conditions):
2052 # Don't append as optional line
2052 # Don't append as optional line
2053 continue
2053 continue
2054 else:
2054 else:
2055 continue
2055 continue
2056 postout.append(b' ' + el)
2056 postout.append(b' ' + el)
2057 return pos, postout, warnonly
2057 return pos, postout, warnonly
2058
2058
2059 def _process_cmd_line(self, cmd_line, pos, postout, after):
2059 def _process_cmd_line(self, cmd_line, pos, postout, after):
2060 """process a "command" part of a line from unified test output"""
2060 """process a "command" part of a line from unified test output"""
2061 if cmd_line:
2061 if cmd_line:
2062 # Add on last return code.
2062 # Add on last return code.
2063 ret = int(cmd_line.split()[1])
2063 ret = int(cmd_line.split()[1])
2064 if ret != 0:
2064 if ret != 0:
2065 postout.append(b' [%d]\n' % ret)
2065 postout.append(b' [%d]\n' % ret)
2066 if pos in after:
2066 if pos in after:
2067 # Merge in non-active test bits.
2067 # Merge in non-active test bits.
2068 postout += after.pop(pos)
2068 postout += after.pop(pos)
2069 pos = int(cmd_line.split()[0])
2069 pos = int(cmd_line.split()[0])
2070 return pos, postout
2070 return pos, postout
2071
2071
2072 @staticmethod
2072 @staticmethod
2073 def rematch(el, l):
2073 def rematch(el, l):
2074 try:
2074 try:
2075 # parse any flags at the beginning of the regex. Only 'i' is
2075 # parse any flags at the beginning of the regex. Only 'i' is
2076 # supported right now, but this should be easy to extend.
2076 # supported right now, but this should be easy to extend.
2077 flags, el = re.match(br'^(\(\?i\))?(.*)', el).groups()[0:2]
2077 flags, el = re.match(br'^(\(\?i\))?(.*)', el).groups()[0:2]
2078 flags = flags or b''
2078 flags = flags or b''
2079 el = flags + b'(?:' + el + b')'
2079 el = flags + b'(?:' + el + b')'
2080 # use \Z to ensure that the regex matches to the end of the string
2080 # use \Z to ensure that the regex matches to the end of the string
2081 if os.name == 'nt':
2081 if os.name == 'nt':
2082 return re.match(el + br'\r?\n\Z', l)
2082 return re.match(el + br'\r?\n\Z', l)
2083 return re.match(el + br'\n\Z', l)
2083 return re.match(el + br'\n\Z', l)
2084 except re.error:
2084 except re.error:
2085 # el is an invalid regex
2085 # el is an invalid regex
2086 return False
2086 return False
2087
2087
2088 @staticmethod
2088 @staticmethod
2089 def globmatch(el, l):
2089 def globmatch(el, l):
2090 # The only supported special characters are * and ? plus / which also
2090 # The only supported special characters are * and ? plus / which also
2091 # matches \ on Windows. Escaping of these characters is supported.
2091 # matches \ on Windows. Escaping of these characters is supported.
2092 if el + b'\n' == l:
2092 if el + b'\n' == l:
2093 if os.altsep:
2093 if os.altsep:
2094 # matching on "/" is not needed for this line
2094 # matching on "/" is not needed for this line
2095 for pat in checkcodeglobpats:
2095 for pat in checkcodeglobpats:
2096 if pat.match(el):
2096 if pat.match(el):
2097 return True
2097 return True
2098 return b'-glob'
2098 return b'-glob'
2099 return True
2099 return True
2100 el = el.replace(b'$LOCALIP', b'*')
2100 el = el.replace(b'$LOCALIP', b'*')
2101 i, n = 0, len(el)
2101 i, n = 0, len(el)
2102 res = b''
2102 res = b''
2103 while i < n:
2103 while i < n:
2104 c = el[i : i + 1]
2104 c = el[i : i + 1]
2105 i += 1
2105 i += 1
2106 if c == b'\\' and i < n and el[i : i + 1] in b'*?\\/':
2106 if c == b'\\' and i < n and el[i : i + 1] in b'*?\\/':
2107 res += el[i - 1 : i + 1]
2107 res += el[i - 1 : i + 1]
2108 i += 1
2108 i += 1
2109 elif c == b'*':
2109 elif c == b'*':
2110 res += b'.*'
2110 res += b'.*'
2111 elif c == b'?':
2111 elif c == b'?':
2112 res += b'.'
2112 res += b'.'
2113 elif c == b'/' and os.altsep:
2113 elif c == b'/' and os.altsep:
2114 res += b'[/\\\\]'
2114 res += b'[/\\\\]'
2115 else:
2115 else:
2116 res += re.escape(c)
2116 res += re.escape(c)
2117 return TTest.rematch(res, l)
2117 return TTest.rematch(res, l)
2118
2118
2119 def linematch(self, el, l):
2119 def linematch(self, el, l):
2120 if el == l: # perfect match (fast)
2120 if el == l: # perfect match (fast)
2121 return True, True
2121 return True, True
2122 retry = False
2122 retry = False
2123 if isoptional(el):
2123 if isoptional(el):
2124 retry = "retry"
2124 retry = "retry"
2125 el = el[: -len(MARK_OPTIONAL)] + b"\n"
2125 el = el[: -len(MARK_OPTIONAL)] + b"\n"
2126 else:
2126 else:
2127 m = optline.match(el)
2127 m = optline.match(el)
2128 if m:
2128 if m:
2129 conditions = [c for c in m.group(2).split(b' ')]
2129 conditions = [c for c in m.group(2).split(b' ')]
2130
2130
2131 el = m.group(1) + b"\n"
2131 el = m.group(1) + b"\n"
2132 if not self._iftest(conditions):
2132 if not self._iftest(conditions):
2133 # listed feature missing, should not match
2133 # listed feature missing, should not match
2134 return "retry", False
2134 return "retry", False
2135
2135
2136 if el.endswith(b" (esc)\n"):
2136 if el.endswith(b" (esc)\n"):
2137 if PYTHON3:
2137 if PYTHON3:
2138 el = el[:-7].decode('unicode_escape') + '\n'
2138 el = el[:-7].decode('unicode_escape') + '\n'
2139 el = el.encode('latin-1')
2139 el = el.encode('latin-1')
2140 else:
2140 else:
2141 el = el[:-7].decode('string-escape') + '\n'
2141 el = el[:-7].decode('string-escape') + '\n'
2142 if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
2142 if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
2143 return True, True
2143 return True, True
2144 if el.endswith(b" (re)\n"):
2144 if el.endswith(b" (re)\n"):
2145 return (TTest.rematch(el[:-6], l) or retry), False
2145 return (TTest.rematch(el[:-6], l) or retry), False
2146 if el.endswith(b" (glob)\n"):
2146 if el.endswith(b" (glob)\n"):
2147 # ignore '(glob)' added to l by 'replacements'
2147 # ignore '(glob)' added to l by 'replacements'
2148 if l.endswith(b" (glob)\n"):
2148 if l.endswith(b" (glob)\n"):
2149 l = l[:-8] + b"\n"
2149 l = l[:-8] + b"\n"
2150 return (TTest.globmatch(el[:-8], l) or retry), False
2150 return (TTest.globmatch(el[:-8], l) or retry), False
2151 if os.altsep:
2151 if os.altsep:
2152 _l = l.replace(b'\\', b'/')
2152 _l = l.replace(b'\\', b'/')
2153 if el == _l or os.name == 'nt' and el[:-1] + b'\r\n' == _l:
2153 if el == _l or os.name == 'nt' and el[:-1] + b'\r\n' == _l:
2154 return True, True
2154 return True, True
2155 return retry, True
2155 return retry, True
2156
2156
2157 @staticmethod
2157 @staticmethod
2158 def parsehghaveoutput(lines):
2158 def parsehghaveoutput(lines):
2159 """Parse hghave log lines.
2159 """Parse hghave log lines.
2160
2160
2161 Return tuple of lists (missing, failed):
2161 Return tuple of lists (missing, failed):
2162 * the missing/unknown features
2162 * the missing/unknown features
2163 * the features for which existence check failed"""
2163 * the features for which existence check failed"""
2164 missing = []
2164 missing = []
2165 failed = []
2165 failed = []
2166 for line in lines:
2166 for line in lines:
2167 if line.startswith(TTest.SKIPPED_PREFIX):
2167 if line.startswith(TTest.SKIPPED_PREFIX):
2168 line = line.splitlines()[0]
2168 line = line.splitlines()[0]
2169 missing.append(_bytes2sys(line[len(TTest.SKIPPED_PREFIX) :]))
2169 missing.append(_bytes2sys(line[len(TTest.SKIPPED_PREFIX) :]))
2170 elif line.startswith(TTest.FAILED_PREFIX):
2170 elif line.startswith(TTest.FAILED_PREFIX):
2171 line = line.splitlines()[0]
2171 line = line.splitlines()[0]
2172 failed.append(_bytes2sys(line[len(TTest.FAILED_PREFIX) :]))
2172 failed.append(_bytes2sys(line[len(TTest.FAILED_PREFIX) :]))
2173
2173
2174 return missing, failed
2174 return missing, failed
2175
2175
2176 @staticmethod
2176 @staticmethod
2177 def _escapef(m):
2177 def _escapef(m):
2178 return TTest.ESCAPEMAP[m.group(0)]
2178 return TTest.ESCAPEMAP[m.group(0)]
2179
2179
2180 @staticmethod
2180 @staticmethod
2181 def _stringescape(s):
2181 def _stringescape(s):
2182 return TTest.ESCAPESUB(TTest._escapef, s)
2182 return TTest.ESCAPESUB(TTest._escapef, s)
2183
2183
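# Standalone sketch (Python 3 and a POSIX sh assumed; not used by the
# harness) of the SALT marker trick that TTest._parsetest() and
# _processoutput() rely on above: every command from the .t file is followed
# by an "echo SALT <lineno> $?" so the captured stream can be cut back into
# per-command chunks carrying the source line number and the exit code.
def _salt_demo():
    import subprocess

    salt = 'SALT0'
    script = (
        'echo hello\n'
        'echo %(salt)s 1 $?\n'  # marker for the command at source line 1
        'false\n'
        'echo %(salt)s 3 $?\n'  # marker for the command at source line 3
    ) % {'salt': salt}
    out = subprocess.run(
        ['sh', '-c', script], stdout=subprocess.PIPE, text=True
    ).stdout
    for chunk in out.split(salt)[1:]:
        lineno, ret = chunk.split()[:2]
        print('command at line %s exited with %s' % (lineno, ret))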
2184
2184
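# Standalone sketch of the "(esc)" round trip implemented by
# TTest._stringescape() and linematch() above (the sample byte string is
# made up): unprintable bytes in actual output are written out as \xNN
# escapes plus an " (esc)" marker, and the expected side is decoded with
# unicode_escape before comparing.
def _esc_demo():
    raw = b'progress\x1b[Kdone\n'
    # escape the ESC byte the way ESCAPEMAP renders unprintable bytes
    escaped = b'%s (esc)\n' % raw.rstrip(b'\n').replace(b'\x1b', br'\x1b')
    # linematch() strips the marker and undoes the escaping before matching
    decoded = escaped[:-7].decode('unicode_escape').encode('latin-1') + b'\n'
    assert decoded == raw
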
2185 iolock = threading.RLock()
2185 iolock = threading.RLock()
2186 firstlock = threading.RLock()
2186 firstlock = threading.RLock()
2187 firsterror = False
2187 firsterror = False
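
# Minimal sketch of the locking convention used by the classes below (the
# helper name is hypothetical): every thread that wants to touch the console
# grabs iolock first, so one-character progress markers and multi-line diffs
# from concurrent tests never interleave.
def _locked_write_sketch(stream, text):
    with iolock:
        stream.write(text)
        stream.flush()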
2188
2188
2189
2189
2190 class TestResult(unittest._TextTestResult):
2190 class TestResult(unittest._TextTestResult):
2191 """Holds results when executing via unittest."""
2191 """Holds results when executing via unittest."""
2192
2192
2193 # Don't worry too much about accessing the non-public _TextTestResult.
2193 # Don't worry too much about accessing the non-public _TextTestResult.
2194 # It is relatively common in Python testing tools.
2194 # It is relatively common in Python testing tools.
2195 def __init__(self, options, *args, **kwargs):
2195 def __init__(self, options, *args, **kwargs):
2196 super(TestResult, self).__init__(*args, **kwargs)
2196 super(TestResult, self).__init__(*args, **kwargs)
2197
2197
2198 self._options = options
2198 self._options = options
2199
2199
2200 # unittest.TestResult didn't have skipped until 2.7. We need to
2200 # unittest.TestResult didn't have skipped until 2.7. We need to
2201 # polyfill it.
2201 # polyfill it.
2202 self.skipped = []
2202 self.skipped = []
2203
2203
2204 # We have a custom "ignored" result that isn't present in any Python
2204 # We have a custom "ignored" result that isn't present in any Python
2205 # unittest implementation. It is very similar to skipped. It may make
2205 # unittest implementation. It is very similar to skipped. It may make
2206 # sense to map it into skip some day.
2206 # sense to map it into skip some day.
2207 self.ignored = []
2207 self.ignored = []
2208
2208
2209 self.times = []
2209 self.times = []
2210 self._firststarttime = None
2210 self._firststarttime = None
2211 # Data stored for the benefit of generating xunit reports.
2211 # Data stored for the benefit of generating xunit reports.
2212 self.successes = []
2212 self.successes = []
2213 self.faildata = {}
2213 self.faildata = {}
2214
2214
2215 if options.color == 'auto':
2215 if options.color == 'auto':
2216 isatty = self.stream.isatty()
2216 isatty = self.stream.isatty()
2217 # For some reason, redirecting stdout on Windows disables the ANSI
2217 # For some reason, redirecting stdout on Windows disables the ANSI
2218 # color processing of stderr, which is what is used to print the
2218 # color processing of stderr, which is what is used to print the
2219 # output. Therefore, both must be tty on Windows to enable color.
2219 # output. Therefore, both must be tty on Windows to enable color.
2220 if os.name == 'nt':
2220 if os.name == 'nt':
2221 isatty = isatty and sys.stdout.isatty()
2221 isatty = isatty and sys.stdout.isatty()
2222 self.color = pygmentspresent and isatty
2222 self.color = pygmentspresent and isatty
2223 elif options.color == 'never':
2223 elif options.color == 'never':
2224 self.color = False
2224 self.color = False
2225 else: # 'always', for testing purposes
2225 else: # 'always', for testing purposes
2226 self.color = pygmentspresent
2226 self.color = pygmentspresent
2227
2227
2228 def onStart(self, test):
2228 def onStart(self, test):
2229 """Can be overriden by custom TestResult"""
2229 """Can be overriden by custom TestResult"""
2230
2230
2231 def onEnd(self):
2231 def onEnd(self):
2232 """Can be overriden by custom TestResult"""
2232 """Can be overriden by custom TestResult"""
2233
2233
2234 def addFailure(self, test, reason):
2234 def addFailure(self, test, reason):
2235 self.failures.append((test, reason))
2235 self.failures.append((test, reason))
2236
2236
2237 if self._options.first:
2237 if self._options.first:
2238 self.stop()
2238 self.stop()
2239 else:
2239 else:
2240 with iolock:
2240 with iolock:
2241 if reason == "timed out":
2241 if reason == "timed out":
2242 self.stream.write('t')
2242 self.stream.write('t')
2243 else:
2243 else:
2244 if not self._options.nodiff:
2244 if not self._options.nodiff:
2245 self.stream.write('\n')
2245 self.stream.write('\n')
2246 # Exclude the '\n' from highlighting to lex correctly
2246 # Exclude the '\n' from highlighting to lex correctly
2247 formatted = 'ERROR: %s output changed\n' % test
2247 formatted = 'ERROR: %s output changed\n' % test
2248 self.stream.write(highlightmsg(formatted, self.color))
2248 self.stream.write(highlightmsg(formatted, self.color))
2249 self.stream.write('!')
2249 self.stream.write('!')
2250
2250
2251 self.stream.flush()
2251 self.stream.flush()
2252
2252
2253 def addSuccess(self, test):
2253 def addSuccess(self, test):
2254 with iolock:
2254 with iolock:
2255 super(TestResult, self).addSuccess(test)
2255 super(TestResult, self).addSuccess(test)
2256 self.successes.append(test)
2256 self.successes.append(test)
2257
2257
2258 def addError(self, test, err):
2258 def addError(self, test, err):
2259 super(TestResult, self).addError(test, err)
2259 super(TestResult, self).addError(test, err)
2260 if self._options.first:
2260 if self._options.first:
2261 self.stop()
2261 self.stop()
2262
2262
2263 # Polyfill.
2263 # Polyfill.
2264 def addSkip(self, test, reason):
2264 def addSkip(self, test, reason):
2265 self.skipped.append((test, reason))
2265 self.skipped.append((test, reason))
2266 with iolock:
2266 with iolock:
2267 if self.showAll:
2267 if self.showAll:
2268 self.stream.writeln('skipped %s' % reason)
2268 self.stream.writeln('skipped %s' % reason)
2269 else:
2269 else:
2270 self.stream.write('s')
2270 self.stream.write('s')
2271 self.stream.flush()
2271 self.stream.flush()
2272
2272
2273 def addIgnore(self, test, reason):
2273 def addIgnore(self, test, reason):
2274 self.ignored.append((test, reason))
2274 self.ignored.append((test, reason))
2275 with iolock:
2275 with iolock:
2276 if self.showAll:
2276 if self.showAll:
2277 self.stream.writeln('ignored %s' % reason)
2277 self.stream.writeln('ignored %s' % reason)
2278 else:
2278 else:
2279 if reason not in ('not retesting', "doesn't match keyword"):
2279 if reason not in ('not retesting', "doesn't match keyword"):
2280 self.stream.write('i')
2280 self.stream.write('i')
2281 else:
2281 else:
2282 self.testsRun += 1
2282 self.testsRun += 1
2283 self.stream.flush()
2283 self.stream.flush()
2284
2284
2285 def addOutputMismatch(self, test, ret, got, expected):
2285 def addOutputMismatch(self, test, ret, got, expected):
2286 """Record a mismatch in test output for a particular test."""
2286 """Record a mismatch in test output for a particular test."""
2287 if self.shouldStop or firsterror:
2287 if self.shouldStop or firsterror:
2288 # don't print, some other test case already failed and
2288 # don't print, some other test case already failed and
2289 # printed, we're just stale and probably failed due to our
2289 # printed, we're just stale and probably failed due to our
2290 # temp dir getting cleaned up.
2290 # temp dir getting cleaned up.
2291 return
2291 return
2292
2292
2293 accepted = False
2293 accepted = False
2294 lines = []
2294 lines = []
2295
2295
2296 with iolock:
2296 with iolock:
2297 if self._options.nodiff:
2297 if self._options.nodiff:
2298 pass
2298 pass
2299 elif self._options.view:
2299 elif self._options.view:
2300 v = self._options.view
2300 v = self._options.view
2301 subprocess.call(
2301 subprocess.call(
2302 r'"%s" "%s" "%s"'
2302 r'"%s" "%s" "%s"'
2303 % (v, _bytes2sys(test.refpath), _bytes2sys(test.errpath)),
2303 % (v, _bytes2sys(test.refpath), _bytes2sys(test.errpath)),
2304 shell=True,
2304 shell=True,
2305 )
2305 )
2306 else:
2306 else:
2307 servefail, lines = getdiff(
2307 servefail, lines = getdiff(
2308 expected, got, test.refpath, test.errpath
2308 expected, got, test.refpath, test.errpath
2309 )
2309 )
2310 self.stream.write('\n')
2310 self.stream.write('\n')
2311 for line in lines:
2311 for line in lines:
2312 line = highlightdiff(line, self.color)
2312 line = highlightdiff(line, self.color)
2313 if PYTHON3:
2313 if PYTHON3:
2314 self.stream.flush()
2314 self.stream.flush()
2315 self.stream.buffer.write(line)
2315 self.stream.buffer.write(line)
2316 self.stream.buffer.flush()
2316 self.stream.buffer.flush()
2317 else:
2317 else:
2318 self.stream.write(line)
2318 self.stream.write(line)
2319 self.stream.flush()
2319 self.stream.flush()
2320
2320
2321 if servefail:
2321 if servefail:
2322 raise test.failureException(
2322 raise test.failureException(
2323 'server failed to start (HGPORT=%s)' % test._startport
2323 'server failed to start (HGPORT=%s)' % test._startport
2324 )
2324 )
2325
2325
2326 # handle interactive prompt without releasing iolock
2326 # handle interactive prompt without releasing iolock
2327 if self._options.interactive:
2327 if self._options.interactive:
2328 if test.readrefout() != expected:
2328 if test.readrefout() != expected:
2329 self.stream.write(
2329 self.stream.write(
2330 'Reference output has changed (run again to prompt '
2330 'Reference output has changed (run again to prompt '
2331 'changes)'
2331 'changes)'
2332 )
2332 )
2333 else:
2333 else:
2334 self.stream.write('Accept this change? [y/N] ')
2334 self.stream.write('Accept this change? [y/N] ')
2335 self.stream.flush()
2335 self.stream.flush()
2336 answer = sys.stdin.readline().strip()
2336 answer = sys.stdin.readline().strip()
2337 if answer.lower() in ('y', 'yes'):
2337 if answer.lower() in ('y', 'yes'):
2338 if test.path.endswith(b'.t'):
2338 if test.path.endswith(b'.t'):
2339 rename(test.errpath, test.path)
2339 rename(test.errpath, test.path)
2340 else:
2340 else:
2341 rename(test.errpath, b'%s.out' % test.path)
2341 rename(test.errpath, b'%s.out' % test.path)
2342 accepted = True
2342 accepted = True
2343 if not accepted:
2343 if not accepted:
2344 self.faildata[test.name] = b''.join(lines)
2344 self.faildata[test.name] = b''.join(lines)
2345
2345
2346 return accepted
2346 return accepted
2347
2347
2348 def startTest(self, test):
2348 def startTest(self, test):
2349 super(TestResult, self).startTest(test)
2349 super(TestResult, self).startTest(test)
2350
2350
2351 # os.times() reports the user and system CPU time consumed by child
2351 # os.times() reports the user and system CPU time consumed by child
2352 # processes, along with the elapsed real time taken by the process.
2352 # processes, along with the elapsed real time taken by the process.
2353 # Those child-time fields are only filled in on Unix-like systems and
2353 # Those child-time fields are only filled in on Unix-like systems and
2354 # not on Windows, which is why we fall back to another function for
2354 # not on Windows, which is why we fall back to another function for
2355 # the wall time calculation.
2355 # the wall time calculation.
2356 test.started_times = os.times()
2356 test.started_times = os.times()
2357 # TODO use a monotonic clock once support for Python 2.7 is dropped.
2357 # TODO use a monotonic clock once support for Python 2.7 is dropped.
2358 test.started_time = time.time()
2358 test.started_time = time.time()
2359 if self._firststarttime is None: # thread racy but irrelevant
2359 if self._firststarttime is None: # thread racy but irrelevant
2360 self._firststarttime = test.started_time
2360 self._firststarttime = test.started_time
2361
2361
2362 def stopTest(self, test, interrupted=False):
2362 def stopTest(self, test, interrupted=False):
2363 super(TestResult, self).stopTest(test)
2363 super(TestResult, self).stopTest(test)
2364
2364
2365 test.stopped_times = os.times()
2365 test.stopped_times = os.times()
2366 stopped_time = time.time()
2366 stopped_time = time.time()
2367
2367
2368 starttime = test.started_times
2368 starttime = test.started_times
2369 endtime = test.stopped_times
2369 endtime = test.stopped_times
2370 origin = self._firststarttime
2370 origin = self._firststarttime
2371 self.times.append(
2371 self.times.append(
2372 (
2372 (
2373 test.name,
2373 test.name,
2374 endtime[2] - starttime[2], # user space CPU time
2374 endtime[2] - starttime[2], # user space CPU time
2375 endtime[3] - starttime[3], # sys space CPU time
2375 endtime[3] - starttime[3], # sys space CPU time
2376 stopped_time - test.started_time, # real time
2376 stopped_time - test.started_time, # real time
2377 test.started_time - origin, # start date in run context
2377 test.started_time - origin, # start date in run context
2378 stopped_time - origin, # end date in run context
2378 stopped_time - origin, # end date in run context
2379 )
2379 )
2380 )
2380 )
2381
2381
2382 if interrupted:
2382 if interrupted:
2383 with iolock:
2383 with iolock:
2384 self.stream.writeln(
2384 self.stream.writeln(
2385 'INTERRUPTED: %s (after %d seconds)'
2385 'INTERRUPTED: %s (after %d seconds)'
2386 % (test.name, self.times[-1][3])
2386 % (test.name, self.times[-1][3])
2387 )
2387 )
2388
2388
2389
2389
2390 def getTestResult():
2390 def getTestResult():
2391 """
2391 """
2392 Returns the relevant test result
2392 Returns the relevant test result
2393 """
2393 """
2394 if "CUSTOM_TEST_RESULT" in os.environ:
2394 if "CUSTOM_TEST_RESULT" in os.environ:
2395 testresultmodule = __import__(os.environ["CUSTOM_TEST_RESULT"])
2395 testresultmodule = __import__(os.environ["CUSTOM_TEST_RESULT"])
2396 return testresultmodule.TestResult
2396 return testresultmodule.TestResult
2397 else:
2397 else:
2398 return TestResult
2398 return TestResult
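
# Illustrative restatement of the hook above; the "myresult" module name is
# hypothetical and not shipped with Mercurial. A wrapper harness can provide
# its own result class and select it through the environment:
#
#   CUSTOM_TEST_RESULT=myresult ./run-tests.py test-example.t
#
# which makes getTestResult() return __import__('myresult').TestResult.
def _select_result_class_sketch(environ):
    # mirrors getTestResult(), but takes the environment as a parameter so
    # the behaviour is easy to exercise without touching os.environ
    if "CUSTOM_TEST_RESULT" in environ:
        return __import__(environ["CUSTOM_TEST_RESULT"]).TestResult
    return TestResult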
2399
2399
2400
2400
2401 class TestSuite(unittest.TestSuite):
2401 class TestSuite(unittest.TestSuite):
2402 """Custom unittest TestSuite that knows how to execute Mercurial tests."""
2402 """Custom unittest TestSuite that knows how to execute Mercurial tests."""
2403
2403
2404 def __init__(
2404 def __init__(
2405 self,
2405 self,
2406 testdir,
2406 testdir,
2407 jobs=1,
2407 jobs=1,
2408 whitelist=None,
2408 whitelist=None,
2409 blacklist=None,
2409 blacklist=None,
2410 keywords=None,
2410 keywords=None,
2411 loop=False,
2411 loop=False,
2412 runs_per_test=1,
2412 runs_per_test=1,
2413 loadtest=None,
2413 loadtest=None,
2414 showchannels=False,
2414 showchannels=False,
2415 *args,
2415 *args,
2416 **kwargs
2416 **kwargs
2417 ):
2417 ):
2418 """Create a new instance that can run tests with a configuration.
2418 """Create a new instance that can run tests with a configuration.
2419
2419
2420 testdir specifies the directory where tests are executed from. This
2420 testdir specifies the directory where tests are executed from. This
2421 is typically the ``tests`` directory from Mercurial's source
2421 is typically the ``tests`` directory from Mercurial's source
2422 repository.
2422 repository.
2423
2423
2424 jobs specifies the number of jobs to run concurrently. Each test
2424 jobs specifies the number of jobs to run concurrently. Each test
2425 executes on its own thread. Tests actually spawn new processes, so
2425 executes on its own thread. Tests actually spawn new processes, so
2426 state mutation should not be an issue.
2426 state mutation should not be an issue.
2427
2427
2428 If there is only one job, it will use the main thread.
2428 If there is only one job, it will use the main thread.
2429
2429
2430 whitelist and blacklist denote tests that have been whitelisted and
2430 whitelist and blacklist denote tests that have been whitelisted and
2431 blacklisted, respectively. These arguments don't belong in TestSuite.
2431 blacklisted, respectively. These arguments don't belong in TestSuite.
2432 Instead, whitelist and blacklist should be handled by the thing that
2432 Instead, whitelist and blacklist should be handled by the thing that
2433 populates the TestSuite with tests. They are present to preserve
2433 populates the TestSuite with tests. They are present to preserve
2434 backwards-compatible behavior, which reports skipped tests as part
2434 backwards-compatible behavior, which reports skipped tests as part
2435 of the results.
2435 of the results.
2436
2436
2437 keywords denotes key words that will be used to filter which tests
2437 keywords denotes key words that will be used to filter which tests
2438 to execute. This arguably belongs outside of TestSuite.
2438 to execute. This arguably belongs outside of TestSuite.
2439
2439
2440 loop denotes whether to loop over tests forever.
2440 loop denotes whether to loop over tests forever.
2441 """
2441 """
2442 super(TestSuite, self).__init__(*args, **kwargs)
2442 super(TestSuite, self).__init__(*args, **kwargs)
2443
2443
2444 self._jobs = jobs
2444 self._jobs = jobs
2445 self._whitelist = whitelist
2445 self._whitelist = whitelist
2446 self._blacklist = blacklist
2446 self._blacklist = blacklist
2447 self._keywords = keywords
2447 self._keywords = keywords
2448 self._loop = loop
2448 self._loop = loop
2449 self._runs_per_test = runs_per_test
2449 self._runs_per_test = runs_per_test
2450 self._loadtest = loadtest
2450 self._loadtest = loadtest
2451 self._showchannels = showchannels
2451 self._showchannels = showchannels
2452
2452
2453 def run(self, result):
2453 def run(self, result):
2454 # We have a number of filters that need to be applied. We do this
2454 # We have a number of filters that need to be applied. We do this
2455 # here instead of inside Test because it makes the running logic for
2455 # here instead of inside Test because it makes the running logic for
2456 # Test simpler.
2456 # Test simpler.
2457 tests = []
2457 tests = []
2458 num_tests = [0]
2458 num_tests = [0]
2459 for test in self._tests:
2459 for test in self._tests:
2460
2460
2461 def get():
2461 def get():
2462 num_tests[0] += 1
2462 num_tests[0] += 1
2463 if getattr(test, 'should_reload', False):
2463 if getattr(test, 'should_reload', False):
2464 return self._loadtest(test, num_tests[0])
2464 return self._loadtest(test, num_tests[0])
2465 return test
2465 return test
2466
2466
2467 if not os.path.exists(test.path):
2467 if not os.path.exists(test.path):
2468 result.addSkip(test, "Doesn't exist")
2468 result.addSkip(test, "Doesn't exist")
2469 continue
2469 continue
2470
2470
2471 is_whitelisted = self._whitelist and (
2471 is_whitelisted = self._whitelist and (
2472 test.relpath in self._whitelist or test.bname in self._whitelist
2472 test.relpath in self._whitelist or test.bname in self._whitelist
2473 )
2473 )
2474 if not is_whitelisted:
2474 if not is_whitelisted:
2475 is_blacklisted = self._blacklist and (
2475 is_blacklisted = self._blacklist and (
2476 test.relpath in self._blacklist
2476 test.relpath in self._blacklist
2477 or test.bname in self._blacklist
2477 or test.bname in self._blacklist
2478 )
2478 )
2479 if is_blacklisted:
2479 if is_blacklisted:
2480 result.addSkip(test, 'blacklisted')
2480 result.addSkip(test, 'blacklisted')
2481 continue
2481 continue
2482 if self._keywords:
2482 if self._keywords:
2483 with open(test.path, 'rb') as f:
2483 with open(test.path, 'rb') as f:
2484 t = f.read().lower() + test.bname.lower()
2484 t = f.read().lower() + test.bname.lower()
2485 ignored = False
2485 ignored = False
2486 for k in self._keywords.lower().split():
2486 for k in self._keywords.lower().split():
2487 if k not in t:
2487 if k not in t:
2488 result.addIgnore(test, "doesn't match keyword")
2488 result.addIgnore(test, "doesn't match keyword")
2489 ignored = True
2489 ignored = True
2490 break
2490 break
2491
2491
2492 if ignored:
2492 if ignored:
2493 continue
2493 continue
2494 for _ in xrange(self._runs_per_test):
2494 for _ in xrange(self._runs_per_test):
2495 tests.append(get())
2495 tests.append(get())
2496
2496
2497 runtests = list(tests)
2497 runtests = list(tests)
2498 done = queue.Queue()
2498 done = queue.Queue()
2499 running = 0
2499 running = 0
2500
2500
2501 channels = [""] * self._jobs
2501 channels = [""] * self._jobs
2502
2502
2503 def job(test, result):
2503 def job(test, result):
2504 for n, v in enumerate(channels):
2504 for n, v in enumerate(channels):
2505 if not v:
2505 if not v:
2506 channel = n
2506 channel = n
2507 break
2507 break
2508 else:
2508 else:
2509 raise ValueError('Could not find output channel')
2509 raise ValueError('Could not find output channel')
2510 channels[channel] = "=" + test.name[5:].split(".")[0]
2510 channels[channel] = "=" + test.name[5:].split(".")[0]
2511 try:
2511 try:
2512 test(result)
2512 test(result)
2513 done.put(None)
2513 done.put(None)
2514 except KeyboardInterrupt:
2514 except KeyboardInterrupt:
2515 pass
2515 pass
2516 except: # re-raises
2516 except: # re-raises
2517 done.put(('!', test, 'run-test raised an error, see traceback'))
2517 done.put(('!', test, 'run-test raised an error, see traceback'))
2518 raise
2518 raise
2519 finally:
2519 finally:
2520 try:
2520 try:
2521 channels[channel] = ''
2521 channels[channel] = ''
2522 except IndexError:
2522 except IndexError:
2523 pass
2523 pass
2524
2524
2525 def stat():
2525 def stat():
2526 count = 0
2526 count = 0
2527 while channels:
2527 while channels:
2528 d = '\n%03s ' % count
2528 d = '\n%03s ' % count
2529 for n, v in enumerate(channels):
2529 for n, v in enumerate(channels):
2530 if v:
2530 if v:
2531 d += v[0]
2531 d += v[0]
2532 channels[n] = v[1:] or '.'
2532 channels[n] = v[1:] or '.'
2533 else:
2533 else:
2534 d += ' '
2534 d += ' '
2535 d += ' '
2535 d += ' '
2536 with iolock:
2536 with iolock:
2537 sys.stdout.write(d + ' ')
2537 sys.stdout.write(d + ' ')
2538 sys.stdout.flush()
2538 sys.stdout.flush()
2539 for x in xrange(10):
2539 for x in xrange(10):
2540 if channels:
2540 if channels:
2541 time.sleep(0.1)
2541 time.sleep(0.1)
2542 count += 1
2542 count += 1
2543
2543
2544 stoppedearly = False
2544 stoppedearly = False
2545
2545
2546 if self._showchannels:
2546 if self._showchannels:
2547 statthread = threading.Thread(target=stat, name="stat")
2547 statthread = threading.Thread(target=stat, name="stat")
2548 statthread.start()
2548 statthread.start()
2549
2549
2550 try:
2550 try:
2551 while tests or running:
2551 while tests or running:
2552 if not done.empty() or running == self._jobs or not tests:
2552 if not done.empty() or running == self._jobs or not tests:
2553 try:
2553 try:
2554 done.get(True, 1)
2554 done.get(True, 1)
2555 running -= 1
2555 running -= 1
2556 if result and result.shouldStop:
2556 if result and result.shouldStop:
2557 stoppedearly = True
2557 stoppedearly = True
2558 break
2558 break
2559 except queue.Empty:
2559 except queue.Empty:
2560 continue
2560 continue
2561 if tests and not running == self._jobs:
2561 if tests and not running == self._jobs:
2562 test = tests.pop(0)
2562 test = tests.pop(0)
2563 if self._loop:
2563 if self._loop:
2564 if getattr(test, 'should_reload', False):
2564 if getattr(test, 'should_reload', False):
2565 num_tests[0] += 1
2565 num_tests[0] += 1
2566 tests.append(self._loadtest(test, num_tests[0]))
2566 tests.append(self._loadtest(test, num_tests[0]))
2567 else:
2567 else:
2568 tests.append(test)
2568 tests.append(test)
2569 if self._jobs == 1:
2569 if self._jobs == 1:
2570 job(test, result)
2570 job(test, result)
2571 else:
2571 else:
2572 t = threading.Thread(
2572 t = threading.Thread(
2573 target=job, name=test.name, args=(test, result)
2573 target=job, name=test.name, args=(test, result)
2574 )
2574 )
2575 t.start()
2575 t.start()
2576 running += 1
2576 running += 1
2577
2577
2578 # If we stop early we still need to wait on started tests to
2578 # If we stop early we still need to wait on started tests to
2579 # finish. Otherwise, there is a race between the test completing
2579 # finish. Otherwise, there is a race between the test completing
2580 # and the test's cleanup code running. This could result in the
2580 # and the test's cleanup code running. This could result in the
2581 # test reporting incorrectly.
2581 # test reporting incorrectly.
2582 if stoppedearly:
2582 if stoppedearly:
2583 while running:
2583 while running:
2584 try:
2584 try:
2585 done.get(True, 1)
2585 done.get(True, 1)
2586 running -= 1
2586 running -= 1
2587 except queue.Empty:
2587 except queue.Empty:
2588 continue
2588 continue
2589 except KeyboardInterrupt:
2589 except KeyboardInterrupt:
2590 for test in runtests:
2590 for test in runtests:
2591 test.abort()
2591 test.abort()
2592
2592
2593 channels = []
2593 channels = []
2594
2594
2595 return result
2595 return result
2596
2596
2597
2597
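# A minimal sketch of the dispatch model used by TestSuite.run() above:
# worker threads report completion through a queue so the main loop keeps
# at most `jobs` tests in flight. Illustrative only -- the names below are
# invented, and the real loop also handles shouldStop, --loop and channels.
import queue
import threading
import time

def run_in_parallel(tasks, jobs):
    done = queue.Queue()
    running = 0

    def worker(task):
        task()             # run one test
        done.put(None)     # free up a slot

    while tasks or running:
        if running == jobs or not tasks:
            done.get()     # wait for a running test to finish
            running -= 1
        if tasks and running < jobs:
            threading.Thread(target=worker, args=(tasks.pop(0),)).start()
            running += 1

run_in_parallel([lambda: time.sleep(0.01) for _ in range(5)], jobs=2)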
2598 # Save the most recent 5 wall-clock runtimes of each test to a
2598 # Save the most recent 5 wall-clock runtimes of each test to a
2599 # human-readable text file named .testtimes. Tests are sorted
2599 # human-readable text file named .testtimes. Tests are sorted
2600 # alphabetically, while times for each test are listed from oldest to
2600 # alphabetically, while times for each test are listed from oldest to
2601 # newest.
2601 # newest.
2602
2602
2603
2603
2604 def loadtimes(outputdir):
2604 def loadtimes(outputdir):
2605 times = []
2605 times = []
2606 try:
2606 try:
2607 with open(os.path.join(outputdir, b'.testtimes')) as fp:
2607 with open(os.path.join(outputdir, b'.testtimes')) as fp:
2608 for line in fp:
2608 for line in fp:
2609 m = re.match('(.*?) ([0-9. ]+)', line)
2609 m = re.match('(.*?) ([0-9. ]+)', line)
2610 times.append(
2610 times.append(
2611 (m.group(1), [float(t) for t in m.group(2).split()])
2611 (m.group(1), [float(t) for t in m.group(2).split()])
2612 )
2612 )
2613 except IOError as err:
2613 except IOError as err:
2614 if err.errno != errno.ENOENT:
2614 if err.errno != errno.ENOENT:
2615 raise
2615 raise
2616 return times
2616 return times
2617
2617
2618
2618
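# The `.testtimes` file that loadtimes() above parses is plain text, one
# test per line followed by its most recent wall-clock runtimes. The entry
# below is invented for illustration:
import re

line = 'test-commit.t 2.310 2.455 2.402'
m = re.match('(.*?) ([0-9. ]+)', line)
assert m.group(1) == 'test-commit.t'
assert [float(t) for t in m.group(2).split()] == [2.31, 2.455, 2.402]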
2619 def savetimes(outputdir, result):
2619 def savetimes(outputdir, result):
2620 saved = dict(loadtimes(outputdir))
2620 saved = dict(loadtimes(outputdir))
2621 maxruns = 5
2621 maxruns = 5
2622 skipped = {str(t[0]) for t in result.skipped}
2622 skipped = {str(t[0]) for t in result.skipped}
2623 for tdata in result.times:
2623 for tdata in result.times:
2624 test, real = tdata[0], tdata[3]
2624 test, real = tdata[0], tdata[3]
2625 if test not in skipped:
2625 if test not in skipped:
2626 ts = saved.setdefault(test, [])
2626 ts = saved.setdefault(test, [])
2627 ts.append(real)
2627 ts.append(real)
2628 ts[:] = ts[-maxruns:]
2628 ts[:] = ts[-maxruns:]
2629
2629
2630 fd, tmpname = tempfile.mkstemp(
2630 fd, tmpname = tempfile.mkstemp(
2631 prefix=b'.testtimes', dir=outputdir, text=True
2631 prefix=b'.testtimes', dir=outputdir, text=True
2632 )
2632 )
2633 with os.fdopen(fd, 'w') as fp:
2633 with os.fdopen(fd, 'w') as fp:
2634 for name, ts in sorted(saved.items()):
2634 for name, ts in sorted(saved.items()):
2635 fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts])))
2635 fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts])))
2636 timepath = os.path.join(outputdir, b'.testtimes')
2636 timepath = os.path.join(outputdir, b'.testtimes')
2637 try:
2637 try:
2638 os.unlink(timepath)
2638 os.unlink(timepath)
2639 except OSError:
2639 except OSError:
2640 pass
2640 pass
2641 try:
2641 try:
2642 os.rename(tmpname, timepath)
2642 os.rename(tmpname, timepath)
2643 except OSError:
2643 except OSError:
2644 pass
2644 pass
2645
2645
2646
2646
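# savetimes() above uses the usual write-to-temp-then-rename dance so an
# interrupted run never leaves a half-written `.testtimes` behind. The same
# idea in isolation (a sketch using os.replace as a modern equivalent of the
# unlink+rename pair; the file name is invented):
import os
import tempfile

def atomic_write(path, data):
    fd, tmp = tempfile.mkstemp(dir=os.path.dirname(path) or '.')
    with os.fdopen(fd, 'w') as fp:
        fp.write(data)
    os.replace(tmp, path)  # atomic within one filesystem

atomic_write('testtimes-example.txt', 'test-commit.t 2.310 2.455\n')
os.unlink('testtimes-example.txt')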
2647 class TextTestRunner(unittest.TextTestRunner):
2647 class TextTestRunner(unittest.TextTestRunner):
2648 """Custom unittest test runner that uses appropriate settings."""
2648 """Custom unittest test runner that uses appropriate settings."""
2649
2649
2650 def __init__(self, runner, *args, **kwargs):
2650 def __init__(self, runner, *args, **kwargs):
2651 super(TextTestRunner, self).__init__(*args, **kwargs)
2651 super(TextTestRunner, self).__init__(*args, **kwargs)
2652
2652
2653 self._runner = runner
2653 self._runner = runner
2654
2654
2655 self._result = getTestResult()(
2655 self._result = getTestResult()(
2656 self._runner.options, self.stream, self.descriptions, self.verbosity
2656 self._runner.options, self.stream, self.descriptions, self.verbosity
2657 )
2657 )
2658
2658
2659 def listtests(self, test):
2659 def listtests(self, test):
2660 test = sorted(test, key=lambda t: t.name)
2660 test = sorted(test, key=lambda t: t.name)
2661
2661
2662 self._result.onStart(test)
2662 self._result.onStart(test)
2663
2663
2664 for t in test:
2664 for t in test:
2665 print(t.name)
2665 print(t.name)
2666 self._result.addSuccess(t)
2666 self._result.addSuccess(t)
2667
2667
2668 if self._runner.options.xunit:
2668 if self._runner.options.xunit:
2669 with open(self._runner.options.xunit, "wb") as xuf:
2669 with open(self._runner.options.xunit, "wb") as xuf:
2670 self._writexunit(self._result, xuf)
2670 self._writexunit(self._result, xuf)
2671
2671
2672 if self._runner.options.json:
2672 if self._runner.options.json:
2673 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2673 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2674 with open(jsonpath, 'w') as fp:
2674 with open(jsonpath, 'w') as fp:
2675 self._writejson(self._result, fp)
2675 self._writejson(self._result, fp)
2676
2676
2677 return self._result
2677 return self._result
2678
2678
2679 def run(self, test):
2679 def run(self, test):
2680 self._result.onStart(test)
2680 self._result.onStart(test)
2681 test(self._result)
2681 test(self._result)
2682
2682
2683 failed = len(self._result.failures)
2683 failed = len(self._result.failures)
2684 skipped = len(self._result.skipped)
2684 skipped = len(self._result.skipped)
2685 ignored = len(self._result.ignored)
2685 ignored = len(self._result.ignored)
2686
2686
2687 with iolock:
2687 with iolock:
2688 self.stream.writeln('')
2688 self.stream.writeln('')
2689
2689
2690 if not self._runner.options.noskips:
2690 if not self._runner.options.noskips:
2691 for test, msg in sorted(
2691 for test, msg in sorted(
2692 self._result.skipped, key=lambda s: s[0].name
2692 self._result.skipped, key=lambda s: s[0].name
2693 ):
2693 ):
2694 formatted = 'Skipped %s: %s\n' % (test.name, msg)
2694 formatted = 'Skipped %s: %s\n' % (test.name, msg)
2695 msg = highlightmsg(formatted, self._result.color)
2695 msg = highlightmsg(formatted, self._result.color)
2696 self.stream.write(msg)
2696 self.stream.write(msg)
2697 for test, msg in sorted(
2697 for test, msg in sorted(
2698 self._result.failures, key=lambda f: f[0].name
2698 self._result.failures, key=lambda f: f[0].name
2699 ):
2699 ):
2700 formatted = 'Failed %s: %s\n' % (test.name, msg)
2700 formatted = 'Failed %s: %s\n' % (test.name, msg)
2701 self.stream.write(highlightmsg(formatted, self._result.color))
2701 self.stream.write(highlightmsg(formatted, self._result.color))
2702 for test, msg in sorted(
2702 for test, msg in sorted(
2703 self._result.errors, key=lambda e: e[0].name
2703 self._result.errors, key=lambda e: e[0].name
2704 ):
2704 ):
2705 self.stream.writeln('Errored %s: %s' % (test.name, msg))
2705 self.stream.writeln('Errored %s: %s' % (test.name, msg))
2706
2706
2707 if self._runner.options.xunit:
2707 if self._runner.options.xunit:
2708 with open(self._runner.options.xunit, "wb") as xuf:
2708 with open(self._runner.options.xunit, "wb") as xuf:
2709 self._writexunit(self._result, xuf)
2709 self._writexunit(self._result, xuf)
2710
2710
2711 if self._runner.options.json:
2711 if self._runner.options.json:
2712 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2712 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2713 with open(jsonpath, 'w') as fp:
2713 with open(jsonpath, 'w') as fp:
2714 self._writejson(self._result, fp)
2714 self._writejson(self._result, fp)
2715
2715
2716 self._runner._checkhglib('Tested')
2716 self._runner._checkhglib('Tested')
2717
2717
2718 savetimes(self._runner._outputdir, self._result)
2718 savetimes(self._runner._outputdir, self._result)
2719
2719
2720 if failed and self._runner.options.known_good_rev:
2720 if failed and self._runner.options.known_good_rev:
2721 self._bisecttests(t for t, m in self._result.failures)
2721 self._bisecttests(t for t, m in self._result.failures)
2722 self.stream.writeln(
2722 self.stream.writeln(
2723 '# Ran %d tests, %d skipped, %d failed.'
2723 '# Ran %d tests, %d skipped, %d failed.'
2724 % (self._result.testsRun, skipped + ignored, failed)
2724 % (self._result.testsRun, skipped + ignored, failed)
2725 )
2725 )
2726 if failed:
2726 if failed:
2727 self.stream.writeln(
2727 self.stream.writeln(
2728 'python hash seed: %s' % os.environ['PYTHONHASHSEED']
2728 'python hash seed: %s' % os.environ['PYTHONHASHSEED']
2729 )
2729 )
2730 if self._runner.options.time:
2730 if self._runner.options.time:
2731 self.printtimes(self._result.times)
2731 self.printtimes(self._result.times)
2732
2732
2733 if self._runner.options.exceptions:
2733 if self._runner.options.exceptions:
2734 exceptions = aggregateexceptions(
2734 exceptions = aggregateexceptions(
2735 os.path.join(self._runner._outputdir, b'exceptions')
2735 os.path.join(self._runner._outputdir, b'exceptions')
2736 )
2736 )
2737
2737
2738 self.stream.writeln('Exceptions Report:')
2738 self.stream.writeln('Exceptions Report:')
2739 self.stream.writeln(
2739 self.stream.writeln(
2740 '%d total from %d frames'
2740 '%d total from %d frames'
2741 % (exceptions['total'], len(exceptions['exceptioncounts']))
2741 % (exceptions['total'], len(exceptions['exceptioncounts']))
2742 )
2742 )
2743 combined = exceptions['combined']
2743 combined = exceptions['combined']
2744 for key in sorted(combined, key=combined.get, reverse=True):
2744 for key in sorted(combined, key=combined.get, reverse=True):
2745 frame, line, exc = key
2745 frame, line, exc = key
2746 totalcount, testcount, leastcount, leasttest = combined[key]
2746 totalcount, testcount, leastcount, leasttest = combined[key]
2747
2747
2748 self.stream.writeln(
2748 self.stream.writeln(
2749 '%d (%d tests)\t%s: %s (%s - %d total)'
2749 '%d (%d tests)\t%s: %s (%s - %d total)'
2750 % (
2750 % (
2751 totalcount,
2751 totalcount,
2752 testcount,
2752 testcount,
2753 frame,
2753 frame,
2754 exc,
2754 exc,
2755 leasttest,
2755 leasttest,
2756 leastcount,
2756 leastcount,
2757 )
2757 )
2758 )
2758 )
2759
2759
2760 self.stream.flush()
2760 self.stream.flush()
2761
2761
2762 return self._result
2762 return self._result
2763
2763
2764 def _bisecttests(self, tests):
2764 def _bisecttests(self, tests):
2765 bisectcmd = ['hg', 'bisect']
2765 bisectcmd = ['hg', 'bisect']
2766 bisectrepo = self._runner.options.bisect_repo
2766 bisectrepo = self._runner.options.bisect_repo
2767 if bisectrepo:
2767 if bisectrepo:
2768 bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
2768 bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
2769
2769
2770 def pread(args):
2770 def pread(args):
2771 env = os.environ.copy()
2771 env = os.environ.copy()
2772 env['HGPLAIN'] = '1'
2772 env['HGPLAIN'] = '1'
2773 p = subprocess.Popen(
2773 p = subprocess.Popen(
2774 args, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, env=env
2774 args, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, env=env
2775 )
2775 )
2776 data = p.stdout.read()
2776 data = p.stdout.read()
2777 p.wait()
2777 p.wait()
2778 return data
2778 return data
2779
2779
2780 for test in tests:
2780 for test in tests:
2781 pread(bisectcmd + ['--reset']),
2781 pread(bisectcmd + ['--reset']),
2782 pread(bisectcmd + ['--bad', '.'])
2782 pread(bisectcmd + ['--bad', '.'])
2783 pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
2783 pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
2784 # TODO: we probably need to forward more options
2784 # TODO: we probably need to forward more options
2785 # that alter hg's behavior inside the tests.
2785 # that alter hg's behavior inside the tests.
2786 opts = ''
2786 opts = ''
2787 withhg = self._runner.options.with_hg
2787 withhg = self._runner.options.with_hg
2788 if withhg:
2788 if withhg:
2789 opts += ' --with-hg=%s ' % shellquote(_bytes2sys(withhg))
2789 opts += ' --with-hg=%s ' % shellquote(_bytes2sys(withhg))
2790 rtc = '%s %s %s %s' % (sysexecutable, sys.argv[0], opts, test)
2790 rtc = '%s %s %s %s' % (sysexecutable, sys.argv[0], opts, test)
2791 data = pread(bisectcmd + ['--command', rtc])
2791 data = pread(bisectcmd + ['--command', rtc])
2792 m = re.search(
2792 m = re.search(
2793 (
2793 (
2794 br'\nThe first (?P<goodbad>bad|good) revision '
2794 br'\nThe first (?P<goodbad>bad|good) revision '
2795 br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
2795 br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
2796 br'summary: +(?P<summary>[^\n]+)\n'
2796 br'summary: +(?P<summary>[^\n]+)\n'
2797 ),
2797 ),
2798 data,
2798 data,
2799 (re.MULTILINE | re.DOTALL),
2799 (re.MULTILINE | re.DOTALL),
2800 )
2800 )
2801 if m is None:
2801 if m is None:
2802 self.stream.writeln(
2802 self.stream.writeln(
2803 'Failed to identify failure point for %s' % test
2803 'Failed to identify failure point for %s' % test
2804 )
2804 )
2805 continue
2805 continue
2806 dat = m.groupdict()
2806 dat = m.groupdict()
2807 verb = 'broken' if dat['goodbad'] == b'bad' else 'fixed'
2807 verb = 'broken' if dat['goodbad'] == b'bad' else 'fixed'
2808 self.stream.writeln(
2808 self.stream.writeln(
2809 '%s %s by %s (%s)'
2809 '%s %s by %s (%s)'
2810 % (
2810 % (
2811 test,
2811 test,
2812 verb,
2812 verb,
2813 dat['node'].decode('ascii'),
2813 dat['node'].decode('ascii'),
2814 dat['summary'].decode('utf8', 'ignore'),
2814 dat['summary'].decode('utf8', 'ignore'),
2815 )
2815 )
2816 )
2816 )
2817
2817
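# _bisecttests() above drives `hg bisect` non-interactively: for each
# failing test it marks the working copy bad, marks the known good revision
# good, and lets bisect re-run this harness at every candidate revision.
# Roughly the commands issued (revision and test name are made up):
#
#   hg bisect --reset
#   hg bisect --bad .
#   hg bisect --good abc123def456
#   hg bisect --command 'python run-tests.py  test-example.t'
#
# after which the "The first bad revision is: ..." block in the output is
# parsed to report what broke (or fixed) the test.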
2818 def printtimes(self, times):
2818 def printtimes(self, times):
2819 # iolock held by run
2819 # iolock held by run
2820 self.stream.writeln('# Producing time report')
2820 self.stream.writeln('# Producing time report')
2821 times.sort(key=lambda t: (t[3]))
2821 times.sort(key=lambda t: (t[3]))
2822 cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
2822 cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
2823 self.stream.writeln(
2823 self.stream.writeln(
2824 '%-7s %-7s %-7s %-7s %-7s %s'
2824 '%-7s %-7s %-7s %-7s %-7s %s'
2825 % ('start', 'end', 'cuser', 'csys', 'real', 'Test')
2825 % ('start', 'end', 'cuser', 'csys', 'real', 'Test')
2826 )
2826 )
2827 for tdata in times:
2827 for tdata in times:
2828 test = tdata[0]
2828 test = tdata[0]
2829 cuser, csys, real, start, end = tdata[1:6]
2829 cuser, csys, real, start, end = tdata[1:6]
2830 self.stream.writeln(cols % (start, end, cuser, csys, real, test))
2830 self.stream.writeln(cols % (start, end, cuser, csys, real, test))
2831
2831
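# printtimes() above prints fixed-width columns sorted by real time; with
# invented numbers the report looks like:
#
#   # Producing time report
#   start   end     cuser   csys    real    Test
#     0.015   0.870   0.655   0.102   0.855 test-status.t
#     0.012   2.361   1.842   0.310   2.349 test-commit.t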
2832 @staticmethod
2832 @staticmethod
2833 def _writexunit(result, outf):
2833 def _writexunit(result, outf):
2834 # See http://llg.cubic.org/docs/junit/ for a reference.
2834 # See http://llg.cubic.org/docs/junit/ for a reference.
2835 timesd = {t[0]: t[3] for t in result.times}
2835 timesd = {t[0]: t[3] for t in result.times}
2836 doc = minidom.Document()
2836 doc = minidom.Document()
2837 s = doc.createElement('testsuite')
2837 s = doc.createElement('testsuite')
2838 s.setAttribute('errors', "0") # TODO
2838 s.setAttribute('errors', "0") # TODO
2839 s.setAttribute('failures', str(len(result.failures)))
2839 s.setAttribute('failures', str(len(result.failures)))
2840 s.setAttribute('name', 'run-tests')
2840 s.setAttribute('name', 'run-tests')
2841 s.setAttribute(
2841 s.setAttribute(
2842 'skipped', str(len(result.skipped) + len(result.ignored))
2842 'skipped', str(len(result.skipped) + len(result.ignored))
2843 )
2843 )
2844 s.setAttribute('tests', str(result.testsRun))
2844 s.setAttribute('tests', str(result.testsRun))
2845 doc.appendChild(s)
2845 doc.appendChild(s)
2846 for tc in result.successes:
2846 for tc in result.successes:
2847 t = doc.createElement('testcase')
2847 t = doc.createElement('testcase')
2848 t.setAttribute('name', tc.name)
2848 t.setAttribute('name', tc.name)
2849 tctime = timesd.get(tc.name)
2849 tctime = timesd.get(tc.name)
2850 if tctime is not None:
2850 if tctime is not None:
2851 t.setAttribute('time', '%.3f' % tctime)
2851 t.setAttribute('time', '%.3f' % tctime)
2852 s.appendChild(t)
2852 s.appendChild(t)
2853 for tc, err in sorted(result.faildata.items()):
2853 for tc, err in sorted(result.faildata.items()):
2854 t = doc.createElement('testcase')
2854 t = doc.createElement('testcase')
2855 t.setAttribute('name', tc)
2855 t.setAttribute('name', tc)
2856 tctime = timesd.get(tc)
2856 tctime = timesd.get(tc)
2857 if tctime is not None:
2857 if tctime is not None:
2858 t.setAttribute('time', '%.3f' % tctime)
2858 t.setAttribute('time', '%.3f' % tctime)
2859 # createCDATASection expects a unicode string, or it will
2859 # createCDATASection expects a unicode string, or it will
2860 # convert using default conversion rules, which will
2860 # convert using default conversion rules, which will
2861 # fail if the string isn't ASCII.
2861 # fail if the string isn't ASCII.
2862 err = cdatasafe(err).decode('utf-8', 'replace')
2862 err = cdatasafe(err).decode('utf-8', 'replace')
2863 cd = doc.createCDATASection(err)
2863 cd = doc.createCDATASection(err)
2864 # Use 'failure' here instead of 'error' to match errors = 0,
2864 # Use 'failure' here instead of 'error' to match errors = 0,
2865 # failures = len(result.failures) in the testsuite element.
2865 # failures = len(result.failures) in the testsuite element.
2866 failelem = doc.createElement('failure')
2866 failelem = doc.createElement('failure')
2867 failelem.setAttribute('message', 'output changed')
2867 failelem.setAttribute('message', 'output changed')
2868 failelem.setAttribute('type', 'output-mismatch')
2868 failelem.setAttribute('type', 'output-mismatch')
2869 failelem.appendChild(cd)
2869 failelem.appendChild(cd)
2870 t.appendChild(failelem)
2870 t.appendChild(failelem)
2871 s.appendChild(t)
2871 s.appendChild(t)
2872 for tc, message in result.skipped:
2872 for tc, message in result.skipped:
2873 # According to the schema, 'skipped' has no attributes. So store
2873 # According to the schema, 'skipped' has no attributes. So store
2874 # the skip message as a text node instead.
2874 # the skip message as a text node instead.
2875 t = doc.createElement('testcase')
2875 t = doc.createElement('testcase')
2876 t.setAttribute('name', tc.name)
2876 t.setAttribute('name', tc.name)
2877 binmessage = message.encode('utf-8')
2877 binmessage = message.encode('utf-8')
2878 message = cdatasafe(binmessage).decode('utf-8', 'replace')
2878 message = cdatasafe(binmessage).decode('utf-8', 'replace')
2879 cd = doc.createCDATASection(message)
2879 cd = doc.createCDATASection(message)
2880 skipelem = doc.createElement('skipped')
2880 skipelem = doc.createElement('skipped')
2881 skipelem.appendChild(cd)
2881 skipelem.appendChild(cd)
2882 t.appendChild(skipelem)
2882 t.appendChild(skipelem)
2883 s.appendChild(t)
2883 s.appendChild(t)
2884 outf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
2884 outf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
2885
2885
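# The xunit file produced by _writexunit() above follows the usual JUnit
# shape; a made-up report with one pass, one failure and one skip would
# look roughly like:
#
#   <testsuite errors="0" failures="1" name="run-tests" skipped="1" tests="3">
#     <testcase name="test-commit.t" time="2.349"/>
#     <testcase name="test-merge.t" time="1.102">
#       <failure message="output changed" type="output-mismatch">
#         <![CDATA[--- expected ... +++ actual ...]]>
#       </failure>
#     </testcase>
#     <testcase name="test-largefiles.t">
#       <skipped><![CDATA[missing feature: ...]]></skipped>
#     </testcase>
#   </testsuite>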
2886 @staticmethod
2886 @staticmethod
2887 def _writejson(result, outf):
2887 def _writejson(result, outf):
2888 timesd = {}
2888 timesd = {}
2889 for tdata in result.times:
2889 for tdata in result.times:
2890 test = tdata[0]
2890 test = tdata[0]
2891 timesd[test] = tdata[1:]
2891 timesd[test] = tdata[1:]
2892
2892
2893 outcome = {}
2893 outcome = {}
2894 groups = [
2894 groups = [
2895 ('success', ((tc, None) for tc in result.successes)),
2895 ('success', ((tc, None) for tc in result.successes)),
2896 ('failure', result.failures),
2896 ('failure', result.failures),
2897 ('skip', result.skipped),
2897 ('skip', result.skipped),
2898 ]
2898 ]
2899 for res, testcases in groups:
2899 for res, testcases in groups:
2900 for tc, __ in testcases:
2900 for tc, __ in testcases:
2901 if tc.name in timesd:
2901 if tc.name in timesd:
2902 diff = result.faildata.get(tc.name, b'')
2902 diff = result.faildata.get(tc.name, b'')
2903 try:
2903 try:
2904 diff = diff.decode('unicode_escape')
2904 diff = diff.decode('unicode_escape')
2905 except UnicodeDecodeError as e:
2905 except UnicodeDecodeError as e:
2906 diff = '%r decoding diff, sorry' % e
2906 diff = '%r decoding diff, sorry' % e
2907 tres = {
2907 tres = {
2908 'result': res,
2908 'result': res,
2909 'time': ('%0.3f' % timesd[tc.name][2]),
2909 'time': ('%0.3f' % timesd[tc.name][2]),
2910 'cuser': ('%0.3f' % timesd[tc.name][0]),
2910 'cuser': ('%0.3f' % timesd[tc.name][0]),
2911 'csys': ('%0.3f' % timesd[tc.name][1]),
2911 'csys': ('%0.3f' % timesd[tc.name][1]),
2912 'start': ('%0.3f' % timesd[tc.name][3]),
2912 'start': ('%0.3f' % timesd[tc.name][3]),
2913 'end': ('%0.3f' % timesd[tc.name][4]),
2913 'end': ('%0.3f' % timesd[tc.name][4]),
2914 'diff': diff,
2914 'diff': diff,
2915 }
2915 }
2916 else:
2916 else:
2917 # blacklisted test
2917 # blacklisted test
2918 tres = {'result': res}
2918 tres = {'result': res}
2919
2919
2920 outcome[tc.name] = tres
2920 outcome[tc.name] = tres
2921 jsonout = json.dumps(
2921 jsonout = json.dumps(
2922 outcome, sort_keys=True, indent=4, separators=(',', ': ')
2922 outcome, sort_keys=True, indent=4, separators=(',', ': ')
2923 )
2923 )
2924 outf.writelines(("testreport =", jsonout))
2924 outf.writelines(("testreport =", jsonout))
2925
2925
2926
2926
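# _writejson() above emits a JavaScript-style report: the literal prefix
# "testreport =" immediately followed by a JSON object keyed by test name.
# An invented example:
#
#   testreport ={
#       "test-commit.t": {
#           "csys": "0.310",
#           "cuser": "1.842",
#           "diff": "",
#           "end": "2.361",
#           "result": "success",
#           "start": "0.012",
#           "time": "2.349"
#       },
#       "test-blacklisted.t": {
#           "result": "skip"
#       }
#   }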
2927 def sorttests(testdescs, previoustimes, shuffle=False):
2927 def sorttests(testdescs, previoustimes, shuffle=False):
2928 """Do an in-place sort of tests."""
2928 """Do an in-place sort of tests."""
2929 if shuffle:
2929 if shuffle:
2930 random.shuffle(testdescs)
2930 random.shuffle(testdescs)
2931 return
2931 return
2932
2932
2933 if previoustimes:
2933 if previoustimes:
2934
2934
2935 def sortkey(f):
2935 def sortkey(f):
2936 f = f['path']
2936 f = f['path']
2937 if f in previoustimes:
2937 if f in previoustimes:
2938 # Use most recent time as estimate
2938 # Use most recent time as estimate
2939 return -(previoustimes[f][-1])
2939 return -(previoustimes[f][-1])
2940 else:
2940 else:
2941 # Default to a rather arbitrary value of 1 second for new tests
2941 # Default to a rather arbitrary value of 1 second for new tests
2942 return -1.0
2942 return -1.0
2943
2943
2944 else:
2944 else:
2945 # keywords for slow tests
2945 # keywords for slow tests
2946 slow = {
2946 slow = {
2947 b'svn': 10,
2947 b'svn': 10,
2948 b'cvs': 10,
2948 b'cvs': 10,
2949 b'hghave': 10,
2949 b'hghave': 10,
2950 b'largefiles-update': 10,
2950 b'largefiles-update': 10,
2951 b'run-tests': 10,
2951 b'run-tests': 10,
2952 b'corruption': 10,
2952 b'corruption': 10,
2953 b'race': 10,
2953 b'race': 10,
2954 b'i18n': 10,
2954 b'i18n': 10,
2955 b'check': 100,
2955 b'check': 100,
2956 b'gendoc': 100,
2956 b'gendoc': 100,
2957 b'contrib-perf': 200,
2957 b'contrib-perf': 200,
2958 b'merge-combination': 100,
2958 b'merge-combination': 100,
2959 }
2959 }
2960 perf = {}
2960 perf = {}
2961
2961
2962 def sortkey(f):
2962 def sortkey(f):
2963 # run largest tests first, as they tend to take the longest
2963 # run largest tests first, as they tend to take the longest
2964 f = f['path']
2964 f = f['path']
2965 try:
2965 try:
2966 return perf[f]
2966 return perf[f]
2967 except KeyError:
2967 except KeyError:
2968 try:
2968 try:
2969 val = -os.stat(f).st_size
2969 val = -os.stat(f).st_size
2970 except OSError as e:
2970 except OSError as e:
2971 if e.errno != errno.ENOENT:
2971 if e.errno != errno.ENOENT:
2972 raise
2972 raise
2973 perf[f] = -1e9 # file does not exist, tell early
2973 perf[f] = -1e9 # file does not exist, tell early
2974 return -1e9
2974 return -1e9
2975 for kw, mul in slow.items():
2975 for kw, mul in slow.items():
2976 if kw in f:
2976 if kw in f:
2977 val *= mul
2977 val *= mul
2978 if f.endswith(b'.py'):
2978 if f.endswith(b'.py'):
2979 val /= 10.0
2979 val /= 10.0
2980 perf[f] = val / 1000.0
2980 perf[f] = val / 1000.0
2981 return perf[f]
2981 return perf[f]
2982
2982
2983 testdescs.sort(key=sortkey)
2983 testdescs.sort(key=sortkey)
2984
2984
2985
2985
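# Without recorded runtimes, the sortkey() above falls back to a size-based
# heuristic: larger files sort first (more negative key), known-slow
# keywords multiply the weight, and .py tests are damped. A standalone
# illustration with invented sizes:
sizes = {b'test-basic.t': 4000, b'test-check-code.t': 3000, b'test-convert-svn.t': 5000}
slowdemo = {b'svn': 10, b'check': 100}

def demo_sortkey(path):
    val = -sizes[path]
    for kw, mul in slowdemo.items():
        if kw in path:
            val *= mul
    if path.endswith(b'.py'):
        val /= 10.0
    return val / 1000.0

assert sorted(sizes, key=demo_sortkey) == [
    b'test-check-code.t', b'test-convert-svn.t', b'test-basic.t'
]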
2986 class TestRunner(object):
2986 class TestRunner(object):
2987 """Holds context for executing tests.
2987 """Holds context for executing tests.
2988
2988
2989 Tests rely on a lot of state. This object holds it for them.
2989 Tests rely on a lot of state. This object holds it for them.
2990 """
2990 """
2991
2991
2992 # Programs required to run tests.
2992 # Programs required to run tests.
2993 REQUIREDTOOLS = [
2993 REQUIREDTOOLS = [
2994 b'diff',
2994 b'diff',
2995 b'grep',
2995 b'grep',
2996 b'unzip',
2996 b'unzip',
2997 b'gunzip',
2997 b'gunzip',
2998 b'bunzip2',
2998 b'bunzip2',
2999 b'sed',
2999 b'sed',
3000 ]
3000 ]
3001
3001
3002 # Maps file extensions to test class.
3002 # Maps file extensions to test class.
3003 TESTTYPES = [
3003 TESTTYPES = [
3004 (b'.py', PythonTest),
3004 (b'.py', PythonTest),
3005 (b'.t', TTest),
3005 (b'.t', TTest),
3006 ]
3006 ]
3007
3007
3008 def __init__(self):
3008 def __init__(self):
3009 self.options = None
3009 self.options = None
3010 self._hgroot = None
3010 self._hgroot = None
3011 self._testdir = None
3011 self._testdir = None
3012 self._outputdir = None
3012 self._outputdir = None
3013 self._hgtmp = None
3013 self._hgtmp = None
3014 self._installdir = None
3014 self._installdir = None
3015 self._bindir = None
3015 self._bindir = None
3016 self._tmpbindir = None
3016 self._tmpbindir = None
3017 self._pythondir = None
3017 self._pythondir = None
3018 self._coveragefile = None
3018 self._coveragefile = None
3019 self._createdfiles = []
3019 self._createdfiles = []
3020 self._hgcommand = None
3020 self._hgcommand = None
3021 self._hgpath = None
3021 self._hgpath = None
3022 self._portoffset = 0
3022 self._portoffset = 0
3023 self._ports = {}
3023 self._ports = {}
3024
3024
3025 def run(self, args, parser=None):
3025 def run(self, args, parser=None):
3026 """Run the test suite."""
3026 """Run the test suite."""
3027 oldmask = os.umask(0o22)
3027 oldmask = os.umask(0o22)
3028 try:
3028 try:
3029 parser = parser or getparser()
3029 parser = parser or getparser()
3030 options = parseargs(args, parser)
3030 options = parseargs(args, parser)
3031 tests = [_sys2bytes(a) for a in options.tests]
3031 tests = [_sys2bytes(a) for a in options.tests]
3032 if options.test_list is not None:
3032 if options.test_list is not None:
3033 for listfile in options.test_list:
3033 for listfile in options.test_list:
3034 with open(listfile, 'rb') as f:
3034 with open(listfile, 'rb') as f:
3035 tests.extend(t for t in f.read().splitlines() if t)
3035 tests.extend(t for t in f.read().splitlines() if t)
3036 self.options = options
3036 self.options = options
3037
3037
3038 self._checktools()
3038 self._checktools()
3039 testdescs = self.findtests(tests)
3039 testdescs = self.findtests(tests)
3040 if options.profile_runner:
3040 if options.profile_runner:
3041 import statprof
3041 import statprof
3042
3042
3043 statprof.start()
3043 statprof.start()
3044 result = self._run(testdescs)
3044 result = self._run(testdescs)
3045 if options.profile_runner:
3045 if options.profile_runner:
3046 statprof.stop()
3046 statprof.stop()
3047 statprof.display()
3047 statprof.display()
3048 return result
3048 return result
3049
3049
3050 finally:
3050 finally:
3051 os.umask(oldmask)
3051 os.umask(oldmask)
3052
3052
3053 def _run(self, testdescs):
3053 def _run(self, testdescs):
3054 testdir = getcwdb()
3054 testdir = getcwdb()
3055 self._testdir = osenvironb[b'TESTDIR'] = getcwdb()
3055 self._testdir = osenvironb[b'TESTDIR'] = getcwdb()
3056 # assume all tests in same folder for now
3056 # assume all tests in same folder for now
3057 if testdescs:
3057 if testdescs:
3058 pathname = os.path.dirname(testdescs[0]['path'])
3058 pathname = os.path.dirname(testdescs[0]['path'])
3059 if pathname:
3059 if pathname:
3060 testdir = os.path.join(testdir, pathname)
3060 testdir = os.path.join(testdir, pathname)
3061 self._testdir = osenvironb[b'TESTDIR'] = testdir
3061 self._testdir = osenvironb[b'TESTDIR'] = testdir
3062 if self.options.outputdir:
3062 if self.options.outputdir:
3063 self._outputdir = canonpath(_sys2bytes(self.options.outputdir))
3063 self._outputdir = canonpath(_sys2bytes(self.options.outputdir))
3064 else:
3064 else:
3065 self._outputdir = getcwdb()
3065 self._outputdir = getcwdb()
3066 if testdescs and pathname:
3066 if testdescs and pathname:
3067 self._outputdir = os.path.join(self._outputdir, pathname)
3067 self._outputdir = os.path.join(self._outputdir, pathname)
3068 previoustimes = {}
3068 previoustimes = {}
3069 if self.options.order_by_runtime:
3069 if self.options.order_by_runtime:
3070 previoustimes = dict(loadtimes(self._outputdir))
3070 previoustimes = dict(loadtimes(self._outputdir))
3071 sorttests(testdescs, previoustimes, shuffle=self.options.random)
3071 sorttests(testdescs, previoustimes, shuffle=self.options.random)
3072
3072
3073 if 'PYTHONHASHSEED' not in os.environ:
3073 if 'PYTHONHASHSEED' not in os.environ:
3074 # use a random python hash seed all the time
3074 # use a random python hash seed all the time
3075 # we do the randomness ourselves to know what seed is used
3075 # we do the randomness ourselves to know what seed is used
3076 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
3076 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
3077
3077
3078 # Rayon (Rust crate for multi-threading) will use all logical CPU cores
3078 # Rayon (Rust crate for multi-threading) will use all logical CPU cores
3079 # by default, causing thrashing on high-cpu-count systems.
3079 # by default, causing thrashing on high-cpu-count systems.
3080 # Setting its limit to 3 during tests should still let us uncover
3080 # Setting its limit to 3 during tests should still let us uncover
3081 # multi-threading bugs while keeping the thrashing reasonable.
3081 # multi-threading bugs while keeping the thrashing reasonable.
3082 os.environ.setdefault("RAYON_NUM_THREADS", "3")
3082 os.environ.setdefault("RAYON_NUM_THREADS", "3")
3083
3083
3084 if self.options.tmpdir:
3084 if self.options.tmpdir:
3085 self.options.keep_tmpdir = True
3085 self.options.keep_tmpdir = True
3086 tmpdir = _sys2bytes(self.options.tmpdir)
3086 tmpdir = _sys2bytes(self.options.tmpdir)
3087 if os.path.exists(tmpdir):
3087 if os.path.exists(tmpdir):
3088 # Meaning of tmpdir has changed since 1.3: we used to create
3088 # Meaning of tmpdir has changed since 1.3: we used to create
3089 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
3089 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
3090 # tmpdir already exists.
3090 # tmpdir already exists.
3091 print("error: temp dir %r already exists" % tmpdir)
3091 print("error: temp dir %r already exists" % tmpdir)
3092 return 1
3092 return 1
3093
3093
3094 os.makedirs(tmpdir)
3094 os.makedirs(tmpdir)
3095 else:
3095 else:
3096 d = None
3096 d = None
3097 if os.name == 'nt':
3097 if os.name == 'nt':
3098 # without this, we get the default temp dir location, but
3098 # without this, we get the default temp dir location, but
3099 # in all lowercase, which causes troubles with paths (issue3490)
3099 # in all lowercase, which causes troubles with paths (issue3490)
3100 d = osenvironb.get(b'TMP', None)
3100 d = osenvironb.get(b'TMP', None)
3101 tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)
3101 tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)
3102
3102
3103 self._hgtmp = osenvironb[b'HGTMP'] = os.path.realpath(tmpdir)
3103 self._hgtmp = osenvironb[b'HGTMP'] = os.path.realpath(tmpdir)
3104
3104
3105 if self.options.with_hg:
3105 if self.options.with_hg:
3106 self._installdir = None
3106 self._installdir = None
3107 whg = self.options.with_hg
3107 whg = self.options.with_hg
3108 self._bindir = os.path.dirname(os.path.realpath(whg))
3108 self._bindir = os.path.dirname(os.path.realpath(whg))
3109 assert isinstance(self._bindir, bytes)
3109 assert isinstance(self._bindir, bytes)
3110 self._hgcommand = os.path.basename(whg)
3110 self._hgcommand = os.path.basename(whg)
3111 self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
3111 self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
3112 os.makedirs(self._tmpbindir)
3112 os.makedirs(self._tmpbindir)
3113
3113
3114 normbin = os.path.normpath(os.path.abspath(whg))
3114 normbin = os.path.normpath(os.path.abspath(whg))
3115 normbin = normbin.replace(_sys2bytes(os.sep), b'/')
3115 normbin = normbin.replace(_sys2bytes(os.sep), b'/')
3116
3116
3117 # Other Python scripts in the test harness need to
3117 # Other Python scripts in the test harness need to
3118 # `import mercurial`. If `hg` is a Python script, we assume
3118 # `import mercurial`. If `hg` is a Python script, we assume
3119 # the Mercurial modules are relative to its path and tell the tests
3119 # the Mercurial modules are relative to its path and tell the tests
3120 # to load Python modules from its directory.
3120 # to load Python modules from its directory.
3121 with open(whg, 'rb') as fh:
3121 with open(whg, 'rb') as fh:
3122 initial = fh.read(1024)
3122 initial = fh.read(1024)
3123
3123
3124 if re.match(b'#!.*python', initial):
3124 if re.match(b'#!.*python', initial):
3125 self._pythondir = self._bindir
3125 self._pythondir = self._bindir
3126 # If it looks like our in-repo Rust binary, use the source root.
3126 # If it looks like our in-repo Rust binary, use the source root.
3127 # This is a bit hacky. But rhg is still not supported outside the
3127 # This is a bit hacky. But rhg is still not supported outside the
3128 # source directory. So until it is, do the simple thing.
3128 # source directory. So until it is, do the simple thing.
3129 elif re.search(b'/rust/target/[^/]+/hg', normbin):
3129 elif re.search(b'/rust/target/[^/]+/hg', normbin):
3130 self._pythondir = os.path.dirname(self._testdir)
3130 self._pythondir = os.path.dirname(self._testdir)
3131 # Fall back to the legacy behavior.
3131 # Fall back to the legacy behavior.
3132 else:
3132 else:
3133 self._pythondir = self._bindir
3133 self._pythondir = self._bindir
3134
3134
3135 else:
3135 else:
3136 self._installdir = os.path.join(self._hgtmp, b"install")
3136 self._installdir = os.path.join(self._hgtmp, b"install")
3137 self._bindir = os.path.join(self._installdir, b"bin")
3137 self._bindir = os.path.join(self._installdir, b"bin")
3138 self._hgcommand = b'hg'
3138 self._hgcommand = b'hg'
3139 self._tmpbindir = self._bindir
3139 self._tmpbindir = self._bindir
3140 self._pythondir = os.path.join(self._installdir, b"lib", b"python")
3140 self._pythondir = os.path.join(self._installdir, b"lib", b"python")
3141
3141
3142 # Force the use of hg.exe instead of relying on MSYS to recognize hg is
3142 # Force the use of hg.exe instead of relying on MSYS to recognize hg is
3143 # a python script and feed it to python.exe. Legacy stdio is force
3143 # a python script and feed it to python.exe. Legacy stdio is force
3144 # enabled by hg.exe, and this is a more realistic way to launch hg
3144 # enabled by hg.exe, and this is a more realistic way to launch hg
3145 # anyway.
3145 # anyway.
3146 if os.name == 'nt' and not self._hgcommand.endswith(b'.exe'):
3146 if os.name == 'nt' and not self._hgcommand.endswith(b'.exe'):
3147 self._hgcommand += b'.exe'
3147 self._hgcommand += b'.exe'
3148
3148
3149 # set CHGHG, then replace "hg" command by "chg"
3149 # set CHGHG, then replace "hg" command by "chg"
3150 chgbindir = self._bindir
3150 chgbindir = self._bindir
3151 if self.options.chg or self.options.with_chg:
3151 if self.options.chg or self.options.with_chg:
3152 osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
3152 osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
3153 else:
3153 else:
3154 osenvironb.pop(b'CHGHG', None) # drop flag for hghave
3154 osenvironb.pop(b'CHGHG', None) # drop flag for hghave
3155 if self.options.chg:
3155 if self.options.chg:
3156 self._hgcommand = b'chg'
3156 self._hgcommand = b'chg'
3157 elif self.options.with_chg:
3157 elif self.options.with_chg:
3158 chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
3158 chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
3159 self._hgcommand = os.path.basename(self.options.with_chg)
3159 self._hgcommand = os.path.basename(self.options.with_chg)
3160
3160
3161 # configure fallback and replace "hg" command by "rhg"
3161 # configure fallback and replace "hg" command by "rhg"
3162 rhgbindir = self._bindir
3162 rhgbindir = self._bindir
3163 if self.options.rhg or self.options.with_rhg:
3163 if self.options.rhg or self.options.with_rhg:
3164 # Affects hghave.py
3164 # Affects hghave.py
3165 osenvironb[b'RHG_INSTALLED_AS_HG'] = b'1'
3165 osenvironb[b'RHG_INSTALLED_AS_HG'] = b'1'
3166 # Affects configuration. Alternatives would be setting configuration through
3166 # Affects configuration. Alternatives would be setting configuration through
3167 # `$HGRCPATH` but some tests override that, or changing `_hgcommand` to include
3167 # `$HGRCPATH` but some tests override that, or changing `_hgcommand` to include
3168 # `--config` but that disrupts tests that print command lines and check expected
3168 # `--config` but that disrupts tests that print command lines and check expected
3169 # output.
3169 # output.
3170 osenvironb[b'RHG_ON_UNSUPPORTED'] = b'fallback'
3170 osenvironb[b'RHG_ON_UNSUPPORTED'] = b'fallback'
3171 osenvironb[b'RHG_FALLBACK_EXECUTABLE'] = os.path.join(
3171 osenvironb[b'RHG_FALLBACK_EXECUTABLE'] = os.path.join(
3172 self._bindir, self._hgcommand
3172 self._bindir, self._hgcommand
3173 )
3173 )
3174 if self.options.rhg:
3174 if self.options.rhg:
3175 self._hgcommand = b'rhg'
3175 self._hgcommand = b'rhg'
3176 elif self.options.with_rhg:
3176 elif self.options.with_rhg:
3177 rhgbindir = os.path.dirname(os.path.realpath(self.options.with_rhg))
3177 rhgbindir = os.path.dirname(os.path.realpath(self.options.with_rhg))
3178 self._hgcommand = os.path.basename(self.options.with_rhg)
3178 self._hgcommand = os.path.basename(self.options.with_rhg)
3179
3179
3180 osenvironb[b"BINDIR"] = self._bindir
3180 osenvironb[b"BINDIR"] = self._bindir
3181 osenvironb[b"PYTHON"] = PYTHON
3181 osenvironb[b"PYTHON"] = PYTHON
3182
3182
3183 fileb = _sys2bytes(__file__)
3183 fileb = _sys2bytes(__file__)
3184 runtestdir = os.path.abspath(os.path.dirname(fileb))
3184 runtestdir = os.path.abspath(os.path.dirname(fileb))
3185 osenvironb[b'RUNTESTDIR'] = runtestdir
3185 osenvironb[b'RUNTESTDIR'] = runtestdir
3186 if PYTHON3:
3186 if PYTHON3:
3187 sepb = _sys2bytes(os.pathsep)
3187 sepb = _sys2bytes(os.pathsep)
3188 else:
3188 else:
3189 sepb = os.pathsep
3189 sepb = os.pathsep
3190 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
3190 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
3191 if os.path.islink(__file__):
3191 if os.path.islink(__file__):
3192 # test helper will likely be at the end of the symlink
3192 # test helper will likely be at the end of the symlink
3193 realfile = os.path.realpath(fileb)
3193 realfile = os.path.realpath(fileb)
3194 realdir = os.path.abspath(os.path.dirname(realfile))
3194 realdir = os.path.abspath(os.path.dirname(realfile))
3195 path.insert(2, realdir)
3195 path.insert(2, realdir)
3196 if chgbindir != self._bindir:
3196 if chgbindir != self._bindir:
3197 path.insert(1, chgbindir)
3197 path.insert(1, chgbindir)
3198 if rhgbindir != self._bindir:
3198 if rhgbindir != self._bindir:
3199 path.insert(1, rhgbindir)
3199 path.insert(1, rhgbindir)
3200 if self._testdir != runtestdir:
3200 if self._testdir != runtestdir:
3201 path = [self._testdir] + path
3201 path = [self._testdir] + path
3202 if self._tmpbindir != self._bindir:
3202 if self._tmpbindir != self._bindir:
3203 path = [self._tmpbindir] + path
3203 path = [self._tmpbindir] + path
3204 osenvironb[b"PATH"] = sepb.join(path)
3204 osenvironb[b"PATH"] = sepb.join(path)
3205
3205
3206 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
3206 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
3207 # can run .../tests/run-tests.py test-foo where test-foo
3207 # can run .../tests/run-tests.py test-foo where test-foo
3208 # adds an extension to HGRC. Also include the run-tests.py directory to
3208 # adds an extension to HGRC. Also include the run-tests.py directory to
3209 # import modules like heredoctest.
3209 # import modules like heredoctest.
3210 pypath = [self._pythondir, self._testdir, runtestdir]
3210 pypath = [self._pythondir, self._testdir, runtestdir]
3211 # We have to augment PYTHONPATH, rather than simply replacing
3211 # We have to augment PYTHONPATH, rather than simply replacing
3212 # it, in case external libraries are only available via current
3212 # it, in case external libraries are only available via current
3213 # PYTHONPATH. (In particular, the Subversion bindings on OS X
3213 # PYTHONPATH. (In particular, the Subversion bindings on OS X
3214 # are in /opt/subversion.)
3214 # are in /opt/subversion.)
3215 oldpypath = osenvironb.get(IMPL_PATH)
3215 oldpypath = osenvironb.get(IMPL_PATH)
3216 if oldpypath:
3216 if oldpypath:
3217 pypath.append(oldpypath)
3217 pypath.append(oldpypath)
3218 osenvironb[IMPL_PATH] = sepb.join(pypath)
3218 osenvironb[IMPL_PATH] = sepb.join(pypath)
3219
3219
3220 if self.options.pure:
3220 if self.options.pure:
3221 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
3221 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
3222 os.environ["HGMODULEPOLICY"] = "py"
3222 os.environ["HGMODULEPOLICY"] = "py"
3223 if self.options.rust:
3223 if self.options.rust:
3224 os.environ["HGMODULEPOLICY"] = "rust+c"
3224 os.environ["HGMODULEPOLICY"] = "rust+c"
3225 if self.options.no_rust:
3225 if self.options.no_rust:
3226 current_policy = os.environ.get("HGMODULEPOLICY", "")
3226 current_policy = os.environ.get("HGMODULEPOLICY", "")
3227 if current_policy.startswith("rust+"):
3227 if current_policy.startswith("rust+"):
3228 os.environ["HGMODULEPOLICY"] = current_policy[len("rust+") :]
3228 os.environ["HGMODULEPOLICY"] = current_policy[len("rust+") :]
3229 os.environ.pop("HGWITHRUSTEXT", None)
3229 os.environ.pop("HGWITHRUSTEXT", None)
3230
3230
3231 if self.options.allow_slow_tests:
3231 if self.options.allow_slow_tests:
3232 os.environ["HGTEST_SLOW"] = "slow"
3232 os.environ["HGTEST_SLOW"] = "slow"
3233 elif 'HGTEST_SLOW' in os.environ:
3233 elif 'HGTEST_SLOW' in os.environ:
3234 del os.environ['HGTEST_SLOW']
3234 del os.environ['HGTEST_SLOW']
3235
3235
3236 self._coveragefile = os.path.join(self._testdir, b'.coverage')
3236 self._coveragefile = os.path.join(self._testdir, b'.coverage')
3237
3237
3238 if self.options.exceptions:
3238 if self.options.exceptions:
3239 exceptionsdir = os.path.join(self._outputdir, b'exceptions')
3239 exceptionsdir = os.path.join(self._outputdir, b'exceptions')
3240 try:
3240 try:
3241 os.makedirs(exceptionsdir)
3241 os.makedirs(exceptionsdir)
3242 except OSError as e:
3242 except OSError as e:
3243 if e.errno != errno.EEXIST:
3243 if e.errno != errno.EEXIST:
3244 raise
3244 raise
3245
3245
3246 # Remove all existing exception reports.
3246 # Remove all existing exception reports.
3247 for f in os.listdir(exceptionsdir):
3247 for f in os.listdir(exceptionsdir):
3248 os.unlink(os.path.join(exceptionsdir, f))
3248 os.unlink(os.path.join(exceptionsdir, f))
3249
3249
3250 osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir
3250 osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir
3251 logexceptions = os.path.join(self._testdir, b'logexceptions.py')
3251 logexceptions = os.path.join(self._testdir, b'logexceptions.py')
3252 self.options.extra_config_opt.append(
3252 self.options.extra_config_opt.append(
3253 'extensions.logexceptions=%s' % logexceptions.decode('utf-8')
3253 'extensions.logexceptions=%s' % logexceptions.decode('utf-8')
3254 )
3254 )
3255
3255
3256 vlog("# Using TESTDIR", _bytes2sys(self._testdir))
3256 vlog("# Using TESTDIR", _bytes2sys(self._testdir))
3257 vlog("# Using RUNTESTDIR", _bytes2sys(osenvironb[b'RUNTESTDIR']))
3257 vlog("# Using RUNTESTDIR", _bytes2sys(osenvironb[b'RUNTESTDIR']))
3258 vlog("# Using HGTMP", _bytes2sys(self._hgtmp))
3258 vlog("# Using HGTMP", _bytes2sys(self._hgtmp))
3259 vlog("# Using PATH", os.environ["PATH"])
3259 vlog("# Using PATH", os.environ["PATH"])
3260 vlog(
3260 vlog(
3261 "# Using",
3261 "# Using",
3262 _bytes2sys(IMPL_PATH),
3262 _bytes2sys(IMPL_PATH),
3263 _bytes2sys(osenvironb[IMPL_PATH]),
3263 _bytes2sys(osenvironb[IMPL_PATH]),
3264 )
3264 )
3265 vlog("# Writing to directory", _bytes2sys(self._outputdir))
3265 vlog("# Writing to directory", _bytes2sys(self._outputdir))
3266
3266
3267 try:
3267 try:
3268 return self._runtests(testdescs) or 0
3268 return self._runtests(testdescs) or 0
3269 finally:
3269 finally:
3270 time.sleep(0.1)
3270 time.sleep(0.1)
3271 self._cleanup()
3271 self._cleanup()
3272
3272
3273 def findtests(self, args):
3273 def findtests(self, args):
3274 """Finds possible test files from arguments.
3274 """Finds possible test files from arguments.
3275
3275
3276 If you wish to inject custom tests into the test harness, this would
3276 If you wish to inject custom tests into the test harness, this would
3277 be a good function to monkeypatch or override in a derived class.
3277 be a good function to monkeypatch or override in a derived class.
3278 """
3278 """
3279 if not args:
3279 if not args:
3280 if self.options.changed:
3280 if self.options.changed:
3281 proc = Popen4(
3281 proc = Popen4(
3282 b'hg st --rev "%s" -man0 .'
3282 b'hg st --rev "%s" -man0 .'
3283 % _sys2bytes(self.options.changed),
3283 % _sys2bytes(self.options.changed),
3284 None,
3284 None,
3285 0,
3285 0,
3286 )
3286 )
3287 stdout, stderr = proc.communicate()
3287 stdout, stderr = proc.communicate()
3288 args = stdout.strip(b'\0').split(b'\0')
3288 args = stdout.strip(b'\0').split(b'\0')
3289 else:
3289 else:
3290 args = os.listdir(b'.')
3290 args = os.listdir(b'.')
3291
3291
3292 expanded_args = []
3292 expanded_args = []
3293 for arg in args:
3293 for arg in args:
3294 if os.path.isdir(arg):
3294 if os.path.isdir(arg):
3295 if not arg.endswith(b'/'):
3295 if not arg.endswith(b'/'):
3296 arg += b'/'
3296 arg += b'/'
3297 expanded_args.extend([arg + a for a in os.listdir(arg)])
3297 expanded_args.extend([arg + a for a in os.listdir(arg)])
3298 else:
3298 else:
3299 expanded_args.append(arg)
3299 expanded_args.append(arg)
3300 args = expanded_args
3300 args = expanded_args
3301
3301
3302 testcasepattern = re.compile(br'([\w-]+\.t|py)(?:#([a-zA-Z0-9_\-.#]+))')
3302 testcasepattern = re.compile(br'([\w-]+\.t|py)(?:#([a-zA-Z0-9_\-.#]+))')
3303 tests = []
3303 tests = []
3304 for t in args:
3304 for t in args:
3305 case = []
3305 case = []
3306
3306
3307 if not (
3307 if not (
3308 os.path.basename(t).startswith(b'test-')
3308 os.path.basename(t).startswith(b'test-')
3309 and (t.endswith(b'.py') or t.endswith(b'.t'))
3309 and (t.endswith(b'.py') or t.endswith(b'.t'))
3310 ):
3310 ):
3311
3311
3312 m = testcasepattern.match(os.path.basename(t))
3312 m = testcasepattern.match(os.path.basename(t))
3313 if m is not None:
3313 if m is not None:
3314 t_basename, casestr = m.groups()
3314 t_basename, casestr = m.groups()
3315 t = os.path.join(os.path.dirname(t), t_basename)
3315 t = os.path.join(os.path.dirname(t), t_basename)
3316 if casestr:
3316 if casestr:
3317 case = casestr.split(b'#')
3317 case = casestr.split(b'#')
3318 else:
3318 else:
3319 continue
3319 continue
3320
3320
3321 if t.endswith(b'.t'):
3321 if t.endswith(b'.t'):
3322 # .t file may contain multiple test cases
3322 # .t file may contain multiple test cases
3323 casedimensions = parsettestcases(t)
3323 casedimensions = parsettestcases(t)
3324 if casedimensions:
3324 if casedimensions:
3325 cases = []
3325 cases = []
3326
3326
3327 def addcases(case, casedimensions):
3327 def addcases(case, casedimensions):
3328 if not casedimensions:
3328 if not casedimensions:
3329 cases.append(case)
3329 cases.append(case)
3330 else:
3330 else:
3331 for c in casedimensions[0]:
3331 for c in casedimensions[0]:
3332 addcases(case + [c], casedimensions[1:])
3332 addcases(case + [c], casedimensions[1:])
3333
3333
3334 addcases([], casedimensions)
3334 addcases([], casedimensions)
3335 if case and case in cases:
3335 if case and case in cases:
3336 cases = [case]
3336 cases = [case]
3337 elif case:
3337 elif case:
3338 # Ignore invalid cases
3338 # Ignore invalid cases
3339 cases = []
3339 cases = []
3340 else:
3340 else:
3341 pass
3341 pass
3342 tests += [{'path': t, 'case': c} for c in sorted(cases)]
3342 tests += [{'path': t, 'case': c} for c in sorted(cases)]
3343 else:
3343 else:
3344 tests.append({'path': t})
3344 tests.append({'path': t})
3345 else:
3345 else:
3346 tests.append({'path': t})
3346 tests.append({'path': t})
3347
3347
3348 if self.options.retest:
3348 if self.options.retest:
3349 retest_args = []
3349 retest_args = []
3350 for test in tests:
3350 for test in tests:
3351 errpath = self._geterrpath(test)
3351 errpath = self._geterrpath(test)
3352 if os.path.exists(errpath):
3352 if os.path.exists(errpath):
3353 retest_args.append(test)
3353 retest_args.append(test)
3354 tests = retest_args
3354 tests = retest_args
3355 return tests
3355 return tests
3356
3356
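# findtests() above splits an argument such as b'test-foo.t#flat#icasefs'
# into the test file plus the selected case names, which later pick one
# combination of the "#testcases" dimensions declared inside the .t file.
# Test name and case names below are invented:
import re

testcasepattern = re.compile(br'([\w-]+\.t|py)(?:#([a-zA-Z0-9_\-.#]+))')
m = testcasepattern.match(b'test-foo.t#flat#icasefs')
assert m.groups() == (b'test-foo.t', b'flat#icasefs')
assert m.group(2).split(b'#') == [b'flat', b'icasefs']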
3357 def _runtests(self, testdescs):
3357 def _runtests(self, testdescs):
3358 def _reloadtest(test, i):
3358 def _reloadtest(test, i):
3359 # convert a test back to its description dict
3359 # convert a test back to its description dict
3360 desc = {'path': test.path}
3360 desc = {'path': test.path}
3361 case = getattr(test, '_case', [])
3361 case = getattr(test, '_case', [])
3362 if case:
3362 if case:
3363 desc['case'] = case
3363 desc['case'] = case
3364 return self._gettest(desc, i)
3364 return self._gettest(desc, i)
3365
3365
3366 try:
3366 try:
3367 if self.options.restart:
3367 if self.options.restart:
3368 orig = list(testdescs)
3368 orig = list(testdescs)
3369 while testdescs:
3369 while testdescs:
3370 desc = testdescs[0]
3370 desc = testdescs[0]
3371 errpath = self._geterrpath(desc)
3371 errpath = self._geterrpath(desc)
3372 if os.path.exists(errpath):
3372 if os.path.exists(errpath):
3373 break
3373 break
3374 testdescs.pop(0)
3374 testdescs.pop(0)
3375 if not testdescs:
3375 if not testdescs:
3376 print("running all tests")
3376 print("running all tests")
3377 testdescs = orig
3377 testdescs = orig
3378
3378
3379 tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]
3379 tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]
3380 num_tests = len(tests) * self.options.runs_per_test
3380 num_tests = len(tests) * self.options.runs_per_test
3381
3381
3382 jobs = min(num_tests, self.options.jobs)
3382 jobs = min(num_tests, self.options.jobs)
3383
3383
3384 failed = False
3384 failed = False
3385 kws = self.options.keywords
3385 kws = self.options.keywords
3386 if kws is not None and PYTHON3:
3386 if kws is not None and PYTHON3:
3387 kws = kws.encode('utf-8')
3387 kws = kws.encode('utf-8')
3388
3388
3389 suite = TestSuite(
3389 suite = TestSuite(
3390 self._testdir,
3390 self._testdir,
3391 jobs=jobs,
3391 jobs=jobs,
3392 whitelist=self.options.whitelisted,
3392 whitelist=self.options.whitelisted,
3393 blacklist=self.options.blacklist,
3393 blacklist=self.options.blacklist,
3394 keywords=kws,
3394 keywords=kws,
3395 loop=self.options.loop,
3395 loop=self.options.loop,
3396 runs_per_test=self.options.runs_per_test,
3396 runs_per_test=self.options.runs_per_test,
3397 showchannels=self.options.showchannels,
3397 showchannels=self.options.showchannels,
3398 tests=tests,
3398 tests=tests,
3399 loadtest=_reloadtest,
3399 loadtest=_reloadtest,
3400 )
3400 )
3401 verbosity = 1
3401 verbosity = 1
3402 if self.options.list_tests:
3402 if self.options.list_tests:
3403 verbosity = 0
3403 verbosity = 0
3404 elif self.options.verbose:
3404 elif self.options.verbose:
3405 verbosity = 2
3405 verbosity = 2
3406 runner = TextTestRunner(self, verbosity=verbosity)
3406 runner = TextTestRunner(self, verbosity=verbosity)
3407
3407
3408 if self.options.list_tests:
3408 if self.options.list_tests:
3409 result = runner.listtests(suite)
3409 result = runner.listtests(suite)
3410 else:
3410 else:
3411 if self._installdir:
3411 if self._installdir:
3412 self._installhg()
3412 self._installhg()
3413 self._checkhglib("Testing")
3413 self._checkhglib("Testing")
3414 else:
3414 else:
3415 self._usecorrectpython()
3415 self._usecorrectpython()
3416 if self.options.chg:
3416 if self.options.chg:
3417 assert self._installdir
3417 assert self._installdir
3418 self._installchg()
3418 self._installchg()
3419 if self.options.rhg:
3419 if self.options.rhg:
3420 assert self._installdir
3420 assert self._installdir
3421 self._installrhg()
3421 self._installrhg()
3422
3422
3423 log(
3423 log(
3424 'running %d tests using %d parallel processes'
3424 'running %d tests using %d parallel processes'
3425 % (num_tests, jobs)
3425 % (num_tests, jobs)
3426 )
3426 )
3427
3427
3428 result = runner.run(suite)
3428 result = runner.run(suite)
3429
3429
3430 if result.failures or result.errors:
3430 if result.failures or result.errors:
3431 failed = True
3431 failed = True
3432
3432
3433 result.onEnd()
3433 result.onEnd()
3434
3434
3435 if self.options.anycoverage:
3435 if self.options.anycoverage:
3436 self._outputcoverage()
3436 self._outputcoverage()
3437 except KeyboardInterrupt:
3437 except KeyboardInterrupt:
3438 failed = True
3438 failed = True
3439 print("\ninterrupted!")
3439 print("\ninterrupted!")
3440
3440
3441 if failed:
3441 if failed:
3442 return 1
3442 return 1
3443
3443
3444 def _geterrpath(self, test):
3444 def _geterrpath(self, test):
3445 # test['path'] is a relative path
3445 # test['path'] is a relative path
3446 if 'case' in test:
3446 if 'case' in test:
3447 # for multiple dimensions test cases
3447 # for multiple dimensions test cases
3448 casestr = b'#'.join(test['case'])
3448 casestr = b'#'.join(test['case'])
3449 errpath = b'%s#%s.err' % (test['path'], casestr)
3449 errpath = b'%s#%s.err' % (test['path'], casestr)
3450 else:
3450 else:
3451 errpath = b'%s.err' % test['path']
3451 errpath = b'%s.err' % test['path']
3452 if self.options.outputdir:
3452 if self.options.outputdir:
3453 self._outputdir = canonpath(_sys2bytes(self.options.outputdir))
3453 self._outputdir = canonpath(_sys2bytes(self.options.outputdir))
3454 errpath = os.path.join(self._outputdir, errpath)
3454 errpath = os.path.join(self._outputdir, errpath)
3455 return errpath
3455 return errpath
3456
3456
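For readers following `_geterrpath`, a small illustrative sketch of how the `.err` name is derived for a multi-case test; the paths and case names here are hypothetical, only the naming scheme mirrors the method above.

# Minimal sketch of the naming scheme used by _geterrpath (illustrative values).
import os

test = {'path': b'test-example.t', 'case': [b'case-a', b'case-b']}
casestr = b'#'.join(test['case'])
errpath = b'%s#%s.err' % (test['path'], casestr)
assert errpath == b'test-example.t#case-a#case-b.err'

# With --outputdir the name is simply re-rooted under that directory.
outputdir = b'/tmp/test-output'   # hypothetical directory
print(os.path.join(outputdir, errpath))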
3457 def _getport(self, count):
3457 def _getport(self, count):
3458 port = self._ports.get(count) # do we have a cached entry?
3458 port = self._ports.get(count) # do we have a cached entry?
3459 if port is None:
3459 if port is None:
3460 portneeded = 3
3460 portneeded = 3
3461 # after 100 tries we just give up and let the test report the failure
3461 # after 100 tries we just give up and let the test report the failure
3462 for tries in xrange(100):
3462 for tries in xrange(100):
3463 allfree = True
3463 allfree = True
3464 port = self.options.port + self._portoffset
3464 port = self.options.port + self._portoffset
3465 for idx in xrange(portneeded):
3465 for idx in xrange(portneeded):
3466 if not checkportisavailable(port + idx):
3466 if not checkportisavailable(port + idx):
3467 allfree = False
3467 allfree = False
3468 break
3468 break
3469 self._portoffset += portneeded
3469 self._portoffset += portneeded
3470 if allfree:
3470 if allfree:
3471 break
3471 break
3472 self._ports[count] = port
3472 self._ports[count] = port
3473 return port
3473 return port
3474
3474
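`_getport` reserves a block of three consecutive ports per test. The `checkportisavailable` helper it calls is defined elsewhere in run-tests.py and is not shown here; a minimal stand-in that captures the idea (bind-and-release on localhost, not the upstream implementation) could look like this.

# Stand-in for the port check referenced above (assumption: bind-and-release).
import socket

def checkportisavailable_sketch(port):
    # Try to bind the port on localhost; if the bind succeeds the port is free.
    try:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            s.bind(('localhost', port))
        return True
    except OSError:
        return False

# A test starting at port 20000 needs 20000, 20001 and 20002 to all be free.
start = 20000
print(all(checkportisavailable_sketch(start + idx) for idx in range(3)))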
3475 def _gettest(self, testdesc, count):
3475 def _gettest(self, testdesc, count):
3476 """Obtain a Test by looking at its filename.
3476 """Obtain a Test by looking at its filename.
3477
3477
3478 Returns a Test instance. The Test may not be runnable if it doesn't
3478 Returns a Test instance. The Test may not be runnable if it doesn't
3479 map to a known type.
3479 map to a known type.
3480 """
3480 """
3481 path = testdesc['path']
3481 path = testdesc['path']
3482 lctest = path.lower()
3482 lctest = path.lower()
3483 testcls = Test
3483 testcls = Test
3484
3484
3485 for ext, cls in self.TESTTYPES:
3485 for ext, cls in self.TESTTYPES:
3486 if lctest.endswith(ext):
3486 if lctest.endswith(ext):
3487 testcls = cls
3487 testcls = cls
3488 break
3488 break
3489
3489
3490 refpath = os.path.join(getcwdb(), path)
3490 refpath = os.path.join(getcwdb(), path)
3491 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
3491 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
3492
3492
3493 # extra keyword parameters. 'case' is used by .t tests
3493 # extra keyword parameters. 'case' is used by .t tests
3494 kwds = {k: testdesc[k] for k in ['case'] if k in testdesc}
3494 kwds = {k: testdesc[k] for k in ['case'] if k in testdesc}
3495
3495
3496 t = testcls(
3496 t = testcls(
3497 refpath,
3497 refpath,
3498 self._outputdir,
3498 self._outputdir,
3499 tmpdir,
3499 tmpdir,
3500 keeptmpdir=self.options.keep_tmpdir,
3500 keeptmpdir=self.options.keep_tmpdir,
3501 debug=self.options.debug,
3501 debug=self.options.debug,
3502 first=self.options.first,
3502 first=self.options.first,
3503 timeout=self.options.timeout,
3503 timeout=self.options.timeout,
3504 startport=self._getport(count),
3504 startport=self._getport(count),
3505 extraconfigopts=self.options.extra_config_opt,
3505 extraconfigopts=self.options.extra_config_opt,
3506 shell=self.options.shell,
3506 shell=self.options.shell,
3507 hgcommand=self._hgcommand,
3507 hgcommand=self._hgcommand,
3508 usechg=bool(self.options.with_chg or self.options.chg),
3508 usechg=bool(self.options.with_chg or self.options.chg),
3509 chgdebug=self.options.chg_debug,
3509 chgdebug=self.options.chg_debug,
3510 useipv6=useipv6,
3510 useipv6=useipv6,
3511 **kwds
3511 **kwds
3512 )
3512 )
3513 t.should_reload = True
3513 t.should_reload = True
3514 return t
3514 return t
3515
3515
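A test descriptor is just a small dict; `_gettest` picks the Test subclass from the file extension via the `TESTTYPES` table referenced above (its definition is not shown in this hunk) and forwards `case` when present. A hypothetical descriptor and a simplified dispatch, for illustration only:

# Hypothetical test descriptors as consumed by _gettest (illustrative only).
descs = [
    {'path': b'test-commit.t', 'case': [b'safe']},   # a .t test with one case
    {'path': b'test-filelog.py'},                    # a plain Python test
]

# Extension-based dispatch in the spirit of the TESTTYPES loop above;
# the class names are placeholders, not a claim about run-tests.py internals.
def pick_class_name(path):
    lower = path.lower()
    if lower.endswith(b'.t'):
        return 'TTest'
    if lower.endswith(b'.py'):
        return 'PythonTest'
    return 'Test'

for d in descs:
    print(d['path'], '->', pick_class_name(d['path']))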
3516 def _cleanup(self):
3516 def _cleanup(self):
3517 """Clean up state from this test invocation."""
3517 """Clean up state from this test invocation."""
3518 if self.options.keep_tmpdir:
3518 if self.options.keep_tmpdir:
3519 return
3519 return
3520
3520
3521 vlog("# Cleaning up HGTMP", _bytes2sys(self._hgtmp))
3521 vlog("# Cleaning up HGTMP", _bytes2sys(self._hgtmp))
3522 shutil.rmtree(self._hgtmp, True)
3522 shutil.rmtree(self._hgtmp, True)
3523 for f in self._createdfiles:
3523 for f in self._createdfiles:
3524 try:
3524 try:
3525 os.remove(f)
3525 os.remove(f)
3526 except OSError:
3526 except OSError:
3527 pass
3527 pass
3528
3528
3529 def _usecorrectpython(self):
3529 def _usecorrectpython(self):
3530 """Configure the environment to use the appropriate Python in tests."""
3530 """Configure the environment to use the appropriate Python in tests."""
3531 # Tests must use the same interpreter as us or bad things will happen.
3531 # Tests must use the same interpreter as us or bad things will happen.
3532 if sys.platform == 'win32':
3532 if sys.platform == 'win32':
3533 pyexename = b'python.exe'
3533 pyexe_names = [b'python', b'python.exe']
3534 elif sys.version_info[0] < 3:
3535 pyexe_names = [b'python', b'python2']
3534 else:
3536 else:
3535 pyexename = b'python3' # XXX this is wrong with python2...
3537 pyexe_names = [b'python', b'python3']
3536
3538
3537 # os.symlink() is a thing with py3 on Windows, but it requires
3539 # os.symlink() is a thing with py3 on Windows, but it requires
3538 # Administrator rights.
3540 # Administrator rights.
3539 if getattr(os, 'symlink', None) and os.name != 'nt':
3541 if getattr(os, 'symlink', None) and os.name != 'nt':
3540 msg = "# Making python executable in test path a symlink to '%s'"
3542 msg = "# Making python executable in test path a symlink to '%s'"
3541 msg %= sysexecutable
3543 msg %= sysexecutable
3542 vlog(msg)
3544 vlog(msg)
3543 for pyexename in [pyexename]:
3545 for pyexename in pyexe_names:
3544 mypython = os.path.join(self._tmpbindir, pyexename)
3546 mypython = os.path.join(self._tmpbindir, pyexename)
3545 try:
3547 try:
3546 if os.readlink(mypython) == sysexecutable:
3548 if os.readlink(mypython) == sysexecutable:
3547 continue
3549 continue
3548 os.unlink(mypython)
3550 os.unlink(mypython)
3549 except OSError as err:
3551 except OSError as err:
3550 if err.errno != errno.ENOENT:
3552 if err.errno != errno.ENOENT:
3551 raise
3553 raise
3552 if self._findprogram(pyexename) != sysexecutable:
3554 if self._findprogram(pyexename) != sysexecutable:
3553 try:
3555 try:
3554 os.symlink(sysexecutable, mypython)
3556 os.symlink(sysexecutable, mypython)
3555 self._createdfiles.append(mypython)
3557 self._createdfiles.append(mypython)
3556 except OSError as err:
3558 except OSError as err:
3557 # child processes may race, which is harmless
3559 # child processes may race, which is harmless
3558 if err.errno != errno.EEXIST:
3560 if err.errno != errno.EEXIST:
3559 raise
3561 raise
3560 else:
3562 else:
3561 # Windows doesn't have `python3.exe`, and MSYS cannot understand the
3563 # Windows doesn't have `python3.exe`, and MSYS cannot understand the
3562 # reparse point with that name provided by Microsoft. Create a
3564 # reparse point with that name provided by Microsoft. Create a
3563 # simple script on PATH with that name that delegates to the py3
3565 # simple script on PATH with that name that delegates to the py3
3564 # launcher so the shebang lines work.
3566 # launcher so the shebang lines work.
3565 if os.getenv('MSYSTEM'):
3567 if os.getenv('MSYSTEM'):
3566 with open(osenvironb[b'RUNTESTDIR'] + b'/python3', 'wb') as f:
3568 with open(osenvironb[b'RUNTESTDIR'] + b'/python3', 'wb') as f:
3567 f.write(b'#!/bin/sh\n')
3569 f.write(b'#!/bin/sh\n')
3568 f.write(b'py -3.%d "$@"\n' % sys.version_info[1])
3570 f.write(b'py -3.%d "$@"\n' % sys.version_info[1])
3571 if os.getenv('MSYSTEM'):
3572 with open(osenvironb[b'RUNTESTDIR'] + b'/python2', 'wb') as f:
3573 f.write(b'#!/bin/sh\n')
3574 f.write(b'py -2.%d "$@"\n' % sys.version_info[1])
3569
3575
3570 exedir, exename = os.path.split(sysexecutable)
3576 exedir, exename = os.path.split(sysexecutable)
3577 for pyexename in pyexe_names:
3571 msg = "# Modifying search path to find %s as %s in '%s'"
3578 msg = "# Modifying search path to find %s as %s in '%s'"
3572 msg %= (exename, pyexename, exedir)
3579 msg %= (exename, pyexename, exedir)
3573 vlog(msg)
3580 vlog(msg)
3574 path = os.environ['PATH'].split(os.pathsep)
3581 path = os.environ['PATH'].split(os.pathsep)
3575 while exedir in path:
3582 while exedir in path:
3576 path.remove(exedir)
3583 path.remove(exedir)
3577
3584
3578 # Binaries installed by pip into the user area like pylint.exe may
3585 # Binaries installed by pip into the user area like pylint.exe may
3579 # not be in PATH by default.
3586 # not be in PATH by default.
3580 extra_paths = [exedir]
3587 extra_paths = [exedir]
3581 vi = sys.version_info
3588 vi = sys.version_info
3582 appdata = os.environ.get('APPDATA')
3589 appdata = os.environ.get('APPDATA')
3583 if appdata is not None:
3590 if appdata is not None:
3584 scripts_dir = os.path.join(
3591 scripts_dir = os.path.join(
3585 appdata,
3592 appdata,
3586 'Python',
3593 'Python',
3587 'Python%d%d' % (vi[0], vi[1]),
3594 'Python%d%d' % (vi[0], vi[1]),
3588 'Scripts',
3595 'Scripts',
3589 )
3596 )
3590
3597
3591 if vi.major == 2:
3598 if vi.major == 2:
3592 scripts_dir = os.path.join(
3599 scripts_dir = os.path.join(
3593 appdata,
3600 appdata,
3594 'Python',
3601 'Python',
3595 'Scripts',
3602 'Scripts',
3596 )
3603 )
3597
3604
3598 extra_paths.append(scripts_dir)
3605 extra_paths.append(scripts_dir)
3599
3606
3600 os.environ['PATH'] = os.pathsep.join(extra_paths + path)
3607 os.environ['PATH'] = os.pathsep.join(extra_paths + path)
3608 for pyexename in pyexe_names:
3601 if not self._findprogram(pyexename):
3609 if not self._findprogram(pyexename):
3602 print("WARNING: Cannot find %s in search path" % pyexename)
3610 print("WARNING: Cannot find %s in search path" % pyexename)
3603
3611
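The change in `_usecorrectpython` above is the heart of this patch: the running interpreter is now published under both the generic name and its versioned name (`python` plus `python2` or `python3`), instead of always creating a `python3` alias. A side-effect-free sketch of just the naming logic, with a hypothetical bin directory:

# Sketch of the pyexe_names selection only (no filesystem side effects); the
# real method also handles Windows and the MSYS `py` launcher wrappers.
import os
import sys

if sys.version_info[0] < 3:
    pyexe_names = [b'python', b'python2']
else:
    pyexe_names = [b'python', b'python3']

tmpbindir = b'/tmp/hgtests/bin'   # hypothetical test bin directory
for name in pyexe_names:
    print(os.path.join(tmpbindir, name), b'->', sys.executable.encode())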
3604 def _installhg(self):
3612 def _installhg(self):
3605 """Install hg into the test environment.
3613 """Install hg into the test environment.
3606
3614
3607 This will also configure hg with the appropriate testing settings.
3615 This will also configure hg with the appropriate testing settings.
3608 """
3616 """
3609 vlog("# Performing temporary installation of HG")
3617 vlog("# Performing temporary installation of HG")
3610 installerrs = os.path.join(self._hgtmp, b"install.err")
3618 installerrs = os.path.join(self._hgtmp, b"install.err")
3611 compiler = ''
3619 compiler = ''
3612 if self.options.compiler:
3620 if self.options.compiler:
3613 compiler = '--compiler ' + self.options.compiler
3621 compiler = '--compiler ' + self.options.compiler
3614 setup_opts = b""
3622 setup_opts = b""
3615 if self.options.pure:
3623 if self.options.pure:
3616 setup_opts = b"--pure"
3624 setup_opts = b"--pure"
3617 elif self.options.rust:
3625 elif self.options.rust:
3618 setup_opts = b"--rust"
3626 setup_opts = b"--rust"
3619 elif self.options.no_rust:
3627 elif self.options.no_rust:
3620 setup_opts = b"--no-rust"
3628 setup_opts = b"--no-rust"
3621
3629
3622 # Run installer in hg root
3630 # Run installer in hg root
3623 script = os.path.realpath(sys.argv[0])
3631 script = os.path.realpath(sys.argv[0])
3624 exe = sysexecutable
3632 exe = sysexecutable
3625 if PYTHON3:
3633 if PYTHON3:
3626 compiler = _sys2bytes(compiler)
3634 compiler = _sys2bytes(compiler)
3627 script = _sys2bytes(script)
3635 script = _sys2bytes(script)
3628 exe = _sys2bytes(exe)
3636 exe = _sys2bytes(exe)
3629 hgroot = os.path.dirname(os.path.dirname(script))
3637 hgroot = os.path.dirname(os.path.dirname(script))
3630 self._hgroot = hgroot
3638 self._hgroot = hgroot
3631 os.chdir(hgroot)
3639 os.chdir(hgroot)
3632 nohome = b'--home=""'
3640 nohome = b'--home=""'
3633 if os.name == 'nt':
3641 if os.name == 'nt':
3634 # The --home="" trick works only on OSes where os.sep == '/'
3642 # The --home="" trick works only on OSes where os.sep == '/'
3635 # because of a distutils convert_path() fast-path. Avoid it at
3643 # because of a distutils convert_path() fast-path. Avoid it at
3636 # least on Windows for now, deal with .pydistutils.cfg bugs
3644 # least on Windows for now, deal with .pydistutils.cfg bugs
3637 # when they happen.
3645 # when they happen.
3638 nohome = b''
3646 nohome = b''
3639 cmd = (
3647 cmd = (
3640 b'"%(exe)s" setup.py %(setup_opts)s clean --all'
3648 b'"%(exe)s" setup.py %(setup_opts)s clean --all'
3641 b' build %(compiler)s --build-base="%(base)s"'
3649 b' build %(compiler)s --build-base="%(base)s"'
3642 b' install --force --prefix="%(prefix)s"'
3650 b' install --force --prefix="%(prefix)s"'
3643 b' --install-lib="%(libdir)s"'
3651 b' --install-lib="%(libdir)s"'
3644 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
3652 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
3645 % {
3653 % {
3646 b'exe': exe,
3654 b'exe': exe,
3647 b'setup_opts': setup_opts,
3655 b'setup_opts': setup_opts,
3648 b'compiler': compiler,
3656 b'compiler': compiler,
3649 b'base': os.path.join(self._hgtmp, b"build"),
3657 b'base': os.path.join(self._hgtmp, b"build"),
3650 b'prefix': self._installdir,
3658 b'prefix': self._installdir,
3651 b'libdir': self._pythondir,
3659 b'libdir': self._pythondir,
3652 b'bindir': self._bindir,
3660 b'bindir': self._bindir,
3653 b'nohome': nohome,
3661 b'nohome': nohome,
3654 b'logfile': installerrs,
3662 b'logfile': installerrs,
3655 }
3663 }
3656 )
3664 )
3657
3665
3658 # setuptools requires install directories to exist.
3666 # setuptools requires install directories to exist.
3659 def makedirs(p):
3667 def makedirs(p):
3660 try:
3668 try:
3661 os.makedirs(p)
3669 os.makedirs(p)
3662 except OSError as e:
3670 except OSError as e:
3663 if e.errno != errno.EEXIST:
3671 if e.errno != errno.EEXIST:
3664 raise
3672 raise
3665
3673
3666 makedirs(self._pythondir)
3674 makedirs(self._pythondir)
3667 makedirs(self._bindir)
3675 makedirs(self._bindir)
3668
3676
3669 vlog("# Running", cmd.decode("utf-8"))
3677 vlog("# Running", cmd.decode("utf-8"))
3670 if subprocess.call(_bytes2sys(cmd), shell=True) == 0:
3678 if subprocess.call(_bytes2sys(cmd), shell=True) == 0:
3671 if not self.options.verbose:
3679 if not self.options.verbose:
3672 try:
3680 try:
3673 os.remove(installerrs)
3681 os.remove(installerrs)
3674 except OSError as e:
3682 except OSError as e:
3675 if e.errno != errno.ENOENT:
3683 if e.errno != errno.ENOENT:
3676 raise
3684 raise
3677 else:
3685 else:
3678 with open(installerrs, 'rb') as f:
3686 with open(installerrs, 'rb') as f:
3679 for line in f:
3687 for line in f:
3680 if PYTHON3:
3688 if PYTHON3:
3681 sys.stdout.buffer.write(line)
3689 sys.stdout.buffer.write(line)
3682 else:
3690 else:
3683 sys.stdout.write(line)
3691 sys.stdout.write(line)
3684 sys.exit(1)
3692 sys.exit(1)
3685 os.chdir(self._testdir)
3693 os.chdir(self._testdir)
3686
3694
3687 self._usecorrectpython()
3695 self._usecorrectpython()
3688
3696
3689 hgbat = os.path.join(self._bindir, b'hg.bat')
3697 hgbat = os.path.join(self._bindir, b'hg.bat')
3690 if os.path.isfile(hgbat):
3698 if os.path.isfile(hgbat):
3691 # hg.bat expects to be put in bin/scripts while run-tests.py
3699 # hg.bat expects to be put in bin/scripts while run-tests.py
3692 # installation layout puts it in bin/ directly. Fix it
3700 # installation layout puts it in bin/ directly. Fix it
3693 with open(hgbat, 'rb') as f:
3701 with open(hgbat, 'rb') as f:
3694 data = f.read()
3702 data = f.read()
3695 if br'"%~dp0..\python" "%~dp0hg" %*' in data:
3703 if br'"%~dp0..\python" "%~dp0hg" %*' in data:
3696 data = data.replace(
3704 data = data.replace(
3697 br'"%~dp0..\python" "%~dp0hg" %*',
3705 br'"%~dp0..\python" "%~dp0hg" %*',
3698 b'"%~dp0python" "%~dp0hg" %*',
3706 b'"%~dp0python" "%~dp0hg" %*',
3699 )
3707 )
3700 with open(hgbat, 'wb') as f:
3708 with open(hgbat, 'wb') as f:
3701 f.write(data)
3709 f.write(data)
3702 else:
3710 else:
3703 print('WARNING: cannot fix hg.bat reference to python.exe')
3711 print('WARNING: cannot fix hg.bat reference to python.exe')
3704
3712
3705 if self.options.anycoverage:
3713 if self.options.anycoverage:
3706 custom = os.path.join(
3714 custom = os.path.join(
3707 osenvironb[b'RUNTESTDIR'], b'sitecustomize.py'
3715 osenvironb[b'RUNTESTDIR'], b'sitecustomize.py'
3708 )
3716 )
3709 target = os.path.join(self._pythondir, b'sitecustomize.py')
3717 target = os.path.join(self._pythondir, b'sitecustomize.py')
3710 vlog('# Installing coverage trigger to %s' % target)
3718 vlog('# Installing coverage trigger to %s' % target)
3711 shutil.copyfile(custom, target)
3719 shutil.copyfile(custom, target)
3712 rc = os.path.join(self._testdir, b'.coveragerc')
3720 rc = os.path.join(self._testdir, b'.coveragerc')
3713 vlog('# Installing coverage rc to %s' % rc)
3721 vlog('# Installing coverage rc to %s' % rc)
3714 osenvironb[b'COVERAGE_PROCESS_START'] = rc
3722 osenvironb[b'COVERAGE_PROCESS_START'] = rc
3715 covdir = os.path.join(self._installdir, b'..', b'coverage')
3723 covdir = os.path.join(self._installdir, b'..', b'coverage')
3716 try:
3724 try:
3717 os.mkdir(covdir)
3725 os.mkdir(covdir)
3718 except OSError as e:
3726 except OSError as e:
3719 if e.errno != errno.EEXIST:
3727 if e.errno != errno.EEXIST:
3720 raise
3728 raise
3721
3729
3722 osenvironb[b'COVERAGE_DIR'] = covdir
3730 osenvironb[b'COVERAGE_DIR'] = covdir
3723
3731
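`_installhg` builds one long shell command through bytes %-formatting with a mapping, as shown above. A much-reduced sketch of the same pattern, with hypothetical paths standing in for the real temporary directories:

# Reduced sketch of the bytes %-formatting used to build the install command.
# All paths here are hypothetical placeholders, not the values run-tests uses.
cmd = (
    b'"%(exe)s" setup.py clean --all'
    b' build --build-base="%(base)s"'
    b' install --force --prefix="%(prefix)s" >%(logfile)s 2>&1'
    % {
        b'exe': b'/usr/bin/python3',
        b'base': b'/tmp/hgtests/build',
        b'prefix': b'/tmp/hgtests/install',
        b'logfile': b'/tmp/hgtests/install.err',
    }
)
print(cmd.decode('utf-8'))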
3724 def _checkhglib(self, verb):
3732 def _checkhglib(self, verb):
3725 """Ensure that the 'mercurial' package imported by python is
3733 """Ensure that the 'mercurial' package imported by python is
3726 the one we expect it to be. If not, print a warning to stderr."""
3734 the one we expect it to be. If not, print a warning to stderr."""
3727 if (self._bindir == self._pythondir) and (
3735 if (self._bindir == self._pythondir) and (
3728 self._bindir != self._tmpbindir
3736 self._bindir != self._tmpbindir
3729 ):
3737 ):
3730 # The pythondir has been inferred from --with-hg flag.
3738 # The pythondir has been inferred from --with-hg flag.
3731 # We cannot expect anything sensible here.
3739 # We cannot expect anything sensible here.
3732 return
3740 return
3733 expecthg = os.path.join(self._pythondir, b'mercurial')
3741 expecthg = os.path.join(self._pythondir, b'mercurial')
3734 actualhg = self._gethgpath()
3742 actualhg = self._gethgpath()
3735 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
3743 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
3736 sys.stderr.write(
3744 sys.stderr.write(
3737 'warning: %s with unexpected mercurial lib: %s\n'
3745 'warning: %s with unexpected mercurial lib: %s\n'
3738 ' (expected %s)\n' % (verb, actualhg, expecthg)
3746 ' (expected %s)\n' % (verb, actualhg, expecthg)
3739 )
3747 )
3740
3748
3741 def _gethgpath(self):
3749 def _gethgpath(self):
3742 """Return the path to the mercurial package that is actually found by
3750 """Return the path to the mercurial package that is actually found by
3743 the current Python interpreter."""
3751 the current Python interpreter."""
3744 if self._hgpath is not None:
3752 if self._hgpath is not None:
3745 return self._hgpath
3753 return self._hgpath
3746
3754
3747 cmd = b'"%s" -c "import mercurial; print (mercurial.__path__[0])"'
3755 cmd = b'"%s" -c "import mercurial; print (mercurial.__path__[0])"'
3748 cmd = cmd % PYTHON
3756 cmd = cmd % PYTHON
3749 if PYTHON3:
3757 if PYTHON3:
3750 cmd = _bytes2sys(cmd)
3758 cmd = _bytes2sys(cmd)
3751
3759
3752 p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
3760 p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
3753 out, err = p.communicate()
3761 out, err = p.communicate()
3754
3762
3755 self._hgpath = out.strip()
3763 self._hgpath = out.strip()
3756
3764
3757 return self._hgpath
3765 return self._hgpath
3758
3766
3759 def _installchg(self):
3767 def _installchg(self):
3760 """Install chg into the test environment"""
3768 """Install chg into the test environment"""
3761 vlog('# Performing temporary installation of CHG')
3769 vlog('# Performing temporary installation of CHG')
3762 assert os.path.dirname(self._bindir) == self._installdir
3770 assert os.path.dirname(self._bindir) == self._installdir
3763 assert self._hgroot, 'must be called after _installhg()'
3771 assert self._hgroot, 'must be called after _installhg()'
3764 cmd = b'"%(make)s" clean install PREFIX="%(prefix)s"' % {
3772 cmd = b'"%(make)s" clean install PREFIX="%(prefix)s"' % {
3765 b'make': b'make', # TODO: switch by option or environment?
3773 b'make': b'make', # TODO: switch by option or environment?
3766 b'prefix': self._installdir,
3774 b'prefix': self._installdir,
3767 }
3775 }
3768 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
3776 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
3769 vlog("# Running", cmd)
3777 vlog("# Running", cmd)
3770 proc = subprocess.Popen(
3778 proc = subprocess.Popen(
3771 cmd,
3779 cmd,
3772 shell=True,
3780 shell=True,
3773 cwd=cwd,
3781 cwd=cwd,
3774 stdin=subprocess.PIPE,
3782 stdin=subprocess.PIPE,
3775 stdout=subprocess.PIPE,
3783 stdout=subprocess.PIPE,
3776 stderr=subprocess.STDOUT,
3784 stderr=subprocess.STDOUT,
3777 )
3785 )
3778 out, _err = proc.communicate()
3786 out, _err = proc.communicate()
3779 if proc.returncode != 0:
3787 if proc.returncode != 0:
3780 if PYTHON3:
3788 if PYTHON3:
3781 sys.stdout.buffer.write(out)
3789 sys.stdout.buffer.write(out)
3782 else:
3790 else:
3783 sys.stdout.write(out)
3791 sys.stdout.write(out)
3784 sys.exit(1)
3792 sys.exit(1)
3785
3793
3786 def _installrhg(self):
3794 def _installrhg(self):
3787 """Install rhg into the test environment"""
3795 """Install rhg into the test environment"""
3788 vlog('# Performing temporary installation of rhg')
3796 vlog('# Performing temporary installation of rhg')
3789 assert os.path.dirname(self._bindir) == self._installdir
3797 assert os.path.dirname(self._bindir) == self._installdir
3790 assert self._hgroot, 'must be called after _installhg()'
3798 assert self._hgroot, 'must be called after _installhg()'
3791 cmd = b'"%(make)s" install-rhg PREFIX="%(prefix)s"' % {
3799 cmd = b'"%(make)s" install-rhg PREFIX="%(prefix)s"' % {
3792 b'make': b'make', # TODO: switch by option or environment?
3800 b'make': b'make', # TODO: switch by option or environment?
3793 b'prefix': self._installdir,
3801 b'prefix': self._installdir,
3794 }
3802 }
3795 cwd = self._hgroot
3803 cwd = self._hgroot
3796 vlog("# Running", cmd)
3804 vlog("# Running", cmd)
3797 proc = subprocess.Popen(
3805 proc = subprocess.Popen(
3798 cmd,
3806 cmd,
3799 shell=True,
3807 shell=True,
3800 cwd=cwd,
3808 cwd=cwd,
3801 stdin=subprocess.PIPE,
3809 stdin=subprocess.PIPE,
3802 stdout=subprocess.PIPE,
3810 stdout=subprocess.PIPE,
3803 stderr=subprocess.STDOUT,
3811 stderr=subprocess.STDOUT,
3804 )
3812 )
3805 out, _err = proc.communicate()
3813 out, _err = proc.communicate()
3806 if proc.returncode != 0:
3814 if proc.returncode != 0:
3807 if PYTHON3:
3815 if PYTHON3:
3808 sys.stdout.buffer.write(out)
3816 sys.stdout.buffer.write(out)
3809 else:
3817 else:
3810 sys.stdout.write(out)
3818 sys.stdout.write(out)
3811 sys.exit(1)
3819 sys.exit(1)
3812
3820
3813 def _outputcoverage(self):
3821 def _outputcoverage(self):
3814 """Produce code coverage output."""
3822 """Produce code coverage output."""
3815 import coverage
3823 import coverage
3816
3824
3817 coverage = coverage.coverage
3825 coverage = coverage.coverage
3818
3826
3819 vlog('# Producing coverage report')
3827 vlog('# Producing coverage report')
3820 # chdir is the easiest way to get short, relative paths in the
3828 # chdir is the easiest way to get short, relative paths in the
3821 # output.
3829 # output.
3822 os.chdir(self._hgroot)
3830 os.chdir(self._hgroot)
3823 covdir = os.path.join(_bytes2sys(self._installdir), '..', 'coverage')
3831 covdir = os.path.join(_bytes2sys(self._installdir), '..', 'coverage')
3824 cov = coverage(data_file=os.path.join(covdir, 'cov'))
3832 cov = coverage(data_file=os.path.join(covdir, 'cov'))
3825
3833
3826 # Map install directory paths back to source directory.
3834 # Map install directory paths back to source directory.
3827 cov.config.paths['srcdir'] = ['.', _bytes2sys(self._pythondir)]
3835 cov.config.paths['srcdir'] = ['.', _bytes2sys(self._pythondir)]
3828
3836
3829 cov.combine()
3837 cov.combine()
3830
3838
3831 omit = [
3839 omit = [
3832 _bytes2sys(os.path.join(x, b'*'))
3840 _bytes2sys(os.path.join(x, b'*'))
3833 for x in [self._bindir, self._testdir]
3841 for x in [self._bindir, self._testdir]
3834 ]
3842 ]
3835 cov.report(ignore_errors=True, omit=omit)
3843 cov.report(ignore_errors=True, omit=omit)
3836
3844
3837 if self.options.htmlcov:
3845 if self.options.htmlcov:
3838 htmldir = os.path.join(_bytes2sys(self._outputdir), 'htmlcov')
3846 htmldir = os.path.join(_bytes2sys(self._outputdir), 'htmlcov')
3839 cov.html_report(directory=htmldir, omit=omit)
3847 cov.html_report(directory=htmldir, omit=omit)
3840 if self.options.annotate:
3848 if self.options.annotate:
3841 adir = os.path.join(_bytes2sys(self._outputdir), 'annotated')
3849 adir = os.path.join(_bytes2sys(self._outputdir), 'annotated')
3842 if not os.path.isdir(adir):
3850 if not os.path.isdir(adir):
3843 os.mkdir(adir)
3851 os.mkdir(adir)
3844 cov.annotate(directory=adir, omit=omit)
3852 cov.annotate(directory=adir, omit=omit)
3845
3853
3846 def _findprogram(self, program):
3854 def _findprogram(self, program):
3847 """Search PATH for an executable program"""
3855 """Search PATH for an executable program"""
3848 dpb = _sys2bytes(os.defpath)
3856 dpb = _sys2bytes(os.defpath)
3849 sepb = _sys2bytes(os.pathsep)
3857 sepb = _sys2bytes(os.pathsep)
3850 for p in osenvironb.get(b'PATH', dpb).split(sepb):
3858 for p in osenvironb.get(b'PATH', dpb).split(sepb):
3851 name = os.path.join(p, program)
3859 name = os.path.join(p, program)
3852 if os.name == 'nt' or os.access(name, os.X_OK):
3860 if os.name == 'nt' or os.access(name, os.X_OK):
3853 return _bytes2sys(name)
3861 return _bytes2sys(name)
3854 return None
3862 return None
3855
3863
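`_findprogram` is essentially a bytes-aware PATH lookup. For comparison only, the same idea expressed with the standard library; this is not what run-tests.py uses, since it needs bytes paths and the `osenvironb` mapping:

# Rough str-based equivalent of _findprogram using the standard library.
import shutil

for prog in ('sh', 'hg', 'definitely-not-installed'):
    print(prog, '->', shutil.which(prog))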
3856 def _checktools(self):
3864 def _checktools(self):
3857 """Ensure tools required to run tests are present."""
3865 """Ensure tools required to run tests are present."""
3858 for p in self.REQUIREDTOOLS:
3866 for p in self.REQUIREDTOOLS:
3859 if os.name == 'nt' and not p.endswith(b'.exe'):
3867 if os.name == 'nt' and not p.endswith(b'.exe'):
3860 p += b'.exe'
3868 p += b'.exe'
3861 found = self._findprogram(p)
3869 found = self._findprogram(p)
3862 p = p.decode("utf-8")
3870 p = p.decode("utf-8")
3863 if found:
3871 if found:
3864 vlog("# Found prerequisite", p, "at", found)
3872 vlog("# Found prerequisite", p, "at", found)
3865 else:
3873 else:
3866 print("WARNING: Did not find prerequisite tool: %s " % p)
3874 print("WARNING: Did not find prerequisite tool: %s " % p)
3867
3875
3868
3876
3869 def aggregateexceptions(path):
3877 def aggregateexceptions(path):
3870 exceptioncounts = collections.Counter()
3878 exceptioncounts = collections.Counter()
3871 testsbyfailure = collections.defaultdict(set)
3879 testsbyfailure = collections.defaultdict(set)
3872 failuresbytest = collections.defaultdict(set)
3880 failuresbytest = collections.defaultdict(set)
3873
3881
3874 for f in os.listdir(path):
3882 for f in os.listdir(path):
3875 with open(os.path.join(path, f), 'rb') as fh:
3883 with open(os.path.join(path, f), 'rb') as fh:
3876 data = fh.read().split(b'\0')
3884 data = fh.read().split(b'\0')
3877 if len(data) != 5:
3885 if len(data) != 5:
3878 continue
3886 continue
3879
3887
3880 exc, mainframe, hgframe, hgline, testname = data
3888 exc, mainframe, hgframe, hgline, testname = data
3881 exc = exc.decode('utf-8')
3889 exc = exc.decode('utf-8')
3882 mainframe = mainframe.decode('utf-8')
3890 mainframe = mainframe.decode('utf-8')
3883 hgframe = hgframe.decode('utf-8')
3891 hgframe = hgframe.decode('utf-8')
3884 hgline = hgline.decode('utf-8')
3892 hgline = hgline.decode('utf-8')
3885 testname = testname.decode('utf-8')
3893 testname = testname.decode('utf-8')
3886
3894
3887 key = (hgframe, hgline, exc)
3895 key = (hgframe, hgline, exc)
3888 exceptioncounts[key] += 1
3896 exceptioncounts[key] += 1
3889 testsbyfailure[key].add(testname)
3897 testsbyfailure[key].add(testname)
3890 failuresbytest[testname].add(key)
3898 failuresbytest[testname].add(key)
3891
3899
3892 # Find test having fewest failures for each failure.
3900 # Find test having fewest failures for each failure.
3893 leastfailing = {}
3901 leastfailing = {}
3894 for key, tests in testsbyfailure.items():
3902 for key, tests in testsbyfailure.items():
3895 fewesttest = None
3903 fewesttest = None
3896 fewestcount = 99999999
3904 fewestcount = 99999999
3897 for test in sorted(tests):
3905 for test in sorted(tests):
3898 if len(failuresbytest[test]) < fewestcount:
3906 if len(failuresbytest[test]) < fewestcount:
3899 fewesttest = test
3907 fewesttest = test
3900 fewestcount = len(failuresbytest[test])
3908 fewestcount = len(failuresbytest[test])
3901
3909
3902 leastfailing[key] = (fewestcount, fewesttest)
3910 leastfailing[key] = (fewestcount, fewesttest)
3903
3911
3904 # Create a combined counter so we can sort by total occurrences and
3912 # Create a combined counter so we can sort by total occurrences and
3905 # impacted tests.
3913 # impacted tests.
3906 combined = {}
3914 combined = {}
3907 for key in exceptioncounts:
3915 for key in exceptioncounts:
3908 combined[key] = (
3916 combined[key] = (
3909 exceptioncounts[key],
3917 exceptioncounts[key],
3910 len(testsbyfailure[key]),
3918 len(testsbyfailure[key]),
3911 leastfailing[key][0],
3919 leastfailing[key][0],
3912 leastfailing[key][1],
3920 leastfailing[key][1],
3913 )
3921 )
3914
3922
3915 return {
3923 return {
3916 'exceptioncounts': exceptioncounts,
3924 'exceptioncounts': exceptioncounts,
3917 'total': sum(exceptioncounts.values()),
3925 'total': sum(exceptioncounts.values()),
3918 'combined': combined,
3926 'combined': combined,
3919 'leastfailing': leastfailing,
3927 'leastfailing': leastfailing,
3920 'byfailure': testsbyfailure,
3928 'byfailure': testsbyfailure,
3921 'bytest': failuresbytest,
3929 'bytest': failuresbytest,
3922 }
3930 }
3923
3931
3924
3932
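`aggregateexceptions` expects each file in the directory to hold exactly five NUL-separated fields, as the `len(data) != 5` guard above shows. A hypothetical record (all values invented for illustration) makes the format concrete:

# Hypothetical exception record in the five-field, NUL-separated format that
# aggregateexceptions() parses: exc, mainframe, hgframe, hgline, testname.
record = b'\0'.join(
    [
        b'ValueError',
        b'mercurial/commands.py:commit',
        b'mercurial/localrepo.py:commitctx',
        b'localrepo.py:3021',
        b'test-commit.t',
    ]
)
fields = record.split(b'\0')
assert len(fields) == 5
exc, mainframe, hgframe, hgline, testname = fields
print((hgframe, hgline, exc), '<-', testname)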
3925 if __name__ == '__main__':
3933 if __name__ == '__main__':
3926 runner = TestRunner()
3934 runner = TestRunner()
3927
3935
3928 try:
3936 try:
3929 import msvcrt
3937 import msvcrt
3930
3938
3931 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
3939 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
3932 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
3940 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
3933 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
3941 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
3934 except ImportError:
3942 except ImportError:
3935 pass
3943 pass
3936
3944
3937 sys.exit(runner.run(sys.argv[1:]))
3945 sys.exit(runner.run(sys.argv[1:]))
@@ -1,65 +1,65
1 #!/usr/bin/env python3
1 #!/usr/bin/env python
2 """
2 """
3 Tests the behavior of filelog w.r.t. data starting with '\1\n'
3 Tests the behavior of filelog w.r.t. data starting with '\1\n'
4 """
4 """
5 from __future__ import absolute_import, print_function
5 from __future__ import absolute_import, print_function
6
6
7 from mercurial.node import hex
7 from mercurial.node import hex
8 from mercurial import (
8 from mercurial import (
9 hg,
9 hg,
10 ui as uimod,
10 ui as uimod,
11 )
11 )
12
12
13 myui = uimod.ui.load()
13 myui = uimod.ui.load()
14 repo = hg.repository(myui, path=b'.', create=True)
14 repo = hg.repository(myui, path=b'.', create=True)
15
15
16 fl = repo.file(b'foobar')
16 fl = repo.file(b'foobar')
17
17
18
18
19 def addrev(text, renamed=False):
19 def addrev(text, renamed=False):
20 if renamed:
20 if renamed:
21 # data doesn't matter. Just make sure filelog.renamed() returns True
21 # data doesn't matter. Just make sure filelog.renamed() returns True
22 meta = {b'copyrev': hex(repo.nullid), b'copy': b'bar'}
22 meta = {b'copyrev': hex(repo.nullid), b'copy': b'bar'}
23 else:
23 else:
24 meta = {}
24 meta = {}
25
25
26 lock = t = None
26 lock = t = None
27 try:
27 try:
28 lock = repo.lock()
28 lock = repo.lock()
29 t = repo.transaction(b'commit')
29 t = repo.transaction(b'commit')
30 node = fl.add(text, meta, t, 0, repo.nullid, repo.nullid)
30 node = fl.add(text, meta, t, 0, repo.nullid, repo.nullid)
31 return node
31 return node
32 finally:
32 finally:
33 if t:
33 if t:
34 t.close()
34 t.close()
35 if lock:
35 if lock:
36 lock.release()
36 lock.release()
37
37
38
38
39 def error(text):
39 def error(text):
40 print('ERROR: ' + text)
40 print('ERROR: ' + text)
41
41
42
42
43 textwith = b'\1\nfoo'
43 textwith = b'\1\nfoo'
44 without = b'foo'
44 without = b'foo'
45
45
46 node = addrev(textwith)
46 node = addrev(textwith)
47 if not textwith == fl.read(node):
47 if not textwith == fl.read(node):
48 error('filelog.read for data starting with \\1\\n')
48 error('filelog.read for data starting with \\1\\n')
49 if fl.cmp(node, textwith) or not fl.cmp(node, without):
49 if fl.cmp(node, textwith) or not fl.cmp(node, without):
50 error('filelog.cmp for data starting with \\1\\n')
50 error('filelog.cmp for data starting with \\1\\n')
51 if fl.size(0) != len(textwith):
51 if fl.size(0) != len(textwith):
52 error(
52 error(
53 'FIXME: This is a known failure of filelog.size for data starting '
53 'FIXME: This is a known failure of filelog.size for data starting '
54 'with \\1\\n'
54 'with \\1\\n'
55 )
55 )
56
56
57 node = addrev(textwith, renamed=True)
57 node = addrev(textwith, renamed=True)
58 if not textwith == fl.read(node):
58 if not textwith == fl.read(node):
59 error('filelog.read for a renaming + data starting with \\1\\n')
59 error('filelog.read for a renaming + data starting with \\1\\n')
60 if fl.cmp(node, textwith) or not fl.cmp(node, without):
60 if fl.cmp(node, textwith) or not fl.cmp(node, without):
61 error('filelog.cmp for a renaming + data starting with \\1\\n')
61 error('filelog.cmp for a renaming + data starting with \\1\\n')
62 if fl.size(1) != len(textwith):
62 if fl.size(1) != len(textwith):
63 error('filelog.size for a renaming + data starting with \\1\\n')
63 error('filelog.size for a renaming + data starting with \\1\\n')
64
64
65 print('OK.')
65 print('OK.')
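The test above exists because, as the filelog format is commonly described, copy metadata is framed between two `\1\n` markers, so literal data that itself starts with `\1\n` has to be wrapped in an (empty) metadata envelope. A standalone illustration of that framing; the helper below is hypothetical and does not call into Mercurial:

# Illustration of why b'\1\n' is special: metadata is framed between two
# b'\1\n' markers, so raw text starting with that sequence must be wrapped
# in an empty metadata block to avoid being misread as metadata.
def frame_with_meta(meta_items, text):
    header = b''.join(b'%s: %s\n' % (k, v) for k, v in meta_items)
    return b'\1\n' + header + b'\1\n' + text

plain = b'foo'
tricky = b'\1\nfoo'

print(repr(plain))                        # stored as-is
print(repr(frame_with_meta([], tricky)))  # needs an empty metadata envelope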
@@ -1,416 +1,416
1 #!/usr/bin/env python3
1 #!/usr/bin/env python
2 from __future__ import absolute_import, print_function
2 from __future__ import absolute_import, print_function
3
3
4 import hashlib
4 import hashlib
5 import os
5 import os
6 import random
6 import random
7 import shutil
7 import shutil
8 import stat
8 import stat
9 import struct
9 import struct
10 import sys
10 import sys
11 import tempfile
11 import tempfile
12 import time
12 import time
13 import unittest
13 import unittest
14
14
15 import silenttestrunner
15 import silenttestrunner
16
16
17 # Load the local remotefilelog, not the system one
17 # Load the local remotefilelog, not the system one
18 sys.path[0:0] = [os.path.join(os.path.dirname(__file__), '..')]
18 sys.path[0:0] = [os.path.join(os.path.dirname(__file__), '..')]
19 from mercurial.node import sha1nodeconstants
19 from mercurial.node import sha1nodeconstants
20 from mercurial import policy
20 from mercurial import policy
21
21
22 if not policy._packageprefs.get(policy.policy, (False, False))[1]:
22 if not policy._packageprefs.get(policy.policy, (False, False))[1]:
23 if __name__ == '__main__':
23 if __name__ == '__main__':
24 msg = "skipped: pure module not available with module policy:"
24 msg = "skipped: pure module not available with module policy:"
25 print(msg, policy.policy, file=sys.stderr)
25 print(msg, policy.policy, file=sys.stderr)
26 sys.exit(80)
26 sys.exit(80)
27
27
28 from mercurial import (
28 from mercurial import (
29 pycompat,
29 pycompat,
30 ui as uimod,
30 ui as uimod,
31 )
31 )
32 from hgext.remotefilelog import (
32 from hgext.remotefilelog import (
33 basepack,
33 basepack,
34 constants,
34 constants,
35 datapack,
35 datapack,
36 )
36 )
37
37
38
38
39 class datapacktestsbase(object):
39 class datapacktestsbase(object):
40 def __init__(self, datapackreader, paramsavailable):
40 def __init__(self, datapackreader, paramsavailable):
41 self.datapackreader = datapackreader
41 self.datapackreader = datapackreader
42 self.paramsavailable = paramsavailable
42 self.paramsavailable = paramsavailable
43
43
44 def setUp(self):
44 def setUp(self):
45 self.tempdirs = []
45 self.tempdirs = []
46
46
47 def tearDown(self):
47 def tearDown(self):
48 for d in self.tempdirs:
48 for d in self.tempdirs:
49 shutil.rmtree(d)
49 shutil.rmtree(d)
50
50
51 def makeTempDir(self):
51 def makeTempDir(self):
52 tempdir = pycompat.bytestr(tempfile.mkdtemp())
52 tempdir = pycompat.bytestr(tempfile.mkdtemp())
53 self.tempdirs.append(tempdir)
53 self.tempdirs.append(tempdir)
54 return tempdir
54 return tempdir
55
55
56 def getHash(self, content):
56 def getHash(self, content):
57 return hashlib.sha1(content).digest()
57 return hashlib.sha1(content).digest()
58
58
59 def getFakeHash(self):
59 def getFakeHash(self):
60 return b''.join(
60 return b''.join(
61 pycompat.bytechr(random.randint(0, 255)) for _ in range(20)
61 pycompat.bytechr(random.randint(0, 255)) for _ in range(20)
62 )
62 )
63
63
64 def createPack(self, revisions=None, packdir=None):
64 def createPack(self, revisions=None, packdir=None):
65 if revisions is None:
65 if revisions is None:
66 revisions = [
66 revisions = [
67 (
67 (
68 b"filename",
68 b"filename",
69 self.getFakeHash(),
69 self.getFakeHash(),
70 sha1nodeconstants.nullid,
70 sha1nodeconstants.nullid,
71 b"content",
71 b"content",
72 )
72 )
73 ]
73 ]
74
74
75 if packdir is None:
75 if packdir is None:
76 packdir = self.makeTempDir()
76 packdir = self.makeTempDir()
77
77
78 packer = datapack.mutabledatapack(uimod.ui(), packdir, version=2)
78 packer = datapack.mutabledatapack(uimod.ui(), packdir, version=2)
79
79
80 for args in revisions:
80 for args in revisions:
81 filename, node, base, content = args[0:4]
81 filename, node, base, content = args[0:4]
82 # meta is optional
82 # meta is optional
83 meta = None
83 meta = None
84 if len(args) > 4:
84 if len(args) > 4:
85 meta = args[4]
85 meta = args[4]
86 packer.add(filename, node, base, content, metadata=meta)
86 packer.add(filename, node, base, content, metadata=meta)
87
87
88 path = packer.close()
88 path = packer.close()
89 return self.datapackreader(path)
89 return self.datapackreader(path)
90
90
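`createPack` consumes `(filename, node, deltabase, content[, meta])` tuples, with the fifth element optional as the loop above shows. A hypothetical call-site sketch, outside the test class, just to show the shape of the input:

# Shape of the revision tuples consumed by createPack() (hypothetical values).
import hashlib

content = b"hello world"
node = hashlib.sha1(content).digest()   # 20-byte node, as in getHash()
nullid = b'\0' * 20                     # stand-in for sha1nodeconstants.nullid
meta = {b'Z': b'random_string'}         # optional fifth element

revisions = [
    (b"filename", node, nullid, content),         # without metadata
    (b"filename2", node, nullid, content, meta),  # with metadata
]
print(len(revisions))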
91 def _testAddSingle(self, content):
91 def _testAddSingle(self, content):
92 """Test putting a simple blob into a pack and reading it out."""
92 """Test putting a simple blob into a pack and reading it out."""
93 filename = b"foo"
93 filename = b"foo"
94 node = self.getHash(content)
94 node = self.getHash(content)
95
95
96 revisions = [(filename, node, sha1nodeconstants.nullid, content)]
96 revisions = [(filename, node, sha1nodeconstants.nullid, content)]
97 pack = self.createPack(revisions)
97 pack = self.createPack(revisions)
98 if self.paramsavailable:
98 if self.paramsavailable:
99 self.assertEqual(
99 self.assertEqual(
100 pack.params.fanoutprefix, basepack.SMALLFANOUTPREFIX
100 pack.params.fanoutprefix, basepack.SMALLFANOUTPREFIX
101 )
101 )
102
102
103 chain = pack.getdeltachain(filename, node)
103 chain = pack.getdeltachain(filename, node)
104 self.assertEqual(content, chain[0][4])
104 self.assertEqual(content, chain[0][4])
105
105
106 def testAddSingle(self):
106 def testAddSingle(self):
107 self._testAddSingle(b'')
107 self._testAddSingle(b'')
108
108
109 def testAddSingleEmpty(self):
109 def testAddSingleEmpty(self):
110 self._testAddSingle(b'abcdef')
110 self._testAddSingle(b'abcdef')
111
111
112 def testAddMultiple(self):
112 def testAddMultiple(self):
113 """Test putting multiple unrelated blobs into a pack and reading them
113 """Test putting multiple unrelated blobs into a pack and reading them
114 out.
114 out.
115 """
115 """
116 revisions = []
116 revisions = []
117 for i in range(10):
117 for i in range(10):
118 filename = b"foo%d" % i
118 filename = b"foo%d" % i
119 content = b"abcdef%d" % i
119 content = b"abcdef%d" % i
120 node = self.getHash(content)
120 node = self.getHash(content)
121 revisions.append((filename, node, self.getFakeHash(), content))
121 revisions.append((filename, node, self.getFakeHash(), content))
122
122
123 pack = self.createPack(revisions)
123 pack = self.createPack(revisions)
124
124
125 for filename, node, base, content in revisions:
125 for filename, node, base, content in revisions:
126 entry = pack.getdelta(filename, node)
126 entry = pack.getdelta(filename, node)
127 self.assertEqual((content, filename, base, {}), entry)
127 self.assertEqual((content, filename, base, {}), entry)
128
128
129 chain = pack.getdeltachain(filename, node)
129 chain = pack.getdeltachain(filename, node)
130 self.assertEqual(content, chain[0][4])
130 self.assertEqual(content, chain[0][4])
131
131
132 def testAddDeltas(self):
132 def testAddDeltas(self):
133 """Test putting multiple delta blobs into a pack and read the chain."""
133 """Test putting multiple delta blobs into a pack and read the chain."""
134 revisions = []
134 revisions = []
135 filename = b"foo"
135 filename = b"foo"
136 lastnode = sha1nodeconstants.nullid
136 lastnode = sha1nodeconstants.nullid
137 for i in range(10):
137 for i in range(10):
138 content = b"abcdef%d" % i
138 content = b"abcdef%d" % i
139 node = self.getHash(content)
139 node = self.getHash(content)
140 revisions.append((filename, node, lastnode, content))
140 revisions.append((filename, node, lastnode, content))
141 lastnode = node
141 lastnode = node
142
142
143 pack = self.createPack(revisions)
143 pack = self.createPack(revisions)
144
144
145 entry = pack.getdelta(filename, revisions[0][1])
145 entry = pack.getdelta(filename, revisions[0][1])
146 realvalue = (revisions[0][3], filename, revisions[0][2], {})
146 realvalue = (revisions[0][3], filename, revisions[0][2], {})
147 self.assertEqual(entry, realvalue)
147 self.assertEqual(entry, realvalue)
148
148
149 # Test that the chain for the final entry has all the others
149 # Test that the chain for the final entry has all the others
150 chain = pack.getdeltachain(filename, node)
150 chain = pack.getdeltachain(filename, node)
151 for i in range(10):
151 for i in range(10):
152 content = b"abcdef%d" % i
152 content = b"abcdef%d" % i
153 self.assertEqual(content, chain[-i - 1][4])
153 self.assertEqual(content, chain[-i - 1][4])
154
154
155 def testPackMany(self):
155 def testPackMany(self):
156 """Pack many related and unrelated objects."""
156 """Pack many related and unrelated objects."""
157 # Build a random pack file
157 # Build a random pack file
158 revisions = []
158 revisions = []
159 blobs = {}
159 blobs = {}
160 random.seed(0)
160 random.seed(0)
161 for i in range(100):
161 for i in range(100):
162 filename = b"filename-%d" % i
162 filename = b"filename-%d" % i
163 filerevs = []
163 filerevs = []
164 for j in range(random.randint(1, 100)):
164 for j in range(random.randint(1, 100)):
165 content = b"content-%d" % j
165 content = b"content-%d" % j
166 node = self.getHash(content)
166 node = self.getHash(content)
167 lastnode = sha1nodeconstants.nullid
167 lastnode = sha1nodeconstants.nullid
168 if len(filerevs) > 0:
168 if len(filerevs) > 0:
169 lastnode = filerevs[random.randint(0, len(filerevs) - 1)]
169 lastnode = filerevs[random.randint(0, len(filerevs) - 1)]
170 filerevs.append(node)
170 filerevs.append(node)
171 blobs[(filename, node, lastnode)] = content
171 blobs[(filename, node, lastnode)] = content
172 revisions.append((filename, node, lastnode, content))
172 revisions.append((filename, node, lastnode, content))
173
173
174 pack = self.createPack(revisions)
174 pack = self.createPack(revisions)
175
175
176 # Verify the pack contents
176 # Verify the pack contents
177 for (filename, node, lastnode), content in sorted(blobs.items()):
177 for (filename, node, lastnode), content in sorted(blobs.items()):
178 chain = pack.getdeltachain(filename, node)
178 chain = pack.getdeltachain(filename, node)
179 for entry in chain:
179 for entry in chain:
180 expectedcontent = blobs[(entry[0], entry[1], entry[3])]
180 expectedcontent = blobs[(entry[0], entry[1], entry[3])]
181 self.assertEqual(entry[4], expectedcontent)
181 self.assertEqual(entry[4], expectedcontent)
182
182
183 def testPackMetadata(self):
183 def testPackMetadata(self):
184 revisions = []
184 revisions = []
185 for i in range(100):
185 for i in range(100):
186 filename = b'%d.txt' % i
186 filename = b'%d.txt' % i
187 content = b'put-something-here \n' * i
187 content = b'put-something-here \n' * i
188 node = self.getHash(content)
188 node = self.getHash(content)
189 meta = {
189 meta = {
190 constants.METAKEYFLAG: i ** 4,
190 constants.METAKEYFLAG: i ** 4,
191 constants.METAKEYSIZE: len(content),
191 constants.METAKEYSIZE: len(content),
192 b'Z': b'random_string',
192 b'Z': b'random_string',
193 b'_': b'\0' * i,
193 b'_': b'\0' * i,
194 }
194 }
195 revisions.append(
195 revisions.append(
196 (filename, node, sha1nodeconstants.nullid, content, meta)
196 (filename, node, sha1nodeconstants.nullid, content, meta)
197 )
197 )
198 pack = self.createPack(revisions)
198 pack = self.createPack(revisions)
199 for name, node, x, content, origmeta in revisions:
199 for name, node, x, content, origmeta in revisions:
200 parsedmeta = pack.getmeta(name, node)
200 parsedmeta = pack.getmeta(name, node)
201 # flag == 0 should be optimized out
201 # flag == 0 should be optimized out
202 if origmeta[constants.METAKEYFLAG] == 0:
202 if origmeta[constants.METAKEYFLAG] == 0:
203 del origmeta[constants.METAKEYFLAG]
203 del origmeta[constants.METAKEYFLAG]
204 self.assertEqual(parsedmeta, origmeta)
204 self.assertEqual(parsedmeta, origmeta)
205
205
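The "flag == 0 should be optimized out" adjustment above mirrors a writer-side normalization: a zero flag is simply not stored. A tiny sketch of that normalization; the single-byte key names are stand-ins, not the real `constants.METAKEYFLAG` / `constants.METAKEYSIZE` values:

# Sketch of the flag==0 normalization the test compensates for (assumed keys).
METAKEYFLAG, METAKEYSIZE = b'f', b's'

def normalize(meta):
    meta = dict(meta)
    if meta.get(METAKEYFLAG) == 0:
        del meta[METAKEYFLAG]   # a zero flag is not written to the pack
    return meta

print(normalize({METAKEYFLAG: 0, METAKEYSIZE: 11}))
print(normalize({METAKEYFLAG: 16, METAKEYSIZE: 11}))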
206 def testGetMissing(self):
206 def testGetMissing(self):
207 """Test the getmissing() api."""
207 """Test the getmissing() api."""
208 revisions = []
208 revisions = []
209 filename = b"foo"
209 filename = b"foo"
210 lastnode = sha1nodeconstants.nullid
210 lastnode = sha1nodeconstants.nullid
211 for i in range(10):
211 for i in range(10):
212 content = b"abcdef%d" % i
212 content = b"abcdef%d" % i
213 node = self.getHash(content)
213 node = self.getHash(content)
214 revisions.append((filename, node, lastnode, content))
214 revisions.append((filename, node, lastnode, content))
215 lastnode = node
215 lastnode = node
216
216
217 pack = self.createPack(revisions)
217 pack = self.createPack(revisions)
218
218
219 missing = pack.getmissing([(b"foo", revisions[0][1])])
219 missing = pack.getmissing([(b"foo", revisions[0][1])])
220 self.assertFalse(missing)
220 self.assertFalse(missing)
221
221
222 missing = pack.getmissing(
222 missing = pack.getmissing(
223 [(b"foo", revisions[0][1]), (b"foo", revisions[1][1])]
223 [(b"foo", revisions[0][1]), (b"foo", revisions[1][1])]
224 )
224 )
225 self.assertFalse(missing)
225 self.assertFalse(missing)
226
226
227 fakenode = self.getFakeHash()
227 fakenode = self.getFakeHash()
228 missing = pack.getmissing(
228 missing = pack.getmissing(
229 [(b"foo", revisions[0][1]), (b"foo", fakenode)]
229 [(b"foo", revisions[0][1]), (b"foo", fakenode)]
230 )
230 )
231 self.assertEqual(missing, [(b"foo", fakenode)])
231 self.assertEqual(missing, [(b"foo", fakenode)])
232
232
233 def testAddThrows(self):
233 def testAddThrows(self):
234 pack = self.createPack()
234 pack = self.createPack()
235
235
236 try:
236 try:
237 pack.add(b'filename', sha1nodeconstants.nullid, b'contents')
237 pack.add(b'filename', sha1nodeconstants.nullid, b'contents')
238 self.assertTrue(False, "datapack.add should throw")
238 self.assertTrue(False, "datapack.add should throw")
239 except RuntimeError:
239 except RuntimeError:
240 pass
240 pass
241
241
242 def testBadVersionThrows(self):
242 def testBadVersionThrows(self):
243 pack = self.createPack()
243 pack = self.createPack()
244 path = pack.path + b'.datapack'
244 path = pack.path + b'.datapack'
245 with open(path, 'rb') as f:
245 with open(path, 'rb') as f:
246 raw = f.read()
246 raw = f.read()
247 raw = struct.pack('!B', 255) + raw[1:]
247 raw = struct.pack('!B', 255) + raw[1:]
248 os.chmod(path, os.stat(path).st_mode | stat.S_IWRITE)
248 os.chmod(path, os.stat(path).st_mode | stat.S_IWRITE)
249 with open(path, 'wb+') as f:
249 with open(path, 'wb+') as f:
250 f.write(raw)
250 f.write(raw)
251
251
252 try:
252 try:
253 self.datapackreader(pack.path)
253 self.datapackreader(pack.path)
254 self.assertTrue(False, "bad version number should have thrown")
254 self.assertTrue(False, "bad version number should have thrown")
255 except RuntimeError:
255 except RuntimeError:
256 pass
256 pass
257
257
258 def testMissingDeltabase(self):
258 def testMissingDeltabase(self):
259 fakenode = self.getFakeHash()
259 fakenode = self.getFakeHash()
260 revisions = [(b"filename", fakenode, self.getFakeHash(), b"content")]
260 revisions = [(b"filename", fakenode, self.getFakeHash(), b"content")]
261 pack = self.createPack(revisions)
261 pack = self.createPack(revisions)
262 chain = pack.getdeltachain(b"filename", fakenode)
262 chain = pack.getdeltachain(b"filename", fakenode)
263 self.assertEqual(len(chain), 1)
263 self.assertEqual(len(chain), 1)
264
264
265 def testLargePack(self):
265 def testLargePack(self):
266 """Test creating and reading from a large pack with over X entries.
266 """Test creating and reading from a large pack with over X entries.
267 This causes it to use a 2^16 fanout table instead."""
267 This causes it to use a 2^16 fanout table instead."""
268 revisions = []
268 revisions = []
269 blobs = {}
269 blobs = {}
270 total = basepack.SMALLFANOUTCUTOFF + 1
270 total = basepack.SMALLFANOUTCUTOFF + 1
271 for i in pycompat.xrange(total):
271 for i in pycompat.xrange(total):
272 filename = b"filename-%d" % i
272 filename = b"filename-%d" % i
273 content = filename
273 content = filename
274 node = self.getHash(content)
274 node = self.getHash(content)
275 blobs[(filename, node)] = content
275 blobs[(filename, node)] = content
276 revisions.append(
276 revisions.append(
277 (filename, node, sha1nodeconstants.nullid, content)
277 (filename, node, sha1nodeconstants.nullid, content)
278 )
278 )
279
279
280 pack = self.createPack(revisions)
280 pack = self.createPack(revisions)
281 if self.paramsavailable:
281 if self.paramsavailable:
282 self.assertEqual(
282 self.assertEqual(
283 pack.params.fanoutprefix, basepack.LARGEFANOUTPREFIX
283 pack.params.fanoutprefix, basepack.LARGEFANOUTPREFIX
284 )
284 )
285
285
286 for (filename, node), content in blobs.items():
286 for (filename, node), content in blobs.items():
287 actualcontent = pack.getdeltachain(filename, node)[0][4]
287 actualcontent = pack.getdeltachain(filename, node)[0][4]
288 self.assertEqual(actualcontent, content)
288 self.assertEqual(actualcontent, content)
289
289
290 def testPacksCache(self):
290 def testPacksCache(self):
291 """Test that we remember the most recent packs while fetching the delta
291 """Test that we remember the most recent packs while fetching the delta
292 chain."""
292 chain."""
293
293
294 packdir = self.makeTempDir()
294 packdir = self.makeTempDir()
295 deltachains = []
295 deltachains = []
296
296
297 numpacks = 10
297 numpacks = 10
298 revisionsperpack = 100
298 revisionsperpack = 100
299
299
300 for i in range(numpacks):
300 for i in range(numpacks):
301 chain = []
301 chain = []
302 revision = (
302 revision = (
303 b'%d' % i,
303 b'%d' % i,
304 self.getFakeHash(),
304 self.getFakeHash(),
305 sha1nodeconstants.nullid,
305 sha1nodeconstants.nullid,
306 b"content",
306 b"content",
307 )
307 )
308
308
309 for _ in range(revisionsperpack):
309 for _ in range(revisionsperpack):
310 chain.append(revision)
310 chain.append(revision)
311 revision = (
311 revision = (
312 b'%d' % i,
312 b'%d' % i,
313 self.getFakeHash(),
313 self.getFakeHash(),
314 revision[1],
314 revision[1],
315 self.getFakeHash(),
315 self.getFakeHash(),
316 )
316 )
317
317
318 self.createPack(chain, packdir)
318 self.createPack(chain, packdir)
319 deltachains.append(chain)
319 deltachains.append(chain)
320
320
321 class testdatapackstore(datapack.datapackstore):
321 class testdatapackstore(datapack.datapackstore):
322 # Ensures that we are not keeping everything in the cache.
322 # Ensures that we are not keeping everything in the cache.
323 DEFAULTCACHESIZE = numpacks // 2
323 DEFAULTCACHESIZE = numpacks // 2
324
324
325 store = testdatapackstore(uimod.ui(), packdir)
325 store = testdatapackstore(uimod.ui(), packdir)
326
326
327 random.shuffle(deltachains)
327 random.shuffle(deltachains)
328 for randomchain in deltachains:
328 for randomchain in deltachains:
329 revision = random.choice(randomchain)
329 revision = random.choice(randomchain)
330 chain = store.getdeltachain(revision[0], revision[1])
330 chain = store.getdeltachain(revision[0], revision[1])
331
331
332 mostrecentpack = next(iter(store.packs), None)
332 mostrecentpack = next(iter(store.packs), None)
333 self.assertEqual(
333 self.assertEqual(
334 mostrecentpack.getdeltachain(revision[0], revision[1]), chain
334 mostrecentpack.getdeltachain(revision[0], revision[1]), chain
335 )
335 )
336
336
337 self.assertEqual(randomchain.index(revision) + 1, len(chain))
337 self.assertEqual(randomchain.index(revision) + 1, len(chain))
338
338
339 # perf test off by default since it's slow
339 # perf test off by default since it's slow
340 def _testIndexPerf(self):
340 def _testIndexPerf(self):
341 random.seed(0)
341 random.seed(0)
342 print("Multi-get perf test")
342 print("Multi-get perf test")
343 packsizes = [
343 packsizes = [
344 100,
344 100,
345 10000,
345 10000,
346 100000,
346 100000,
347 500000,
347 500000,
348 1000000,
348 1000000,
349 3000000,
349 3000000,
350 ]
350 ]
351 lookupsizes = [
351 lookupsizes = [
352 10,
352 10,
353 100,
353 100,
354 1000,
354 1000,
355 10000,
355 10000,
356 100000,
356 100000,
357 1000000,
357 1000000,
358 ]
358 ]
359 for packsize in packsizes:
359 for packsize in packsizes:
360 revisions = []
360 revisions = []
361 for i in pycompat.xrange(packsize):
361 for i in pycompat.xrange(packsize):
362 filename = b"filename-%d" % i
362 filename = b"filename-%d" % i
363 content = b"content-%d" % i
363 content = b"content-%d" % i
364 node = self.getHash(content)
364 node = self.getHash(content)
365 revisions.append(
365 revisions.append(
366 (filename, node, sha1nodeconstants.nullid, content)
366 (filename, node, sha1nodeconstants.nullid, content)
367 )
367 )
368
368
369 path = self.createPack(revisions).path
369 path = self.createPack(revisions).path
370
370
371 # Perf of large multi-get
371 # Perf of large multi-get
372 import gc
372 import gc
373
373
374 gc.disable()
374 gc.disable()
375 pack = self.datapackreader(path)
375 pack = self.datapackreader(path)
376 for lookupsize in lookupsizes:
376 for lookupsize in lookupsizes:
377 if lookupsize > packsize:
377 if lookupsize > packsize:
378 continue
378 continue
379 random.shuffle(revisions)
379 random.shuffle(revisions)
380 findnodes = [(rev[0], rev[1]) for rev in revisions]
380 findnodes = [(rev[0], rev[1]) for rev in revisions]
381
381
382 start = time.time()
382 start = time.time()
383 pack.getmissing(findnodes[:lookupsize])
383 pack.getmissing(findnodes[:lookupsize])
384 elapsed = time.time() - start
384 elapsed = time.time() - start
385 print(
385 print(
386 "%s pack %d lookups = %0.04f"
386 "%s pack %d lookups = %0.04f"
387 % (
387 % (
388 ('%d' % packsize).rjust(7),
388 ('%d' % packsize).rjust(7),
389 ('%d' % lookupsize).rjust(7),
389 ('%d' % lookupsize).rjust(7),
390 elapsed,
390 elapsed,
391 )
391 )
392 )
392 )
393
393
394 print("")
394 print("")
395 gc.enable()
395 gc.enable()
396
396
397 # The perf test is meant to produce output, so we always fail the test
397 # The perf test is meant to produce output, so we always fail the test
398 # so the user sees the output.
398 # so the user sees the output.
399 raise RuntimeError("perf test always fails")
399 raise RuntimeError("perf test always fails")
400
400
401
401
402 class datapacktests(datapacktestsbase, unittest.TestCase):
402 class datapacktests(datapacktestsbase, unittest.TestCase):
403 def __init__(self, *args, **kwargs):
403 def __init__(self, *args, **kwargs):
404 datapacktestsbase.__init__(self, datapack.datapack, True)
404 datapacktestsbase.__init__(self, datapack.datapack, True)
405 unittest.TestCase.__init__(self, *args, **kwargs)
405 unittest.TestCase.__init__(self, *args, **kwargs)
406
406
407
407
408 # TODO:
408 # TODO:
409 # datapack store:
409 # datapack store:
410 # - getmissing
410 # - getmissing
411 # - GC two packs into one
411 # - GC two packs into one
412
412
413 if __name__ == '__main__':
413 if __name__ == '__main__':
414 if pycompat.iswindows:
414 if pycompat.iswindows:
415 sys.exit(80) # Skip on Windows
415 sys.exit(80) # Skip on Windows
416 silenttestrunner.main(__name__)
416 silenttestrunner.main(__name__)
@@ -1,313 +1,313
1 #!/usr/bin/env python3
1 #!/usr/bin/env python
2 from __future__ import absolute_import
2 from __future__ import absolute_import
3
3
4 import hashlib
4 import hashlib
5 import os
5 import os
6 import random
6 import random
7 import shutil
7 import shutil
8 import stat
8 import stat
9 import struct
9 import struct
10 import sys
10 import sys
11 import tempfile
11 import tempfile
12 import unittest
12 import unittest
13
13
14 import silenttestrunner
14 import silenttestrunner
15
15
16 from mercurial.node import sha1nodeconstants
16 from mercurial.node import sha1nodeconstants
17 from mercurial import (
17 from mercurial import (
18 pycompat,
18 pycompat,
19 ui as uimod,
19 ui as uimod,
20 )
20 )
21
21
22 # Load the local remotefilelog, not the system one
22 # Load the local remotefilelog, not the system one
23 sys.path[0:0] = [os.path.join(os.path.dirname(__file__), '..')]
23 sys.path[0:0] = [os.path.join(os.path.dirname(__file__), '..')]
24 from hgext.remotefilelog import (
24 from hgext.remotefilelog import (
25 basepack,
25 basepack,
26 historypack,
26 historypack,
27 )
27 )
28
28
29
29
30 class histpacktests(unittest.TestCase):
30 class histpacktests(unittest.TestCase):
31 def setUp(self):
31 def setUp(self):
32 self.tempdirs = []
32 self.tempdirs = []
33
33
34 def tearDown(self):
34 def tearDown(self):
35 for d in self.tempdirs:
35 for d in self.tempdirs:
36 shutil.rmtree(d)
36 shutil.rmtree(d)
37
37
38 def makeTempDir(self):
38 def makeTempDir(self):
39 tempdir = tempfile.mkdtemp()
39 tempdir = tempfile.mkdtemp()
40 self.tempdirs.append(tempdir)
40 self.tempdirs.append(tempdir)
41 return pycompat.fsencode(tempdir)
41 return pycompat.fsencode(tempdir)
42
42
43 def getHash(self, content):
43 def getHash(self, content):
44 return hashlib.sha1(content).digest()
44 return hashlib.sha1(content).digest()
45
45
46 def getFakeHash(self):
46 def getFakeHash(self):
47 return b''.join(
47 return b''.join(
48 pycompat.bytechr(random.randint(0, 255)) for _ in range(20)
48 pycompat.bytechr(random.randint(0, 255)) for _ in range(20)
49 )
49 )
50
50
51 def createPack(self, revisions=None):
51 def createPack(self, revisions=None):
52 """Creates and returns a historypack containing the specified revisions.
52 """Creates and returns a historypack containing the specified revisions.
53
53
54 `revisions` is a list of tuples, where each tuple contains a filename,
54 `revisions` is a list of tuples, where each tuple contains a filename,
55 node, p1node, p2node, linknode, and copyfrom.
55 node, p1node, p2node, linknode, and copyfrom.
56 """
56 """
57 if revisions is None:
57 if revisions is None:
58 revisions = [
58 revisions = [
59 (
59 (
60 b"filename",
60 b"filename",
61 self.getFakeHash(),
61 self.getFakeHash(),
62 sha1nodeconstants.nullid,
62 sha1nodeconstants.nullid,
63 sha1nodeconstants.nullid,
63 sha1nodeconstants.nullid,
64 self.getFakeHash(),
64 self.getFakeHash(),
65 None,
65 None,
66 )
66 )
67 ]
67 ]
68
68
69 packdir = pycompat.fsencode(self.makeTempDir())
69 packdir = pycompat.fsencode(self.makeTempDir())
70 packer = historypack.mutablehistorypack(uimod.ui(), packdir, version=2)
70 packer = historypack.mutablehistorypack(uimod.ui(), packdir, version=2)
71
71
72 for filename, node, p1, p2, linknode, copyfrom in revisions:
72 for filename, node, p1, p2, linknode, copyfrom in revisions:
73 packer.add(filename, node, p1, p2, linknode, copyfrom)
73 packer.add(filename, node, p1, p2, linknode, copyfrom)
74
74
75 path = packer.close()
75 path = packer.close()
76 return historypack.historypack(path)
76 return historypack.historypack(path)
77
77
78 def testAddSingle(self):
78 def testAddSingle(self):
79 """Test putting a single entry into a pack and reading it out."""
79 """Test putting a single entry into a pack and reading it out."""
80 filename = b"foo"
80 filename = b"foo"
81 node = self.getFakeHash()
81 node = self.getFakeHash()
82 p1 = self.getFakeHash()
82 p1 = self.getFakeHash()
83 p2 = self.getFakeHash()
83 p2 = self.getFakeHash()
84 linknode = self.getFakeHash()
84 linknode = self.getFakeHash()
85
85
86 revisions = [(filename, node, p1, p2, linknode, None)]
86 revisions = [(filename, node, p1, p2, linknode, None)]
87 pack = self.createPack(revisions)
87 pack = self.createPack(revisions)
88
88
89 actual = pack.getancestors(filename, node)[node]
89 actual = pack.getancestors(filename, node)[node]
90 self.assertEqual(p1, actual[0])
90 self.assertEqual(p1, actual[0])
91 self.assertEqual(p2, actual[1])
91 self.assertEqual(p2, actual[1])
92 self.assertEqual(linknode, actual[2])
92 self.assertEqual(linknode, actual[2])
93
93
94 def testAddMultiple(self):
94 def testAddMultiple(self):
95 """Test putting multiple unrelated revisions into a pack and reading
95 """Test putting multiple unrelated revisions into a pack and reading
96 them out.
96 them out.
97 """
97 """
98 revisions = []
98 revisions = []
99 for i in range(10):
99 for i in range(10):
100 filename = b"foo-%d" % i
100 filename = b"foo-%d" % i
101 node = self.getFakeHash()
101 node = self.getFakeHash()
102 p1 = self.getFakeHash()
102 p1 = self.getFakeHash()
103 p2 = self.getFakeHash()
103 p2 = self.getFakeHash()
104 linknode = self.getFakeHash()
104 linknode = self.getFakeHash()
105 revisions.append((filename, node, p1, p2, linknode, None))
105 revisions.append((filename, node, p1, p2, linknode, None))
106
106
107 pack = self.createPack(revisions)
107 pack = self.createPack(revisions)
108
108
109 for filename, node, p1, p2, linknode, copyfrom in revisions:
109 for filename, node, p1, p2, linknode, copyfrom in revisions:
110 actual = pack.getancestors(filename, node)[node]
110 actual = pack.getancestors(filename, node)[node]
111 self.assertEqual(p1, actual[0])
111 self.assertEqual(p1, actual[0])
112 self.assertEqual(p2, actual[1])
112 self.assertEqual(p2, actual[1])
113 self.assertEqual(linknode, actual[2])
113 self.assertEqual(linknode, actual[2])
114 self.assertEqual(copyfrom, actual[3])
114 self.assertEqual(copyfrom, actual[3])
115
115
116 def testAddAncestorChain(self):
116 def testAddAncestorChain(self):
117 """Test putting multiple revisions in into a pack and read the ancestor
117 """Test putting multiple revisions in into a pack and read the ancestor
118 chain.
118 chain.
119 """
119 """
120 revisions = []
120 revisions = []
121 filename = b"foo"
121 filename = b"foo"
122 lastnode = sha1nodeconstants.nullid
122 lastnode = sha1nodeconstants.nullid
123 for i in range(10):
123 for i in range(10):
124 node = self.getFakeHash()
124 node = self.getFakeHash()
125 revisions.append(
125 revisions.append(
126 (
126 (
127 filename,
127 filename,
128 node,
128 node,
129 lastnode,
129 lastnode,
130 sha1nodeconstants.nullid,
130 sha1nodeconstants.nullid,
131 sha1nodeconstants.nullid,
131 sha1nodeconstants.nullid,
132 None,
132 None,
133 )
133 )
134 )
134 )
135 lastnode = node
135 lastnode = node
136
136
137 # revisions must be added in topological order, newest first
137 # revisions must be added in topological order, newest first
138 revisions = list(reversed(revisions))
138 revisions = list(reversed(revisions))
139 pack = self.createPack(revisions)
139 pack = self.createPack(revisions)
140
140
141 # Test that the chain has all the entries
141 # Test that the chain has all the entries
142 ancestors = pack.getancestors(revisions[0][0], revisions[0][1])
142 ancestors = pack.getancestors(revisions[0][0], revisions[0][1])
143 for filename, node, p1, p2, linknode, copyfrom in revisions:
143 for filename, node, p1, p2, linknode, copyfrom in revisions:
144 ap1, ap2, alinknode, acopyfrom = ancestors[node]
144 ap1, ap2, alinknode, acopyfrom = ancestors[node]
145 self.assertEqual(ap1, p1)
145 self.assertEqual(ap1, p1)
146 self.assertEqual(ap2, p2)
146 self.assertEqual(ap2, p2)
147 self.assertEqual(alinknode, linknode)
147 self.assertEqual(alinknode, linknode)
148 self.assertEqual(acopyfrom, copyfrom)
148 self.assertEqual(acopyfrom, copyfrom)
149
149
150 def testPackMany(self):
150 def testPackMany(self):
151 """Pack many related and unrelated ancestors."""
151 """Pack many related and unrelated ancestors."""
152 # Build a random pack file
152 # Build a random pack file
153 allentries = {}
153 allentries = {}
154 ancestorcounts = {}
154 ancestorcounts = {}
155 revisions = []
155 revisions = []
156 random.seed(0)
156 random.seed(0)
157 for i in range(100):
157 for i in range(100):
158 filename = b"filename-%d" % i
158 filename = b"filename-%d" % i
159 entries = []
159 entries = []
160 p2 = sha1nodeconstants.nullid
160 p2 = sha1nodeconstants.nullid
161 linknode = sha1nodeconstants.nullid
161 linknode = sha1nodeconstants.nullid
162 for j in range(random.randint(1, 100)):
162 for j in range(random.randint(1, 100)):
163 node = self.getFakeHash()
163 node = self.getFakeHash()
164 p1 = sha1nodeconstants.nullid
164 p1 = sha1nodeconstants.nullid
165 if len(entries) > 0:
165 if len(entries) > 0:
166 p1 = entries[random.randint(0, len(entries) - 1)]
166 p1 = entries[random.randint(0, len(entries) - 1)]
167 entries.append(node)
167 entries.append(node)
168 revisions.append((filename, node, p1, p2, linknode, None))
168 revisions.append((filename, node, p1, p2, linknode, None))
169 allentries[(filename, node)] = (p1, p2, linknode)
169 allentries[(filename, node)] = (p1, p2, linknode)
170 if p1 == sha1nodeconstants.nullid:
170 if p1 == sha1nodeconstants.nullid:
171 ancestorcounts[(filename, node)] = 1
171 ancestorcounts[(filename, node)] = 1
172 else:
172 else:
173 newcount = ancestorcounts[(filename, p1)] + 1
173 newcount = ancestorcounts[(filename, p1)] + 1
174 ancestorcounts[(filename, node)] = newcount
174 ancestorcounts[(filename, node)] = newcount
175
175
176 # Must add file entries in reverse topological order
176 # Must add file entries in reverse topological order
177 revisions = list(reversed(revisions))
177 revisions = list(reversed(revisions))
178 pack = self.createPack(revisions)
178 pack = self.createPack(revisions)
179
179
180 # Verify the pack contents
180 # Verify the pack contents
181 for (filename, node) in allentries:
181 for (filename, node) in allentries:
182 ancestors = pack.getancestors(filename, node)
182 ancestors = pack.getancestors(filename, node)
183 self.assertEqual(ancestorcounts[(filename, node)], len(ancestors))
183 self.assertEqual(ancestorcounts[(filename, node)], len(ancestors))
184 for anode, (ap1, ap2, alinknode, copyfrom) in ancestors.items():
184 for anode, (ap1, ap2, alinknode, copyfrom) in ancestors.items():
185 ep1, ep2, elinknode = allentries[(filename, anode)]
185 ep1, ep2, elinknode = allentries[(filename, anode)]
186 self.assertEqual(ap1, ep1)
186 self.assertEqual(ap1, ep1)
187 self.assertEqual(ap2, ep2)
187 self.assertEqual(ap2, ep2)
188 self.assertEqual(alinknode, elinknode)
188 self.assertEqual(alinknode, elinknode)
189 self.assertEqual(copyfrom, None)
189 self.assertEqual(copyfrom, None)
190
190
191 def testGetNodeInfo(self):
191 def testGetNodeInfo(self):
192 revisions = []
192 revisions = []
193 filename = b"foo"
193 filename = b"foo"
194 lastnode = sha1nodeconstants.nullid
194 lastnode = sha1nodeconstants.nullid
195 for i in range(10):
195 for i in range(10):
196 node = self.getFakeHash()
196 node = self.getFakeHash()
197 revisions.append(
197 revisions.append(
198 (
198 (
199 filename,
199 filename,
200 node,
200 node,
201 lastnode,
201 lastnode,
202 sha1nodeconstants.nullid,
202 sha1nodeconstants.nullid,
203 sha1nodeconstants.nullid,
203 sha1nodeconstants.nullid,
204 None,
204 None,
205 )
205 )
206 )
206 )
207 lastnode = node
207 lastnode = node
208
208
209 pack = self.createPack(revisions)
209 pack = self.createPack(revisions)
210
210
211 # Test that getnodeinfo returns the expected results
211 # Test that getnodeinfo returns the expected results
212 for filename, node, p1, p2, linknode, copyfrom in revisions:
212 for filename, node, p1, p2, linknode, copyfrom in revisions:
213 ap1, ap2, alinknode, acopyfrom = pack.getnodeinfo(filename, node)
213 ap1, ap2, alinknode, acopyfrom = pack.getnodeinfo(filename, node)
214 self.assertEqual(ap1, p1)
214 self.assertEqual(ap1, p1)
215 self.assertEqual(ap2, p2)
215 self.assertEqual(ap2, p2)
216 self.assertEqual(alinknode, linknode)
216 self.assertEqual(alinknode, linknode)
217 self.assertEqual(acopyfrom, copyfrom)
217 self.assertEqual(acopyfrom, copyfrom)
218
218
219 def testGetMissing(self):
219 def testGetMissing(self):
220 """Test the getmissing() api."""
220 """Test the getmissing() api."""
221 revisions = []
221 revisions = []
222 filename = b"foo"
222 filename = b"foo"
223 for i in range(10):
223 for i in range(10):
224 node = self.getFakeHash()
224 node = self.getFakeHash()
225 p1 = self.getFakeHash()
225 p1 = self.getFakeHash()
226 p2 = self.getFakeHash()
226 p2 = self.getFakeHash()
227 linknode = self.getFakeHash()
227 linknode = self.getFakeHash()
228 revisions.append((filename, node, p1, p2, linknode, None))
228 revisions.append((filename, node, p1, p2, linknode, None))
229
229
230 pack = self.createPack(revisions)
230 pack = self.createPack(revisions)
231
231
232 missing = pack.getmissing([(filename, revisions[0][1])])
232 missing = pack.getmissing([(filename, revisions[0][1])])
233 self.assertFalse(missing)
233 self.assertFalse(missing)
234
234
235 missing = pack.getmissing(
235 missing = pack.getmissing(
236 [(filename, revisions[0][1]), (filename, revisions[1][1])]
236 [(filename, revisions[0][1]), (filename, revisions[1][1])]
237 )
237 )
238 self.assertFalse(missing)
238 self.assertFalse(missing)
239
239
240 fakenode = self.getFakeHash()
240 fakenode = self.getFakeHash()
241 missing = pack.getmissing(
241 missing = pack.getmissing(
242 [(filename, revisions[0][1]), (filename, fakenode)]
242 [(filename, revisions[0][1]), (filename, fakenode)]
243 )
243 )
244 self.assertEqual(missing, [(filename, fakenode)])
244 self.assertEqual(missing, [(filename, fakenode)])
245
245
246 # Test getmissing on a non-existent filename
246 # Test getmissing on a non-existent filename
247 missing = pack.getmissing([(b"bar", fakenode)])
247 missing = pack.getmissing([(b"bar", fakenode)])
248 self.assertEqual(missing, [(b"bar", fakenode)])
248 self.assertEqual(missing, [(b"bar", fakenode)])
249
249
250 def testAddThrows(self):
250 def testAddThrows(self):
251 pack = self.createPack()
251 pack = self.createPack()
252
252
253 try:
253 try:
254 pack.add(
254 pack.add(
255 b'filename',
255 b'filename',
256 sha1nodeconstants.nullid,
256 sha1nodeconstants.nullid,
257 sha1nodeconstants.nullid,
257 sha1nodeconstants.nullid,
258 sha1nodeconstants.nullid,
258 sha1nodeconstants.nullid,
259 sha1nodeconstants.nullid,
259 sha1nodeconstants.nullid,
260 None,
260 None,
261 )
261 )
262 self.assertTrue(False, "historypack.add should throw")
262 self.assertTrue(False, "historypack.add should throw")
263 except RuntimeError:
263 except RuntimeError:
264 pass
264 pass
265
265
266 def testBadVersionThrows(self):
266 def testBadVersionThrows(self):
267 pack = self.createPack()
267 pack = self.createPack()
268 path = pack.path + b'.histpack'
268 path = pack.path + b'.histpack'
269 with open(path, 'rb') as f:
269 with open(path, 'rb') as f:
270 raw = f.read()
270 raw = f.read()
271 raw = struct.pack('!B', 255) + raw[1:]
271 raw = struct.pack('!B', 255) + raw[1:]
272 os.chmod(path, os.stat(path).st_mode | stat.S_IWRITE)
272 os.chmod(path, os.stat(path).st_mode | stat.S_IWRITE)
273 with open(path, 'wb+') as f:
273 with open(path, 'wb+') as f:
274 f.write(raw)
274 f.write(raw)
275
275
276 try:
276 try:
277 historypack.historypack(pack.path)
277 historypack.historypack(pack.path)
278 self.assertTrue(False, "bad version number should have thrown")
278 self.assertTrue(False, "bad version number should have thrown")
279 except RuntimeError:
279 except RuntimeError:
280 pass
280 pass
281
281
282 def testLargePack(self):
282 def testLargePack(self):
283 """Test creating and reading from a large pack with over X entries.
283 """Test creating and reading from a large pack with over X entries.
284 This causes it to use a 2^16 fanout table instead."""
284 This causes it to use a 2^16 fanout table instead."""
285 total = basepack.SMALLFANOUTCUTOFF + 1
285 total = basepack.SMALLFANOUTCUTOFF + 1
286 revisions = []
286 revisions = []
287 for i in pycompat.xrange(total):
287 for i in pycompat.xrange(total):
288 filename = b"foo-%d" % i
288 filename = b"foo-%d" % i
289 node = self.getFakeHash()
289 node = self.getFakeHash()
290 p1 = self.getFakeHash()
290 p1 = self.getFakeHash()
291 p2 = self.getFakeHash()
291 p2 = self.getFakeHash()
292 linknode = self.getFakeHash()
292 linknode = self.getFakeHash()
293 revisions.append((filename, node, p1, p2, linknode, None))
293 revisions.append((filename, node, p1, p2, linknode, None))
294
294
295 pack = self.createPack(revisions)
295 pack = self.createPack(revisions)
296 self.assertEqual(pack.params.fanoutprefix, basepack.LARGEFANOUTPREFIX)
296 self.assertEqual(pack.params.fanoutprefix, basepack.LARGEFANOUTPREFIX)
297
297
298 for filename, node, p1, p2, linknode, copyfrom in revisions:
298 for filename, node, p1, p2, linknode, copyfrom in revisions:
299 actual = pack.getancestors(filename, node)[node]
299 actual = pack.getancestors(filename, node)[node]
300 self.assertEqual(p1, actual[0])
300 self.assertEqual(p1, actual[0])
301 self.assertEqual(p2, actual[1])
301 self.assertEqual(p2, actual[1])
302 self.assertEqual(linknode, actual[2])
302 self.assertEqual(linknode, actual[2])
303 self.assertEqual(copyfrom, actual[3])
303 self.assertEqual(copyfrom, actual[3])
304
304
305
305
306 # TODO:
306 # TODO:
307 # histpack store:
307 # histpack store:
308 # - repack two packs into one
308 # - repack two packs into one
309
309
310 if __name__ == '__main__':
310 if __name__ == '__main__':
311 if pycompat.iswindows:
311 if pycompat.iswindows:
312 sys.exit(80) # Skip on Windows
312 sys.exit(80) # Skip on Windows
313 silenttestrunner.main(__name__)
313 silenttestrunner.main(__name__)
@@ -1,2038 +1,2069
1 This file tests the behavior of run-tests.py itself.
1 This file tests the behavior of run-tests.py itself.
2
2
3 Avoid interference from actual test env:
3 Avoid interference from actual test env:
4
4
5 $ . "$TESTDIR/helper-runtests.sh"
5 $ . "$TESTDIR/helper-runtests.sh"
6
6
7 Smoke test with install
7 Smoke test with install
8 ============
8 ============
9 $ "$PYTHON" $TESTDIR/run-tests.py $HGTEST_RUN_TESTS_PURE -l
9 $ "$PYTHON" $TESTDIR/run-tests.py $HGTEST_RUN_TESTS_PURE -l
10 running 0 tests using 0 parallel processes
10 running 0 tests using 0 parallel processes
11
11
12 # Ran 0 tests, 0 skipped, 0 failed.
12 # Ran 0 tests, 0 skipped, 0 failed.
13
13
14 Define a helper to avoid the install step
14 Define a helper to avoid the install step
15 =============
15 =============
16 $ rt()
16 $ rt()
17 > {
17 > {
18 > "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` -j1 "$@"
18 > "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` -j1 "$@"
19 > }
19 > }
20
20
21 error paths
21 error paths
22
22
23 #if symlink
23 #if symlink
24 $ ln -s `which true` hg
24 $ ln -s `which true` hg
25 $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
25 $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
26 warning: --with-hg should specify an hg script
26 warning: --with-hg should specify an hg script
27 running 0 tests using 0 parallel processes
27 running 0 tests using 0 parallel processes
28
28
29 # Ran 0 tests, 0 skipped, 0 failed.
29 # Ran 0 tests, 0 skipped, 0 failed.
30 $ rm hg
30 $ rm hg
31 #endif
31 #endif
32
32
33 #if execbit
33 #if execbit
34 $ touch hg
34 $ touch hg
35 $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
35 $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
36 usage: run-tests.py [options] [tests]
36 usage: run-tests.py [options] [tests]
37 run-tests.py: error: --with-hg must specify an executable hg script
37 run-tests.py: error: --with-hg must specify an executable hg script
38 [2]
38 [2]
39 $ rm hg
39 $ rm hg
40 #endif
40 #endif
41
41
42 Features for testing optional lines
42 Features for testing optional lines
43 ===================================
43 ===================================
44
44
45 $ cat > hghaveaddon.py <<EOF
45 $ cat > hghaveaddon.py <<EOF
46 > import hghave
46 > import hghave
47 > @hghave.check("custom", "custom hghave feature")
47 > @hghave.check("custom", "custom hghave feature")
48 > def has_custom():
48 > def has_custom():
49 > return True
49 > return True
50 > @hghave.check("missing", "missing hghave feature")
50 > @hghave.check("missing", "missing hghave feature")
51 > def has_missing():
51 > def has_missing():
52 > return False
52 > return False
53 > EOF
53 > EOF
54
54
55 an empty test
55 an empty test
56 =======================
56 =======================
57
57
58 $ touch test-empty.t
58 $ touch test-empty.t
59 $ rt
59 $ rt
60 running 1 tests using 1 parallel processes
60 running 1 tests using 1 parallel processes
61 .
61 .
62 # Ran 1 tests, 0 skipped, 0 failed.
62 # Ran 1 tests, 0 skipped, 0 failed.
63 $ rm test-empty.t
63 $ rm test-empty.t
64
64
65 a successful test
65 a successful test
66 =======================
66 =======================
67
67
68 $ cat > test-success.t << EOF
68 $ cat > test-success.t << EOF
69 > $ echo babar
69 > $ echo babar
70 > babar
70 > babar
71 > $ echo xyzzy
71 > $ echo xyzzy
72 > dont_print (?)
72 > dont_print (?)
73 > nothing[42]line (re) (?)
73 > nothing[42]line (re) (?)
74 > never*happens (glob) (?)
74 > never*happens (glob) (?)
75 > more_nothing (?)
75 > more_nothing (?)
76 > xyzzy
76 > xyzzy
77 > nor this (?)
77 > nor this (?)
78 > $ printf 'abc\ndef\nxyz\n'
78 > $ printf 'abc\ndef\nxyz\n'
79 > 123 (?)
79 > 123 (?)
80 > abc
80 > abc
81 > def (?)
81 > def (?)
82 > 456 (?)
82 > 456 (?)
83 > xyz
83 > xyz
84 > $ printf 'zyx\nwvu\ntsr\n'
84 > $ printf 'zyx\nwvu\ntsr\n'
85 > abc (?)
85 > abc (?)
86 > zyx (custom !)
86 > zyx (custom !)
87 > wvu
87 > wvu
88 > no_print (no-custom !)
88 > no_print (no-custom !)
89 > tsr (no-missing !)
89 > tsr (no-missing !)
90 > missing (missing !)
90 > missing (missing !)
91 > EOF
91 > EOF
92
92
93 $ rt
93 $ rt
94 running 1 tests using 1 parallel processes
94 running 1 tests using 1 parallel processes
95 .
95 .
96 # Ran 1 tests, 0 skipped, 0 failed.
96 # Ran 1 tests, 0 skipped, 0 failed.
97
97
98 failing test
98 failing test
99 ==================
99 ==================
100
100
101 test churn with globs
101 test churn with globs
102 $ cat > test-failure.t <<EOF
102 $ cat > test-failure.t <<EOF
103 > $ echo "bar-baz"; echo "bar-bad"; echo foo
103 > $ echo "bar-baz"; echo "bar-bad"; echo foo
104 > bar*bad (glob)
104 > bar*bad (glob)
105 > bar*baz (glob)
105 > bar*baz (glob)
106 > | fo (re)
106 > | fo (re)
107 > EOF
107 > EOF
108 $ rt test-failure.t
108 $ rt test-failure.t
109 running 1 tests using 1 parallel processes
109 running 1 tests using 1 parallel processes
110
110
111 --- $TESTTMP/test-failure.t
111 --- $TESTTMP/test-failure.t
112 +++ $TESTTMP/test-failure.t.err
112 +++ $TESTTMP/test-failure.t.err
113 @@ -1,4 +1,4 @@
113 @@ -1,4 +1,4 @@
114 $ echo "bar-baz"; echo "bar-bad"; echo foo
114 $ echo "bar-baz"; echo "bar-bad"; echo foo
115 + bar*baz (glob)
115 + bar*baz (glob)
116 bar*bad (glob)
116 bar*bad (glob)
117 - bar*baz (glob)
117 - bar*baz (glob)
118 - | fo (re)
118 - | fo (re)
119 + foo
119 + foo
120
120
121 ERROR: test-failure.t output changed
121 ERROR: test-failure.t output changed
122 !
122 !
123 Failed test-failure.t: output changed
123 Failed test-failure.t: output changed
124 # Ran 1 tests, 0 skipped, 1 failed.
124 # Ran 1 tests, 0 skipped, 1 failed.
125 python hash seed: * (glob)
125 python hash seed: * (glob)
126 [1]
126 [1]
127
127
128 test how multiple globs get matched with lines in output
128 test how multiple globs get matched with lines in output
129 $ cat > test-failure-globs.t <<EOF
129 $ cat > test-failure-globs.t <<EOF
130 > $ echo "context"; echo "context"; \
130 > $ echo "context"; echo "context"; \
131 > echo "key: 1"; echo "value: not a"; \
131 > echo "key: 1"; echo "value: not a"; \
132 > echo "key: 2"; echo "value: not b"; \
132 > echo "key: 2"; echo "value: not b"; \
133 > echo "key: 3"; echo "value: c"; \
133 > echo "key: 3"; echo "value: c"; \
134 > echo "key: 4"; echo "value: d"
134 > echo "key: 4"; echo "value: d"
135 > context
135 > context
136 > context
136 > context
137 > key: 1
137 > key: 1
138 > value: a
138 > value: a
139 > key: 2
139 > key: 2
140 > value: b
140 > value: b
141 > key: 3
141 > key: 3
142 > value: * (glob)
142 > value: * (glob)
143 > key: 4
143 > key: 4
144 > value: * (glob)
144 > value: * (glob)
145 > EOF
145 > EOF
146 $ rt test-failure-globs.t
146 $ rt test-failure-globs.t
147 running 1 tests using 1 parallel processes
147 running 1 tests using 1 parallel processes
148
148
149 --- $TESTTMP/test-failure-globs.t
149 --- $TESTTMP/test-failure-globs.t
150 +++ $TESTTMP/test-failure-globs.t.err
150 +++ $TESTTMP/test-failure-globs.t.err
151 @@ -2,9 +2,9 @@
151 @@ -2,9 +2,9 @@
152 context
152 context
153 context
153 context
154 key: 1
154 key: 1
155 - value: a
155 - value: a
156 + value: not a
156 + value: not a
157 key: 2
157 key: 2
158 - value: b
158 - value: b
159 + value: not b
159 + value: not b
160 key: 3
160 key: 3
161 value: * (glob)
161 value: * (glob)
162 key: 4
162 key: 4
163
163
164 ERROR: test-failure-globs.t output changed
164 ERROR: test-failure-globs.t output changed
165 !
165 !
166 Failed test-failure-globs.t: output changed
166 Failed test-failure-globs.t: output changed
167 # Ran 1 tests, 0 skipped, 1 failed.
167 # Ran 1 tests, 0 skipped, 1 failed.
168 python hash seed: * (glob)
168 python hash seed: * (glob)
169 [1]
169 [1]
170 $ rm test-failure-globs.t
170 $ rm test-failure-globs.t
171
171
172 test diff colorisation
172 test diff colorisation
173
173
174 #if no-windows pygments
174 #if no-windows pygments
175 $ rt test-failure.t --color always
175 $ rt test-failure.t --color always
176 running 1 tests using 1 parallel processes
176 running 1 tests using 1 parallel processes
177
177
178 \x1b[38;5;124m--- $TESTTMP/test-failure.t\x1b[39m (esc)
178 \x1b[38;5;124m--- $TESTTMP/test-failure.t\x1b[39m (esc)
179 \x1b[38;5;34m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc)
179 \x1b[38;5;34m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc)
180 \x1b[38;5;90;01m@@ -1,4 +1,4 @@\x1b[39;00m (esc)
180 \x1b[38;5;90;01m@@ -1,4 +1,4 @@\x1b[39;00m (esc)
181 $ echo "bar-baz"; echo "bar-bad"; echo foo
181 $ echo "bar-baz"; echo "bar-bad"; echo foo
182 \x1b[38;5;34m+ bar*baz (glob)\x1b[39m (esc)
182 \x1b[38;5;34m+ bar*baz (glob)\x1b[39m (esc)
183 bar*bad (glob)
183 bar*bad (glob)
184 \x1b[38;5;124m- bar*baz (glob)\x1b[39m (esc)
184 \x1b[38;5;124m- bar*baz (glob)\x1b[39m (esc)
185 \x1b[38;5;124m- | fo (re)\x1b[39m (esc)
185 \x1b[38;5;124m- | fo (re)\x1b[39m (esc)
186 \x1b[38;5;34m+ foo\x1b[39m (esc)
186 \x1b[38;5;34m+ foo\x1b[39m (esc)
187
187
188 \x1b[38;5;88mERROR: \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m output changed\x1b[39m (esc)
188 \x1b[38;5;88mERROR: \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m output changed\x1b[39m (esc)
189 !
189 !
190 \x1b[38;5;88mFailed \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m: output changed\x1b[39m (esc)
190 \x1b[38;5;88mFailed \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m: output changed\x1b[39m (esc)
191 # Ran 1 tests, 0 skipped, 1 failed.
191 # Ran 1 tests, 0 skipped, 1 failed.
192 python hash seed: * (glob)
192 python hash seed: * (glob)
193 [1]
193 [1]
194
194
195 $ rt test-failure.t 2> tmp.log
195 $ rt test-failure.t 2> tmp.log
196 running 1 tests using 1 parallel processes
196 running 1 tests using 1 parallel processes
197 [1]
197 [1]
198 $ cat tmp.log
198 $ cat tmp.log
199
199
200 --- $TESTTMP/test-failure.t
200 --- $TESTTMP/test-failure.t
201 +++ $TESTTMP/test-failure.t.err
201 +++ $TESTTMP/test-failure.t.err
202 @@ -1,4 +1,4 @@
202 @@ -1,4 +1,4 @@
203 $ echo "bar-baz"; echo "bar-bad"; echo foo
203 $ echo "bar-baz"; echo "bar-bad"; echo foo
204 + bar*baz (glob)
204 + bar*baz (glob)
205 bar*bad (glob)
205 bar*bad (glob)
206 - bar*baz (glob)
206 - bar*baz (glob)
207 - | fo (re)
207 - | fo (re)
208 + foo
208 + foo
209
209
210 ERROR: test-failure.t output changed
210 ERROR: test-failure.t output changed
211 !
211 !
212 Failed test-failure.t: output changed
212 Failed test-failure.t: output changed
213 # Ran 1 tests, 0 skipped, 1 failed.
213 # Ran 1 tests, 0 skipped, 1 failed.
214 python hash seed: * (glob)
214 python hash seed: * (glob)
215 #endif
215 #endif
216
216
217 $ cat > test-failure.t << EOF
217 $ cat > test-failure.t << EOF
218 > $ true
218 > $ true
219 > should go away (true !)
219 > should go away (true !)
220 > $ true
220 > $ true
221 > should stay (false !)
221 > should stay (false !)
222 >
222 >
223 > Should remove first line, not second or third
223 > Should remove first line, not second or third
224 > $ echo 'testing'
224 > $ echo 'testing'
225 > baz*foo (glob) (true !)
225 > baz*foo (glob) (true !)
226 > foobar*foo (glob) (false !)
226 > foobar*foo (glob) (false !)
227 > te*ting (glob) (true !)
227 > te*ting (glob) (true !)
228 >
228 >
229 > Should keep first two lines, remove third and last
229 > Should keep first two lines, remove third and last
230 > $ echo 'testing'
230 > $ echo 'testing'
231 > test.ng (re) (true !)
231 > test.ng (re) (true !)
232 > foo.ar (re) (false !)
232 > foo.ar (re) (false !)
233 > b.r (re) (true !)
233 > b.r (re) (true !)
234 > missing (?)
234 > missing (?)
235 > awol (true !)
235 > awol (true !)
236 >
236 >
237 > The "missing" line should stay, even though awol is dropped
237 > The "missing" line should stay, even though awol is dropped
238 > $ echo 'testing'
238 > $ echo 'testing'
239 > test.ng (re) (true !)
239 > test.ng (re) (true !)
240 > foo.ar (?)
240 > foo.ar (?)
241 > awol
241 > awol
242 > missing (?)
242 > missing (?)
243 > EOF
243 > EOF
244 $ rt test-failure.t
244 $ rt test-failure.t
245 running 1 tests using 1 parallel processes
245 running 1 tests using 1 parallel processes
246
246
247 --- $TESTTMP/test-failure.t
247 --- $TESTTMP/test-failure.t
248 +++ $TESTTMP/test-failure.t.err
248 +++ $TESTTMP/test-failure.t.err
249 @@ -1,11 +1,9 @@
249 @@ -1,11 +1,9 @@
250 $ true
250 $ true
251 - should go away (true !)
251 - should go away (true !)
252 $ true
252 $ true
253 should stay (false !)
253 should stay (false !)
254
254
255 Should remove first line, not second or third
255 Should remove first line, not second or third
256 $ echo 'testing'
256 $ echo 'testing'
257 - baz*foo (glob) (true !)
257 - baz*foo (glob) (true !)
258 foobar*foo (glob) (false !)
258 foobar*foo (glob) (false !)
259 te*ting (glob) (true !)
259 te*ting (glob) (true !)
260
260
261 foo.ar (re) (false !)
261 foo.ar (re) (false !)
262 missing (?)
262 missing (?)
263 @@ -13,13 +11,10 @@
263 @@ -13,13 +11,10 @@
264 $ echo 'testing'
264 $ echo 'testing'
265 test.ng (re) (true !)
265 test.ng (re) (true !)
266 foo.ar (re) (false !)
266 foo.ar (re) (false !)
267 - b.r (re) (true !)
267 - b.r (re) (true !)
268 missing (?)
268 missing (?)
269 - awol (true !)
269 - awol (true !)
270
270
271 The "missing" line should stay, even though awol is dropped
271 The "missing" line should stay, even though awol is dropped
272 $ echo 'testing'
272 $ echo 'testing'
273 test.ng (re) (true !)
273 test.ng (re) (true !)
274 foo.ar (?)
274 foo.ar (?)
275 - awol
275 - awol
276 missing (?)
276 missing (?)
277
277
278 ERROR: test-failure.t output changed
278 ERROR: test-failure.t output changed
279 !
279 !
280 Failed test-failure.t: output changed
280 Failed test-failure.t: output changed
281 # Ran 1 tests, 0 skipped, 1 failed.
281 # Ran 1 tests, 0 skipped, 1 failed.
282 python hash seed: * (glob)
282 python hash seed: * (glob)
283 [1]
283 [1]
284
284
285 basic failing test
285 basic failing test
286 $ cat > test-failure.t << EOF
286 $ cat > test-failure.t << EOF
287 > $ echo babar
287 > $ echo babar
288 > rataxes
288 > rataxes
289 > This is a noop statement so that
289 > This is a noop statement so that
290 > this test is still more bytes than success.
290 > this test is still more bytes than success.
291 > pad pad pad pad............................................................
291 > pad pad pad pad............................................................
292 > pad pad pad pad............................................................
292 > pad pad pad pad............................................................
293 > pad pad pad pad............................................................
293 > pad pad pad pad............................................................
294 > pad pad pad pad............................................................
294 > pad pad pad pad............................................................
295 > pad pad pad pad............................................................
295 > pad pad pad pad............................................................
296 > pad pad pad pad............................................................
296 > pad pad pad pad............................................................
297 > EOF
297 > EOF
298
298
299 >>> fh = open('test-failure-unicode.t', 'wb')
299 >>> fh = open('test-failure-unicode.t', 'wb')
300 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
300 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
301 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
301 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
302
302
303 $ rt
303 $ rt
304 running 3 tests using 1 parallel processes
304 running 3 tests using 1 parallel processes
305
305
306 --- $TESTTMP/test-failure.t
306 --- $TESTTMP/test-failure.t
307 +++ $TESTTMP/test-failure.t.err
307 +++ $TESTTMP/test-failure.t.err
308 @@ -1,5 +1,5 @@
308 @@ -1,5 +1,5 @@
309 $ echo babar
309 $ echo babar
310 - rataxes
310 - rataxes
311 + babar
311 + babar
312 This is a noop statement so that
312 This is a noop statement so that
313 this test is still more bytes than success.
313 this test is still more bytes than success.
314 pad pad pad pad............................................................
314 pad pad pad pad............................................................
315
315
316 ERROR: test-failure.t output changed
316 ERROR: test-failure.t output changed
317 !.
317 !.
318 --- $TESTTMP/test-failure-unicode.t
318 --- $TESTTMP/test-failure-unicode.t
319 +++ $TESTTMP/test-failure-unicode.t.err
319 +++ $TESTTMP/test-failure-unicode.t.err
320 @@ -1,2 +1,2 @@
320 @@ -1,2 +1,2 @@
321 $ echo babar\xce\xb1 (esc)
321 $ echo babar\xce\xb1 (esc)
322 - l\xce\xb5\xce\xb5t (esc)
322 - l\xce\xb5\xce\xb5t (esc)
323 + babar\xce\xb1 (esc)
323 + babar\xce\xb1 (esc)
324
324
325 ERROR: test-failure-unicode.t output changed
325 ERROR: test-failure-unicode.t output changed
326 !
326 !
327 Failed test-failure-unicode.t: output changed
327 Failed test-failure-unicode.t: output changed
328 Failed test-failure.t: output changed
328 Failed test-failure.t: output changed
329 # Ran 3 tests, 0 skipped, 2 failed.
329 # Ran 3 tests, 0 skipped, 2 failed.
330 python hash seed: * (glob)
330 python hash seed: * (glob)
331 [1]
331 [1]
332
332
333 test --outputdir
333 test --outputdir
334 $ mkdir output
334 $ mkdir output
335 $ rt --outputdir output
335 $ rt --outputdir output
336 running 3 tests using 1 parallel processes
336 running 3 tests using 1 parallel processes
337
337
338 --- $TESTTMP/test-failure.t
338 --- $TESTTMP/test-failure.t
339 +++ $TESTTMP/output/test-failure.t.err
339 +++ $TESTTMP/output/test-failure.t.err
340 @@ -1,5 +1,5 @@
340 @@ -1,5 +1,5 @@
341 $ echo babar
341 $ echo babar
342 - rataxes
342 - rataxes
343 + babar
343 + babar
344 This is a noop statement so that
344 This is a noop statement so that
345 this test is still more bytes than success.
345 this test is still more bytes than success.
346 pad pad pad pad............................................................
346 pad pad pad pad............................................................
347
347
348 ERROR: test-failure.t output changed
348 ERROR: test-failure.t output changed
349 !.
349 !.
350 --- $TESTTMP/test-failure-unicode.t
350 --- $TESTTMP/test-failure-unicode.t
351 +++ $TESTTMP/output/test-failure-unicode.t.err
351 +++ $TESTTMP/output/test-failure-unicode.t.err
352 @@ -1,2 +1,2 @@
352 @@ -1,2 +1,2 @@
353 $ echo babar\xce\xb1 (esc)
353 $ echo babar\xce\xb1 (esc)
354 - l\xce\xb5\xce\xb5t (esc)
354 - l\xce\xb5\xce\xb5t (esc)
355 + babar\xce\xb1 (esc)
355 + babar\xce\xb1 (esc)
356
356
357 ERROR: test-failure-unicode.t output changed
357 ERROR: test-failure-unicode.t output changed
358 !
358 !
359 Failed test-failure-unicode.t: output changed
359 Failed test-failure-unicode.t: output changed
360 Failed test-failure.t: output changed
360 Failed test-failure.t: output changed
361 # Ran 3 tests, 0 skipped, 2 failed.
361 # Ran 3 tests, 0 skipped, 2 failed.
362 python hash seed: * (glob)
362 python hash seed: * (glob)
363 [1]
363 [1]
364 $ ls -a output
364 $ ls -a output
365 .
365 .
366 ..
366 ..
367 .testtimes
367 .testtimes
368 test-failure-unicode.t.err
368 test-failure-unicode.t.err
369 test-failure.t.err
369 test-failure.t.err
370
370
371 test --xunit support
371 test --xunit support
372 $ rt --xunit=xunit.xml
372 $ rt --xunit=xunit.xml
373 running 3 tests using 1 parallel processes
373 running 3 tests using 1 parallel processes
374
374
375 --- $TESTTMP/test-failure.t
375 --- $TESTTMP/test-failure.t
376 +++ $TESTTMP/test-failure.t.err
376 +++ $TESTTMP/test-failure.t.err
377 @@ -1,5 +1,5 @@
377 @@ -1,5 +1,5 @@
378 $ echo babar
378 $ echo babar
379 - rataxes
379 - rataxes
380 + babar
380 + babar
381 This is a noop statement so that
381 This is a noop statement so that
382 this test is still more bytes than success.
382 this test is still more bytes than success.
383 pad pad pad pad............................................................
383 pad pad pad pad............................................................
384
384
385 ERROR: test-failure.t output changed
385 ERROR: test-failure.t output changed
386 !.
386 !.
387 --- $TESTTMP/test-failure-unicode.t
387 --- $TESTTMP/test-failure-unicode.t
388 +++ $TESTTMP/test-failure-unicode.t.err
388 +++ $TESTTMP/test-failure-unicode.t.err
389 @@ -1,2 +1,2 @@
389 @@ -1,2 +1,2 @@
390 $ echo babar\xce\xb1 (esc)
390 $ echo babar\xce\xb1 (esc)
391 - l\xce\xb5\xce\xb5t (esc)
391 - l\xce\xb5\xce\xb5t (esc)
392 + babar\xce\xb1 (esc)
392 + babar\xce\xb1 (esc)
393
393
394 ERROR: test-failure-unicode.t output changed
394 ERROR: test-failure-unicode.t output changed
395 !
395 !
396 Failed test-failure-unicode.t: output changed
396 Failed test-failure-unicode.t: output changed
397 Failed test-failure.t: output changed
397 Failed test-failure.t: output changed
398 # Ran 3 tests, 0 skipped, 2 failed.
398 # Ran 3 tests, 0 skipped, 2 failed.
399 python hash seed: * (glob)
399 python hash seed: * (glob)
400 [1]
400 [1]
401 $ cat xunit.xml
401 $ cat xunit.xml
402 <?xml version="1.0" encoding="utf-8"?>
402 <?xml version="1.0" encoding="utf-8"?>
403 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
403 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
404 <testcase name="test-success.t" time="*"/> (glob)
404 <testcase name="test-success.t" time="*"/> (glob)
405 <testcase name="test-failure-unicode.t" time="*"> (glob)
405 <testcase name="test-failure-unicode.t" time="*"> (glob)
406 <failure message="output changed" type="output-mismatch"><![CDATA[--- $TESTTMP/test-failure-unicode.t (py38 !)
406 <failure message="output changed" type="output-mismatch"><![CDATA[--- $TESTTMP/test-failure-unicode.t (py38 !)
407 <failure message="output changed" type="output-mismatch"> (no-py38 !)
407 <failure message="output changed" type="output-mismatch"> (no-py38 !)
408 <![CDATA[--- $TESTTMP/test-failure-unicode.t (no-py38 !)
408 <![CDATA[--- $TESTTMP/test-failure-unicode.t (no-py38 !)
409 +++ $TESTTMP/test-failure-unicode.t.err
409 +++ $TESTTMP/test-failure-unicode.t.err
410 @@ -1,2 +1,2 @@
410 @@ -1,2 +1,2 @@
411 $ echo babar\xce\xb1 (esc)
411 $ echo babar\xce\xb1 (esc)
412 - l\xce\xb5\xce\xb5t (esc)
412 - l\xce\xb5\xce\xb5t (esc)
413 + babar\xce\xb1 (esc)
413 + babar\xce\xb1 (esc)
414 ]]></failure> (py38 !)
414 ]]></failure> (py38 !)
415 ]]> </failure> (no-py38 !)
415 ]]> </failure> (no-py38 !)
416 </testcase>
416 </testcase>
417 <testcase name="test-failure.t" time="*"> (glob)
417 <testcase name="test-failure.t" time="*"> (glob)
418 <failure message="output changed" type="output-mismatch"><![CDATA[--- $TESTTMP/test-failure.t (py38 !)
418 <failure message="output changed" type="output-mismatch"><![CDATA[--- $TESTTMP/test-failure.t (py38 !)
419 <failure message="output changed" type="output-mismatch"> (no-py38 !)
419 <failure message="output changed" type="output-mismatch"> (no-py38 !)
420 <![CDATA[--- $TESTTMP/test-failure.t (no-py38 !)
420 <![CDATA[--- $TESTTMP/test-failure.t (no-py38 !)
421 +++ $TESTTMP/test-failure.t.err
421 +++ $TESTTMP/test-failure.t.err
422 @@ -1,5 +1,5 @@
422 @@ -1,5 +1,5 @@
423 $ echo babar
423 $ echo babar
424 - rataxes
424 - rataxes
425 + babar
425 + babar
426 This is a noop statement so that
426 This is a noop statement so that
427 this test is still more bytes than success.
427 this test is still more bytes than success.
428 pad pad pad pad............................................................
428 pad pad pad pad............................................................
429 ]]></failure> (py38 !)
429 ]]></failure> (py38 !)
430 ]]> </failure> (no-py38 !)
430 ]]> </failure> (no-py38 !)
431 </testcase>
431 </testcase>
432 </testsuite>
432 </testsuite>
433
433
434 $ cat .testtimes
434 $ cat .testtimes
435 test-empty.t * (glob)
435 test-empty.t * (glob)
436 test-failure-globs.t * (glob)
436 test-failure-globs.t * (glob)
437 test-failure-unicode.t * (glob)
437 test-failure-unicode.t * (glob)
438 test-failure.t * (glob)
438 test-failure.t * (glob)
439 test-success.t * (glob)
439 test-success.t * (glob)
440
440
441 $ rt --list-tests
441 $ rt --list-tests
442 test-failure-unicode.t
442 test-failure-unicode.t
443 test-failure.t
443 test-failure.t
444 test-success.t
444 test-success.t
445
445
446 $ rt --list-tests --json
446 $ rt --list-tests --json
447 test-failure-unicode.t
447 test-failure-unicode.t
448 test-failure.t
448 test-failure.t
449 test-success.t
449 test-success.t
450 $ cat report.json
450 $ cat report.json
451 testreport ={
451 testreport ={
452 "test-failure-unicode.t": {
452 "test-failure-unicode.t": {
453 "result": "success"
453 "result": "success"
454 },
454 },
455 "test-failure.t": {
455 "test-failure.t": {
456 "result": "success"
456 "result": "success"
457 },
457 },
458 "test-success.t": {
458 "test-success.t": {
459 "result": "success"
459 "result": "success"
460 }
460 }
461 } (no-eol)
461 } (no-eol)
462
462
463 $ rt --list-tests --xunit=xunit.xml
463 $ rt --list-tests --xunit=xunit.xml
464 test-failure-unicode.t
464 test-failure-unicode.t
465 test-failure.t
465 test-failure.t
466 test-success.t
466 test-success.t
467 $ cat xunit.xml
467 $ cat xunit.xml
468 <?xml version="1.0" encoding="utf-8"?>
468 <?xml version="1.0" encoding="utf-8"?>
469 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
469 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
470 <testcase name="test-failure-unicode.t"/>
470 <testcase name="test-failure-unicode.t"/>
471 <testcase name="test-failure.t"/>
471 <testcase name="test-failure.t"/>
472 <testcase name="test-success.t"/>
472 <testcase name="test-success.t"/>
473 </testsuite>
473 </testsuite>
474
474
475 $ rt --list-tests test-failure* --json --xunit=xunit.xml --outputdir output
475 $ rt --list-tests test-failure* --json --xunit=xunit.xml --outputdir output
476 test-failure-unicode.t
476 test-failure-unicode.t
477 test-failure.t
477 test-failure.t
478 $ cat output/report.json
478 $ cat output/report.json
479 testreport ={
479 testreport ={
480 "test-failure-unicode.t": {
480 "test-failure-unicode.t": {
481 "result": "success"
481 "result": "success"
482 },
482 },
483 "test-failure.t": {
483 "test-failure.t": {
484 "result": "success"
484 "result": "success"
485 }
485 }
486 } (no-eol)
486 } (no-eol)
487 $ cat xunit.xml
487 $ cat xunit.xml
488 <?xml version="1.0" encoding="utf-8"?>
488 <?xml version="1.0" encoding="utf-8"?>
489 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
489 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
490 <testcase name="test-failure-unicode.t"/>
490 <testcase name="test-failure-unicode.t"/>
491 <testcase name="test-failure.t"/>
491 <testcase name="test-failure.t"/>
492 </testsuite>
492 </testsuite>
493
493
494 $ rm test-failure-unicode.t
494 $ rm test-failure-unicode.t
495
495
496 test for --retest
496 test for --retest
497 ====================
497 ====================
498
498
499 $ rt --retest
499 $ rt --retest
500 running 1 tests using 1 parallel processes
500 running 1 tests using 1 parallel processes
501
501
502 --- $TESTTMP/test-failure.t
502 --- $TESTTMP/test-failure.t
503 +++ $TESTTMP/test-failure.t.err
503 +++ $TESTTMP/test-failure.t.err
504 @@ -1,5 +1,5 @@
504 @@ -1,5 +1,5 @@
505 $ echo babar
505 $ echo babar
506 - rataxes
506 - rataxes
507 + babar
507 + babar
508 This is a noop statement so that
508 This is a noop statement so that
509 this test is still more bytes than success.
509 this test is still more bytes than success.
510 pad pad pad pad............................................................
510 pad pad pad pad............................................................
511
511
512 ERROR: test-failure.t output changed
512 ERROR: test-failure.t output changed
513 !
513 !
514 Failed test-failure.t: output changed
514 Failed test-failure.t: output changed
515 # Ran 1 tests, 0 skipped, 1 failed.
515 # Ran 1 tests, 0 skipped, 1 failed.
516 python hash seed: * (glob)
516 python hash seed: * (glob)
517 [1]
517 [1]
518
518
519 --retest works with --outputdir
519 --retest works with --outputdir
520 $ rm -r output
520 $ rm -r output
521 $ mkdir output
521 $ mkdir output
522 $ mv test-failure.t.err output
522 $ mv test-failure.t.err output
523 $ rt --retest --outputdir output
523 $ rt --retest --outputdir output
524 running 1 tests using 1 parallel processes
524 running 1 tests using 1 parallel processes
525
525
526 --- $TESTTMP/test-failure.t
526 --- $TESTTMP/test-failure.t
527 +++ $TESTTMP/output/test-failure.t.err
527 +++ $TESTTMP/output/test-failure.t.err
528 @@ -1,5 +1,5 @@
528 @@ -1,5 +1,5 @@
529 $ echo babar
529 $ echo babar
530 - rataxes
530 - rataxes
531 + babar
531 + babar
532 This is a noop statement so that
532 This is a noop statement so that
533 this test is still more bytes than success.
533 this test is still more bytes than success.
534 pad pad pad pad............................................................
534 pad pad pad pad............................................................
535
535
536 ERROR: test-failure.t output changed
536 ERROR: test-failure.t output changed
537 !
537 !
538 Failed test-failure.t: output changed
538 Failed test-failure.t: output changed
539 # Ran 1 tests, 0 skipped, 1 failed.
539 # Ran 1 tests, 0 skipped, 1 failed.
540 python hash seed: * (glob)
540 python hash seed: * (glob)
541 [1]
541 [1]
542
542
543 Selecting Tests To Run
543 Selecting Tests To Run
544 ======================
544 ======================
545
545
546 successful
546 successful
547
547
548 $ rt test-success.t
548 $ rt test-success.t
549 running 1 tests using 1 parallel processes
549 running 1 tests using 1 parallel processes
550 .
550 .
551 # Ran 1 tests, 0 skipped, 0 failed.
551 # Ran 1 tests, 0 skipped, 0 failed.
552
552
553 success w/ keyword
553 success w/ keyword
554 $ rt -k xyzzy
554 $ rt -k xyzzy
555 running 2 tests using 1 parallel processes
555 running 2 tests using 1 parallel processes
556 .
556 .
557 # Ran 2 tests, 1 skipped, 0 failed.
557 # Ran 2 tests, 1 skipped, 0 failed.
558
558
559 failed
559 failed
560
560
561 $ rt test-failure.t
561 $ rt test-failure.t
562 running 1 tests using 1 parallel processes
562 running 1 tests using 1 parallel processes
563
563
564 --- $TESTTMP/test-failure.t
564 --- $TESTTMP/test-failure.t
565 +++ $TESTTMP/test-failure.t.err
565 +++ $TESTTMP/test-failure.t.err
566 @@ -1,5 +1,5 @@
566 @@ -1,5 +1,5 @@
567 $ echo babar
567 $ echo babar
568 - rataxes
568 - rataxes
569 + babar
569 + babar
570 This is a noop statement so that
570 This is a noop statement so that
571 this test is still more bytes than success.
571 this test is still more bytes than success.
572 pad pad pad pad............................................................
572 pad pad pad pad............................................................
573
573
574 ERROR: test-failure.t output changed
574 ERROR: test-failure.t output changed
575 !
575 !
576 Failed test-failure.t: output changed
576 Failed test-failure.t: output changed
577 # Ran 1 tests, 0 skipped, 1 failed.
577 # Ran 1 tests, 0 skipped, 1 failed.
578 python hash seed: * (glob)
578 python hash seed: * (glob)
579 [1]
579 [1]
580
580
581 failure w/ keyword
581 failure w/ keyword
582 $ rt -k rataxes
582 $ rt -k rataxes
583 running 2 tests using 1 parallel processes
583 running 2 tests using 1 parallel processes
584
584
585 --- $TESTTMP/test-failure.t
585 --- $TESTTMP/test-failure.t
586 +++ $TESTTMP/test-failure.t.err
586 +++ $TESTTMP/test-failure.t.err
587 @@ -1,5 +1,5 @@
587 @@ -1,5 +1,5 @@
588 $ echo babar
588 $ echo babar
589 - rataxes
589 - rataxes
590 + babar
590 + babar
591 This is a noop statement so that
591 This is a noop statement so that
592 this test is still more bytes than success.
592 this test is still more bytes than success.
593 pad pad pad pad............................................................
593 pad pad pad pad............................................................
594
594
595 ERROR: test-failure.t output changed
595 ERROR: test-failure.t output changed
596 !
596 !
597 Failed test-failure.t: output changed
597 Failed test-failure.t: output changed
598 # Ran 2 tests, 1 skipped, 1 failed.
598 # Ran 2 tests, 1 skipped, 1 failed.
599 python hash seed: * (glob)
599 python hash seed: * (glob)
600 [1]
600 [1]
601
601
602 Verify that when a process fails to start we show a useful message
602 Verify that when a process fails to start we show a useful message
603 ==================================================================
603 ==================================================================
604
604
605 $ cat > test-serve-fail.t <<EOF
605 $ cat > test-serve-fail.t <<EOF
606 > $ echo 'abort: child process failed to start blah'
606 > $ echo 'abort: child process failed to start blah'
607 > EOF
607 > EOF
608 $ rt test-serve-fail.t
608 $ rt test-serve-fail.t
609 running 1 tests using 1 parallel processes
609 running 1 tests using 1 parallel processes
610
610
611 --- $TESTTMP/test-serve-fail.t
611 --- $TESTTMP/test-serve-fail.t
612 +++ $TESTTMP/test-serve-fail.t.err
612 +++ $TESTTMP/test-serve-fail.t.err
613 @@ -1* +1,2 @@ (glob)
613 @@ -1* +1,2 @@ (glob)
614 $ echo 'abort: child process failed to start blah'
614 $ echo 'abort: child process failed to start blah'
615 + abort: child process failed to start blah
615 + abort: child process failed to start blah
616
616
617 ERROR: test-serve-fail.t output changed
617 ERROR: test-serve-fail.t output changed
618 !
618 !
619 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
619 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
620 # Ran 1 tests, 0 skipped, 1 failed.
620 # Ran 1 tests, 0 skipped, 1 failed.
621 python hash seed: * (glob)
621 python hash seed: * (glob)
622 [1]
622 [1]
623 $ rm test-serve-fail.t
623 $ rm test-serve-fail.t
624
624
625 Verify that we can try other ports
625 Verify that we can try other ports
626 ===================================
626 ===================================
627
627
628 Extensions aren't inherited by the invoked run-tests.py. An extension
628 Extensions aren't inherited by the invoked run-tests.py. An extension
629 introducing a repository requirement could cause this to fail. So we clear
629 introducing a repository requirement could cause this to fail. So we clear
630 HGRCPATH to get a clean environment.
630 HGRCPATH to get a clean environment.
631
631
632 $ HGRCPATH= hg init inuse
632 $ HGRCPATH= hg init inuse
633 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
633 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
634 $ cat blocks.pid >> $DAEMON_PIDS
634 $ cat blocks.pid >> $DAEMON_PIDS
635 $ cat > test-serve-inuse.t <<EOF
635 $ cat > test-serve-inuse.t <<EOF
636 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
636 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
637 > $ cat hg.pid >> \$DAEMON_PIDS
637 > $ cat hg.pid >> \$DAEMON_PIDS
638 > EOF
638 > EOF
639 $ rt test-serve-inuse.t
639 $ rt test-serve-inuse.t
640 running 1 tests using 1 parallel processes
640 running 1 tests using 1 parallel processes
641 .
641 .
642 # Ran 1 tests, 0 skipped, 0 failed.
642 # Ran 1 tests, 0 skipped, 0 failed.
643 $ rm test-serve-inuse.t
643 $ rm test-serve-inuse.t
644 $ killdaemons.py $DAEMON_PIDS
644 $ killdaemons.py $DAEMON_PIDS
645
645
646 Running In Debug Mode
646 Running In Debug Mode
647 ======================
647 ======================
648
648
649 $ rt --debug 2>&1 | grep -v pwd
649 $ rt --debug 2>&1 | grep -v pwd
650 running 2 tests using 1 parallel processes
650 running 2 tests using 1 parallel processes
651 + alias hg=hg.exe (windows !)
651 + alias hg=hg.exe (windows !)
652 + echo *SALT* 0 0 (glob)
652 + echo *SALT* 0 0 (glob)
653 *SALT* 0 0 (glob)
653 *SALT* 0 0 (glob)
654 + echo babar
654 + echo babar
655 babar
655 babar
656 + echo *SALT* 10 0 (glob)
656 + echo *SALT* 10 0 (glob)
657 *SALT* 10 0 (glob)
657 *SALT* 10 0 (glob)
658 .+ alias hg=hg.exe (windows !)
658 .+ alias hg=hg.exe (windows !)
659 *+ echo *SALT* 0 0 (glob)
659 *+ echo *SALT* 0 0 (glob)
660 *SALT* 0 0 (glob)
660 *SALT* 0 0 (glob)
661 + echo babar
661 + echo babar
662 babar
662 babar
663 + echo *SALT* 2 0 (glob)
663 + echo *SALT* 2 0 (glob)
664 *SALT* 2 0 (glob)
664 *SALT* 2 0 (glob)
665 + echo xyzzy
665 + echo xyzzy
666 xyzzy
666 xyzzy
667 + echo *SALT* 9 0 (glob)
667 + echo *SALT* 9 0 (glob)
668 *SALT* 9 0 (glob)
668 *SALT* 9 0 (glob)
669 + printf *abc\ndef\nxyz\n* (glob)
669 + printf *abc\ndef\nxyz\n* (glob)
670 abc
670 abc
671 def
671 def
672 xyz
672 xyz
673 + echo *SALT* 15 0 (glob)
673 + echo *SALT* 15 0 (glob)
674 *SALT* 15 0 (glob)
674 *SALT* 15 0 (glob)
675 + printf *zyx\nwvu\ntsr\n* (glob)
675 + printf *zyx\nwvu\ntsr\n* (glob)
676 zyx
676 zyx
677 wvu
677 wvu
678 tsr
678 tsr
679 + echo *SALT* 22 0 (glob)
679 + echo *SALT* 22 0 (glob)
680 *SALT* 22 0 (glob)
680 *SALT* 22 0 (glob)
681 .
681 .
682 # Ran 2 tests, 0 skipped, 0 failed.
682 # Ran 2 tests, 0 skipped, 0 failed.
683
683
684 Parallel runs
684 Parallel runs
685 ==============
685 ==============
686
686
687 (duplicate the failing test to get predictable output)
687 (duplicate the failing test to get predictable output)
688 $ cp test-failure.t test-failure-copy.t
688 $ cp test-failure.t test-failure-copy.t
689
689
690 $ rt --jobs 2 test-failure*.t -n
690 $ rt --jobs 2 test-failure*.t -n
691 running 2 tests using 2 parallel processes
691 running 2 tests using 2 parallel processes
692 !!
692 !!
693 Failed test-failure*.t: output changed (glob)
693 Failed test-failure*.t: output changed (glob)
694 Failed test-failure*.t: output changed (glob)
694 Failed test-failure*.t: output changed (glob)
695 # Ran 2 tests, 0 skipped, 2 failed.
695 # Ran 2 tests, 0 skipped, 2 failed.
696 python hash seed: * (glob)
696 python hash seed: * (glob)
697 [1]
697 [1]
698
698
699 failures in parallel with --first should only print one failure
699 failures in parallel with --first should only print one failure
700 $ rt --jobs 2 --first test-failure*.t
700 $ rt --jobs 2 --first test-failure*.t
701 running 2 tests using 2 parallel processes
701 running 2 tests using 2 parallel processes
702
702
703 --- $TESTTMP/test-failure*.t (glob)
703 --- $TESTTMP/test-failure*.t (glob)
704 +++ $TESTTMP/test-failure*.t.err (glob)
704 +++ $TESTTMP/test-failure*.t.err (glob)
705 @@ -1,5 +1,5 @@
705 @@ -1,5 +1,5 @@
706 $ echo babar
706 $ echo babar
707 - rataxes
707 - rataxes
708 + babar
708 + babar
709 This is a noop statement so that
709 This is a noop statement so that
710 this test is still more bytes than success.
710 this test is still more bytes than success.
711 pad pad pad pad............................................................
711 pad pad pad pad............................................................
712
712
713 Failed test-failure*.t: output changed (glob)
713 Failed test-failure*.t: output changed (glob)
714 Failed test-failure*.t: output changed (glob)
714 Failed test-failure*.t: output changed (glob)
715 # Ran 2 tests, 0 skipped, 2 failed.
715 # Ran 2 tests, 0 skipped, 2 failed.
716 python hash seed: * (glob)
716 python hash seed: * (glob)
717 [1]
717 [1]
718
718
719
719
720 (delete the duplicated test file)
720 (delete the duplicated test file)
721 $ rm test-failure-copy.t
721 $ rm test-failure-copy.t
722
722
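(Sketch of the parallel options exercised above, with hypothetical test names: --jobs N runs up to N tests in separate processes, and with --first only the first failure is printed before the run stops.)

  $ rt --jobs 4 --first test-one.t test-two.t
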
723 multiple runs per test should be parallelized
723 multiple runs per test should be parallelized
724
724
725 $ rt --jobs 2 --runs-per-test 2 test-success.t
725 $ rt --jobs 2 --runs-per-test 2 test-success.t
726 running 2 tests using 2 parallel processes
726 running 2 tests using 2 parallel processes
727 ..
727 ..
728 # Ran 2 tests, 0 skipped, 0 failed.
728 # Ran 2 tests, 0 skipped, 0 failed.
729
729
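(Sketch with a hypothetical test name: --runs-per-test N schedules each selected test N times, and those runs are spread across the --jobs processes as shown above.)

  $ rt --jobs 2 --runs-per-test 3 test-example.t
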
730 Interactive run
730 Interactive run
731 ===============
731 ===============
732
732
733 (backup the failing test)
733 (backup the failing test)
734 $ cp test-failure.t backup
734 $ cp test-failure.t backup
735
735
736 Refuse the fix
736 Refuse the fix
737
737
738 $ echo 'n' | rt -i
738 $ echo 'n' | rt -i
739 running 2 tests using 1 parallel processes
739 running 2 tests using 1 parallel processes
740
740
741 --- $TESTTMP/test-failure.t
741 --- $TESTTMP/test-failure.t
742 +++ $TESTTMP/test-failure.t.err
742 +++ $TESTTMP/test-failure.t.err
743 @@ -1,5 +1,5 @@
743 @@ -1,5 +1,5 @@
744 $ echo babar
744 $ echo babar
745 - rataxes
745 - rataxes
746 + babar
746 + babar
747 This is a noop statement so that
747 This is a noop statement so that
748 this test is still more bytes than success.
748 this test is still more bytes than success.
749 pad pad pad pad............................................................
749 pad pad pad pad............................................................
750 Accept this change? [y/N]
750 Accept this change? [y/N]
751 ERROR: test-failure.t output changed
751 ERROR: test-failure.t output changed
752 !.
752 !.
753 Failed test-failure.t: output changed
753 Failed test-failure.t: output changed
754 # Ran 2 tests, 0 skipped, 1 failed.
754 # Ran 2 tests, 0 skipped, 1 failed.
755 python hash seed: * (glob)
755 python hash seed: * (glob)
756 [1]
756 [1]
757
757
758 $ cat test-failure.t
758 $ cat test-failure.t
759 $ echo babar
759 $ echo babar
760 rataxes
760 rataxes
761 This is a noop statement so that
761 This is a noop statement so that
762 this test is still more bytes than success.
762 this test is still more bytes than success.
763 pad pad pad pad............................................................
763 pad pad pad pad............................................................
764 pad pad pad pad............................................................
764 pad pad pad pad............................................................
765 pad pad pad pad............................................................
765 pad pad pad pad............................................................
766 pad pad pad pad............................................................
766 pad pad pad pad............................................................
767 pad pad pad pad............................................................
767 pad pad pad pad............................................................
768 pad pad pad pad............................................................
768 pad pad pad pad............................................................
769
769
770 Interactive with custom view
770 Interactive with custom view
771
771
772 $ echo 'n' | rt -i --view echo
772 $ echo 'n' | rt -i --view echo
773 running 2 tests using 1 parallel processes
773 running 2 tests using 1 parallel processes
774 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
774 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
775 Accept this change? [y/N]* (glob)
775 Accept this change? [y/N]* (glob)
776 ERROR: test-failure.t output changed
776 ERROR: test-failure.t output changed
777 !.
777 !.
778 Failed test-failure.t: output changed
778 Failed test-failure.t: output changed
779 # Ran 2 tests, 0 skipped, 1 failed.
779 # Ran 2 tests, 0 skipped, 1 failed.
780 python hash seed: * (glob)
780 python hash seed: * (glob)
781 [1]
781 [1]
782
782
783 View the fix
783 View the fix
784
784
785 $ echo 'y' | rt --view echo
785 $ echo 'y' | rt --view echo
786 running 2 tests using 1 parallel processes
786 running 2 tests using 1 parallel processes
787 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
787 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
788
788
789 ERROR: test-failure.t output changed
789 ERROR: test-failure.t output changed
790 !.
790 !.
791 Failed test-failure.t: output changed
791 Failed test-failure.t: output changed
792 # Ran 2 tests, 0 skipped, 1 failed.
792 # Ran 2 tests, 0 skipped, 1 failed.
793 python hash seed: * (glob)
793 python hash seed: * (glob)
794 [1]
794 [1]
795
795
796 Accept the fix
796 Accept the fix
797
797
798 $ cat >> test-failure.t <<EOF
798 $ cat >> test-failure.t <<EOF
799 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
799 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
800 > saved backup bundle to \$TESTTMP/foo.hg
800 > saved backup bundle to \$TESTTMP/foo.hg
801 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
801 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
802 > saved backup bundle to $TESTTMP\\foo.hg
802 > saved backup bundle to $TESTTMP\\foo.hg
803 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
803 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
804 > saved backup bundle to \$TESTTMP/*.hg (glob)
804 > saved backup bundle to \$TESTTMP/*.hg (glob)
805 > EOF
805 > EOF
806 $ echo 'y' | rt -i 2>&1
806 $ echo 'y' | rt -i 2>&1
807 running 2 tests using 1 parallel processes
807 running 2 tests using 1 parallel processes
808
808
809 --- $TESTTMP/test-failure.t
809 --- $TESTTMP/test-failure.t
810 +++ $TESTTMP/test-failure.t.err
810 +++ $TESTTMP/test-failure.t.err
811 @@ -1,5 +1,5 @@
811 @@ -1,5 +1,5 @@
812 $ echo babar
812 $ echo babar
813 - rataxes
813 - rataxes
814 + babar
814 + babar
815 This is a noop statement so that
815 This is a noop statement so that
816 this test is still more bytes than success.
816 this test is still more bytes than success.
817 pad pad pad pad............................................................
817 pad pad pad pad............................................................
818 @@ -11,6 +11,6 @@
818 @@ -11,6 +11,6 @@
819 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
819 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
820 saved backup bundle to $TESTTMP/foo.hg
820 saved backup bundle to $TESTTMP/foo.hg
821 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
821 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
822 - saved backup bundle to $TESTTMP\foo.hg
822 - saved backup bundle to $TESTTMP\foo.hg
823 + saved backup bundle to $TESTTMP/foo.hg
823 + saved backup bundle to $TESTTMP/foo.hg
824 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
824 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
825 saved backup bundle to $TESTTMP/*.hg (glob)
825 saved backup bundle to $TESTTMP/*.hg (glob)
826 Accept this change? [y/N] ..
826 Accept this change? [y/N] ..
827 # Ran 2 tests, 0 skipped, 0 failed.
827 # Ran 2 tests, 0 skipped, 0 failed.
828
828
829 $ sed -e 's,(glob)$,&<,g' test-failure.t
829 $ sed -e 's,(glob)$,&<,g' test-failure.t
830 $ echo babar
830 $ echo babar
831 babar
831 babar
832 This is a noop statement so that
832 This is a noop statement so that
833 this test is still more bytes than success.
833 this test is still more bytes than success.
834 pad pad pad pad............................................................
834 pad pad pad pad............................................................
835 pad pad pad pad............................................................
835 pad pad pad pad............................................................
836 pad pad pad pad............................................................
836 pad pad pad pad............................................................
837 pad pad pad pad............................................................
837 pad pad pad pad............................................................
838 pad pad pad pad............................................................
838 pad pad pad pad............................................................
839 pad pad pad pad............................................................
839 pad pad pad pad............................................................
840 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
840 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
841 saved backup bundle to $TESTTMP/foo.hg
841 saved backup bundle to $TESTTMP/foo.hg
842 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
842 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
843 saved backup bundle to $TESTTMP/foo.hg
843 saved backup bundle to $TESTTMP/foo.hg
844 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
844 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
845 saved backup bundle to $TESTTMP/*.hg (glob)<
845 saved backup bundle to $TESTTMP/*.hg (glob)<
846
846
847 $ rm test-failure.t
847 $ rm test-failure.t
848
848
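(Sketch of the interactive flow demonstrated above, assuming a hypothetical failing test: -i shows each diff and, when 'y' is answered, writes the new output back into the .t file; --view hands the reference file and the .err file to an external program instead of printing the diff.)

  $ echo y | rt -i test-example.t
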
849 Race condition - the test file is modified while the test is running
850
850
851 $ TESTRACEDIR=`pwd`
851 $ TESTRACEDIR=`pwd`
852 $ export TESTRACEDIR
852 $ export TESTRACEDIR
853 $ cat > test-race.t <<EOF
853 $ cat > test-race.t <<EOF
854 > $ echo 1
854 > $ echo 1
855 > $ echo "# a new line" >> $TESTRACEDIR/test-race.t
855 > $ echo "# a new line" >> $TESTRACEDIR/test-race.t
856 > EOF
856 > EOF
857
857
858 $ rt -i test-race.t
858 $ rt -i test-race.t
859 running 1 tests using 1 parallel processes
859 running 1 tests using 1 parallel processes
860
860
861 --- $TESTTMP/test-race.t
861 --- $TESTTMP/test-race.t
862 +++ $TESTTMP/test-race.t.err
862 +++ $TESTTMP/test-race.t.err
863 @@ -1,2 +1,3 @@
863 @@ -1,2 +1,3 @@
864 $ echo 1
864 $ echo 1
865 + 1
865 + 1
866 $ echo "# a new line" >> $TESTTMP/test-race.t
866 $ echo "# a new line" >> $TESTTMP/test-race.t
867 Reference output has changed (run again to prompt changes)
867 Reference output has changed (run again to prompt changes)
868 ERROR: test-race.t output changed
868 ERROR: test-race.t output changed
869 !
869 !
870 Failed test-race.t: output changed
870 Failed test-race.t: output changed
871 # Ran 1 tests, 0 skipped, 1 failed.
871 # Ran 1 tests, 0 skipped, 1 failed.
872 python hash seed: * (glob)
872 python hash seed: * (glob)
873 [1]
873 [1]
874
874
875 $ rm test-race.t
875 $ rm test-race.t
876
876
877 When "#testcases" is used in .t files
877 When "#testcases" is used in .t files
878
878
879 $ cat >> test-cases.t <<EOF
879 $ cat >> test-cases.t <<EOF
880 > #testcases a b
880 > #testcases a b
881 > #if a
881 > #if a
882 > $ echo 1
882 > $ echo 1
883 > #endif
883 > #endif
884 > #if b
884 > #if b
885 > $ echo 2
885 > $ echo 2
886 > #endif
886 > #endif
887 > EOF
887 > EOF
888
888
889 $ cat <<EOF | rt -i test-cases.t 2>&1
889 $ cat <<EOF | rt -i test-cases.t 2>&1
890 > y
890 > y
891 > y
891 > y
892 > EOF
892 > EOF
893 running 2 tests using 1 parallel processes
893 running 2 tests using 1 parallel processes
894
894
895 --- $TESTTMP/test-cases.t
895 --- $TESTTMP/test-cases.t
896 +++ $TESTTMP/test-cases.t#a.err
896 +++ $TESTTMP/test-cases.t#a.err
897 @@ -1,6 +1,7 @@
897 @@ -1,6 +1,7 @@
898 #testcases a b
898 #testcases a b
899 #if a
899 #if a
900 $ echo 1
900 $ echo 1
901 + 1
901 + 1
902 #endif
902 #endif
903 #if b
903 #if b
904 $ echo 2
904 $ echo 2
905 Accept this change? [y/N] .
905 Accept this change? [y/N] .
906 --- $TESTTMP/test-cases.t
906 --- $TESTTMP/test-cases.t
907 +++ $TESTTMP/test-cases.t#b.err
907 +++ $TESTTMP/test-cases.t#b.err
908 @@ -5,4 +5,5 @@
908 @@ -5,4 +5,5 @@
909 #endif
909 #endif
910 #if b
910 #if b
911 $ echo 2
911 $ echo 2
912 + 2
912 + 2
913 #endif
913 #endif
914 Accept this change? [y/N] .
914 Accept this change? [y/N] .
915 # Ran 2 tests, 0 skipped, 0 failed.
915 # Ran 2 tests, 0 skipped, 0 failed.
916
916
917 $ cat test-cases.t
917 $ cat test-cases.t
918 #testcases a b
918 #testcases a b
919 #if a
919 #if a
920 $ echo 1
920 $ echo 1
921 1
921 1
922 #endif
922 #endif
923 #if b
923 #if b
924 $ echo 2
924 $ echo 2
925 2
925 2
926 #endif
926 #endif
927
927
928 $ cat >> test-cases.t <<'EOF'
928 $ cat >> test-cases.t <<'EOF'
929 > #if a
929 > #if a
930 > $ NAME=A
930 > $ NAME=A
931 > #else
931 > #else
932 > $ NAME=B
932 > $ NAME=B
933 > #endif
933 > #endif
934 > $ echo $NAME
934 > $ echo $NAME
935 > A (a !)
935 > A (a !)
936 > B (b !)
936 > B (b !)
937 > EOF
937 > EOF
938 $ rt test-cases.t
938 $ rt test-cases.t
939 running 2 tests using 1 parallel processes
939 running 2 tests using 1 parallel processes
940 ..
940 ..
941 # Ran 2 tests, 0 skipped, 0 failed.
941 # Ran 2 tests, 0 skipped, 0 failed.
942
942
943 When using multiple dimensions of "#testcases" in .t files
943 When using multiple dimensions of "#testcases" in .t files
944
944
945 $ cat > test-cases.t <<'EOF'
945 $ cat > test-cases.t <<'EOF'
946 > #testcases a b
946 > #testcases a b
947 > #testcases c d
947 > #testcases c d
948 > #if a d
948 > #if a d
949 > $ echo $TESTCASE
949 > $ echo $TESTCASE
950 > a#d
950 > a#d
951 > #endif
951 > #endif
952 > #if b c
952 > #if b c
953 > $ echo yes
953 > $ echo yes
954 > no
954 > no
955 > #endif
955 > #endif
956 > EOF
956 > EOF
957 $ rt test-cases.t
957 $ rt test-cases.t
958 running 4 tests using 1 parallel processes
958 running 4 tests using 1 parallel processes
959 ..
959 ..
960 --- $TESTTMP/test-cases.t
960 --- $TESTTMP/test-cases.t
961 +++ $TESTTMP/test-cases.t#b#c.err
961 +++ $TESTTMP/test-cases.t#b#c.err
962 @@ -6,5 +6,5 @@
962 @@ -6,5 +6,5 @@
963 #endif
963 #endif
964 #if b c
964 #if b c
965 $ echo yes
965 $ echo yes
966 - no
966 - no
967 + yes
967 + yes
968 #endif
968 #endif
969
969
970 ERROR: test-cases.t#b#c output changed
970 ERROR: test-cases.t#b#c output changed
971 !.
971 !.
972 Failed test-cases.t#b#c: output changed
972 Failed test-cases.t#b#c: output changed
973 # Ran 4 tests, 0 skipped, 1 failed.
973 # Ran 4 tests, 0 skipped, 1 failed.
974 python hash seed: * (glob)
974 python hash seed: * (glob)
975 [1]
975 [1]
976
976
977 $ rt --retest
977 $ rt --retest
978 running 1 tests using 1 parallel processes
978 running 1 tests using 1 parallel processes
979
979
980 --- $TESTTMP/test-cases.t
980 --- $TESTTMP/test-cases.t
981 +++ $TESTTMP/test-cases.t#b#c.err
981 +++ $TESTTMP/test-cases.t#b#c.err
982 @@ -6,5 +6,5 @@
982 @@ -6,5 +6,5 @@
983 #endif
983 #endif
984 #if b c
984 #if b c
985 $ echo yes
985 $ echo yes
986 - no
986 - no
987 + yes
987 + yes
988 #endif
988 #endif
989
989
990 ERROR: test-cases.t#b#c output changed
990 ERROR: test-cases.t#b#c output changed
991 !
991 !
992 Failed test-cases.t#b#c: output changed
992 Failed test-cases.t#b#c: output changed
993 # Ran 1 tests, 0 skipped, 1 failed.
993 # Ran 1 tests, 0 skipped, 1 failed.
994 python hash seed: * (glob)
994 python hash seed: * (glob)
995 [1]
995 [1]
996 $ rm test-cases.t#b#c.err
996 $ rm test-cases.t#b#c.err
997 $ rm test-cases.t
997 $ rm test-cases.t
998
998
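(Minimal hypothetical sketch of the #testcases syntax exercised above: each #testcases line declares variants, a run is generated for every combination and named like test-foo.t#case, and output lines ending in "(case !)" are only required when that case is active. Output of the rt invocation is omitted here.)

  $ cat > test-demo.t <<'EOF'
  > #testcases fast slow
  >   $ echo $TESTCASE
  >   fast (fast !)
  >   slow (slow !)
  > EOF
  $ rt test-demo.t
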
999 (reinstall)
999 (reinstall)
1000 $ mv backup test-failure.t
1000 $ mv backup test-failure.t
1001
1001
1002 No Diff
1002 No Diff
1003 ===============
1003 ===============
1004
1004
1005 $ rt --nodiff
1005 $ rt --nodiff
1006 running 2 tests using 1 parallel processes
1006 running 2 tests using 1 parallel processes
1007 !.
1007 !.
1008 Failed test-failure.t: output changed
1008 Failed test-failure.t: output changed
1009 # Ran 2 tests, 0 skipped, 1 failed.
1009 # Ran 2 tests, 0 skipped, 1 failed.
1010 python hash seed: * (glob)
1010 python hash seed: * (glob)
1011 [1]
1011 [1]
1012
1012
1013 test --tmpdir support
1013 test --tmpdir support
1014 $ rt --tmpdir=$TESTTMP/keep test-success.t
1014 $ rt --tmpdir=$TESTTMP/keep test-success.t
1015 running 1 tests using 1 parallel processes
1015 running 1 tests using 1 parallel processes
1016
1016
1017 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t
1017 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t
1018 Keeping threadtmp dir: $TESTTMP/keep/child1
1018 Keeping threadtmp dir: $TESTTMP/keep/child1
1019 .
1019 .
1020 # Ran 1 tests, 0 skipped, 0 failed.
1020 # Ran 1 tests, 0 skipped, 0 failed.
1021
1021
1022 timeouts
1022 timeouts
1023 ========
1023 ========
1024 $ cat > test-timeout.t <<EOF
1024 $ cat > test-timeout.t <<EOF
1025 > $ sleep 2
1025 > $ sleep 2
1026 > $ echo pass
1026 > $ echo pass
1027 > pass
1027 > pass
1028 > EOF
1028 > EOF
1029 > echo '#require slow' > test-slow-timeout.t
1029 > echo '#require slow' > test-slow-timeout.t
1030 > cat test-timeout.t >> test-slow-timeout.t
1030 > cat test-timeout.t >> test-slow-timeout.t
1031 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
1031 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
1032 running 2 tests using 1 parallel processes
1032 running 2 tests using 1 parallel processes
1033 st
1033 st
1034 Skipped test-slow-timeout.t: missing feature: allow slow tests (use --allow-slow-tests)
1034 Skipped test-slow-timeout.t: missing feature: allow slow tests (use --allow-slow-tests)
1035 Failed test-timeout.t: timed out
1035 Failed test-timeout.t: timed out
1036 # Ran 1 tests, 1 skipped, 1 failed.
1036 # Ran 1 tests, 1 skipped, 1 failed.
1037 python hash seed: * (glob)
1037 python hash seed: * (glob)
1038 [1]
1038 [1]
1039 $ rt --timeout=1 --slowtimeout=3 \
1039 $ rt --timeout=1 --slowtimeout=3 \
1040 > test-timeout.t test-slow-timeout.t --allow-slow-tests
1040 > test-timeout.t test-slow-timeout.t --allow-slow-tests
1041 running 2 tests using 1 parallel processes
1041 running 2 tests using 1 parallel processes
1042 .t
1042 .t
1043 Failed test-timeout.t: timed out
1043 Failed test-timeout.t: timed out
1044 # Ran 2 tests, 0 skipped, 1 failed.
1044 # Ran 2 tests, 0 skipped, 1 failed.
1045 python hash seed: * (glob)
1045 python hash seed: * (glob)
1046 [1]
1046 [1]
1047 $ rm test-timeout.t test-slow-timeout.t
1047 $ rm test-timeout.t test-slow-timeout.t
1048
1048
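(Sketch with illustrative numbers and a hypothetical test name: --timeout=N aborts an ordinary test after N seconds, tests marked "#require slow" are skipped unless --allow-slow-tests is given, and slow tests use the --slowtimeout limit instead.)

  $ rt --timeout=180 --slowtimeout=1500 --allow-slow-tests test-example.t
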
1049 test for --time
1049 test for --time
1050 ==================
1050 ==================
1051
1051
1052 $ rt test-success.t --time
1052 $ rt test-success.t --time
1053 running 1 tests using 1 parallel processes
1053 running 1 tests using 1 parallel processes
1054 .
1054 .
1055 # Ran 1 tests, 0 skipped, 0 failed.
1055 # Ran 1 tests, 0 skipped, 0 failed.
1056 # Producing time report
1056 # Producing time report
1057 start end cuser csys real Test
1057 start end cuser csys real Test
1058 \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} test-success.t (re)
1058 \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} test-success.t (re)
1059
1059
1060 test for --time with --jobs enabled
1061 ====================================
1061 ====================================
1062
1062
1063 $ rt test-success.t --time --jobs 2
1063 $ rt test-success.t --time --jobs 2
1064 running 1 tests using 1 parallel processes
1064 running 1 tests using 1 parallel processes
1065 .
1065 .
1066 # Ran 1 tests, 0 skipped, 0 failed.
1066 # Ran 1 tests, 0 skipped, 0 failed.
1067 # Producing time report
1067 # Producing time report
1068 start end cuser csys real Test
1068 start end cuser csys real Test
1069 \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} test-success.t (re)
1069 \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} test-success.t (re)
1070
1070
1071 Skips
1071 Skips
1072 ================
1072 ================
1073 $ cat > test-skip.t <<EOF
1073 $ cat > test-skip.t <<EOF
1074 > $ echo xyzzy
1074 > $ echo xyzzy
1075 > #if true
1075 > #if true
1076 > #require false
1076 > #require false
1077 > #end
1077 > #end
1078 > EOF
1078 > EOF
1079 $ cat > test-noskip.t <<EOF
1079 $ cat > test-noskip.t <<EOF
1080 > #if false
1080 > #if false
1081 > #require false
1081 > #require false
1082 > #endif
1082 > #endif
1083 > EOF
1083 > EOF
1084 $ rt --nodiff
1084 $ rt --nodiff
1085 running 4 tests using 1 parallel processes
1085 running 4 tests using 1 parallel processes
1086 !.s.
1086 !.s.
1087 Skipped test-skip.t: missing feature: nail clipper
1087 Skipped test-skip.t: missing feature: nail clipper
1088 Failed test-failure.t: output changed
1088 Failed test-failure.t: output changed
1089 # Ran 3 tests, 1 skipped, 1 failed.
1089 # Ran 3 tests, 1 skipped, 1 failed.
1090 python hash seed: * (glob)
1090 python hash seed: * (glob)
1091 [1]
1091 [1]
1092
1092
1093 $ rm test-noskip.t
1093 $ rm test-noskip.t
1094 $ rt --keyword xyzzy
1094 $ rt --keyword xyzzy
1095 running 3 tests using 1 parallel processes
1095 running 3 tests using 1 parallel processes
1096 .s
1096 .s
1097 Skipped test-skip.t: missing feature: nail clipper
1097 Skipped test-skip.t: missing feature: nail clipper
1098 # Ran 2 tests, 2 skipped, 0 failed.
1098 # Ran 2 tests, 2 skipped, 0 failed.
1099
1099
1100 Skips with xml
1100 Skips with xml
1101 $ rt --keyword xyzzy \
1101 $ rt --keyword xyzzy \
1102 > --xunit=xunit.xml
1102 > --xunit=xunit.xml
1103 running 3 tests using 1 parallel processes
1103 running 3 tests using 1 parallel processes
1104 .s
1104 .s
1105 Skipped test-skip.t: missing feature: nail clipper
1105 Skipped test-skip.t: missing feature: nail clipper
1106 # Ran 2 tests, 2 skipped, 0 failed.
1106 # Ran 2 tests, 2 skipped, 0 failed.
1107 $ cat xunit.xml
1107 $ cat xunit.xml
1108 <?xml version="1.0" encoding="utf-8"?>
1108 <?xml version="1.0" encoding="utf-8"?>
1109 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
1109 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
1110 <testcase name="test-success.t" time="*"/> (glob)
1110 <testcase name="test-success.t" time="*"/> (glob)
1111 <testcase name="test-skip.t">
1111 <testcase name="test-skip.t">
1112 <skipped><![CDATA[missing feature: nail clipper]]></skipped> (py38 !)
1112 <skipped><![CDATA[missing feature: nail clipper]]></skipped> (py38 !)
1113 <skipped> (no-py38 !)
1113 <skipped> (no-py38 !)
1114 <![CDATA[missing feature: nail clipper]]> </skipped> (no-py38 !)
1114 <![CDATA[missing feature: nail clipper]]> </skipped> (no-py38 !)
1115 </testcase>
1115 </testcase>
1116 </testsuite>
1116 </testsuite>
1117
1117
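(Sketch with a hypothetical test name: --xunit=FILE writes a JUnit-style XML report like the one shown above, with skipped tests recorded as <skipped> elements.)

  $ rt --xunit=xunit.xml test-example.t
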
1118 Tests skipped because they are missing or blacklisted don't count as executed:
1119 $ mkdir tests
1119 $ mkdir tests
1120 $ echo tests/test-failure.t > blacklist
1120 $ echo tests/test-failure.t > blacklist
1121 $ cp test-failure.t tests
1121 $ cp test-failure.t tests
1122 $ rt --blacklist=blacklist --json\
1122 $ rt --blacklist=blacklist --json\
1123 > tests/test-failure.t tests/test-bogus.t
1123 > tests/test-failure.t tests/test-bogus.t
1124 running 2 tests using 1 parallel processes
1124 running 2 tests using 1 parallel processes
1125 ss
1125 ss
1126 Skipped test-bogus.t: Doesn't exist
1126 Skipped test-bogus.t: Doesn't exist
1127 Skipped test-failure.t: blacklisted
1127 Skipped test-failure.t: blacklisted
1128 # Ran 0 tests, 2 skipped, 0 failed.
1128 # Ran 0 tests, 2 skipped, 0 failed.
1129 $ cat tests/report.json
1129 $ cat tests/report.json
1130 testreport ={
1130 testreport ={
1131 "test-bogus.t": {
1131 "test-bogus.t": {
1132 "result": "skip"
1132 "result": "skip"
1133 },
1133 },
1134 "test-failure.t": {
1134 "test-failure.t": {
1135 "result": "skip"
1135 "result": "skip"
1136 }
1136 }
1137 } (no-eol)
1137 } (no-eol)
1138 $ rm -r tests
1138 $ rm -r tests
1139 $ echo test-failure.t > blacklist
1139 $ echo test-failure.t > blacklist
1140
1140
1141 Whitelist trumps blacklist
1141 Whitelist trumps blacklist
1142 $ echo test-failure.t > whitelist
1142 $ echo test-failure.t > whitelist
1143 $ rt --blacklist=blacklist --whitelist=whitelist --json\
1143 $ rt --blacklist=blacklist --whitelist=whitelist --json\
1144 > test-failure.t test-bogus.t
1144 > test-failure.t test-bogus.t
1145 running 2 tests using 1 parallel processes
1145 running 2 tests using 1 parallel processes
1146 s
1146 s
1147 --- $TESTTMP/test-failure.t
1147 --- $TESTTMP/test-failure.t
1148 +++ $TESTTMP/test-failure.t.err
1148 +++ $TESTTMP/test-failure.t.err
1149 @@ -1,5 +1,5 @@
1149 @@ -1,5 +1,5 @@
1150 $ echo babar
1150 $ echo babar
1151 - rataxes
1151 - rataxes
1152 + babar
1152 + babar
1153 This is a noop statement so that
1153 This is a noop statement so that
1154 this test is still more bytes than success.
1154 this test is still more bytes than success.
1155 pad pad pad pad............................................................
1155 pad pad pad pad............................................................
1156
1156
1157 ERROR: test-failure.t output changed
1157 ERROR: test-failure.t output changed
1158 !
1158 !
1159 Skipped test-bogus.t: Doesn't exist
1159 Skipped test-bogus.t: Doesn't exist
1160 Failed test-failure.t: output changed
1160 Failed test-failure.t: output changed
1161 # Ran 1 tests, 1 skipped, 1 failed.
1161 # Ran 1 tests, 1 skipped, 1 failed.
1162 python hash seed: * (glob)
1162 python hash seed: * (glob)
1163 [1]
1163 [1]
1164
1164
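(Sketch with hypothetical file and test names: --blacklist reads a file of test names to skip, and --whitelist forces the listed tests to run even when they are blacklisted.)

  $ echo test-noisy.t > skiplist
  $ rt --blacklist=skiplist test-noisy.t
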
1165 Ensure that --test-list causes only the tests listed in that file to
1165 Ensure that --test-list causes only the tests listed in that file to
1166 be executed.
1166 be executed.
1167 $ echo test-success.t >> onlytest
1167 $ echo test-success.t >> onlytest
1168 $ rt --test-list=onlytest
1168 $ rt --test-list=onlytest
1169 running 1 tests using 1 parallel processes
1169 running 1 tests using 1 parallel processes
1170 .
1170 .
1171 # Ran 1 tests, 0 skipped, 0 failed.
1171 # Ran 1 tests, 0 skipped, 0 failed.
1172 $ echo test-bogus.t >> anothertest
1172 $ echo test-bogus.t >> anothertest
1173 $ rt --test-list=onlytest --test-list=anothertest
1173 $ rt --test-list=onlytest --test-list=anothertest
1174 running 2 tests using 1 parallel processes
1174 running 2 tests using 1 parallel processes
1175 s.
1175 s.
1176 Skipped test-bogus.t: Doesn't exist
1176 Skipped test-bogus.t: Doesn't exist
1177 # Ran 1 tests, 1 skipped, 0 failed.
1177 # Ran 1 tests, 1 skipped, 0 failed.
1178 $ rm onlytest anothertest
1178 $ rm onlytest anothertest
1179
1179
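(Sketch with hypothetical names: --test-list reads test names from a file, one per line, and the option can be repeated to combine several lists, as shown above.)

  $ printf 'test-one.t\ntest-two.t\n' > mylist
  $ rt --test-list=mylist
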
1180 test for --json
1180 test for --json
1181 ==================
1181 ==================
1182
1182
1183 $ rt --json
1183 $ rt --json
1184 running 3 tests using 1 parallel processes
1184 running 3 tests using 1 parallel processes
1185
1185
1186 --- $TESTTMP/test-failure.t
1186 --- $TESTTMP/test-failure.t
1187 +++ $TESTTMP/test-failure.t.err
1187 +++ $TESTTMP/test-failure.t.err
1188 @@ -1,5 +1,5 @@
1188 @@ -1,5 +1,5 @@
1189 $ echo babar
1189 $ echo babar
1190 - rataxes
1190 - rataxes
1191 + babar
1191 + babar
1192 This is a noop statement so that
1192 This is a noop statement so that
1193 this test is still more bytes than success.
1193 this test is still more bytes than success.
1194 pad pad pad pad............................................................
1194 pad pad pad pad............................................................
1195
1195
1196 ERROR: test-failure.t output changed
1196 ERROR: test-failure.t output changed
1197 !.s
1197 !.s
1198 Skipped test-skip.t: missing feature: nail clipper
1198 Skipped test-skip.t: missing feature: nail clipper
1199 Failed test-failure.t: output changed
1199 Failed test-failure.t: output changed
1200 # Ran 2 tests, 1 skipped, 1 failed.
1200 # Ran 2 tests, 1 skipped, 1 failed.
1201 python hash seed: * (glob)
1201 python hash seed: * (glob)
1202 [1]
1202 [1]
1203
1203
1204 $ cat report.json
1204 $ cat report.json
1205 testreport ={
1205 testreport ={
1206 "test-failure.t": [\{] (re)
1206 "test-failure.t": [\{] (re)
1207 "csys": "\s*\d+\.\d{3,4}", ? (re)
1207 "csys": "\s*\d+\.\d{3,4}", ? (re)
1208 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1208 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1209 "diff": "---.+\+\+\+.+", ? (re)
1209 "diff": "---.+\+\+\+.+", ? (re)
1210 "end": "\s*\d+\.\d{3,4}", ? (re)
1210 "end": "\s*\d+\.\d{3,4}", ? (re)
1211 "result": "failure", ? (re)
1211 "result": "failure", ? (re)
1212 "start": "\s*\d+\.\d{3,4}", ? (re)
1212 "start": "\s*\d+\.\d{3,4}", ? (re)
1213 "time": "\s*\d+\.\d{3,4}" (re)
1213 "time": "\s*\d+\.\d{3,4}" (re)
1214 }, ? (re)
1214 }, ? (re)
1215 "test-skip.t": {
1215 "test-skip.t": {
1216 "csys": "\s*\d+\.\d{3,4}", ? (re)
1216 "csys": "\s*\d+\.\d{3,4}", ? (re)
1217 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1217 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1218 "diff": "", ? (re)
1218 "diff": "", ? (re)
1219 "end": "\s*\d+\.\d{3,4}", ? (re)
1219 "end": "\s*\d+\.\d{3,4}", ? (re)
1220 "result": "skip", ? (re)
1220 "result": "skip", ? (re)
1221 "start": "\s*\d+\.\d{3,4}", ? (re)
1221 "start": "\s*\d+\.\d{3,4}", ? (re)
1222 "time": "\s*\d+\.\d{3,4}" (re)
1222 "time": "\s*\d+\.\d{3,4}" (re)
1223 }, ? (re)
1223 }, ? (re)
1224 "test-success.t": [\{] (re)
1224 "test-success.t": [\{] (re)
1225 "csys": "\s*\d+\.\d{3,4}", ? (re)
1225 "csys": "\s*\d+\.\d{3,4}", ? (re)
1226 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1226 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1227 "diff": "", ? (re)
1227 "diff": "", ? (re)
1228 "end": "\s*\d+\.\d{3,4}", ? (re)
1228 "end": "\s*\d+\.\d{3,4}", ? (re)
1229 "result": "success", ? (re)
1229 "result": "success", ? (re)
1230 "start": "\s*\d+\.\d{3,4}", ? (re)
1230 "start": "\s*\d+\.\d{3,4}", ? (re)
1231 "time": "\s*\d+\.\d{3,4}" (re)
1231 "time": "\s*\d+\.\d{3,4}" (re)
1232 }
1232 }
1233 } (no-eol)
1233 } (no-eol)
1234 --json with --outputdir
1234 --json with --outputdir
1235
1235
1236 $ rm report.json
1236 $ rm report.json
1237 $ rm -r output
1237 $ rm -r output
1238 $ mkdir output
1238 $ mkdir output
1239 $ rt --json --outputdir output
1239 $ rt --json --outputdir output
1240 running 3 tests using 1 parallel processes
1240 running 3 tests using 1 parallel processes
1241
1241
1242 --- $TESTTMP/test-failure.t
1242 --- $TESTTMP/test-failure.t
1243 +++ $TESTTMP/output/test-failure.t.err
1243 +++ $TESTTMP/output/test-failure.t.err
1244 @@ -1,5 +1,5 @@
1244 @@ -1,5 +1,5 @@
1245 $ echo babar
1245 $ echo babar
1246 - rataxes
1246 - rataxes
1247 + babar
1247 + babar
1248 This is a noop statement so that
1248 This is a noop statement so that
1249 this test is still more bytes than success.
1249 this test is still more bytes than success.
1250 pad pad pad pad............................................................
1250 pad pad pad pad............................................................
1251
1251
1252 ERROR: test-failure.t output changed
1252 ERROR: test-failure.t output changed
1253 !.s
1253 !.s
1254 Skipped test-skip.t: missing feature: nail clipper
1254 Skipped test-skip.t: missing feature: nail clipper
1255 Failed test-failure.t: output changed
1255 Failed test-failure.t: output changed
1256 # Ran 2 tests, 1 skipped, 1 failed.
1256 # Ran 2 tests, 1 skipped, 1 failed.
1257 python hash seed: * (glob)
1257 python hash seed: * (glob)
1258 [1]
1258 [1]
1259 $ f report.json
1259 $ f report.json
1260 report.json: file not found
1260 report.json: file not found
1261 $ cat output/report.json
1261 $ cat output/report.json
1262 testreport ={
1262 testreport ={
1263 "test-failure.t": [\{] (re)
1263 "test-failure.t": [\{] (re)
1264 "csys": "\s*\d+\.\d{3,4}", ? (re)
1264 "csys": "\s*\d+\.\d{3,4}", ? (re)
1265 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1265 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1266 "diff": "---.+\+\+\+.+", ? (re)
1266 "diff": "---.+\+\+\+.+", ? (re)
1267 "end": "\s*\d+\.\d{3,4}", ? (re)
1267 "end": "\s*\d+\.\d{3,4}", ? (re)
1268 "result": "failure", ? (re)
1268 "result": "failure", ? (re)
1269 "start": "\s*\d+\.\d{3,4}", ? (re)
1269 "start": "\s*\d+\.\d{3,4}", ? (re)
1270 "time": "\s*\d+\.\d{3,4}" (re)
1270 "time": "\s*\d+\.\d{3,4}" (re)
1271 }, ? (re)
1271 }, ? (re)
1272 "test-skip.t": {
1272 "test-skip.t": {
1273 "csys": "\s*\d+\.\d{3,4}", ? (re)
1273 "csys": "\s*\d+\.\d{3,4}", ? (re)
1274 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1274 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1275 "diff": "", ? (re)
1275 "diff": "", ? (re)
1276 "end": "\s*\d+\.\d{3,4}", ? (re)
1276 "end": "\s*\d+\.\d{3,4}", ? (re)
1277 "result": "skip", ? (re)
1277 "result": "skip", ? (re)
1278 "start": "\s*\d+\.\d{3,4}", ? (re)
1278 "start": "\s*\d+\.\d{3,4}", ? (re)
1279 "time": "\s*\d+\.\d{3,4}" (re)
1279 "time": "\s*\d+\.\d{3,4}" (re)
1280 }, ? (re)
1280 }, ? (re)
1281 "test-success.t": [\{] (re)
1281 "test-success.t": [\{] (re)
1282 "csys": "\s*\d+\.\d{3,4}", ? (re)
1282 "csys": "\s*\d+\.\d{3,4}", ? (re)
1283 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1283 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1284 "diff": "", ? (re)
1284 "diff": "", ? (re)
1285 "end": "\s*\d+\.\d{3,4}", ? (re)
1285 "end": "\s*\d+\.\d{3,4}", ? (re)
1286 "result": "success", ? (re)
1286 "result": "success", ? (re)
1287 "start": "\s*\d+\.\d{3,4}", ? (re)
1287 "start": "\s*\d+\.\d{3,4}", ? (re)
1288 "time": "\s*\d+\.\d{3,4}" (re)
1288 "time": "\s*\d+\.\d{3,4}" (re)
1289 }
1289 }
1290 } (no-eol)
1290 } (no-eol)
1291 $ ls -a output
1291 $ ls -a output
1292 .
1292 .
1293 ..
1293 ..
1294 .testtimes
1294 .testtimes
1295 report.json
1295 report.json
1296 test-failure.t.err
1296 test-failure.t.err
1297
1297
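(Sketch with a hypothetical output directory and test name: --json writes a report.json with per-test result and timing fields, and --outputdir places the .err files and reports there instead of next to the tests.)

  $ rt --json --outputdir report-dir test-example.t
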
1298 Test that a failed test accepted through interactive mode is properly reported:
1299
1299
1300 $ cp test-failure.t backup
1300 $ cp test-failure.t backup
1301 $ echo y | rt --json -i
1301 $ echo y | rt --json -i
1302 running 3 tests using 1 parallel processes
1302 running 3 tests using 1 parallel processes
1303
1303
1304 --- $TESTTMP/test-failure.t
1304 --- $TESTTMP/test-failure.t
1305 +++ $TESTTMP/test-failure.t.err
1305 +++ $TESTTMP/test-failure.t.err
1306 @@ -1,5 +1,5 @@
1306 @@ -1,5 +1,5 @@
1307 $ echo babar
1307 $ echo babar
1308 - rataxes
1308 - rataxes
1309 + babar
1309 + babar
1310 This is a noop statement so that
1310 This is a noop statement so that
1311 this test is still more bytes than success.
1311 this test is still more bytes than success.
1312 pad pad pad pad............................................................
1312 pad pad pad pad............................................................
1313 Accept this change? [y/N] ..s
1313 Accept this change? [y/N] ..s
1314 Skipped test-skip.t: missing feature: nail clipper
1314 Skipped test-skip.t: missing feature: nail clipper
1315 # Ran 2 tests, 1 skipped, 0 failed.
1315 # Ran 2 tests, 1 skipped, 0 failed.
1316
1316
1317 $ cat report.json
1317 $ cat report.json
1318 testreport ={
1318 testreport ={
1319 "test-failure.t": [\{] (re)
1319 "test-failure.t": [\{] (re)
1320 "csys": "\s*\d+\.\d{3,4}", ? (re)
1320 "csys": "\s*\d+\.\d{3,4}", ? (re)
1321 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1321 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1322 "diff": "", ? (re)
1322 "diff": "", ? (re)
1323 "end": "\s*\d+\.\d{3,4}", ? (re)
1323 "end": "\s*\d+\.\d{3,4}", ? (re)
1324 "result": "success", ? (re)
1324 "result": "success", ? (re)
1325 "start": "\s*\d+\.\d{3,4}", ? (re)
1325 "start": "\s*\d+\.\d{3,4}", ? (re)
1326 "time": "\s*\d+\.\d{3,4}" (re)
1326 "time": "\s*\d+\.\d{3,4}" (re)
1327 }, ? (re)
1327 }, ? (re)
1328 "test-skip.t": {
1328 "test-skip.t": {
1329 "csys": "\s*\d+\.\d{3,4}", ? (re)
1329 "csys": "\s*\d+\.\d{3,4}", ? (re)
1330 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1330 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1331 "diff": "", ? (re)
1331 "diff": "", ? (re)
1332 "end": "\s*\d+\.\d{3,4}", ? (re)
1332 "end": "\s*\d+\.\d{3,4}", ? (re)
1333 "result": "skip", ? (re)
1333 "result": "skip", ? (re)
1334 "start": "\s*\d+\.\d{3,4}", ? (re)
1334 "start": "\s*\d+\.\d{3,4}", ? (re)
1335 "time": "\s*\d+\.\d{3,4}" (re)
1335 "time": "\s*\d+\.\d{3,4}" (re)
1336 }, ? (re)
1336 }, ? (re)
1337 "test-success.t": [\{] (re)
1337 "test-success.t": [\{] (re)
1338 "csys": "\s*\d+\.\d{3,4}", ? (re)
1338 "csys": "\s*\d+\.\d{3,4}", ? (re)
1339 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1339 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1340 "diff": "", ? (re)
1340 "diff": "", ? (re)
1341 "end": "\s*\d+\.\d{3,4}", ? (re)
1341 "end": "\s*\d+\.\d{3,4}", ? (re)
1342 "result": "success", ? (re)
1342 "result": "success", ? (re)
1343 "start": "\s*\d+\.\d{3,4}", ? (re)
1343 "start": "\s*\d+\.\d{3,4}", ? (re)
1344 "time": "\s*\d+\.\d{3,4}" (re)
1344 "time": "\s*\d+\.\d{3,4}" (re)
1345 }
1345 }
1346 } (no-eol)
1346 } (no-eol)
1347 $ mv backup test-failure.t
1347 $ mv backup test-failure.t
1348
1348
1349 backslash at the end of a line with glob matching is handled properly
1350
1350
1351 $ cat > test-glob-backslash.t << EOF
1351 $ cat > test-glob-backslash.t << EOF
1352 > $ echo 'foo bar \\'
1352 > $ echo 'foo bar \\'
1353 > foo * \ (glob)
1353 > foo * \ (glob)
1354 > EOF
1354 > EOF
1355
1355
1356 $ rt test-glob-backslash.t
1356 $ rt test-glob-backslash.t
1357 running 1 tests using 1 parallel processes
1357 running 1 tests using 1 parallel processes
1358 .
1358 .
1359 # Ran 1 tests, 0 skipped, 0 failed.
1359 # Ran 1 tests, 0 skipped, 0 failed.
1360
1360
1361 $ rm -f test-glob-backslash.t
1361 $ rm -f test-glob-backslash.t
1362
1362
1363 Test globbing of local IP addresses
1363 Test globbing of local IP addresses
1364 $ echo 172.16.18.1
1364 $ echo 172.16.18.1
1365 $LOCALIP (glob)
1365 $LOCALIP (glob)
1366 $ echo dead:beef::1
1366 $ echo dead:beef::1
1367 $LOCALIP (glob)
1367 $LOCALIP (glob)
1368
1368
1369 Support for an external test formatter
1370 =======================================
1370 =======================================
1371
1371
1372 $ CUSTOM_TEST_RESULT=basic_test_result "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` -j1 "$@" test-success.t test-failure.t
1372 $ CUSTOM_TEST_RESULT=basic_test_result "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` -j1 "$@" test-success.t test-failure.t
1373 running 2 tests using 1 parallel processes
1373 running 2 tests using 1 parallel processes
1374
1374
1375 # Ran 2 tests, 0 skipped, 0 failed.
1375 # Ran 2 tests, 0 skipped, 0 failed.
1376 ON_START! <__main__.TestSuite tests=[<__main__.TTest testMethod=test-failure.t>, <__main__.TTest testMethod=test-success.t>]>
1376 ON_START! <__main__.TestSuite tests=[<__main__.TTest testMethod=test-failure.t>, <__main__.TTest testMethod=test-success.t>]>
1377 FAILURE! test-failure.t output changed
1377 FAILURE! test-failure.t output changed
1378 SUCCESS! test-success.t
1378 SUCCESS! test-success.t
1379 ON_END!
1379 ON_END!
1380
1380
1381 Test reusability for third party tools
1381 Test reusability for third party tools
1382 ======================================
1382 ======================================
1383
1383
1384 $ mkdir "$TESTTMP"/anothertests
1384 $ mkdir "$TESTTMP"/anothertests
1385 $ cd "$TESTTMP"/anothertests
1385 $ cd "$TESTTMP"/anothertests
1386
1386
1387 test that `run-tests.py` can execute hghave, even when it is not run from
1388 the Mercurial source tree.
1389
1389
1390 $ cat > test-hghave.t <<EOF
1390 $ cat > test-hghave.t <<EOF
1391 > #require true
1391 > #require true
1392 > $ echo foo
1392 > $ echo foo
1393 > foo
1393 > foo
1394 > EOF
1394 > EOF
1395 $ rt test-hghave.t
1395 $ rt test-hghave.t
1396 running 1 tests using 1 parallel processes
1396 running 1 tests using 1 parallel processes
1397 .
1397 .
1398 # Ran 1 tests, 0 skipped, 0 failed.
1398 # Ran 1 tests, 0 skipped, 0 failed.
1399
1399
1400 test that RUNTESTDIR refers to the directory in which the currently
1401 running `run-tests.py` is placed.
1402
1402
1403 $ cat > test-runtestdir.t <<EOF
1403 $ cat > test-runtestdir.t <<EOF
1404 > - $TESTDIR, in which test-run-tests.t is placed
1404 > - $TESTDIR, in which test-run-tests.t is placed
1405 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
1405 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
1406 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
1406 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
1407 >
1407 >
1408 > #if windows
1408 > #if windows
1409 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
1409 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
1410 > #else
1410 > #else
1411 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
1411 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
1412 > #endif
1412 > #endif
1413 > If this prints a path, that means RUNTESTDIR didn't equal
1413 > If this prints a path, that means RUNTESTDIR didn't equal
1414 > TESTDIR as it should have.
1414 > TESTDIR as it should have.
1415 > $ test "\$RUNTESTDIR" = "$TESTDIR" || echo "\$RUNTESTDIR"
1415 > $ test "\$RUNTESTDIR" = "$TESTDIR" || echo "\$RUNTESTDIR"
1416 > This should print the start of check-code. If this passes but the
1416 > This should print the start of check-code. If this passes but the
1417 > previous check failed, that means we found a copy of check-code at whatever
1417 > previous check failed, that means we found a copy of check-code at whatever
1418 > RUNTESTDIR ended up containing, even though it doesn't match TESTDIR.
1419 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py | sed 's@.!.*python3@#!USRBINENVPY@'
1419 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py | sed 's@.!.*python3@#!USRBINENVPY@'
1420 > #!USRBINENVPY
1420 > #!USRBINENVPY
1421 > #
1421 > #
1422 > # check-code - a style and portability checker for Mercurial
1422 > # check-code - a style and portability checker for Mercurial
1423 > EOF
1423 > EOF
1424 $ rt test-runtestdir.t
1424 $ rt test-runtestdir.t
1425 running 1 tests using 1 parallel processes
1425 running 1 tests using 1 parallel processes
1426 .
1426 .
1427 # Ran 1 tests, 0 skipped, 0 failed.
1427 # Ran 1 tests, 0 skipped, 0 failed.
1428
1428
1429 #if execbit
1429 #if execbit
1430
1430
1431 test that TESTDIR is included in PATH
1432
1432
1433 $ cat > custom-command.sh <<EOF
1433 $ cat > custom-command.sh <<EOF
1434 > #!/bin/sh
1434 > #!/bin/sh
1435 > echo "hello world"
1435 > echo "hello world"
1436 > EOF
1436 > EOF
1437 $ chmod +x custom-command.sh
1437 $ chmod +x custom-command.sh
1438 $ cat > test-testdir-path.t <<EOF
1438 $ cat > test-testdir-path.t <<EOF
1439 > $ custom-command.sh
1439 > $ custom-command.sh
1440 > hello world
1440 > hello world
1441 > EOF
1441 > EOF
1442 $ rt test-testdir-path.t
1442 $ rt test-testdir-path.t
1443 running 1 tests using 1 parallel processes
1443 running 1 tests using 1 parallel processes
1444 .
1444 .
1445 # Ran 1 tests, 0 skipped, 0 failed.
1445 # Ran 1 tests, 0 skipped, 0 failed.
1446
1446
1447 #endif
1447 #endif
1448
1448
1449 test support for --allow-slow-tests
1449 test support for --allow-slow-tests
1450 $ cat > test-very-slow-test.t <<EOF
1450 $ cat > test-very-slow-test.t <<EOF
1451 > #require slow
1451 > #require slow
1452 > $ echo pass
1452 > $ echo pass
1453 > pass
1453 > pass
1454 > EOF
1454 > EOF
1455 $ rt test-very-slow-test.t
1455 $ rt test-very-slow-test.t
1456 running 1 tests using 1 parallel processes
1456 running 1 tests using 1 parallel processes
1457 s
1457 s
1458 Skipped test-very-slow-test.t: missing feature: allow slow tests (use --allow-slow-tests)
1458 Skipped test-very-slow-test.t: missing feature: allow slow tests (use --allow-slow-tests)
1459 # Ran 0 tests, 1 skipped, 0 failed.
1459 # Ran 0 tests, 1 skipped, 0 failed.
1460 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
1460 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
1461 running 1 tests using 1 parallel processes
1461 running 1 tests using 1 parallel processes
1462 .
1462 .
1463 # Ran 1 tests, 0 skipped, 0 failed.
1463 # Ran 1 tests, 0 skipped, 0 failed.
1464
1464
1465 support for running a test outside the current directory
1465 support for running a test outside the current directory
1466 $ mkdir nonlocal
1466 $ mkdir nonlocal
1467 $ cat > nonlocal/test-is-not-here.t << EOF
1467 $ cat > nonlocal/test-is-not-here.t << EOF
1468 > $ echo pass
1468 > $ echo pass
1469 > pass
1469 > pass
1470 > EOF
1470 > EOF
1471 $ rt nonlocal/test-is-not-here.t
1471 $ rt nonlocal/test-is-not-here.t
1472 running 1 tests using 1 parallel processes
1472 running 1 tests using 1 parallel processes
1473 .
1473 .
1474 # Ran 1 tests, 0 skipped, 0 failed.
1474 # Ran 1 tests, 0 skipped, 0 failed.
1475
1475
1476 support for automatically discovering tests when the argument is a folder
1477 $ mkdir tmp && cd tmp
1477 $ mkdir tmp && cd tmp
1478
1478
1479 $ cat > test-uno.t << EOF
1479 $ cat > test-uno.t << EOF
1480 > $ echo line
1480 > $ echo line
1481 > line
1481 > line
1482 > EOF
1482 > EOF
1483
1483
1484 $ cp test-uno.t test-dos.t
1484 $ cp test-uno.t test-dos.t
1485 $ cd ..
1485 $ cd ..
1486 $ cp -R tmp tmpp
1486 $ cp -R tmp tmpp
1487 $ cp tmp/test-uno.t test-solo.t
1487 $ cp tmp/test-uno.t test-solo.t
1488
1488
1489 $ rt tmp/ test-solo.t tmpp
1489 $ rt tmp/ test-solo.t tmpp
1490 running 5 tests using 1 parallel processes
1490 running 5 tests using 1 parallel processes
1491 .....
1491 .....
1492 # Ran 5 tests, 0 skipped, 0 failed.
1492 # Ran 5 tests, 0 skipped, 0 failed.
1493 $ rm -rf tmp tmpp
1493 $ rm -rf tmp tmpp
1494
1494
1495 support for running run-tests.py from another directory
1495 support for running run-tests.py from another directory
1496 $ mkdir tmp && cd tmp
1496 $ mkdir tmp && cd tmp
1497
1497
1498 $ cat > useful-file.sh << EOF
1498 $ cat > useful-file.sh << EOF
1499 > important command
1499 > important command
1500 > EOF
1500 > EOF
1501
1501
1502 $ cat > test-folder.t << EOF
1502 $ cat > test-folder.t << EOF
1503 > $ cat \$TESTDIR/useful-file.sh
1503 > $ cat \$TESTDIR/useful-file.sh
1504 > important command
1504 > important command
1505 > EOF
1505 > EOF
1506
1506
1507 $ cat > test-folder-fail.t << EOF
1507 $ cat > test-folder-fail.t << EOF
1508 > $ cat \$TESTDIR/useful-file.sh
1508 > $ cat \$TESTDIR/useful-file.sh
1509 > important commando
1509 > important commando
1510 > EOF
1510 > EOF
1511
1511
1512 $ cd ..
1512 $ cd ..
1513 $ rt tmp/test-*.t
1513 $ rt tmp/test-*.t
1514 running 2 tests using 1 parallel processes
1514 running 2 tests using 1 parallel processes
1515
1515
1516 --- $TESTTMP/anothertests/tmp/test-folder-fail.t
1516 --- $TESTTMP/anothertests/tmp/test-folder-fail.t
1517 +++ $TESTTMP/anothertests/tmp/test-folder-fail.t.err
1517 +++ $TESTTMP/anothertests/tmp/test-folder-fail.t.err
1518 @@ -1,2 +1,2 @@
1518 @@ -1,2 +1,2 @@
1519 $ cat $TESTDIR/useful-file.sh
1519 $ cat $TESTDIR/useful-file.sh
1520 - important commando
1520 - important commando
1521 + important command
1521 + important command
1522
1522
1523 ERROR: test-folder-fail.t output changed
1523 ERROR: test-folder-fail.t output changed
1524 !.
1524 !.
1525 Failed test-folder-fail.t: output changed
1525 Failed test-folder-fail.t: output changed
1526 # Ran 2 tests, 0 skipped, 1 failed.
1526 # Ran 2 tests, 0 skipped, 1 failed.
1527 python hash seed: * (glob)
1527 python hash seed: * (glob)
1528 [1]
1528 [1]
1529
1529
1530 support for bisecting failed tests automatically
1530 support for bisecting failed tests automatically
1531 $ hg init bisect
1531 $ hg init bisect
1532 $ cd bisect
1532 $ cd bisect
1533 $ cat >> test-bisect.t <<EOF
1533 $ cat >> test-bisect.t <<EOF
1534 > $ echo pass
1534 > $ echo pass
1535 > pass
1535 > pass
1536 > EOF
1536 > EOF
1537 $ hg add test-bisect.t
1537 $ hg add test-bisect.t
1538 $ hg ci -m 'good'
1538 $ hg ci -m 'good'
1539 $ cat >> test-bisect.t <<EOF
1539 $ cat >> test-bisect.t <<EOF
1540 > $ echo pass
1540 > $ echo pass
1541 > fail
1541 > fail
1542 > EOF
1542 > EOF
1543 $ hg ci -m 'bad'
1543 $ hg ci -m 'bad'
1544 $ rt --known-good-rev=0 test-bisect.t
1544 $ rt --known-good-rev=0 test-bisect.t
1545 running 1 tests using 1 parallel processes
1545 running 1 tests using 1 parallel processes
1546
1546
1547 --- $TESTTMP/anothertests/bisect/test-bisect.t
1547 --- $TESTTMP/anothertests/bisect/test-bisect.t
1548 +++ $TESTTMP/anothertests/bisect/test-bisect.t.err
1548 +++ $TESTTMP/anothertests/bisect/test-bisect.t.err
1549 @@ -1,4 +1,4 @@
1549 @@ -1,4 +1,4 @@
1550 $ echo pass
1550 $ echo pass
1551 pass
1551 pass
1552 $ echo pass
1552 $ echo pass
1553 - fail
1553 - fail
1554 + pass
1554 + pass
1555
1555
1556 ERROR: test-bisect.t output changed
1556 ERROR: test-bisect.t output changed
1557 !
1557 !
1558 Failed test-bisect.t: output changed
1558 Failed test-bisect.t: output changed
1559 test-bisect.t broken by 72cbf122d116 (bad)
1559 test-bisect.t broken by 72cbf122d116 (bad)
1560 # Ran 1 tests, 0 skipped, 1 failed.
1560 # Ran 1 tests, 0 skipped, 1 failed.
1561 python hash seed: * (glob)
1561 python hash seed: * (glob)
1562 [1]
1562 [1]
1563
1563
1564 $ cd ..
1564 $ cd ..
1565
1565
1566 support bisecting a separate repo
1566 support bisecting a separate repo
1567
1567
1568 $ hg init bisect-dependent
1568 $ hg init bisect-dependent
1569 $ cd bisect-dependent
1569 $ cd bisect-dependent
1570 $ cat > test-bisect-dependent.t <<EOF
1570 $ cat > test-bisect-dependent.t <<EOF
1571 > $ tail -1 \$TESTDIR/../bisect/test-bisect.t
1571 > $ tail -1 \$TESTDIR/../bisect/test-bisect.t
1572 > pass
1572 > pass
1573 > EOF
1573 > EOF
1574 $ hg commit -Am dependent test-bisect-dependent.t
1574 $ hg commit -Am dependent test-bisect-dependent.t
1575
1575
1576 $ rt --known-good-rev=0 test-bisect-dependent.t
1576 $ rt --known-good-rev=0 test-bisect-dependent.t
1577 running 1 tests using 1 parallel processes
1577 running 1 tests using 1 parallel processes
1578
1578
1579 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1579 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1580 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1580 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1581 @@ -1,2 +1,2 @@
1581 @@ -1,2 +1,2 @@
1582 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1582 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1583 - pass
1583 - pass
1584 + fail
1584 + fail
1585
1585
1586 ERROR: test-bisect-dependent.t output changed
1586 ERROR: test-bisect-dependent.t output changed
1587 !
1587 !
1588 Failed test-bisect-dependent.t: output changed
1588 Failed test-bisect-dependent.t: output changed
1589 Failed to identify failure point for test-bisect-dependent.t
1589 Failed to identify failure point for test-bisect-dependent.t
1590 # Ran 1 tests, 0 skipped, 1 failed.
1590 # Ran 1 tests, 0 skipped, 1 failed.
1591 python hash seed: * (glob)
1591 python hash seed: * (glob)
1592 [1]
1592 [1]
1593
1593
1594 $ rt --bisect-repo=../test-bisect test-bisect-dependent.t
1594 $ rt --bisect-repo=../test-bisect test-bisect-dependent.t
1595 usage: run-tests.py [options] [tests]
1595 usage: run-tests.py [options] [tests]
1596 run-tests.py: error: --bisect-repo cannot be used without --known-good-rev
1596 run-tests.py: error: --bisect-repo cannot be used without --known-good-rev
1597 [2]
1597 [2]
1598
1598
1599 $ rt --known-good-rev=0 --bisect-repo=../bisect test-bisect-dependent.t
1599 $ rt --known-good-rev=0 --bisect-repo=../bisect test-bisect-dependent.t
1600 running 1 tests using 1 parallel processes
1600 running 1 tests using 1 parallel processes
1601
1601
1602 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1602 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1603 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1603 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1604 @@ -1,2 +1,2 @@
1604 @@ -1,2 +1,2 @@
1605 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1605 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1606 - pass
1606 - pass
1607 + fail
1607 + fail
1608
1608
1609 ERROR: test-bisect-dependent.t output changed
1609 ERROR: test-bisect-dependent.t output changed
1610 !
1610 !
1611 Failed test-bisect-dependent.t: output changed
1611 Failed test-bisect-dependent.t: output changed
1612 test-bisect-dependent.t broken by 72cbf122d116 (bad)
1612 test-bisect-dependent.t broken by 72cbf122d116 (bad)
1613 # Ran 1 tests, 0 skipped, 1 failed.
1613 # Ran 1 tests, 0 skipped, 1 failed.
1614 python hash seed: * (glob)
1614 python hash seed: * (glob)
1615 [1]
1615 [1]
1616
1616
1617 $ cd ..
1617 $ cd ..
1618
1618
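(Sketch with hypothetical names: --known-good-rev makes run-tests bisect the repository to name the changeset that broke a failing test, and --bisect-repo, which requires --known-good-rev, points the bisection at a different repository.)

  $ rt --known-good-rev=0 test-example.t
  $ rt --known-good-rev=0 --bisect-repo=../other-repo test-example.t
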
1619 Test that a broken #if statement doesn't break run-tests threading.
1620 ==============================================================
1620 ==============================================================
1621 $ mkdir broken
1621 $ mkdir broken
1622 $ cd broken
1622 $ cd broken
1623 $ cat > test-broken.t <<EOF
1623 $ cat > test-broken.t <<EOF
1624 > true
1624 > true
1625 > #if notarealhghavefeature
1625 > #if notarealhghavefeature
1626 > $ false
1626 > $ false
1627 > #endif
1627 > #endif
1628 > EOF
1628 > EOF
1629 $ for f in 1 2 3 4 ; do
1629 $ for f in 1 2 3 4 ; do
1630 > cat > test-works-$f.t <<EOF
1630 > cat > test-works-$f.t <<EOF
1631 > This is test case $f
1631 > This is test case $f
1632 > $ sleep 1
1632 > $ sleep 1
1633 > EOF
1633 > EOF
1634 > done
1634 > done
1635 $ rt -j 2
1635 $ rt -j 2
1636 running 5 tests using 2 parallel processes
1636 running 5 tests using 2 parallel processes
1637 ....
1637 ....
1638 # Ran 5 tests, 0 skipped, 0 failed.
1638 # Ran 5 tests, 0 skipped, 0 failed.
1639 skipped: unknown feature: notarealhghavefeature
1639 skipped: unknown feature: notarealhghavefeature
1640
1640
1641 $ cd ..
1641 $ cd ..
1642 $ rm -rf broken
1642 $ rm -rf broken
1643
1643
1644 Test cases in .t files
1644 Test cases in .t files
1645 ======================
1645 ======================
1646 $ mkdir cases
1646 $ mkdir cases
1647 $ cd cases
1647 $ cd cases
1648 $ cat > test-cases-abc.t <<'EOF'
1648 $ cat > test-cases-abc.t <<'EOF'
1649 > #testcases A B C
1649 > #testcases A B C
1650 > $ V=B
1650 > $ V=B
1651 > #if A
1651 > #if A
1652 > $ V=A
1652 > $ V=A
1653 > #endif
1653 > #endif
1654 > #if C
1654 > #if C
1655 > $ V=C
1655 > $ V=C
1656 > #endif
1656 > #endif
1657 > $ echo $V | sed 's/A/C/'
1657 > $ echo $V | sed 's/A/C/'
1658 > C
1658 > C
1659 > #if C
1659 > #if C
1660 > $ [ $V = C ]
1660 > $ [ $V = C ]
1661 > #endif
1661 > #endif
1662 > #if A
1662 > #if A
1663 > $ [ $V = C ]
1663 > $ [ $V = C ]
1664 > [1]
1664 > [1]
1665 > #endif
1665 > #endif
1666 > #if no-C
1666 > #if no-C
1667 > $ [ $V = C ]
1667 > $ [ $V = C ]
1668 > [1]
1668 > [1]
1669 > #endif
1669 > #endif
1670 > $ [ $V = D ]
1670 > $ [ $V = D ]
1671 > [1]
1671 > [1]
1672 > EOF
1672 > EOF
1673 $ rt
1673 $ rt
1674 running 3 tests using 1 parallel processes
1674 running 3 tests using 1 parallel processes
1675 .
1675 .
1676 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1676 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1677 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1677 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1678 @@ -7,7 +7,7 @@
1678 @@ -7,7 +7,7 @@
1679 $ V=C
1679 $ V=C
1680 #endif
1680 #endif
1681 $ echo $V | sed 's/A/C/'
1681 $ echo $V | sed 's/A/C/'
1682 - C
1682 - C
1683 + B
1683 + B
1684 #if C
1684 #if C
1685 $ [ $V = C ]
1685 $ [ $V = C ]
1686 #endif
1686 #endif
1687
1687
1688 ERROR: test-cases-abc.t#B output changed
1688 ERROR: test-cases-abc.t#B output changed
1689 !.
1689 !.
1690 Failed test-cases-abc.t#B: output changed
1690 Failed test-cases-abc.t#B: output changed
1691 # Ran 3 tests, 0 skipped, 1 failed.
1691 # Ran 3 tests, 0 skipped, 1 failed.
1692 python hash seed: * (glob)
1692 python hash seed: * (glob)
1693 [1]
1693 [1]
1694
1694
1695 --restart works
1695 --restart works
1696
1696
1697 $ rt --restart
1697 $ rt --restart
1698 running 2 tests using 1 parallel processes
1698 running 2 tests using 1 parallel processes
1699
1699
1700 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1700 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1701 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1701 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1702 @@ -7,7 +7,7 @@
1702 @@ -7,7 +7,7 @@
1703 $ V=C
1703 $ V=C
1704 #endif
1704 #endif
1705 $ echo $V | sed 's/A/C/'
1705 $ echo $V | sed 's/A/C/'
1706 - C
1706 - C
1707 + B
1707 + B
1708 #if C
1708 #if C
1709 $ [ $V = C ]
1709 $ [ $V = C ]
1710 #endif
1710 #endif
1711
1711
1712 ERROR: test-cases-abc.t#B output changed
1712 ERROR: test-cases-abc.t#B output changed
1713 !.
1713 !.
1714 Failed test-cases-abc.t#B: output changed
1714 Failed test-cases-abc.t#B: output changed
1715 # Ran 2 tests, 0 skipped, 1 failed.
1715 # Ran 2 tests, 0 skipped, 1 failed.
1716 python hash seed: * (glob)
1716 python hash seed: * (glob)
1717 [1]
1717 [1]
1718
1718
1719 --restart works with outputdir
1719 --restart works with outputdir
1720
1720
1721 $ mkdir output
1721 $ mkdir output
1722 $ mv test-cases-abc.t#B.err output
1722 $ mv test-cases-abc.t#B.err output
1723 $ rt --restart --outputdir output
1723 $ rt --restart --outputdir output
1724 running 2 tests using 1 parallel processes
1724 running 2 tests using 1 parallel processes
1725
1725
1726 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1726 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1727 +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t#B.err
1727 +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t#B.err
1728 @@ -7,7 +7,7 @@
1728 @@ -7,7 +7,7 @@
1729 $ V=C
1729 $ V=C
1730 #endif
1730 #endif
1731 $ echo $V | sed 's/A/C/'
1731 $ echo $V | sed 's/A/C/'
1732 - C
1732 - C
1733 + B
1733 + B
1734 #if C
1734 #if C
1735 $ [ $V = C ]
1735 $ [ $V = C ]
1736 #endif
1736 #endif
1737
1737
1738 ERROR: test-cases-abc.t#B output changed
1738 ERROR: test-cases-abc.t#B output changed
1739 !.
1739 !.
1740 Failed test-cases-abc.t#B: output changed
1740 Failed test-cases-abc.t#B: output changed
1741 # Ran 2 tests, 0 skipped, 1 failed.
1741 # Ran 2 tests, 0 skipped, 1 failed.
1742 python hash seed: * (glob)
1742 python hash seed: * (glob)
1743 [1]
1743 [1]
1744
1744
1745 Test TESTCASE variable
1745 Test TESTCASE variable
1746
1746
1747 $ cat > test-cases-ab.t <<'EOF'
1747 $ cat > test-cases-ab.t <<'EOF'
1748 > $ dostuff() {
1748 > $ dostuff() {
1749 > > echo "In case $TESTCASE"
1749 > > echo "In case $TESTCASE"
1750 > > }
1750 > > }
1751 > #testcases A B
1751 > #testcases A B
1752 > #if A
1752 > #if A
1753 > $ dostuff
1753 > $ dostuff
1754 > In case A
1754 > In case A
1755 > #endif
1755 > #endif
1756 > #if B
1756 > #if B
1757 > $ dostuff
1757 > $ dostuff
1758 > In case B
1758 > In case B
1759 > #endif
1759 > #endif
1760 > EOF
1760 > EOF
1761 $ rt test-cases-ab.t
1761 $ rt test-cases-ab.t
1762 running 2 tests using 1 parallel processes
1762 running 2 tests using 1 parallel processes
1763 ..
1763 ..
1764 # Ran 2 tests, 0 skipped, 0 failed.
1764 # Ran 2 tests, 0 skipped, 0 failed.
1765
1765
1766 Support running a specific test case
1766 Support running a specific test case
1767
1767
1768 $ rt "test-cases-abc.t#B"
1768 $ rt "test-cases-abc.t#B"
1769 running 1 tests using 1 parallel processes
1769 running 1 tests using 1 parallel processes
1770
1770
1771 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1771 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1772 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1772 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1773 @@ -7,7 +7,7 @@
1773 @@ -7,7 +7,7 @@
1774 $ V=C
1774 $ V=C
1775 #endif
1775 #endif
1776 $ echo $V | sed 's/A/C/'
1776 $ echo $V | sed 's/A/C/'
1777 - C
1777 - C
1778 + B
1778 + B
1779 #if C
1779 #if C
1780 $ [ $V = C ]
1780 $ [ $V = C ]
1781 #endif
1781 #endif
1782
1782
1783 ERROR: test-cases-abc.t#B output changed
1783 ERROR: test-cases-abc.t#B output changed
1784 !
1784 !
1785 Failed test-cases-abc.t#B: output changed
1785 Failed test-cases-abc.t#B: output changed
1786 # Ran 1 tests, 0 skipped, 1 failed.
1786 # Ran 1 tests, 0 skipped, 1 failed.
1787 python hash seed: * (glob)
1787 python hash seed: * (glob)
1788 [1]
1788 [1]
1789
1789
1790 Support running multiple test cases in the same file
1790 Support running multiple test cases in the same file
1791
1791
1792 $ rt test-cases-abc.t#B test-cases-abc.t#C
1792 $ rt test-cases-abc.t#B test-cases-abc.t#C
1793 running 2 tests using 1 parallel processes
1793 running 2 tests using 1 parallel processes
1794
1794
1795 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1795 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1796 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1796 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1797 @@ -7,7 +7,7 @@
1797 @@ -7,7 +7,7 @@
1798 $ V=C
1798 $ V=C
1799 #endif
1799 #endif
1800 $ echo $V | sed 's/A/C/'
1800 $ echo $V | sed 's/A/C/'
1801 - C
1801 - C
1802 + B
1802 + B
1803 #if C
1803 #if C
1804 $ [ $V = C ]
1804 $ [ $V = C ]
1805 #endif
1805 #endif
1806
1806
1807 ERROR: test-cases-abc.t#B output changed
1807 ERROR: test-cases-abc.t#B output changed
1808 !.
1808 !.
1809 Failed test-cases-abc.t#B: output changed
1809 Failed test-cases-abc.t#B: output changed
1810 # Ran 2 tests, 0 skipped, 1 failed.
1810 # Ran 2 tests, 0 skipped, 1 failed.
1811 python hash seed: * (glob)
1811 python hash seed: * (glob)
1812 [1]
1812 [1]
1813
1813
1814 Support ignoring invalid test cases
1814 Support ignoring invalid test cases
1815
1815
1816 $ rt test-cases-abc.t#B test-cases-abc.t#D
1816 $ rt test-cases-abc.t#B test-cases-abc.t#D
1817 running 1 tests using 1 parallel processes
1817 running 1 tests using 1 parallel processes
1818
1818
1819 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1819 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1820 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1820 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1821 @@ -7,7 +7,7 @@
1821 @@ -7,7 +7,7 @@
1822 $ V=C
1822 $ V=C
1823 #endif
1823 #endif
1824 $ echo $V | sed 's/A/C/'
1824 $ echo $V | sed 's/A/C/'
1825 - C
1825 - C
1826 + B
1826 + B
1827 #if C
1827 #if C
1828 $ [ $V = C ]
1828 $ [ $V = C ]
1829 #endif
1829 #endif
1830
1830
1831 ERROR: test-cases-abc.t#B output changed
1831 ERROR: test-cases-abc.t#B output changed
1832 !
1832 !
1833 Failed test-cases-abc.t#B: output changed
1833 Failed test-cases-abc.t#B: output changed
1834 # Ran 1 tests, 0 skipped, 1 failed.
1834 # Ran 1 tests, 0 skipped, 1 failed.
1835 python hash seed: * (glob)
1835 python hash seed: * (glob)
1836 [1]
1836 [1]
1837
1837
1838 Support running complex test case names
1838 Support running complex test case names
1839
1839
1840 $ cat > test-cases-advanced-cases.t <<'EOF'
1840 $ cat > test-cases-advanced-cases.t <<'EOF'
1841 > #testcases simple case-with-dashes casewith_-.chars
1841 > #testcases simple case-with-dashes casewith_-.chars
1842 > $ echo $TESTCASE
1842 > $ echo $TESTCASE
1843 > simple
1843 > simple
1844 > EOF
1844 > EOF
1845
1845
1846 $ cat test-cases-advanced-cases.t
1846 $ cat test-cases-advanced-cases.t
1847 #testcases simple case-with-dashes casewith_-.chars
1847 #testcases simple case-with-dashes casewith_-.chars
1848 $ echo $TESTCASE
1848 $ echo $TESTCASE
1849 simple
1849 simple
1850
1850
1851 $ rt test-cases-advanced-cases.t
1851 $ rt test-cases-advanced-cases.t
1852 running 3 tests using 1 parallel processes
1852 running 3 tests using 1 parallel processes
1853
1853
1854 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1854 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1855 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
1855 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
1856 @@ -1,3 +1,3 @@
1856 @@ -1,3 +1,3 @@
1857 #testcases simple case-with-dashes casewith_-.chars
1857 #testcases simple case-with-dashes casewith_-.chars
1858 $ echo $TESTCASE
1858 $ echo $TESTCASE
1859 - simple
1859 - simple
1860 + case-with-dashes
1860 + case-with-dashes
1861
1861
1862 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1862 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1863 !
1863 !
1864 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1864 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1865 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
1865 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
1866 @@ -1,3 +1,3 @@
1866 @@ -1,3 +1,3 @@
1867 #testcases simple case-with-dashes casewith_-.chars
1867 #testcases simple case-with-dashes casewith_-.chars
1868 $ echo $TESTCASE
1868 $ echo $TESTCASE
1869 - simple
1869 - simple
1870 + casewith_-.chars
1870 + casewith_-.chars
1871
1871
1872 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1872 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1873 !.
1873 !.
1874 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1874 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1875 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1875 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1876 # Ran 3 tests, 0 skipped, 2 failed.
1876 # Ran 3 tests, 0 skipped, 2 failed.
1877 python hash seed: * (glob)
1877 python hash seed: * (glob)
1878 [1]
1878 [1]
1879
1879
1880 $ rt "test-cases-advanced-cases.t#case-with-dashes"
1880 $ rt "test-cases-advanced-cases.t#case-with-dashes"
1881 running 1 tests using 1 parallel processes
1881 running 1 tests using 1 parallel processes
1882
1882
1883 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1883 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1884 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
1884 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
1885 @@ -1,3 +1,3 @@
1885 @@ -1,3 +1,3 @@
1886 #testcases simple case-with-dashes casewith_-.chars
1886 #testcases simple case-with-dashes casewith_-.chars
1887 $ echo $TESTCASE
1887 $ echo $TESTCASE
1888 - simple
1888 - simple
1889 + case-with-dashes
1889 + case-with-dashes
1890
1890
1891 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1891 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1892 !
1892 !
1893 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1893 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1894 # Ran 1 tests, 0 skipped, 1 failed.
1894 # Ran 1 tests, 0 skipped, 1 failed.
1895 python hash seed: * (glob)
1895 python hash seed: * (glob)
1896 [1]
1896 [1]
1897
1897
1898 $ rt "test-cases-advanced-cases.t#casewith_-.chars"
1898 $ rt "test-cases-advanced-cases.t#casewith_-.chars"
1899 running 1 tests using 1 parallel processes
1899 running 1 tests using 1 parallel processes
1900
1900
1901 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1901 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1902 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
1902 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
1903 @@ -1,3 +1,3 @@
1903 @@ -1,3 +1,3 @@
1904 #testcases simple case-with-dashes casewith_-.chars
1904 #testcases simple case-with-dashes casewith_-.chars
1905 $ echo $TESTCASE
1905 $ echo $TESTCASE
1906 - simple
1906 - simple
1907 + casewith_-.chars
1907 + casewith_-.chars
1908
1908
1909 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1909 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1910 !
1910 !
1911 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1911 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1912 # Ran 1 tests, 0 skipped, 1 failed.
1912 # Ran 1 tests, 0 skipped, 1 failed.
1913 python hash seed: * (glob)
1913 python hash seed: * (glob)
1914 [1]
1914 [1]
1915
1915
1916 Test automatic pattern replacement
1916 Test automatic pattern replacement
1917 ==================================
1917 ==================================
1918
1918
1919 $ cat << EOF >> common-pattern.py
1919 $ cat << EOF >> common-pattern.py
1920 > substitutions = [
1920 > substitutions = [
1921 > (br'foo-(.*)\\b',
1921 > (br'foo-(.*)\\b',
1922 > br'\$XXX=\\1\$'),
1922 > br'\$XXX=\\1\$'),
1923 > (br'bar\\n',
1923 > (br'bar\\n',
1924 > br'\$YYY$\\n'),
1924 > br'\$YYY$\\n'),
1925 > ]
1925 > ]
1926 > EOF
1926 > EOF
1927
1927
1928 $ cat << EOF >> test-substitution.t
1928 $ cat << EOF >> test-substitution.t
1929 > $ echo foo-12
1929 > $ echo foo-12
1930 > \$XXX=12$
1930 > \$XXX=12$
1931 > $ echo foo-42
1931 > $ echo foo-42
1932 > \$XXX=42$
1932 > \$XXX=42$
1933 > $ echo bar prior
1933 > $ echo bar prior
1934 > bar prior
1934 > bar prior
1935 > $ echo lastbar
1935 > $ echo lastbar
1936 > last\$YYY$
1936 > last\$YYY$
1937 > $ echo foo-bar foo-baz
1937 > $ echo foo-bar foo-baz
1938 > EOF
1938 > EOF
1939
1939
1940 $ rt test-substitution.t
1940 $ rt test-substitution.t
1941 running 1 tests using 1 parallel processes
1941 running 1 tests using 1 parallel processes
1942
1942
1943 --- $TESTTMP/anothertests/cases/test-substitution.t
1943 --- $TESTTMP/anothertests/cases/test-substitution.t
1944 +++ $TESTTMP/anothertests/cases/test-substitution.t.err
1944 +++ $TESTTMP/anothertests/cases/test-substitution.t.err
1945 @@ -7,3 +7,4 @@
1945 @@ -7,3 +7,4 @@
1946 $ echo lastbar
1946 $ echo lastbar
1947 last$YYY$
1947 last$YYY$
1948 $ echo foo-bar foo-baz
1948 $ echo foo-bar foo-baz
1949 + $XXX=bar foo-baz$
1949 + $XXX=bar foo-baz$
1950
1950
1951 ERROR: test-substitution.t output changed
1951 ERROR: test-substitution.t output changed
1952 !
1952 !
1953 Failed test-substitution.t: output changed
1953 Failed test-substitution.t: output changed
1954 # Ran 1 tests, 0 skipped, 1 failed.
1954 # Ran 1 tests, 0 skipped, 1 failed.
1955 python hash seed: * (glob)
1955 python hash seed: * (glob)
1956 [1]
1956 [1]
1957
1957
1958 --extra-config-opt works
1958 --extra-config-opt works
1959
1959
1960 $ cat << EOF >> test-config-opt.t
1960 $ cat << EOF >> test-config-opt.t
1961 > $ hg init test-config-opt
1961 > $ hg init test-config-opt
1962 > $ hg -R test-config-opt purge
1962 > $ hg -R test-config-opt purge
1963 > $ echo "HGTESTEXTRAEXTENSIONS: \$HGTESTEXTRAEXTENSIONS"
1963 > $ echo "HGTESTEXTRAEXTENSIONS: \$HGTESTEXTRAEXTENSIONS"
1964 > HGTESTEXTRAEXTENSIONS: purge
1964 > HGTESTEXTRAEXTENSIONS: purge
1965 > EOF
1965 > EOF
1966
1966
1967 $ rt --extra-config-opt extensions.purge= \
1967 $ rt --extra-config-opt extensions.purge= \
1968 > --extra-config-opt not.an.extension=True test-config-opt.t
1968 > --extra-config-opt not.an.extension=True test-config-opt.t
1969 running 1 tests using 1 parallel processes
1969 running 1 tests using 1 parallel processes
1970 .
1970 .
1971 # Ran 1 tests, 0 skipped, 0 failed.
1971 # Ran 1 tests, 0 skipped, 0 failed.
1972
1972
1973 Test conditional output matching
1973 Test conditional output matching
1974 ================================
1974 ================================
1975
1975
1976 $ cat << EOF >> test-conditional-matching.t
1976 $ cat << EOF >> test-conditional-matching.t
1977 > #testcases foo bar
1977 > #testcases foo bar
1978 > $ echo richtig
1978 > $ echo richtig
1979 > richtig (true !)
1979 > richtig (true !)
1980 > $ echo falsch
1980 > $ echo falsch
1981 > falsch (false !)
1981 > falsch (false !)
1982 > #if foo
1982 > #if foo
1983 > $ echo arthur
1983 > $ echo arthur
1984 > arthur (bar !)
1984 > arthur (bar !)
1985 > #endif
1985 > #endif
1986 > $ echo celeste
1986 > $ echo celeste
1987 > celeste (foo !)
1987 > celeste (foo !)
1988 > $ echo zephir
1988 > $ echo zephir
1989 > zephir (bar !)
1989 > zephir (bar !)
1990 > EOF
1990 > EOF
1991
1991
1992 $ rt test-conditional-matching.t
1992 $ rt test-conditional-matching.t
1993 running 2 tests using 1 parallel processes
1993 running 2 tests using 1 parallel processes
1994
1994
1995 --- $TESTTMP/anothertests/cases/test-conditional-matching.t
1995 --- $TESTTMP/anothertests/cases/test-conditional-matching.t
1996 +++ $TESTTMP/anothertests/cases/test-conditional-matching.t#bar.err
1996 +++ $TESTTMP/anothertests/cases/test-conditional-matching.t#bar.err
1997 @@ -3,11 +3,13 @@
1997 @@ -3,11 +3,13 @@
1998 richtig (true !)
1998 richtig (true !)
1999 $ echo falsch
1999 $ echo falsch
2000 falsch (false !)
2000 falsch (false !)
2001 + falsch
2001 + falsch
2002 #if foo
2002 #if foo
2003 $ echo arthur
2003 $ echo arthur
2004 arthur \(bar !\) (re)
2004 arthur \(bar !\) (re)
2005 #endif
2005 #endif
2006 $ echo celeste
2006 $ echo celeste
2007 celeste \(foo !\) (re)
2007 celeste \(foo !\) (re)
2008 + celeste
2008 + celeste
2009 $ echo zephir
2009 $ echo zephir
2010 zephir \(bar !\) (re)
2010 zephir \(bar !\) (re)
2011
2011
2012 ERROR: test-conditional-matching.t#bar output changed
2012 ERROR: test-conditional-matching.t#bar output changed
2013 !
2013 !
2014 --- $TESTTMP/anothertests/cases/test-conditional-matching.t
2014 --- $TESTTMP/anothertests/cases/test-conditional-matching.t
2015 +++ $TESTTMP/anothertests/cases/test-conditional-matching.t#foo.err
2015 +++ $TESTTMP/anothertests/cases/test-conditional-matching.t#foo.err
2016 @@ -3,11 +3,14 @@
2016 @@ -3,11 +3,14 @@
2017 richtig (true !)
2017 richtig (true !)
2018 $ echo falsch
2018 $ echo falsch
2019 falsch (false !)
2019 falsch (false !)
2020 + falsch
2020 + falsch
2021 #if foo
2021 #if foo
2022 $ echo arthur
2022 $ echo arthur
2023 arthur \(bar !\) (re)
2023 arthur \(bar !\) (re)
2024 + arthur
2024 + arthur
2025 #endif
2025 #endif
2026 $ echo celeste
2026 $ echo celeste
2027 celeste \(foo !\) (re)
2027 celeste \(foo !\) (re)
2028 $ echo zephir
2028 $ echo zephir
2029 zephir \(bar !\) (re)
2029 zephir \(bar !\) (re)
2030 + zephir
2030 + zephir
2031
2031
2032 ERROR: test-conditional-matching.t#foo output changed
2032 ERROR: test-conditional-matching.t#foo output changed
2033 !
2033 !
2034 Failed test-conditional-matching.t#bar: output changed
2034 Failed test-conditional-matching.t#bar: output changed
2035 Failed test-conditional-matching.t#foo: output changed
2035 Failed test-conditional-matching.t#foo: output changed
2036 # Ran 2 tests, 0 skipped, 2 failed.
2036 # Ran 2 tests, 0 skipped, 2 failed.
2037 python hash seed: * (glob)
2037 python hash seed: * (glob)
2038 [1]
2038 [1]
2039
2040 Test that a proper "python" has been set up
2041 ===========================================
2042
2043 (with a small check-code workaround)
2044 $ printf "#!/usr/bi" > test-py3.tmp
2045 $ printf "n/en" >> test-py3.tmp
2046 $ cat << EOF >> test-py3.tmp
2047 > v python3
2048 > import sys
2049 > print('.'.join(str(x) for x in sys.version_info))
2050 > EOF
2051 $ mv test-py3.tmp test-py3.py
2052 $ chmod +x test-py3.py
2053
2054 (with a small check-code workaround)
2055 $ printf "#!/usr/bi" > test-py.tmp
2056 $ printf "n/en" >> test-py.tmp
2057 $ cat << EOF >> test-py.tmp
2058 > v python
2059 > import sys
2060 > print('.'.join(str(x) for x in sys.version_info))
2061 > EOF
2062 $ mv test-py.tmp test-py.py
2063 $ chmod +x test-py.py
2064
2065 $ ./test-py3.py
2066 3.* (glob)
2067 $ ./test-py.py
2068 2.* (glob) (no-py3 !)
2069 3.* (glob) (py3 !)
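
Aside on the automatic pattern replacement exercised above: after the shell heredoc unescaping, common-pattern.py ends up holding a plain list of (regex, replacement) byte pairs. As a rough stand-alone illustration only (the real matching is done inside run-tests.py; apply_substitutions below is a made-up helper name), the same table can be applied to one line of test output with re.sub:

import re

# The substitution table as it lands in common-pattern.py after shell unescaping.
substitutions = [
    (br'foo-(.*)\b', br'$XXX=\1$'),
    (br'bar\n', br'$YYY$\n'),
]

def apply_substitutions(line, table=substitutions):
    # Run every (regex, replacement) pair over one bytes line of output.
    for pattern, replacement in table:
        line = re.sub(pattern, replacement, line)
    return line

# Mirrors the expectations recorded in test-substitution.t above.
assert apply_substitutions(b'foo-12\n') == b'$XXX=12$\n'
assert apply_substitutions(b'lastbar\n') == b'last$YYY$\n'
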
@@ -1,50 +1,50
1 #!/usr/bin/env python3
1 #!/usr/bin/env python
2 from __future__ import absolute_import, print_function
2 from __future__ import absolute_import, print_function
3
3
4 import sys
4 import sys
5
5
6 from mercurial import (
6 from mercurial import (
7 commands,
7 commands,
8 localrepo,
8 localrepo,
9 ui as uimod,
9 ui as uimod,
10 )
10 )
11
11
12 print_ = print
12 print_ = print
13
13
14
14
15 def print(*args, **kwargs):
15 def print(*args, **kwargs):
16 """print() wrapper that flushes stdout buffers to avoid py3 buffer issues
16 """print() wrapper that flushes stdout buffers to avoid py3 buffer issues
17
17
18 We could also just write directly to sys.stdout.buffer the way the
18 We could also just write directly to sys.stdout.buffer the way the
19 ui object will, but this was easier for porting the test.
19 ui object will, but this was easier for porting the test.
20 """
20 """
21 print_(*args, **kwargs)
21 print_(*args, **kwargs)
22 sys.stdout.flush()
22 sys.stdout.flush()
23
23
24
24
25 u = uimod.ui.load()
25 u = uimod.ui.load()
26
26
27 print('% creating repo')
27 print('% creating repo')
28 repo = localrepo.instance(u, b'.', create=True)
28 repo = localrepo.instance(u, b'.', create=True)
29
29
30 f = open('test.py', 'w')
30 f = open('test.py', 'w')
31 try:
31 try:
32 f.write('foo\n')
32 f.write('foo\n')
33 finally:
33 finally:
34 f.close
34 f.close
35
35
36 print('% add and commit')
36 print('% add and commit')
37 commands.add(u, repo, b'test.py')
37 commands.add(u, repo, b'test.py')
38 commands.commit(u, repo, message=b'*')
38 commands.commit(u, repo, message=b'*')
39 commands.status(u, repo, clean=True)
39 commands.status(u, repo, clean=True)
40
40
41
41
42 print('% change')
42 print('% change')
43 f = open('test.py', 'w')
43 f = open('test.py', 'w')
44 try:
44 try:
45 f.write('bar\n')
45 f.write('bar\n')
46 finally:
46 finally:
47 f.close()
47 f.close()
48
48
49 # this would return clean instead of changed before the fix
49 # this would return clean instead of changed before the fix
50 commands.status(u, repo, clean=True, modified=True)
50 commands.status(u, repo, clean=True, modified=True)
@@ -1,365 +1,365
1 #!/usr/bin/env python3
1 #!/usr/bin/env python
2 """
2 """
3 Tests the buffering behavior of stdio streams in `mercurial.utils.procutil`.
3 Tests the buffering behavior of stdio streams in `mercurial.utils.procutil`.
4 """
4 """
5 from __future__ import absolute_import
5 from __future__ import absolute_import
6
6
7 import contextlib
7 import contextlib
8 import errno
8 import errno
9 import os
9 import os
10 import signal
10 import signal
11 import subprocess
11 import subprocess
12 import sys
12 import sys
13 import tempfile
13 import tempfile
14 import unittest
14 import unittest
15
15
16 from mercurial import pycompat, util
16 from mercurial import pycompat, util
17
17
18
18
19 if pycompat.ispy3:
19 if pycompat.ispy3:
20
20
21 def set_noninheritable(fd):
21 def set_noninheritable(fd):
22 # On Python 3, file descriptors are non-inheritable by default.
22 # On Python 3, file descriptors are non-inheritable by default.
23 pass
23 pass
24
24
25
25
26 else:
26 else:
27 if pycompat.iswindows:
27 if pycompat.iswindows:
28 # unused
28 # unused
29 set_noninheritable = None
29 set_noninheritable = None
30 else:
30 else:
31 import fcntl
31 import fcntl
32
32
33 def set_noninheritable(fd):
33 def set_noninheritable(fd):
34 old = fcntl.fcntl(fd, fcntl.F_GETFD)
34 old = fcntl.fcntl(fd, fcntl.F_GETFD)
35 fcntl.fcntl(fd, fcntl.F_SETFD, old | fcntl.FD_CLOEXEC)
35 fcntl.fcntl(fd, fcntl.F_SETFD, old | fcntl.FD_CLOEXEC)
36
36
37
37
38 TEST_BUFFERING_CHILD_SCRIPT = r'''
38 TEST_BUFFERING_CHILD_SCRIPT = r'''
39 import os
39 import os
40
40
41 from mercurial import dispatch
41 from mercurial import dispatch
42 from mercurial.utils import procutil
42 from mercurial.utils import procutil
43
43
44 dispatch.initstdio()
44 dispatch.initstdio()
45 procutil.{stream}.write(b'aaa')
45 procutil.{stream}.write(b'aaa')
46 os.write(procutil.{stream}.fileno(), b'[written aaa]')
46 os.write(procutil.{stream}.fileno(), b'[written aaa]')
47 procutil.{stream}.write(b'bbb\n')
47 procutil.{stream}.write(b'bbb\n')
48 os.write(procutil.{stream}.fileno(), b'[written bbb\\n]')
48 os.write(procutil.{stream}.fileno(), b'[written bbb\\n]')
49 '''
49 '''
50 UNBUFFERED = b'aaa[written aaa]bbb\n[written bbb\\n]'
50 UNBUFFERED = b'aaa[written aaa]bbb\n[written bbb\\n]'
51 LINE_BUFFERED = b'[written aaa]aaabbb\n[written bbb\\n]'
51 LINE_BUFFERED = b'[written aaa]aaabbb\n[written bbb\\n]'
52 FULLY_BUFFERED = b'[written aaa][written bbb\\n]aaabbb\n'
52 FULLY_BUFFERED = b'[written aaa][written bbb\\n]aaabbb\n'
53
53
54
54
55 TEST_LARGE_WRITE_CHILD_SCRIPT = r'''
55 TEST_LARGE_WRITE_CHILD_SCRIPT = r'''
56 import os
56 import os
57 import signal
57 import signal
58 import sys
58 import sys
59
59
60 from mercurial import dispatch
60 from mercurial import dispatch
61 from mercurial.utils import procutil
61 from mercurial.utils import procutil
62
62
63 signal.signal(signal.SIGINT, lambda *x: None)
63 signal.signal(signal.SIGINT, lambda *x: None)
64 dispatch.initstdio()
64 dispatch.initstdio()
65 write_result = procutil.{stream}.write(b'x' * 1048576)
65 write_result = procutil.{stream}.write(b'x' * 1048576)
66 with os.fdopen(
66 with os.fdopen(
67 os.open({write_result_fn!r}, os.O_WRONLY | getattr(os, 'O_TEMPORARY', 0)),
67 os.open({write_result_fn!r}, os.O_WRONLY | getattr(os, 'O_TEMPORARY', 0)),
68 'w',
68 'w',
69 ) as write_result_f:
69 ) as write_result_f:
70 write_result_f.write(str(write_result))
70 write_result_f.write(str(write_result))
71 '''
71 '''
72
72
73
73
74 TEST_BROKEN_PIPE_CHILD_SCRIPT = r'''
74 TEST_BROKEN_PIPE_CHILD_SCRIPT = r'''
75 import os
75 import os
76 import pickle
76 import pickle
77
77
78 from mercurial import dispatch
78 from mercurial import dispatch
79 from mercurial.utils import procutil
79 from mercurial.utils import procutil
80
80
81 dispatch.initstdio()
81 dispatch.initstdio()
82 procutil.stdin.read(1) # wait until parent process closed pipe
82 procutil.stdin.read(1) # wait until parent process closed pipe
83 try:
83 try:
84 procutil.{stream}.write(b'test')
84 procutil.{stream}.write(b'test')
85 procutil.{stream}.flush()
85 procutil.{stream}.flush()
86 except EnvironmentError as e:
86 except EnvironmentError as e:
87 with os.fdopen(
87 with os.fdopen(
88 os.open(
88 os.open(
89 {err_fn!r},
89 {err_fn!r},
90 os.O_WRONLY
90 os.O_WRONLY
91 | getattr(os, 'O_BINARY', 0)
91 | getattr(os, 'O_BINARY', 0)
92 | getattr(os, 'O_TEMPORARY', 0),
92 | getattr(os, 'O_TEMPORARY', 0),
93 ),
93 ),
94 'wb',
94 'wb',
95 ) as err_f:
95 ) as err_f:
96 pickle.dump(e, err_f)
96 pickle.dump(e, err_f)
97 # Exit early to suppress further broken pipe errors at interpreter shutdown.
97 # Exit early to suppress further broken pipe errors at interpreter shutdown.
98 os._exit(0)
98 os._exit(0)
99 '''
99 '''
100
100
101
101
102 @contextlib.contextmanager
102 @contextlib.contextmanager
103 def _closing(fds):
103 def _closing(fds):
104 try:
104 try:
105 yield
105 yield
106 finally:
106 finally:
107 for fd in fds:
107 for fd in fds:
108 try:
108 try:
109 os.close(fd)
109 os.close(fd)
110 except EnvironmentError:
110 except EnvironmentError:
111 pass
111 pass
112
112
113
113
114 # In the following, we set the FDs non-inheritable mainly to make it possible
114 # In the following, we set the FDs non-inheritable mainly to make it possible
115 # for tests to close the receiving end of the pipe / PTYs.
115 # for tests to close the receiving end of the pipe / PTYs.
116
116
117
117
118 @contextlib.contextmanager
118 @contextlib.contextmanager
119 def _devnull():
119 def _devnull():
120 devnull = os.open(os.devnull, os.O_WRONLY)
120 devnull = os.open(os.devnull, os.O_WRONLY)
121 # We don't have a receiving end, so it's not worth the effort on Python 2
121 # We don't have a receiving end, so it's not worth the effort on Python 2
122 # on Windows to make the FD non-inheritable.
122 # on Windows to make the FD non-inheritable.
123 with _closing([devnull]):
123 with _closing([devnull]):
124 yield (None, devnull)
124 yield (None, devnull)
125
125
126
126
127 @contextlib.contextmanager
127 @contextlib.contextmanager
128 def _pipes():
128 def _pipes():
129 rwpair = os.pipe()
129 rwpair = os.pipe()
130 # Pipes are already non-inheritable on Windows.
130 # Pipes are already non-inheritable on Windows.
131 if not pycompat.iswindows:
131 if not pycompat.iswindows:
132 set_noninheritable(rwpair[0])
132 set_noninheritable(rwpair[0])
133 set_noninheritable(rwpair[1])
133 set_noninheritable(rwpair[1])
134 with _closing(rwpair):
134 with _closing(rwpair):
135 yield rwpair
135 yield rwpair
136
136
137
137
138 @contextlib.contextmanager
138 @contextlib.contextmanager
139 def _ptys():
139 def _ptys():
140 if pycompat.iswindows:
140 if pycompat.iswindows:
141 raise unittest.SkipTest("PTYs are not supported on Windows")
141 raise unittest.SkipTest("PTYs are not supported on Windows")
142 import pty
142 import pty
143 import tty
143 import tty
144
144
145 rwpair = pty.openpty()
145 rwpair = pty.openpty()
146 set_noninheritable(rwpair[0])
146 set_noninheritable(rwpair[0])
147 set_noninheritable(rwpair[1])
147 set_noninheritable(rwpair[1])
148 with _closing(rwpair):
148 with _closing(rwpair):
149 tty.setraw(rwpair[0])
149 tty.setraw(rwpair[0])
150 yield rwpair
150 yield rwpair
151
151
152
152
153 def _readall(fd, buffer_size, initial_buf=None):
153 def _readall(fd, buffer_size, initial_buf=None):
154 buf = initial_buf or []
154 buf = initial_buf or []
155 while True:
155 while True:
156 try:
156 try:
157 s = os.read(fd, buffer_size)
157 s = os.read(fd, buffer_size)
158 except OSError as e:
158 except OSError as e:
159 if e.errno == errno.EIO:
159 if e.errno == errno.EIO:
160 # If the child-facing PTY got closed, reading from the
160 # If the child-facing PTY got closed, reading from the
161 # parent-facing PTY raises EIO.
161 # parent-facing PTY raises EIO.
162 break
162 break
163 raise
163 raise
164 if not s:
164 if not s:
165 break
165 break
166 buf.append(s)
166 buf.append(s)
167 return b''.join(buf)
167 return b''.join(buf)
168
168
169
169
170 class TestStdio(unittest.TestCase):
170 class TestStdio(unittest.TestCase):
171 def _test(
171 def _test(
172 self,
172 self,
173 child_script,
173 child_script,
174 stream,
174 stream,
175 rwpair_generator,
175 rwpair_generator,
176 check_output,
176 check_output,
177 python_args=[],
177 python_args=[],
178 post_child_check=None,
178 post_child_check=None,
179 stdin_generator=None,
179 stdin_generator=None,
180 ):
180 ):
181 assert stream in ('stdout', 'stderr')
181 assert stream in ('stdout', 'stderr')
182 if stdin_generator is None:
182 if stdin_generator is None:
183 stdin_generator = open(os.devnull, 'rb')
183 stdin_generator = open(os.devnull, 'rb')
184 with rwpair_generator() as (
184 with rwpair_generator() as (
185 stream_receiver,
185 stream_receiver,
186 child_stream,
186 child_stream,
187 ), stdin_generator as child_stdin:
187 ), stdin_generator as child_stdin:
188 proc = subprocess.Popen(
188 proc = subprocess.Popen(
189 [sys.executable] + python_args + ['-c', child_script],
189 [sys.executable] + python_args + ['-c', child_script],
190 stdin=child_stdin,
190 stdin=child_stdin,
191 stdout=child_stream if stream == 'stdout' else None,
191 stdout=child_stream if stream == 'stdout' else None,
192 stderr=child_stream if stream == 'stderr' else None,
192 stderr=child_stream if stream == 'stderr' else None,
193 )
193 )
194 try:
194 try:
195 os.close(child_stream)
195 os.close(child_stream)
196 if stream_receiver is not None:
196 if stream_receiver is not None:
197 check_output(stream_receiver, proc)
197 check_output(stream_receiver, proc)
198 except: # re-raises
198 except: # re-raises
199 proc.terminate()
199 proc.terminate()
200 raise
200 raise
201 finally:
201 finally:
202 retcode = proc.wait()
202 retcode = proc.wait()
203 self.assertEqual(retcode, 0)
203 self.assertEqual(retcode, 0)
204 if post_child_check is not None:
204 if post_child_check is not None:
205 post_child_check()
205 post_child_check()
206
206
207 def _test_buffering(
207 def _test_buffering(
208 self, stream, rwpair_generator, expected_output, python_args=[]
208 self, stream, rwpair_generator, expected_output, python_args=[]
209 ):
209 ):
210 def check_output(stream_receiver, proc):
210 def check_output(stream_receiver, proc):
211 self.assertEqual(_readall(stream_receiver, 1024), expected_output)
211 self.assertEqual(_readall(stream_receiver, 1024), expected_output)
212
212
213 self._test(
213 self._test(
214 TEST_BUFFERING_CHILD_SCRIPT.format(stream=stream),
214 TEST_BUFFERING_CHILD_SCRIPT.format(stream=stream),
215 stream,
215 stream,
216 rwpair_generator,
216 rwpair_generator,
217 check_output,
217 check_output,
218 python_args,
218 python_args,
219 )
219 )
220
220
221 def test_buffering_stdout_devnull(self):
221 def test_buffering_stdout_devnull(self):
222 self._test_buffering('stdout', _devnull, None)
222 self._test_buffering('stdout', _devnull, None)
223
223
224 def test_buffering_stdout_pipes(self):
224 def test_buffering_stdout_pipes(self):
225 self._test_buffering('stdout', _pipes, FULLY_BUFFERED)
225 self._test_buffering('stdout', _pipes, FULLY_BUFFERED)
226
226
227 def test_buffering_stdout_ptys(self):
227 def test_buffering_stdout_ptys(self):
228 self._test_buffering('stdout', _ptys, LINE_BUFFERED)
228 self._test_buffering('stdout', _ptys, LINE_BUFFERED)
229
229
230 def test_buffering_stdout_devnull_unbuffered(self):
230 def test_buffering_stdout_devnull_unbuffered(self):
231 self._test_buffering('stdout', _devnull, None, python_args=['-u'])
231 self._test_buffering('stdout', _devnull, None, python_args=['-u'])
232
232
233 def test_buffering_stdout_pipes_unbuffered(self):
233 def test_buffering_stdout_pipes_unbuffered(self):
234 self._test_buffering('stdout', _pipes, UNBUFFERED, python_args=['-u'])
234 self._test_buffering('stdout', _pipes, UNBUFFERED, python_args=['-u'])
235
235
236 def test_buffering_stdout_ptys_unbuffered(self):
236 def test_buffering_stdout_ptys_unbuffered(self):
237 self._test_buffering('stdout', _ptys, UNBUFFERED, python_args=['-u'])
237 self._test_buffering('stdout', _ptys, UNBUFFERED, python_args=['-u'])
238
238
239 if not pycompat.ispy3 and not pycompat.iswindows:
239 if not pycompat.ispy3 and not pycompat.iswindows:
240 # On Python 2 on non-Windows, we manually open stdout in line-buffered
240 # On Python 2 on non-Windows, we manually open stdout in line-buffered
241 # mode if connected to a TTY. We should check if Python was configured
241 # mode if connected to a TTY. We should check if Python was configured
242 # to use unbuffered stdout, but it's hard to do that.
242 # to use unbuffered stdout, but it's hard to do that.
243 test_buffering_stdout_ptys_unbuffered = unittest.expectedFailure(
243 test_buffering_stdout_ptys_unbuffered = unittest.expectedFailure(
244 test_buffering_stdout_ptys_unbuffered
244 test_buffering_stdout_ptys_unbuffered
245 )
245 )
246
246
247 def _test_large_write(self, stream, rwpair_generator, python_args=[]):
247 def _test_large_write(self, stream, rwpair_generator, python_args=[]):
248 if not pycompat.ispy3 and pycompat.isdarwin:
248 if not pycompat.ispy3 and pycompat.isdarwin:
249 # Python 2 doesn't always retry on EINTR, but the libc might retry.
249 # Python 2 doesn't always retry on EINTR, but the libc might retry.
250 # So far, it was observed only on macOS that EINTR is raised at the
250 # So far, it was observed only on macOS that EINTR is raised at the
251 # Python level. As Python 2 support will be dropped soon-ish, we
251 # Python level. As Python 2 support will be dropped soon-ish, we
252 # won't attempt to fix it.
252 # won't attempt to fix it.
253 raise unittest.SkipTest("raises EINTR on macOS")
253 raise unittest.SkipTest("raises EINTR on macOS")
254
254
255 def check_output(stream_receiver, proc):
255 def check_output(stream_receiver, proc):
256 if not pycompat.iswindows:
256 if not pycompat.iswindows:
257 # On Unix, we can provoke a partial write() by interrupting it
257 # On Unix, we can provoke a partial write() by interrupting it
258 # by a signal handler as soon as a bit of data was written.
258 # by a signal handler as soon as a bit of data was written.
259 # We test that write() is called until all data is written.
259 # We test that write() is called until all data is written.
260 buf = [os.read(stream_receiver, 1)]
260 buf = [os.read(stream_receiver, 1)]
261 proc.send_signal(signal.SIGINT)
261 proc.send_signal(signal.SIGINT)
262 else:
262 else:
263 # On Windows, there doesn't seem to be a way to cause partial
263 # On Windows, there doesn't seem to be a way to cause partial
264 # writes.
264 # writes.
265 buf = []
265 buf = []
266 self.assertEqual(
266 self.assertEqual(
267 _readall(stream_receiver, 131072, buf), b'x' * 1048576
267 _readall(stream_receiver, 131072, buf), b'x' * 1048576
268 )
268 )
269
269
270 def post_child_check():
270 def post_child_check():
271 write_result_str = write_result_f.read()
271 write_result_str = write_result_f.read()
272 if pycompat.ispy3:
272 if pycompat.ispy3:
273 # On Python 3, we test that the correct number of bytes is
273 # On Python 3, we test that the correct number of bytes is
274 # claimed to have been written.
274 # claimed to have been written.
275 expected_write_result_str = '1048576'
275 expected_write_result_str = '1048576'
276 else:
276 else:
277 # On Python 2, we only check that the large write does not
277 # On Python 2, we only check that the large write does not
278 # crash.
278 # crash.
279 expected_write_result_str = 'None'
279 expected_write_result_str = 'None'
280 self.assertEqual(write_result_str, expected_write_result_str)
280 self.assertEqual(write_result_str, expected_write_result_str)
281
281
282 with tempfile.NamedTemporaryFile('r') as write_result_f:
282 with tempfile.NamedTemporaryFile('r') as write_result_f:
283 self._test(
283 self._test(
284 TEST_LARGE_WRITE_CHILD_SCRIPT.format(
284 TEST_LARGE_WRITE_CHILD_SCRIPT.format(
285 stream=stream, write_result_fn=write_result_f.name
285 stream=stream, write_result_fn=write_result_f.name
286 ),
286 ),
287 stream,
287 stream,
288 rwpair_generator,
288 rwpair_generator,
289 check_output,
289 check_output,
290 python_args,
290 python_args,
291 post_child_check=post_child_check,
291 post_child_check=post_child_check,
292 )
292 )
293
293
294 def test_large_write_stdout_devnull(self):
294 def test_large_write_stdout_devnull(self):
295 self._test_large_write('stdout', _devnull)
295 self._test_large_write('stdout', _devnull)
296
296
297 def test_large_write_stdout_pipes(self):
297 def test_large_write_stdout_pipes(self):
298 self._test_large_write('stdout', _pipes)
298 self._test_large_write('stdout', _pipes)
299
299
300 def test_large_write_stdout_ptys(self):
300 def test_large_write_stdout_ptys(self):
301 self._test_large_write('stdout', _ptys)
301 self._test_large_write('stdout', _ptys)
302
302
303 def test_large_write_stdout_devnull_unbuffered(self):
303 def test_large_write_stdout_devnull_unbuffered(self):
304 self._test_large_write('stdout', _devnull, python_args=['-u'])
304 self._test_large_write('stdout', _devnull, python_args=['-u'])
305
305
306 def test_large_write_stdout_pipes_unbuffered(self):
306 def test_large_write_stdout_pipes_unbuffered(self):
307 self._test_large_write('stdout', _pipes, python_args=['-u'])
307 self._test_large_write('stdout', _pipes, python_args=['-u'])
308
308
309 def test_large_write_stdout_ptys_unbuffered(self):
309 def test_large_write_stdout_ptys_unbuffered(self):
310 self._test_large_write('stdout', _ptys, python_args=['-u'])
310 self._test_large_write('stdout', _ptys, python_args=['-u'])
311
311
312 def test_large_write_stderr_devnull(self):
312 def test_large_write_stderr_devnull(self):
313 self._test_large_write('stderr', _devnull)
313 self._test_large_write('stderr', _devnull)
314
314
315 def test_large_write_stderr_pipes(self):
315 def test_large_write_stderr_pipes(self):
316 self._test_large_write('stderr', _pipes)
316 self._test_large_write('stderr', _pipes)
317
317
318 def test_large_write_stderr_ptys(self):
318 def test_large_write_stderr_ptys(self):
319 self._test_large_write('stderr', _ptys)
319 self._test_large_write('stderr', _ptys)
320
320
321 def test_large_write_stderr_devnull_unbuffered(self):
321 def test_large_write_stderr_devnull_unbuffered(self):
322 self._test_large_write('stderr', _devnull, python_args=['-u'])
322 self._test_large_write('stderr', _devnull, python_args=['-u'])
323
323
324 def test_large_write_stderr_pipes_unbuffered(self):
324 def test_large_write_stderr_pipes_unbuffered(self):
325 self._test_large_write('stderr', _pipes, python_args=['-u'])
325 self._test_large_write('stderr', _pipes, python_args=['-u'])
326
326
327 def test_large_write_stderr_ptys_unbuffered(self):
327 def test_large_write_stderr_ptys_unbuffered(self):
328 self._test_large_write('stderr', _ptys, python_args=['-u'])
328 self._test_large_write('stderr', _ptys, python_args=['-u'])
329
329
330 def _test_broken_pipe(self, stream):
330 def _test_broken_pipe(self, stream):
331 assert stream in ('stdout', 'stderr')
331 assert stream in ('stdout', 'stderr')
332
332
333 def check_output(stream_receiver, proc):
333 def check_output(stream_receiver, proc):
334 os.close(stream_receiver)
334 os.close(stream_receiver)
335 proc.stdin.write(b'x')
335 proc.stdin.write(b'x')
336 proc.stdin.close()
336 proc.stdin.close()
337
337
338 def post_child_check():
338 def post_child_check():
339 err = util.pickle.load(err_f)
339 err = util.pickle.load(err_f)
340 self.assertEqual(err.errno, errno.EPIPE)
340 self.assertEqual(err.errno, errno.EPIPE)
341 self.assertEqual(err.strerror, "Broken pipe")
341 self.assertEqual(err.strerror, "Broken pipe")
342
342
343 with tempfile.NamedTemporaryFile('rb') as err_f:
343 with tempfile.NamedTemporaryFile('rb') as err_f:
344 self._test(
344 self._test(
345 TEST_BROKEN_PIPE_CHILD_SCRIPT.format(
345 TEST_BROKEN_PIPE_CHILD_SCRIPT.format(
346 stream=stream, err_fn=err_f.name
346 stream=stream, err_fn=err_f.name
347 ),
347 ),
348 stream,
348 stream,
349 _pipes,
349 _pipes,
350 check_output,
350 check_output,
351 post_child_check=post_child_check,
351 post_child_check=post_child_check,
352 stdin_generator=util.nullcontextmanager(subprocess.PIPE),
352 stdin_generator=util.nullcontextmanager(subprocess.PIPE),
353 )
353 )
354
354
355 def test_broken_pipe_stdout(self):
355 def test_broken_pipe_stdout(self):
356 self._test_broken_pipe('stdout')
356 self._test_broken_pipe('stdout')
357
357
358 def test_broken_pipe_stderr(self):
358 def test_broken_pipe_stderr(self):
359 self._test_broken_pipe('stderr')
359 self._test_broken_pipe('stderr')
360
360
361
361
362 if __name__ == '__main__':
362 if __name__ == '__main__':
363 import silenttestrunner
363 import silenttestrunner
364
364
365 silenttestrunner.main(__name__)
365 silenttestrunner.main(__name__)
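
The three expected interleavings in test-stdio.py (UNBUFFERED, LINE_BUFFERED, FULLY_BUFFERED) boil down to when the stream's userspace buffer is flushed relative to direct os.write() calls on the same file descriptor. A minimal self-contained illustration of the fully buffered case, assuming Python 3 and a plain pipe (this is not part of the test itself):

import os

r, w = os.pipe()
stream = open(w, 'wb', buffering=8192, closefd=False)  # fully buffered writer

stream.write(b'aaa')              # stays in the userspace buffer
os.write(w, b'[written aaa]')     # bypasses the buffer, hits the pipe immediately
stream.write(b'bbb\n')            # still buffered: a BufferedWriter ignores newlines
os.write(w, b'[written bbb\\n]')
stream.close()                    # flush happens only here
os.close(w)

print(os.read(r, 1024))           # b'[written aaa][written bbb\\n]aaabbb\n'
os.close(r)

The test itself instead exercises procutil.stdout/stderr as set up by dispatch.initstdio(), where the choice of devnull, pipe, or PTY (and python -u) determines which of the three interleavings appears.
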
@@ -1,223 +1,223
1 #!/usr/bin/env python3
1 #!/usr/bin/env python
2
2
3 from __future__ import absolute_import, print_function
3 from __future__ import absolute_import, print_function
4
4
5 __doc__ = """Tiny HTTP Proxy.
5 __doc__ = """Tiny HTTP Proxy.
6
6
7 This module implements GET, HEAD, POST, PUT and DELETE methods
7 This module implements GET, HEAD, POST, PUT and DELETE methods
8 on BaseHTTPServer, and behaves as an HTTP proxy. The CONNECT
8 on BaseHTTPServer, and behaves as an HTTP proxy. The CONNECT
9 method is also implemented experimentally, but has not been
9 method is also implemented experimentally, but has not been
10 tested yet.
10 tested yet.
11
11
12 Any help will be greatly appreciated. SUZUKI Hisao
12 Any help will be greatly appreciated. SUZUKI Hisao
13 """
13 """
14
14
15 __version__ = "0.2.1"
15 __version__ = "0.2.1"
16
16
17 import optparse
17 import optparse
18 import os
18 import os
19 import select
19 import select
20 import socket
20 import socket
21 import sys
21 import sys
22
22
23 from mercurial import (
23 from mercurial import (
24 pycompat,
24 pycompat,
25 util,
25 util,
26 )
26 )
27
27
28 httpserver = util.httpserver
28 httpserver = util.httpserver
29 socketserver = util.socketserver
29 socketserver = util.socketserver
30 urlreq = util.urlreq
30 urlreq = util.urlreq
31
31
32 if os.environ.get('HGIPV6', '0') == '1':
32 if os.environ.get('HGIPV6', '0') == '1':
33 family = socket.AF_INET6
33 family = socket.AF_INET6
34 else:
34 else:
35 family = socket.AF_INET
35 family = socket.AF_INET
36
36
37
37
38 class ProxyHandler(httpserver.basehttprequesthandler):
38 class ProxyHandler(httpserver.basehttprequesthandler):
39 __base = httpserver.basehttprequesthandler
39 __base = httpserver.basehttprequesthandler
40 __base_handle = __base.handle
40 __base_handle = __base.handle
41
41
42 server_version = "TinyHTTPProxy/" + __version__
42 server_version = "TinyHTTPProxy/" + __version__
43 rbufsize = 0 # self.rfile Be unbuffered
43 rbufsize = 0 # self.rfile Be unbuffered
44
44
45 def handle(self):
45 def handle(self):
46 (ip, port) = self.client_address
46 (ip, port) = self.client_address
47 allowed = getattr(self, 'allowed_clients', None)
47 allowed = getattr(self, 'allowed_clients', None)
48 if allowed is not None and ip not in allowed:
48 if allowed is not None and ip not in allowed:
49 self.raw_requestline = self.rfile.readline()
49 self.raw_requestline = self.rfile.readline()
50 if self.parse_request():
50 if self.parse_request():
51 self.send_error(403)
51 self.send_error(403)
52 else:
52 else:
53 self.__base_handle()
53 self.__base_handle()
54
54
55 def log_request(self, code='-', size='-'):
55 def log_request(self, code='-', size='-'):
56 xheaders = [h for h in self.headers.items() if h[0].startswith('x-')]
56 xheaders = [h for h in self.headers.items() if h[0].startswith('x-')]
57 self.log_message(
57 self.log_message(
58 '"%s" %s %s%s',
58 '"%s" %s %s%s',
59 self.requestline,
59 self.requestline,
60 str(code),
60 str(code),
61 str(size),
61 str(size),
62 ''.join([' %s:%s' % h for h in sorted(xheaders)]),
62 ''.join([' %s:%s' % h for h in sorted(xheaders)]),
63 )
63 )
64 # Flush for Windows, so output isn't lost on TerminateProcess()
64 # Flush for Windows, so output isn't lost on TerminateProcess()
65 sys.stdout.flush()
65 sys.stdout.flush()
66 sys.stderr.flush()
66 sys.stderr.flush()
67
67
68 def _connect_to(self, netloc, soc):
68 def _connect_to(self, netloc, soc):
69 i = netloc.find(':')
69 i = netloc.find(':')
70 if i >= 0:
70 if i >= 0:
71 host_port = netloc[:i], int(netloc[i + 1 :])
71 host_port = netloc[:i], int(netloc[i + 1 :])
72 else:
72 else:
73 host_port = netloc, 80
73 host_port = netloc, 80
74 print("\t" "connect to %s:%d" % host_port)
74 print("\t" "connect to %s:%d" % host_port)
75 try:
75 try:
76 soc.connect(host_port)
76 soc.connect(host_port)
77 except socket.error as arg:
77 except socket.error as arg:
78 try:
78 try:
79 msg = arg[1]
79 msg = arg[1]
80 except (IndexError, TypeError):
80 except (IndexError, TypeError):
81 msg = arg
81 msg = arg
82 self.send_error(404, msg)
82 self.send_error(404, msg)
83 return 0
83 return 0
84 return 1
84 return 1
85
85
86 def do_CONNECT(self):
86 def do_CONNECT(self):
87 soc = socket.socket(family, socket.SOCK_STREAM)
87 soc = socket.socket(family, socket.SOCK_STREAM)
88 try:
88 try:
89 if self._connect_to(self.path, soc):
89 if self._connect_to(self.path, soc):
90 self.log_request(200)
90 self.log_request(200)
91 self.wfile.write(
91 self.wfile.write(
92 pycompat.bytestr(self.protocol_version)
92 pycompat.bytestr(self.protocol_version)
93 + b" 200 Connection established\r\n"
93 + b" 200 Connection established\r\n"
94 )
94 )
95 self.wfile.write(
95 self.wfile.write(
96 b"Proxy-agent: %s\r\n"
96 b"Proxy-agent: %s\r\n"
97 % pycompat.bytestr(self.version_string())
97 % pycompat.bytestr(self.version_string())
98 )
98 )
99 self.wfile.write(b"\r\n")
99 self.wfile.write(b"\r\n")
100 self._read_write(soc, 300)
100 self._read_write(soc, 300)
101 finally:
101 finally:
102 print("\t" "bye")
102 print("\t" "bye")
103 soc.close()
103 soc.close()
104 self.connection.close()
104 self.connection.close()
105
105
106 def do_GET(self):
106 def do_GET(self):
107 (scm, netloc, path, params, query, fragment) = urlreq.urlparse(
107 (scm, netloc, path, params, query, fragment) = urlreq.urlparse(
108 self.path, 'http'
108 self.path, 'http'
109 )
109 )
110 if scm != 'http' or fragment or not netloc:
110 if scm != 'http' or fragment or not netloc:
111 self.send_error(400, "bad url %s" % self.path)
111 self.send_error(400, "bad url %s" % self.path)
112 return
112 return
113 soc = socket.socket(family, socket.SOCK_STREAM)
113 soc = socket.socket(family, socket.SOCK_STREAM)
114 try:
114 try:
115 if self._connect_to(netloc, soc):
115 if self._connect_to(netloc, soc):
116 self.log_request()
116 self.log_request()
117 url = urlreq.urlunparse(('', '', path, params, query, ''))
117 url = urlreq.urlunparse(('', '', path, params, query, ''))
118 soc.send(
118 soc.send(
119 b"%s %s %s\r\n"
119 b"%s %s %s\r\n"
120 % (
120 % (
121 pycompat.bytestr(self.command),
121 pycompat.bytestr(self.command),
122 pycompat.bytestr(url),
122 pycompat.bytestr(url),
123 pycompat.bytestr(self.request_version),
123 pycompat.bytestr(self.request_version),
124 )
124 )
125 )
125 )
126 self.headers['Connection'] = 'close'
126 self.headers['Connection'] = 'close'
127 del self.headers['Proxy-Connection']
127 del self.headers['Proxy-Connection']
128 for key, val in self.headers.items():
128 for key, val in self.headers.items():
129 soc.send(
129 soc.send(
130 b"%s: %s\r\n"
130 b"%s: %s\r\n"
131 % (pycompat.bytestr(key), pycompat.bytestr(val))
131 % (pycompat.bytestr(key), pycompat.bytestr(val))
132 )
132 )
133 soc.send(b"\r\n")
133 soc.send(b"\r\n")
134 self._read_write(soc)
134 self._read_write(soc)
135 finally:
135 finally:
136 print("\t" "bye")
136 print("\t" "bye")
137 soc.close()
137 soc.close()
138 self.connection.close()
138 self.connection.close()
139
139
140 def _read_write(self, soc, max_idling=20):
140 def _read_write(self, soc, max_idling=20):
141 iw = [self.connection, soc]
141 iw = [self.connection, soc]
142 ow = []
142 ow = []
143 count = 0
143 count = 0
144 while True:
144 while True:
145 count += 1
145 count += 1
146 (ins, _, exs) = select.select(iw, ow, iw, 3)
146 (ins, _, exs) = select.select(iw, ow, iw, 3)
147 if exs:
147 if exs:
148 break
148 break
149 if ins:
149 if ins:
150 for i in ins:
150 for i in ins:
151 if i is soc:
151 if i is soc:
152 out = self.connection
152 out = self.connection
153 else:
153 else:
154 out = soc
154 out = soc
155 try:
155 try:
156 data = i.recv(8192)
156 data = i.recv(8192)
157 except socket.error:
157 except socket.error:
158 break
158 break
159 if data:
159 if data:
160 out.send(data)
160 out.send(data)
161 count = 0
161 count = 0
162 else:
162 else:
163 print("\t" "idle", count)
163 print("\t" "idle", count)
164 if count == max_idling:
164 if count == max_idling:
165 break
165 break
166
166
167 do_HEAD = do_GET
167 do_HEAD = do_GET
168 do_POST = do_GET
168 do_POST = do_GET
169 do_PUT = do_GET
169 do_PUT = do_GET
170 do_DELETE = do_GET
170 do_DELETE = do_GET
171
171
172
172
173 class ThreadingHTTPServer(socketserver.ThreadingMixIn, httpserver.httpserver):
173 class ThreadingHTTPServer(socketserver.ThreadingMixIn, httpserver.httpserver):
174 def __init__(self, *args, **kwargs):
174 def __init__(self, *args, **kwargs):
175 httpserver.httpserver.__init__(self, *args, **kwargs)
175 httpserver.httpserver.__init__(self, *args, **kwargs)
176 a = open("proxy.pid", "w")
176 a = open("proxy.pid", "w")
177 a.write(str(os.getpid()) + "\n")
177 a.write(str(os.getpid()) + "\n")
178 a.close()
178 a.close()
179
179
180
180
181 def runserver(port=8000, bind=""):
181 def runserver(port=8000, bind=""):
182 server_address = (bind, port)
182 server_address = (bind, port)
183 ProxyHandler.protocol_version = "HTTP/1.0"
183 ProxyHandler.protocol_version = "HTTP/1.0"
184 httpd = ThreadingHTTPServer(server_address, ProxyHandler)
184 httpd = ThreadingHTTPServer(server_address, ProxyHandler)
185 sa = httpd.socket.getsockname()
185 sa = httpd.socket.getsockname()
186 print("Serving HTTP on", sa[0], "port", sa[1], "...")
186 print("Serving HTTP on", sa[0], "port", sa[1], "...")
187 try:
187 try:
188 httpd.serve_forever()
188 httpd.serve_forever()
189 except KeyboardInterrupt:
189 except KeyboardInterrupt:
190 print("\nKeyboard interrupt received, exiting.")
190 print("\nKeyboard interrupt received, exiting.")
191 httpd.server_close()
191 httpd.server_close()
192 sys.exit(0)
192 sys.exit(0)
193
193
194
194
195 if __name__ == '__main__':
195 if __name__ == '__main__':
196 argv = sys.argv
196 argv = sys.argv
197 if argv[1:] and argv[1] in ('-h', '--help'):
197 if argv[1:] and argv[1] in ('-h', '--help'):
198 print(argv[0], "[port [allowed_client_name ...]]")
198 print(argv[0], "[port [allowed_client_name ...]]")
199 else:
199 else:
200 if argv[2:]:
200 if argv[2:]:
201 allowed = []
201 allowed = []
202 for name in argv[2:]:
202 for name in argv[2:]:
203 client = socket.gethostbyname(name)
203 client = socket.gethostbyname(name)
204 allowed.append(client)
204 allowed.append(client)
205 print("Accept: %s (%s)" % (client, name))
205 print("Accept: %s (%s)" % (client, name))
206 ProxyHandler.allowed_clients = allowed
206 ProxyHandler.allowed_clients = allowed
207 del argv[2:]
207 del argv[2:]
208 else:
208 else:
209 print("Any clients will be served...")
209 print("Any clients will be served...")
210
210
211 parser = optparse.OptionParser()
211 parser = optparse.OptionParser()
212 parser.add_option(
212 parser.add_option(
213 '-b',
213 '-b',
214 '--bind',
214 '--bind',
215 metavar='ADDRESS',
215 metavar='ADDRESS',
216 help='Specify alternate bind address ' '[default: all interfaces]',
216 help='Specify alternate bind address ' '[default: all interfaces]',
217 default='',
217 default='',
218 )
218 )
219 (options, args) = parser.parse_args()
219 (options, args) = parser.parse_args()
220 port = 8000
220 port = 8000
221 if len(args) == 1:
221 if len(args) == 1:
222 port = int(args[0])
222 port = int(args[0])
223 runserver(port, options.bind)
223 runserver(port, options.bind)
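
For context on how the proxy above gets used: it only needs to forward plain HTTP, so a client can be pointed at it with nothing but the standard library. A hedged Python 3 usage sketch, not taken from the test suite (the port and the target URL are assumptions):

import urllib.request

# Assume tinyproxy.py is already listening on localhost:8000.
proxy = urllib.request.ProxyHandler({'http': 'http://localhost:8000'})
opener = urllib.request.build_opener(proxy)

with opener.open('http://example.com/', timeout=10) as resp:
    print(resp.status, len(resp.read()))
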