run-tests: stop writing a `python3` symlink pointing to python2...
marmoute - r48294:23f5ed6d default
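All the helper scripts in this change move from a hard-coded `#!/usr/bin/env python3` shebang back to plain `#!/usr/bin/env python`, so they run under whatever interpreter the test harness exposes on PATH under that name (run-tests.py itself keeps its python3 shebang unchanged). The sketch below illustrates the general idea behind the commit; the helper name and layout are hypothetical, not the harness's actual code: publish the suite's interpreter as `python`, and only also publish it as `python3` when it really is Python 3, so a `python3` entry can never resolve to a Python 2 binary.

import os
import sys


def expose_test_interpreter(bindir):
    """Make the interpreter running the test suite reachable on PATH.

    It is always published as 'python'; the 'python3' name is only added
    when the interpreter actually is Python 3.
    """
    if not os.path.isdir(bindir):
        os.makedirs(bindir)
    names = ['python']
    if sys.version_info[0] >= 3:
        names.append('python3')  # never let 'python3' point at Python 2
    for name in names:
        link = os.path.join(bindir, name)
        if not os.path.exists(link):
            # symlinks work on POSIX; a real harness would need a copy or
            # wrapper script on platforms without symlink support
            os.symlink(sys.executable, link)
    os.environ['PATH'] = bindir + os.pathsep + os.environ.get('PATH', '')

With such a directory early on PATH, a script whose first line is `#!/usr/bin/env python` runs under the same interpreter as the test suite, whichever major version that happens to be.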
@@ -1,113 +1,113 @@
1 -#!/usr/bin/env python3
1 +#!/usr/bin/env python
2 2
3 3 from __future__ import absolute_import
4 4
5 5 """
6 6 Small and dumb HTTP server for use in tests.
7 7 """
8 8
9 9 import optparse
10 10 import os
11 11 import signal
12 12 import socket
13 13 import sys
14 14
15 15 from mercurial import (
16 16 encoding,
17 17 pycompat,
18 18 server,
19 19 util,
20 20 )
21 21
22 22 httpserver = util.httpserver
23 23 OptionParser = optparse.OptionParser
24 24
25 25 if os.environ.get('HGIPV6', '0') == '1':
26 26
27 27 class simplehttpserver(httpserver.httpserver):
28 28 address_family = socket.AF_INET6
29 29
30 30
31 31 else:
32 32 simplehttpserver = httpserver.httpserver
33 33
34 34
35 35 class _httprequesthandler(httpserver.simplehttprequesthandler):
36 36 def log_message(self, format, *args):
37 37 httpserver.simplehttprequesthandler.log_message(self, format, *args)
38 38 sys.stderr.flush()
39 39
40 40
41 41 class simplehttpservice(object):
42 42 def __init__(self, host, port):
43 43 self.address = (host, port)
44 44
45 45 def init(self):
46 46 self.httpd = simplehttpserver(self.address, _httprequesthandler)
47 47
48 48 def run(self):
49 49 self.httpd.serve_forever()
50 50
51 51
52 52 if __name__ == '__main__':
53 53 parser = OptionParser()
54 54 parser.add_option(
55 55 '-p',
56 56 '--port',
57 57 dest='port',
58 58 type='int',
59 59 default=8000,
60 60 help='TCP port to listen on',
61 61 metavar='PORT',
62 62 )
63 63 parser.add_option(
64 64 '-H',
65 65 '--host',
66 66 dest='host',
67 67 default='localhost',
68 68 help='hostname or IP to listen on',
69 69 metavar='HOST',
70 70 )
71 71 parser.add_option('--logfile', help='file name of access/error log')
72 72 parser.add_option(
73 73 '--pid',
74 74 dest='pid',
75 75 help='file name where the PID of the server is stored',
76 76 )
77 77 parser.add_option(
78 78 '-f',
79 79 '--foreground',
80 80 dest='foreground',
81 81 action='store_true',
82 82 help='do not start the HTTP server in the background',
83 83 )
84 84 parser.add_option('--daemon-postexec', action='append')
85 85
86 86 (options, args) = parser.parse_args()
87 87
88 88 signal.signal(signal.SIGTERM, lambda x, y: sys.exit(0))
89 89
90 90 if options.foreground and options.logfile:
91 91 parser.error(
92 92 "options --logfile and --foreground are mutually " "exclusive"
93 93 )
94 94 if options.foreground and options.pid:
95 95 parser.error("options --pid and --foreground are mutually exclusive")
96 96
97 97 opts = {
98 98 b'pid_file': options.pid,
99 99 b'daemon': not options.foreground,
100 100 b'daemon_postexec': pycompat.rapply(
101 101 encoding.strtolocal, options.daemon_postexec
102 102 ),
103 103 }
104 104 service = simplehttpservice(options.host, options.port)
105 105 runargs = [sys.executable, __file__] + sys.argv[1:]
106 106 runargs = [pycompat.fsencode(a) for a in runargs]
107 107 server.runservice(
108 108 opts,
109 109 initfn=service.init,
110 110 runfn=service.run,
111 111 logfile=options.logfile,
112 112 runargs=runargs,
113 113 )
@@ -1,121 +1,121 @@
1 -#!/usr/bin/env python3
1 +#!/usr/bin/env python
2 2
3 3 """dummy SMTP server for use in tests"""
4 4
5 5 from __future__ import absolute_import
6 6
7 7 import asyncore
8 8 import optparse
9 9 import smtpd
10 10 import ssl
11 11 import sys
12 12 import traceback
13 13
14 14 from mercurial import (
15 15 pycompat,
16 16 server,
17 17 sslutil,
18 18 ui as uimod,
19 19 )
20 20
21 21
22 22 def log(msg):
23 23 sys.stdout.write(msg)
24 24 sys.stdout.flush()
25 25
26 26
27 27 class dummysmtpserver(smtpd.SMTPServer):
28 28 def __init__(self, localaddr):
29 29 smtpd.SMTPServer.__init__(self, localaddr, remoteaddr=None)
30 30
31 31 def process_message(self, peer, mailfrom, rcpttos, data, **kwargs):
32 32 log('%s from=%s to=%s\n' % (peer[0], mailfrom, ', '.join(rcpttos)))
33 33
34 34 def handle_error(self):
35 35 # On Windows, a bad SSL connection sometimes generates a WSAECONNRESET.
36 36 # The default handler will shutdown this server, and then both the
37 37 # current connection and subsequent ones fail on the client side with
38 38 # "No connection could be made because the target machine actively
39 39 # refused it". If we eat the error, then the client properly aborts in
40 40 # the expected way, and the server is available for subsequent requests.
41 41 traceback.print_exc()
42 42
43 43
44 44 class dummysmtpsecureserver(dummysmtpserver):
45 45 def __init__(self, localaddr, certfile):
46 46 dummysmtpserver.__init__(self, localaddr)
47 47 self._certfile = certfile
48 48
49 49 def handle_accept(self):
50 50 pair = self.accept()
51 51 if not pair:
52 52 return
53 53 conn, addr = pair
54 54 ui = uimod.ui.load()
55 55 try:
56 56 # wrap_socket() would block, but we don't care
57 57 conn = sslutil.wrapserversocket(conn, ui, certfile=self._certfile)
58 58 except ssl.SSLError:
59 59 log('%s ssl error\n' % addr[0])
60 60 conn.close()
61 61 return
62 62 smtpd.SMTPChannel(self, conn, addr)
63 63
64 64
65 65 def run():
66 66 try:
67 67 asyncore.loop()
68 68 except KeyboardInterrupt:
69 69 pass
70 70
71 71
72 72 def _encodestrsonly(v):
73 73 if isinstance(v, type(u'')):
74 74 return v.encode('ascii')
75 75 return v
76 76
77 77
78 78 def bytesvars(obj):
79 79 unidict = vars(obj)
80 80 bd = {k.encode('ascii'): _encodestrsonly(v) for k, v in unidict.items()}
81 81 if bd[b'daemon_postexec'] is not None:
82 82 bd[b'daemon_postexec'] = [
83 83 _encodestrsonly(v) for v in bd[b'daemon_postexec']
84 84 ]
85 85 return bd
86 86
87 87
88 88 def main():
89 89 op = optparse.OptionParser()
90 90 op.add_option('-d', '--daemon', action='store_true')
91 91 op.add_option('--daemon-postexec', action='append')
92 92 op.add_option('-p', '--port', type=int, default=8025)
93 93 op.add_option('-a', '--address', default='localhost')
94 94 op.add_option('--pid-file', metavar='FILE')
95 95 op.add_option('--tls', choices=['none', 'smtps'], default='none')
96 96 op.add_option('--certificate', metavar='FILE')
97 97
98 98 opts, args = op.parse_args()
99 99 if opts.tls == 'smtps' and not opts.certificate:
100 100 op.error('--certificate must be specified')
101 101
102 102 addr = (opts.address, opts.port)
103 103
104 104 def init():
105 105 if opts.tls == 'none':
106 106 dummysmtpserver(addr)
107 107 else:
108 108 dummysmtpsecureserver(addr, opts.certificate)
109 109 log('listening at %s:%d\n' % addr)
110 110
111 111 server.runservice(
112 112 bytesvars(opts),
113 113 initfn=init,
114 114 runfn=run,
115 115 runargs=[pycompat.sysexecutable, pycompat.fsencode(__file__)]
116 116 + pycompat.sysargv[1:],
117 117 )
118 118
119 119
120 120 if __name__ == '__main__':
121 121 main()
@@ -1,124 +1,124 @@
1 -#!/usr/bin/env python3
1 +#!/usr/bin/env python
2 2
3 3 """This does HTTP GET requests given a host:port and path and returns
4 4 a subset of the headers plus the body of the result."""
5 5
6 6 from __future__ import absolute_import
7 7
8 8 import argparse
9 9 import json
10 10 import os
11 11 import sys
12 12
13 13 from mercurial import (
14 14 pycompat,
15 15 util,
16 16 )
17 17
18 18 httplib = util.httplib
19 19
20 20 try:
21 21 import msvcrt
22 22
23 23 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
24 24 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
25 25 except ImportError:
26 26 pass
27 27
28 28 stdout = getattr(sys.stdout, 'buffer', sys.stdout)
29 29
30 30 parser = argparse.ArgumentParser()
31 31 parser.add_argument('--twice', action='store_true')
32 32 parser.add_argument('--headeronly', action='store_true')
33 33 parser.add_argument('--json', action='store_true')
34 34 parser.add_argument('--hgproto')
35 35 parser.add_argument(
36 36 '--requestheader',
37 37 nargs='*',
38 38 default=[],
39 39 help='Send an additional HTTP request header. Argument '
40 40 'value is <header>=<value>',
41 41 )
42 42 parser.add_argument('--bodyfile', help='Write HTTP response body to a file')
43 43 parser.add_argument('host')
44 44 parser.add_argument('path')
45 45 parser.add_argument('show', nargs='*')
46 46
47 47 args = parser.parse_args()
48 48
49 49 twice = args.twice
50 50 headeronly = args.headeronly
51 51 formatjson = args.json
52 52 hgproto = args.hgproto
53 53 requestheaders = args.requestheader
54 54
55 55 tag = None
56 56
57 57
58 58 def request(host, path, show):
59 59 assert not path.startswith('/'), path
60 60 global tag
61 61 headers = {}
62 62 if tag:
63 63 headers['If-None-Match'] = tag
64 64 if hgproto:
65 65 headers['X-HgProto-1'] = hgproto
66 66
67 67 for header in requestheaders:
68 68 key, value = header.split('=', 1)
69 69 headers[key] = value
70 70
71 71 conn = httplib.HTTPConnection(host)
72 72 conn.request("GET", '/' + path, None, headers)
73 73 response = conn.getresponse()
74 74 stdout.write(
75 75 b'%d %s\n' % (response.status, response.reason.encode('ascii'))
76 76 )
77 77 if show[:1] == ['-']:
78 78 show = sorted(
79 79 h for h, v in response.getheaders() if h.lower() not in show
80 80 )
81 81 for h in [h.lower() for h in show]:
82 82 if response.getheader(h, None) is not None:
83 83 stdout.write(
84 84 b"%s: %s\n"
85 85 % (h.encode('ascii'), response.getheader(h).encode('ascii'))
86 86 )
87 87 if not headeronly:
88 88 stdout.write(b'\n')
89 89 data = response.read()
90 90
91 91 if args.bodyfile:
92 92 bodyfh = open(args.bodyfile, 'wb')
93 93 else:
94 94 bodyfh = stdout
95 95
96 96 # Pretty print JSON. This also has the beneficial side-effect
97 97 # of verifying emitted JSON is well-formed.
98 98 if formatjson:
99 99 # json.dumps() will print trailing newlines. Eliminate them
100 100 # to make tests easier to write.
101 101 data = pycompat.json_loads(data)
102 102 lines = json.dumps(data, sort_keys=True, indent=2).splitlines()
103 103 for line in lines:
104 104 bodyfh.write(pycompat.sysbytes(line.rstrip()))
105 105 bodyfh.write(b'\n')
106 106 else:
107 107 bodyfh.write(data)
108 108
109 109 if args.bodyfile:
110 110 bodyfh.close()
111 111
112 112 if twice and response.getheader('ETag', None):
113 113 tag = response.getheader('ETag')
114 114
115 115 return response.status
116 116
117 117
118 118 status = request(args.host, args.path, args.show)
119 119 if twice:
120 120 status = request(args.host, args.path, args.show)
121 121
122 122 if 200 <= status <= 305:
123 123 sys.exit(0)
124 124 sys.exit(1)
@@ -1,78 +1,78 @@
1 -#!/usr/bin/env python3
1 +#!/usr/bin/env python
2 2 """Test the running system for features availability. Exit with zero
3 3 if all features are there, non-zero otherwise. If a feature name is
4 4 prefixed with "no-", the absence of feature is tested.
5 5 """
6 6
7 7 from __future__ import absolute_import, print_function
8 8
9 9 import hghave
10 10 import optparse
11 11 import os
12 12 import sys
13 13
14 14 checks = hghave.checks
15 15
16 16
17 17 def list_features():
18 18 for name, feature in sorted(checks.items()):
19 19 desc = feature[1]
20 20 print(name + ':', desc)
21 21
22 22
23 23 def test_features():
24 24 failed = 0
25 25 for name, feature in checks.items():
26 26 check, _ = feature
27 27 try:
28 28 check()
29 29 except Exception as e:
30 30 print("feature %s failed: %s" % (name, e))
31 31 failed += 1
32 32 return failed
33 33
34 34
35 35 parser = optparse.OptionParser("%prog [options] [features]")
36 36 parser.add_option(
37 37 "--test-features", action="store_true", help="test available features"
38 38 )
39 39 parser.add_option(
40 40 "--list-features", action="store_true", help="list available features"
41 41 )
42 42
43 43
44 44 def _loadaddon():
45 45 if 'TESTDIR' in os.environ:
46 46 # loading from '.' isn't needed, because `hghave` should be
47 47 # running at TESTTMP in this case
48 48 path = os.environ['TESTDIR']
49 49 else:
50 50 path = '.'
51 51
52 52 if not os.path.exists(os.path.join(path, 'hghaveaddon.py')):
53 53 return
54 54
55 55 sys.path.insert(0, path)
56 56 try:
57 57 import hghaveaddon
58 58
59 59 assert hghaveaddon # silence pyflakes
60 60 except BaseException as inst:
61 61 sys.stderr.write(
62 62 'failed to import hghaveaddon.py from %r: %s\n' % (path, inst)
63 63 )
64 64 sys.exit(2)
65 65 sys.path.pop(0)
66 66
67 67
68 68 if __name__ == '__main__':
69 69 options, args = parser.parse_args()
70 70 _loadaddon()
71 71 if options.list_features:
72 72 list_features()
73 73 sys.exit(0)
74 74
75 75 if options.test_features:
76 76 sys.exit(test_features())
77 77
78 78 hghave.require(args)
@@ -1,3937 +1,3945 @@
1 1 #!/usr/bin/env python3
2 2 #
3 3 # run-tests.py - Run a set of tests on Mercurial
4 4 #
5 5 # Copyright 2006 Olivia Mackall <olivia@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 # Modifying this script is tricky because it has many modes:
11 11 # - serial (default) vs parallel (-jN, N > 1)
12 12 # - no coverage (default) vs coverage (-c, -C, -s)
13 13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 14 # - tests are a mix of shell scripts and Python scripts
15 15 #
16 16 # If you change this script, it is recommended that you ensure you
17 17 # haven't broken it by running it in various modes with a representative
18 18 # sample of test scripts. For example:
19 19 #
20 20 # 1) serial, no coverage, temp install:
21 21 # ./run-tests.py test-s*
22 22 # 2) serial, no coverage, local hg:
23 23 # ./run-tests.py --local test-s*
24 24 # 3) serial, coverage, temp install:
25 25 # ./run-tests.py -c test-s*
26 26 # 4) serial, coverage, local hg:
27 27 # ./run-tests.py -c --local test-s* # unsupported
28 28 # 5) parallel, no coverage, temp install:
29 29 # ./run-tests.py -j2 test-s*
30 30 # 6) parallel, no coverage, local hg:
31 31 # ./run-tests.py -j2 --local test-s*
32 32 # 7) parallel, coverage, temp install:
33 33 # ./run-tests.py -j2 -c test-s* # currently broken
34 34 # 8) parallel, coverage, local install:
35 35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 36 # 9) parallel, custom tmp dir:
37 37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 38 # 10) parallel, pure, tests that call run-tests:
39 39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 40 #
41 41 # (You could use any subset of the tests: test-s* happens to match
42 42 # enough that it's worth doing parallel runs, few enough that it
43 43 # completes fairly quickly, includes both shell and Python scripts, and
44 44 # includes some scripts that run daemon processes.)
45 45
46 46 from __future__ import absolute_import, print_function
47 47
48 48 import argparse
49 49 import collections
50 50 import contextlib
51 51 import difflib
52 52 import distutils.version as version
53 53 import errno
54 54 import json
55 55 import multiprocessing
56 56 import os
57 57 import platform
58 58 import random
59 59 import re
60 60 import shutil
61 61 import signal
62 62 import socket
63 63 import subprocess
64 64 import sys
65 65 import sysconfig
66 66 import tempfile
67 67 import threading
68 68 import time
69 69 import unittest
70 70 import uuid
71 71 import xml.dom.minidom as minidom
72 72
73 73 try:
74 74 import Queue as queue
75 75 except ImportError:
76 76 import queue
77 77
78 78 try:
79 79 import shlex
80 80
81 81 shellquote = shlex.quote
82 82 except (ImportError, AttributeError):
83 83 import pipes
84 84
85 85 shellquote = pipes.quote
86 86
87 87 processlock = threading.Lock()
88 88
89 89 pygmentspresent = False
90 90 try: # is pygments installed
91 91 import pygments
92 92 import pygments.lexers as lexers
93 93 import pygments.lexer as lexer
94 94 import pygments.formatters as formatters
95 95 import pygments.token as token
96 96 import pygments.style as style
97 97
98 98 if os.name == 'nt':
99 99 hgpath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
100 100 sys.path.append(hgpath)
101 101 try:
102 102 from mercurial import win32 # pytype: disable=import-error
103 103
104 104 # Don't check the result code because it fails on heptapod, but
105 105 # something is able to convert to color anyway.
106 106 win32.enablevtmode()
107 107 finally:
108 108 sys.path = sys.path[:-1]
109 109
110 110 pygmentspresent = True
111 111 difflexer = lexers.DiffLexer()
112 112 terminal256formatter = formatters.Terminal256Formatter()
113 113 except ImportError:
114 114 pass
115 115
116 116 if pygmentspresent:
117 117
118 118 class TestRunnerStyle(style.Style):
119 119 default_style = ""
120 120 skipped = token.string_to_tokentype("Token.Generic.Skipped")
121 121 failed = token.string_to_tokentype("Token.Generic.Failed")
122 122 skippedname = token.string_to_tokentype("Token.Generic.SName")
123 123 failedname = token.string_to_tokentype("Token.Generic.FName")
124 124 styles = {
125 125 skipped: '#e5e5e5',
126 126 skippedname: '#00ffff',
127 127 failed: '#7f0000',
128 128 failedname: '#ff0000',
129 129 }
130 130
131 131 class TestRunnerLexer(lexer.RegexLexer):
132 132 testpattern = r'[\w-]+\.(t|py)(#[a-zA-Z0-9_\-\.]+)?'
133 133 tokens = {
134 134 'root': [
135 135 (r'^Skipped', token.Generic.Skipped, 'skipped'),
136 136 (r'^Failed ', token.Generic.Failed, 'failed'),
137 137 (r'^ERROR: ', token.Generic.Failed, 'failed'),
138 138 ],
139 139 'skipped': [
140 140 (testpattern, token.Generic.SName),
141 141 (r':.*', token.Generic.Skipped),
142 142 ],
143 143 'failed': [
144 144 (testpattern, token.Generic.FName),
145 145 (r'(:| ).*', token.Generic.Failed),
146 146 ],
147 147 }
148 148
149 149 runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
150 150 runnerlexer = TestRunnerLexer()
151 151
152 152 origenviron = os.environ.copy()
153 153
154 154 if sys.version_info > (3, 5, 0):
155 155 PYTHON3 = True
156 156 xrange = range # we use xrange in one place, and we'd rather not use range
157 157
158 158 def _sys2bytes(p):
159 159 if p is None:
160 160 return p
161 161 return p.encode('utf-8')
162 162
163 163 def _bytes2sys(p):
164 164 if p is None:
165 165 return p
166 166 return p.decode('utf-8')
167 167
168 168 osenvironb = getattr(os, 'environb', None)
169 169 if osenvironb is None:
170 170 # Windows lacks os.environb, for instance. A proxy over the real thing
171 171 # instead of a copy allows the environment to be updated via bytes on
172 172 # all platforms.
173 173 class environbytes(object):
174 174 def __init__(self, strenv):
175 175 self.__len__ = strenv.__len__
176 176 self.clear = strenv.clear
177 177 self._strenv = strenv
178 178
179 179 def __getitem__(self, k):
180 180 v = self._strenv.__getitem__(_bytes2sys(k))
181 181 return _sys2bytes(v)
182 182
183 183 def __setitem__(self, k, v):
184 184 self._strenv.__setitem__(_bytes2sys(k), _bytes2sys(v))
185 185
186 186 def __delitem__(self, k):
187 187 self._strenv.__delitem__(_bytes2sys(k))
188 188
189 189 def __contains__(self, k):
190 190 return self._strenv.__contains__(_bytes2sys(k))
191 191
192 192 def __iter__(self):
193 193 return iter([_sys2bytes(k) for k in iter(self._strenv)])
194 194
195 195 def get(self, k, default=None):
196 196 v = self._strenv.get(_bytes2sys(k), _bytes2sys(default))
197 197 return _sys2bytes(v)
198 198
199 199 def pop(self, k, default=None):
200 200 v = self._strenv.pop(_bytes2sys(k), _bytes2sys(default))
201 201 return _sys2bytes(v)
202 202
203 203 osenvironb = environbytes(os.environ)
204 204
205 205 getcwdb = getattr(os, 'getcwdb')
206 206 if not getcwdb or os.name == 'nt':
207 207 getcwdb = lambda: _sys2bytes(os.getcwd())
208 208
209 209 elif sys.version_info >= (3, 0, 0):
210 210 print(
211 211 '%s is only supported on Python 3.5+ and 2.7, not %s'
212 212 % (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3]))
213 213 )
214 214 sys.exit(70) # EX_SOFTWARE from `man 3 sysexits`
215 215 else:
216 216 PYTHON3 = False
217 217
218 218 # In python 2.x, path operations are generally done using
219 219 # bytestrings by default, so we don't have to do any extra
220 220 # fiddling there. We define the wrapper functions anyway just to
221 221 # help keep code consistent between platforms.
222 222 def _sys2bytes(p):
223 223 return p
224 224
225 225 _bytes2sys = _sys2bytes
226 226 osenvironb = os.environ
227 227 getcwdb = os.getcwd
228 228
229 229 # For Windows support
230 230 wifexited = getattr(os, "WIFEXITED", lambda x: False)
231 231
232 232 # Whether to use IPv6
233 233 def checksocketfamily(name, port=20058):
234 234 """return true if we can listen on localhost using family=name
235 235
236 236 name should be either 'AF_INET', or 'AF_INET6'.
237 237 port being used is okay - EADDRINUSE is considered as successful.
238 238 """
239 239 family = getattr(socket, name, None)
240 240 if family is None:
241 241 return False
242 242 try:
243 243 s = socket.socket(family, socket.SOCK_STREAM)
244 244 s.bind(('localhost', port))
245 245 s.close()
246 246 return True
247 247 except socket.error as exc:
248 248 if exc.errno == errno.EADDRINUSE:
249 249 return True
250 250 elif exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
251 251 return False
252 252 else:
253 253 raise
254 254 else:
255 255 return False
256 256
257 257
258 258 # useipv6 will be set by parseargs
259 259 useipv6 = None
260 260
261 261
262 262 def checkportisavailable(port):
263 263 """return true if a port seems free to bind on localhost"""
264 264 if useipv6:
265 265 family = socket.AF_INET6
266 266 else:
267 267 family = socket.AF_INET
268 268 try:
269 269 with contextlib.closing(socket.socket(family, socket.SOCK_STREAM)) as s:
270 270 s.bind(('localhost', port))
271 271 return True
272 272 except socket.error as exc:
273 273 if os.name == 'nt' and exc.errno == errno.WSAEACCES:
274 274 return False
275 275 elif PYTHON3:
276 276 # TODO: make a proper exception handler after dropping py2. This
277 277 # works because socket.error is an alias for OSError on py3,
278 278 # which is also the baseclass of PermissionError.
279 279 if isinstance(exc, PermissionError):
280 280 return False
281 281 if exc.errno not in (
282 282 errno.EADDRINUSE,
283 283 errno.EADDRNOTAVAIL,
284 284 errno.EPROTONOSUPPORT,
285 285 ):
286 286 raise
287 287 return False
288 288
289 289
290 290 closefds = os.name == 'posix'
291 291
292 292
293 293 def Popen4(cmd, wd, timeout, env=None):
294 294 processlock.acquire()
295 295 p = subprocess.Popen(
296 296 _bytes2sys(cmd),
297 297 shell=True,
298 298 bufsize=-1,
299 299 cwd=_bytes2sys(wd),
300 300 env=env,
301 301 close_fds=closefds,
302 302 stdin=subprocess.PIPE,
303 303 stdout=subprocess.PIPE,
304 304 stderr=subprocess.STDOUT,
305 305 )
306 306 processlock.release()
307 307
308 308 p.fromchild = p.stdout
309 309 p.tochild = p.stdin
310 310 p.childerr = p.stderr
311 311
312 312 p.timeout = False
313 313 if timeout:
314 314
315 315 def t():
316 316 start = time.time()
317 317 while time.time() - start < timeout and p.returncode is None:
318 318 time.sleep(0.1)
319 319 p.timeout = True
320 320 vlog('# Timeout reached for process %d' % p.pid)
321 321 if p.returncode is None:
322 322 terminate(p)
323 323
324 324 threading.Thread(target=t).start()
325 325
326 326 return p
327 327
328 328
329 329 if sys.executable:
330 330 sysexecutable = sys.executable
331 331 elif os.environ.get('PYTHONEXECUTABLE'):
332 332 sysexecutable = os.environ['PYTHONEXECUTABLE']
333 333 elif os.environ.get('PYTHON'):
334 334 sysexecutable = os.environ['PYTHON']
335 335 else:
336 336 raise AssertionError('Could not find Python interpreter')
337 337
338 338 PYTHON = _sys2bytes(sysexecutable.replace('\\', '/'))
339 339 IMPL_PATH = b'PYTHONPATH'
340 340 if 'java' in sys.platform:
341 341 IMPL_PATH = b'JYTHONPATH'
342 342
343 343 default_defaults = {
344 344 'jobs': ('HGTEST_JOBS', multiprocessing.cpu_count()),
345 345 'timeout': ('HGTEST_TIMEOUT', 360),
346 346 'slowtimeout': ('HGTEST_SLOWTIMEOUT', 1500),
347 347 'port': ('HGTEST_PORT', 20059),
348 348 'shell': ('HGTEST_SHELL', 'sh'),
349 349 }
350 350
351 351 defaults = default_defaults.copy()
352 352
353 353
354 354 def canonpath(path):
355 355 return os.path.realpath(os.path.expanduser(path))
356 356
357 357
358 358 def parselistfiles(files, listtype, warn=True):
359 359 entries = dict()
360 360 for filename in files:
361 361 try:
362 362 path = os.path.expanduser(os.path.expandvars(filename))
363 363 f = open(path, "rb")
364 364 except IOError as err:
365 365 if err.errno != errno.ENOENT:
366 366 raise
367 367 if warn:
368 368 print("warning: no such %s file: %s" % (listtype, filename))
369 369 continue
370 370
371 371 for line in f.readlines():
372 372 line = line.split(b'#', 1)[0].strip()
373 373 if line:
374 374 # Ensure path entries are compatible with os.path.relpath()
375 375 entries[os.path.normpath(line)] = filename
376 376
377 377 f.close()
378 378 return entries
379 379
380 380
381 381 def parsettestcases(path):
382 382 """read a .t test file, return a set of test case names
383 383
384 384 If path does not exist, return an empty set.
385 385 """
386 386 cases = []
387 387 try:
388 388 with open(path, 'rb') as f:
389 389 for l in f:
390 390 if l.startswith(b'#testcases '):
391 391 cases.append(sorted(l[11:].split()))
392 392 except IOError as ex:
393 393 if ex.errno != errno.ENOENT:
394 394 raise
395 395 return cases
396 396
397 397
398 398 def getparser():
399 399 """Obtain the OptionParser used by the CLI."""
400 400 parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]')
401 401
402 402 selection = parser.add_argument_group('Test Selection')
403 403 selection.add_argument(
404 404 '--allow-slow-tests',
405 405 action='store_true',
406 406 help='allow extremely slow tests',
407 407 )
408 408 selection.add_argument(
409 409 "--blacklist",
410 410 action="append",
411 411 help="skip tests listed in the specified blacklist file",
412 412 )
413 413 selection.add_argument(
414 414 "--changed",
415 415 help="run tests that are changed in parent rev or working directory",
416 416 )
417 417 selection.add_argument(
418 418 "-k", "--keywords", help="run tests matching keywords"
419 419 )
420 420 selection.add_argument(
421 421 "-r", "--retest", action="store_true", help="retest failed tests"
422 422 )
423 423 selection.add_argument(
424 424 "--test-list",
425 425 action="append",
426 426 help="read tests to run from the specified file",
427 427 )
428 428 selection.add_argument(
429 429 "--whitelist",
430 430 action="append",
431 431 help="always run tests listed in the specified whitelist file",
432 432 )
433 433 selection.add_argument(
434 434 'tests', metavar='TESTS', nargs='*', help='Tests to run'
435 435 )
436 436
437 437 harness = parser.add_argument_group('Test Harness Behavior')
438 438 harness.add_argument(
439 439 '--bisect-repo',
440 440 metavar='bisect_repo',
441 441 help=(
442 442 "Path of a repo to bisect. Use together with " "--known-good-rev"
443 443 ),
444 444 )
445 445 harness.add_argument(
446 446 "-d",
447 447 "--debug",
448 448 action="store_true",
449 449 help="debug mode: write output of test scripts to console"
450 450 " rather than capturing and diffing it (disables timeout)",
451 451 )
452 452 harness.add_argument(
453 453 "-f",
454 454 "--first",
455 455 action="store_true",
456 456 help="exit on the first test failure",
457 457 )
458 458 harness.add_argument(
459 459 "-i",
460 460 "--interactive",
461 461 action="store_true",
462 462 help="prompt to accept changed output",
463 463 )
464 464 harness.add_argument(
465 465 "-j",
466 466 "--jobs",
467 467 type=int,
468 468 help="number of jobs to run in parallel"
469 469 " (default: $%s or %d)" % defaults['jobs'],
470 470 )
471 471 harness.add_argument(
472 472 "--keep-tmpdir",
473 473 action="store_true",
474 474 help="keep temporary directory after running tests",
475 475 )
476 476 harness.add_argument(
477 477 '--known-good-rev',
478 478 metavar="known_good_rev",
479 479 help=(
480 480 "Automatically bisect any failures using this "
481 481 "revision as a known-good revision."
482 482 ),
483 483 )
484 484 harness.add_argument(
485 485 "--list-tests",
486 486 action="store_true",
487 487 help="list tests instead of running them",
488 488 )
489 489 harness.add_argument(
490 490 "--loop", action="store_true", help="loop tests repeatedly"
491 491 )
492 492 harness.add_argument(
493 493 '--random', action="store_true", help='run tests in random order'
494 494 )
495 495 harness.add_argument(
496 496 '--order-by-runtime',
497 497 action="store_true",
498 498 help='run slowest tests first, according to .testtimes',
499 499 )
500 500 harness.add_argument(
501 501 "-p",
502 502 "--port",
503 503 type=int,
504 504 help="port on which servers should listen"
505 505 " (default: $%s or %d)" % defaults['port'],
506 506 )
507 507 harness.add_argument(
508 508 '--profile-runner',
509 509 action='store_true',
510 510 help='run statprof on run-tests',
511 511 )
512 512 harness.add_argument(
513 513 "-R", "--restart", action="store_true", help="restart at last error"
514 514 )
515 515 harness.add_argument(
516 516 "--runs-per-test",
517 517 type=int,
518 518 dest="runs_per_test",
519 519 help="run each test N times (default=1)",
520 520 default=1,
521 521 )
522 522 harness.add_argument(
523 523 "--shell", help="shell to use (default: $%s or %s)" % defaults['shell']
524 524 )
525 525 harness.add_argument(
526 526 '--showchannels', action='store_true', help='show scheduling channels'
527 527 )
528 528 harness.add_argument(
529 529 "--slowtimeout",
530 530 type=int,
531 531 help="kill errant slow tests after SLOWTIMEOUT seconds"
532 532 " (default: $%s or %d)" % defaults['slowtimeout'],
533 533 )
534 534 harness.add_argument(
535 535 "-t",
536 536 "--timeout",
537 537 type=int,
538 538 help="kill errant tests after TIMEOUT seconds"
539 539 " (default: $%s or %d)" % defaults['timeout'],
540 540 )
541 541 harness.add_argument(
542 542 "--tmpdir",
543 543 help="run tests in the given temporary directory"
544 544 " (implies --keep-tmpdir)",
545 545 )
546 546 harness.add_argument(
547 547 "-v", "--verbose", action="store_true", help="output verbose messages"
548 548 )
549 549
550 550 hgconf = parser.add_argument_group('Mercurial Configuration')
551 551 hgconf.add_argument(
552 552 "--chg",
553 553 action="store_true",
554 554 help="install and use chg wrapper in place of hg",
555 555 )
556 556 hgconf.add_argument(
557 557 "--chg-debug",
558 558 action="store_true",
559 559 help="show chg debug logs",
560 560 )
561 561 hgconf.add_argument(
562 562 "--rhg",
563 563 action="store_true",
564 564 help="install and use rhg Rust implementation in place of hg",
565 565 )
566 566 hgconf.add_argument("--compiler", help="compiler to build with")
567 567 hgconf.add_argument(
568 568 '--extra-config-opt',
569 569 action="append",
570 570 default=[],
571 571 help='set the given config opt in the test hgrc',
572 572 )
573 573 hgconf.add_argument(
574 574 "-l",
575 575 "--local",
576 576 action="store_true",
577 577 help="shortcut for --with-hg=<testdir>/../hg, "
578 578 "--with-rhg=<testdir>/../rust/target/release/rhg if --rhg is set, "
579 579 "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set",
580 580 )
581 581 hgconf.add_argument(
582 582 "--ipv6",
583 583 action="store_true",
584 584 help="prefer IPv6 to IPv4 for network related tests",
585 585 )
586 586 hgconf.add_argument(
587 587 "--pure",
588 588 action="store_true",
589 589 help="use pure Python code instead of C extensions",
590 590 )
591 591 hgconf.add_argument(
592 592 "--rust",
593 593 action="store_true",
594 594 help="use Rust code alongside C extensions",
595 595 )
596 596 hgconf.add_argument(
597 597 "--no-rust",
598 598 action="store_true",
599 599 help="do not use Rust code even if compiled",
600 600 )
601 601 hgconf.add_argument(
602 602 "--with-chg",
603 603 metavar="CHG",
604 604 help="use specified chg wrapper in place of hg",
605 605 )
606 606 hgconf.add_argument(
607 607 "--with-rhg",
608 608 metavar="RHG",
609 609 help="use specified rhg Rust implementation in place of hg",
610 610 )
611 611 hgconf.add_argument(
612 612 "--with-hg",
613 613 metavar="HG",
614 614 help="test using specified hg script rather than a "
615 615 "temporary installation",
616 616 )
617 617
618 618 reporting = parser.add_argument_group('Results Reporting')
619 619 reporting.add_argument(
620 620 "-C",
621 621 "--annotate",
622 622 action="store_true",
623 623 help="output files annotated with coverage",
624 624 )
625 625 reporting.add_argument(
626 626 "--color",
627 627 choices=["always", "auto", "never"],
628 628 default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
629 629 help="colorisation: always|auto|never (default: auto)",
630 630 )
631 631 reporting.add_argument(
632 632 "-c",
633 633 "--cover",
634 634 action="store_true",
635 635 help="print a test coverage report",
636 636 )
637 637 reporting.add_argument(
638 638 '--exceptions',
639 639 action='store_true',
640 640 help='log all exceptions and generate an exception report',
641 641 )
642 642 reporting.add_argument(
643 643 "-H",
644 644 "--htmlcov",
645 645 action="store_true",
646 646 help="create an HTML report of the coverage of the files",
647 647 )
648 648 reporting.add_argument(
649 649 "--json",
650 650 action="store_true",
651 651 help="store test result data in 'report.json' file",
652 652 )
653 653 reporting.add_argument(
654 654 "--outputdir",
655 655 help="directory to write error logs to (default=test directory)",
656 656 )
657 657 reporting.add_argument(
658 658 "-n", "--nodiff", action="store_true", help="skip showing test changes"
659 659 )
660 660 reporting.add_argument(
661 661 "-S",
662 662 "--noskips",
663 663 action="store_true",
664 664 help="don't report skip tests verbosely",
665 665 )
666 666 reporting.add_argument(
667 667 "--time", action="store_true", help="time how long each test takes"
668 668 )
669 669 reporting.add_argument("--view", help="external diff viewer")
670 670 reporting.add_argument(
671 671 "--xunit", help="record xunit results at specified path"
672 672 )
673 673
674 674 for option, (envvar, default) in defaults.items():
675 675 defaults[option] = type(default)(os.environ.get(envvar, default))
676 676 parser.set_defaults(**defaults)
677 677
678 678 return parser
679 679
680 680
681 681 def parseargs(args, parser):
682 682 """Parse arguments with our OptionParser and validate results."""
683 683 options = parser.parse_args(args)
684 684
685 685 # jython is always pure
686 686 if 'java' in sys.platform or '__pypy__' in sys.modules:
687 687 options.pure = True
688 688
689 689 if platform.python_implementation() != 'CPython' and options.rust:
690 690 parser.error('Rust extensions are only available with CPython')
691 691
692 692 if options.pure and options.rust:
693 693 parser.error('--rust cannot be used with --pure')
694 694
695 695 if options.rust and options.no_rust:
696 696 parser.error('--rust cannot be used with --no-rust')
697 697
698 698 if options.local:
699 699 if options.with_hg or options.with_rhg or options.with_chg:
700 700 parser.error(
701 701 '--local cannot be used with --with-hg or --with-rhg or --with-chg'
702 702 )
703 703 testdir = os.path.dirname(_sys2bytes(canonpath(sys.argv[0])))
704 704 reporootdir = os.path.dirname(testdir)
705 705 pathandattrs = [(b'hg', 'with_hg')]
706 706 if options.chg:
707 707 pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
708 708 if options.rhg:
709 709 pathandattrs.append((b'rust/target/release/rhg', 'with_rhg'))
710 710 for relpath, attr in pathandattrs:
711 711 binpath = os.path.join(reporootdir, relpath)
712 712 if os.name != 'nt' and not os.access(binpath, os.X_OK):
713 713 parser.error(
714 714 '--local specified, but %r not found or '
715 715 'not executable' % binpath
716 716 )
717 717 setattr(options, attr, _bytes2sys(binpath))
718 718
719 719 if options.with_hg:
720 720 options.with_hg = canonpath(_sys2bytes(options.with_hg))
721 721 if not (
722 722 os.path.isfile(options.with_hg)
723 723 and os.access(options.with_hg, os.X_OK)
724 724 ):
725 725 parser.error('--with-hg must specify an executable hg script')
726 726 if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
727 727 sys.stderr.write('warning: --with-hg should specify an hg script\n')
728 728 sys.stderr.flush()
729 729
730 730 if (options.chg or options.with_chg) and os.name == 'nt':
731 731 parser.error('chg does not work on %s' % os.name)
732 732 if (options.rhg or options.with_rhg) and os.name == 'nt':
733 733 parser.error('rhg does not work on %s' % os.name)
734 734 if options.with_chg:
735 735 options.chg = False # no installation to temporary location
736 736 options.with_chg = canonpath(_sys2bytes(options.with_chg))
737 737 if not (
738 738 os.path.isfile(options.with_chg)
739 739 and os.access(options.with_chg, os.X_OK)
740 740 ):
741 741 parser.error('--with-chg must specify a chg executable')
742 742 if options.with_rhg:
743 743 options.rhg = False # no installation to temporary location
744 744 options.with_rhg = canonpath(_sys2bytes(options.with_rhg))
745 745 if not (
746 746 os.path.isfile(options.with_rhg)
747 747 and os.access(options.with_rhg, os.X_OK)
748 748 ):
749 749 parser.error('--with-rhg must specify a rhg executable')
750 750 if options.chg and options.with_hg:
751 751 # chg shares installation location with hg
752 752 parser.error(
753 753 '--chg does not work when --with-hg is specified '
754 754 '(use --with-chg instead)'
755 755 )
756 756 if options.rhg and options.with_hg:
757 757 # rhg shares installation location with hg
758 758 parser.error(
759 759 '--rhg does not work when --with-hg is specified '
760 760 '(use --with-rhg instead)'
761 761 )
762 762 if options.rhg and options.chg:
763 763 parser.error('--rhg and --chg do not work together')
764 764
765 765 if options.color == 'always' and not pygmentspresent:
766 766 sys.stderr.write(
767 767 'warning: --color=always ignored because '
768 768 'pygments is not installed\n'
769 769 )
770 770
771 771 if options.bisect_repo and not options.known_good_rev:
772 772 parser.error("--bisect-repo cannot be used without --known-good-rev")
773 773
774 774 global useipv6
775 775 if options.ipv6:
776 776 useipv6 = checksocketfamily('AF_INET6')
777 777 else:
778 778 # only use IPv6 if IPv4 is unavailable and IPv6 is available
779 779 useipv6 = (not checksocketfamily('AF_INET')) and checksocketfamily(
780 780 'AF_INET6'
781 781 )
782 782
783 783 options.anycoverage = options.cover or options.annotate or options.htmlcov
784 784 if options.anycoverage:
785 785 try:
786 786 import coverage
787 787
788 788 covver = version.StrictVersion(coverage.__version__).version
789 789 if covver < (3, 3):
790 790 parser.error('coverage options require coverage 3.3 or later')
791 791 except ImportError:
792 792 parser.error('coverage options now require the coverage package')
793 793
794 794 if options.anycoverage and options.local:
795 795 # this needs some path mangling somewhere, I guess
796 796 parser.error(
797 797 "sorry, coverage options do not work when --local " "is specified"
798 798 )
799 799
800 800 if options.anycoverage and options.with_hg:
801 801 parser.error(
802 802 "sorry, coverage options do not work when --with-hg " "is specified"
803 803 )
804 804
805 805 global verbose
806 806 if options.verbose:
807 807 verbose = ''
808 808
809 809 if options.tmpdir:
810 810 options.tmpdir = canonpath(options.tmpdir)
811 811
812 812 if options.jobs < 1:
813 813 parser.error('--jobs must be positive')
814 814 if options.interactive and options.debug:
815 815 parser.error("-i/--interactive and -d/--debug are incompatible")
816 816 if options.debug:
817 817 if options.timeout != defaults['timeout']:
818 818 sys.stderr.write('warning: --timeout option ignored with --debug\n')
819 819 if options.slowtimeout != defaults['slowtimeout']:
820 820 sys.stderr.write(
821 821 'warning: --slowtimeout option ignored with --debug\n'
822 822 )
823 823 options.timeout = 0
824 824 options.slowtimeout = 0
825 825
826 826 if options.blacklist:
827 827 options.blacklist = parselistfiles(options.blacklist, 'blacklist')
828 828 if options.whitelist:
829 829 options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
830 830 else:
831 831 options.whitelisted = {}
832 832
833 833 if options.showchannels:
834 834 options.nodiff = True
835 835
836 836 return options
837 837
838 838
839 839 def rename(src, dst):
840 840 """Like os.rename(), trade atomicity and opened files friendliness
841 841 for existing destination support.
842 842 """
843 843 shutil.copy(src, dst)
844 844 os.remove(src)
845 845
846 846
847 847 def makecleanable(path):
848 848 """Try to fix directory permission recursively so that the entire tree
849 849 can be deleted"""
850 850 for dirpath, dirnames, _filenames in os.walk(path, topdown=True):
851 851 for d in dirnames:
852 852 p = os.path.join(dirpath, d)
853 853 try:
854 854 os.chmod(p, os.stat(p).st_mode & 0o777 | 0o700) # chmod u+rwx
855 855 except OSError:
856 856 pass
857 857
858 858
859 859 _unified_diff = difflib.unified_diff
860 860 if PYTHON3:
861 861 import functools
862 862
863 863 _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
864 864
865 865
866 866 def getdiff(expected, output, ref, err):
867 867 servefail = False
868 868 lines = []
869 869 for line in _unified_diff(expected, output, ref, err):
870 870 if line.startswith(b'+++') or line.startswith(b'---'):
871 871 line = line.replace(b'\\', b'/')
872 872 if line.endswith(b' \n'):
873 873 line = line[:-2] + b'\n'
874 874 lines.append(line)
875 875 if not servefail and line.startswith(
876 876 b'+ abort: child process failed to start'
877 877 ):
878 878 servefail = True
879 879
880 880 return servefail, lines
881 881
882 882
883 883 verbose = False
884 884
885 885
886 886 def vlog(*msg):
887 887 """Log only when in verbose mode."""
888 888 if verbose is False:
889 889 return
890 890
891 891 return log(*msg)
892 892
893 893
894 894 # Bytes that break XML even in a CDATA block: control characters 0-31
895 895 # sans \t, \n and \r
896 896 CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")
897 897
898 898 # Match feature conditionalized output lines in the form, capturing the feature
899 899 # list in group 2, and the preceding line output in group 1:
900 900 #
901 901 # output..output (feature !)\n
902 902 optline = re.compile(br'(.*) \((.+?) !\)\n$')
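# e.g. the output line b'  adding changesets (no-windows !)\n' matches with
# group 1 == b'  adding changesets' and group 2 == b'no-windows'.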
903 903
904 904
905 905 def cdatasafe(data):
906 906 """Make a string safe to include in a CDATA block.
907 907
908 908 Certain control characters are illegal in a CDATA block, and
909 909 there's no way to include a ]]> in a CDATA either. This function
910 910 replaces illegal bytes with ? and adds a space between the ]] so
911 911 that it won't break the CDATA block.
912 912 """
913 913 return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>')
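    # e.g. cdatasafe(b'\x07ok]]>') == b'?ok] ]>': the control byte becomes
    # '?' and the ']]>' sequence gains a space so it cannot end the block.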
914 914
915 915
916 916 def log(*msg):
917 917 """Log something to stdout.
918 918
919 919 Arguments are strings to print.
920 920 """
921 921 with iolock:
922 922 if verbose:
923 923 print(verbose, end=' ')
924 924 for m in msg:
925 925 print(m, end=' ')
926 926 print()
927 927 sys.stdout.flush()
928 928
929 929
930 930 def highlightdiff(line, color):
931 931 if not color:
932 932 return line
933 933 assert pygmentspresent
934 934 return pygments.highlight(
935 935 line.decode('latin1'), difflexer, terminal256formatter
936 936 ).encode('latin1')
937 937
938 938
939 939 def highlightmsg(msg, color):
940 940 if not color:
941 941 return msg
942 942 assert pygmentspresent
943 943 return pygments.highlight(msg, runnerlexer, runnerformatter)
944 944
945 945
946 946 def terminate(proc):
947 947 """Terminate subprocess"""
948 948 vlog('# Terminating process %d' % proc.pid)
949 949 try:
950 950 proc.terminate()
951 951 except OSError:
952 952 pass
953 953
954 954
955 955 def killdaemons(pidfile):
956 956 import killdaemons as killmod
957 957
958 958 return killmod.killdaemons(pidfile, tryhard=False, remove=True, logfn=vlog)
959 959
960 960
961 961 class Test(unittest.TestCase):
962 962 """Encapsulates a single, runnable test.
963 963
964 964 While this class conforms to the unittest.TestCase API, it differs in that
965 965 instances need to be instantiated manually. (Typically, unittest.TestCase
966 966 classes are instantiated automatically by scanning modules.)
967 967 """
968 968
969 969 # Status code reserved for skipped tests (used by hghave).
970 970 SKIPPED_STATUS = 80
971 971
972 972 def __init__(
973 973 self,
974 974 path,
975 975 outputdir,
976 976 tmpdir,
977 977 keeptmpdir=False,
978 978 debug=False,
979 979 first=False,
980 980 timeout=None,
981 981 startport=None,
982 982 extraconfigopts=None,
983 983 shell=None,
984 984 hgcommand=None,
985 985 slowtimeout=None,
986 986 usechg=False,
987 987 chgdebug=False,
988 988 useipv6=False,
989 989 ):
990 990 """Create a test from parameters.
991 991
992 992 path is the full path to the file defining the test.
993 993
994 994 tmpdir is the main temporary directory to use for this test.
995 995
996 996 keeptmpdir determines whether to keep the test's temporary directory
997 997 after execution. It defaults to removal (False).
998 998
999 999 debug mode will make the test execute verbosely, with unfiltered
1000 1000 output.
1001 1001
1002 1002 timeout controls the maximum run time of the test. It is ignored when
1003 1003 debug is True. See slowtimeout for tests with #require slow.
1004 1004
1005 1005 slowtimeout overrides timeout if the test has #require slow.
1006 1006
1007 1007 startport controls the starting port number to use for this test. Each
1008 1008 test will reserve 3 port numbers for execution. It is the caller's
1009 1009 responsibility to allocate a non-overlapping port range to Test
1010 1010 instances.
1011 1011
1012 1012 extraconfigopts is an iterable of extra hgrc config options. Values
1013 1013 must have the form "key=value" (something understood by hgrc). Values
1014 1014 of the form "foo.key=value" will result in "[foo] key=value".
1015 1015
1016 1016 shell is the shell to execute tests in.
1017 1017 """
1018 1018 if timeout is None:
1019 1019 timeout = defaults['timeout']
1020 1020 if startport is None:
1021 1021 startport = defaults['port']
1022 1022 if slowtimeout is None:
1023 1023 slowtimeout = defaults['slowtimeout']
1024 1024 self.path = path
1025 1025 self.relpath = os.path.relpath(path)
1026 1026 self.bname = os.path.basename(path)
1027 1027 self.name = _bytes2sys(self.bname)
1028 1028 self._testdir = os.path.dirname(path)
1029 1029 self._outputdir = outputdir
1030 1030 self._tmpname = os.path.basename(path)
1031 1031 self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)
1032 1032
1033 1033 self._threadtmp = tmpdir
1034 1034 self._keeptmpdir = keeptmpdir
1035 1035 self._debug = debug
1036 1036 self._first = first
1037 1037 self._timeout = timeout
1038 1038 self._slowtimeout = slowtimeout
1039 1039 self._startport = startport
1040 1040 self._extraconfigopts = extraconfigopts or []
1041 1041 self._shell = _sys2bytes(shell)
1042 1042 self._hgcommand = hgcommand or b'hg'
1043 1043 self._usechg = usechg
1044 1044 self._chgdebug = chgdebug
1045 1045 self._useipv6 = useipv6
1046 1046
1047 1047 self._aborted = False
1048 1048 self._daemonpids = []
1049 1049 self._finished = None
1050 1050 self._ret = None
1051 1051 self._out = None
1052 1052 self._skipped = None
1053 1053 self._testtmp = None
1054 1054 self._chgsockdir = None
1055 1055
1056 1056 self._refout = self.readrefout()
1057 1057
1058 1058 def readrefout(self):
1059 1059 """read reference output"""
1060 1060 # If we're not in --debug mode and reference output file exists,
1061 1061 # check test output against it.
1062 1062 if self._debug:
1063 1063 return None # to match "out is None"
1064 1064 elif os.path.exists(self.refpath):
1065 1065 with open(self.refpath, 'rb') as f:
1066 1066 return f.read().splitlines(True)
1067 1067 else:
1068 1068 return []
1069 1069
1070 1070 # needed to get base class __repr__ running
1071 1071 @property
1072 1072 def _testMethodName(self):
1073 1073 return self.name
1074 1074
1075 1075 def __str__(self):
1076 1076 return self.name
1077 1077
1078 1078 def shortDescription(self):
1079 1079 return self.name
1080 1080
1081 1081 def setUp(self):
1082 1082 """Tasks to perform before run()."""
1083 1083 self._finished = False
1084 1084 self._ret = None
1085 1085 self._out = None
1086 1086 self._skipped = None
1087 1087
1088 1088 try:
1089 1089 os.mkdir(self._threadtmp)
1090 1090 except OSError as e:
1091 1091 if e.errno != errno.EEXIST:
1092 1092 raise
1093 1093
1094 1094 name = self._tmpname
1095 1095 self._testtmp = os.path.join(self._threadtmp, name)
1096 1096 os.mkdir(self._testtmp)
1097 1097
1098 1098 # Remove any previous output files.
1099 1099 if os.path.exists(self.errpath):
1100 1100 try:
1101 1101 os.remove(self.errpath)
1102 1102 except OSError as e:
1103 1103 # We might have raced another test to clean up a .err
1104 1104 # file, so ignore ENOENT when removing a previous .err
1105 1105 # file.
1106 1106 if e.errno != errno.ENOENT:
1107 1107 raise
1108 1108
1109 1109 if self._usechg:
1110 1110 self._chgsockdir = os.path.join(
1111 1111 self._threadtmp, b'%s.chgsock' % name
1112 1112 )
1113 1113 os.mkdir(self._chgsockdir)
1114 1114
1115 1115 def run(self, result):
1116 1116 """Run this test and report results against a TestResult instance."""
1117 1117 # This function is extremely similar to unittest.TestCase.run(). Once
1118 1118 # we require Python 2.7 (or at least its version of unittest), this
1119 1119 # function can largely go away.
1120 1120 self._result = result
1121 1121 result.startTest(self)
1122 1122 try:
1123 1123 try:
1124 1124 self.setUp()
1125 1125 except (KeyboardInterrupt, SystemExit):
1126 1126 self._aborted = True
1127 1127 raise
1128 1128 except Exception:
1129 1129 result.addError(self, sys.exc_info())
1130 1130 return
1131 1131
1132 1132 success = False
1133 1133 try:
1134 1134 self.runTest()
1135 1135 except KeyboardInterrupt:
1136 1136 self._aborted = True
1137 1137 raise
1138 1138 except unittest.SkipTest as e:
1139 1139 result.addSkip(self, str(e))
1140 1140 # The base class will have already counted this as a
1141 1141 # test we "ran", but we want to exclude skipped tests
1142 1142 # from those we count towards those run.
1143 1143 result.testsRun -= 1
1144 1144 except self.failureException as e:
1145 1145 # This differs from unittest in that we don't capture
1146 1146 # the stack trace. This is for historical reasons and
1147 1147 # this decision could be revisited in the future,
1148 1148 # especially for PythonTest instances.
1149 1149 if result.addFailure(self, str(e)):
1150 1150 success = True
1151 1151 except Exception:
1152 1152 result.addError(self, sys.exc_info())
1153 1153 else:
1154 1154 success = True
1155 1155
1156 1156 try:
1157 1157 self.tearDown()
1158 1158 except (KeyboardInterrupt, SystemExit):
1159 1159 self._aborted = True
1160 1160 raise
1161 1161 except Exception:
1162 1162 result.addError(self, sys.exc_info())
1163 1163 success = False
1164 1164
1165 1165 if success:
1166 1166 result.addSuccess(self)
1167 1167 finally:
1168 1168 result.stopTest(self, interrupted=self._aborted)
1169 1169
1170 1170 def runTest(self):
1171 1171 """Run this test instance.
1172 1172
1173 1173 This will return a tuple describing the result of the test.
1174 1174 """
1175 1175 env = self._getenv()
1176 1176 self._genrestoreenv(env)
1177 1177 self._daemonpids.append(env['DAEMON_PIDS'])
1178 1178 self._createhgrc(env['HGRCPATH'])
1179 1179
1180 1180 vlog('# Test', self.name)
1181 1181
1182 1182 ret, out = self._run(env)
1183 1183 self._finished = True
1184 1184 self._ret = ret
1185 1185 self._out = out
1186 1186
1187 1187 def describe(ret):
1188 1188 if ret < 0:
1189 1189 return 'killed by signal: %d' % -ret
1190 1190 return 'returned error code %d' % ret
1191 1191
1192 1192 self._skipped = False
1193 1193
1194 1194 if ret == self.SKIPPED_STATUS:
1195 1195 if out is None: # Debug mode, nothing to parse.
1196 1196 missing = ['unknown']
1197 1197 failed = None
1198 1198 else:
1199 1199 missing, failed = TTest.parsehghaveoutput(out)
1200 1200
1201 1201 if not missing:
1202 1202 missing = ['skipped']
1203 1203
1204 1204 if failed:
1205 1205 self.fail('hg have failed checking for %s' % failed[-1])
1206 1206 else:
1207 1207 self._skipped = True
1208 1208 raise unittest.SkipTest(missing[-1])
1209 1209 elif ret == 'timeout':
1210 1210 self.fail('timed out')
1211 1211 elif ret is False:
1212 1212 self.fail('no result code from test')
1213 1213 elif out != self._refout:
1214 1214 # Diff generation may rely on written .err file.
1215 1215 if (
1216 1216 (ret != 0 or out != self._refout)
1217 1217 and not self._skipped
1218 1218 and not self._debug
1219 1219 ):
1220 1220 with open(self.errpath, 'wb') as f:
1221 1221 for line in out:
1222 1222 f.write(line)
1223 1223
1224 1224 # The result object handles diff calculation for us.
1225 1225 with firstlock:
1226 1226 if self._result.addOutputMismatch(self, ret, out, self._refout):
1227 1227 # change was accepted, skip failing
1228 1228 return
1229 1229 if self._first:
1230 1230 global firsterror
1231 1231 firsterror = True
1232 1232
1233 1233 if ret:
1234 1234 msg = 'output changed and ' + describe(ret)
1235 1235 else:
1236 1236 msg = 'output changed'
1237 1237
1238 1238 self.fail(msg)
1239 1239 elif ret:
1240 1240 self.fail(describe(ret))
1241 1241
1242 1242 def tearDown(self):
1243 1243 """Tasks to perform after run()."""
1244 1244 for entry in self._daemonpids:
1245 1245 killdaemons(entry)
1246 1246 self._daemonpids = []
1247 1247
1248 1248 if self._keeptmpdir:
1249 1249 log(
1250 1250 '\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s'
1251 1251 % (
1252 1252 _bytes2sys(self._testtmp),
1253 1253 _bytes2sys(self._threadtmp),
1254 1254 )
1255 1255 )
1256 1256 else:
1257 1257 try:
1258 1258 shutil.rmtree(self._testtmp)
1259 1259 except OSError:
1260 1260 # unreadable directory may be left in $TESTTMP; fix permission
1261 1261 # and try again
1262 1262 makecleanable(self._testtmp)
1263 1263 shutil.rmtree(self._testtmp, True)
1264 1264 shutil.rmtree(self._threadtmp, True)
1265 1265
1266 1266 if self._usechg:
1267 1267 # chgservers will stop automatically after they find the socket
1268 1268 # files are deleted
1269 1269 shutil.rmtree(self._chgsockdir, True)
1270 1270
1271 1271 if (
1272 1272 (self._ret != 0 or self._out != self._refout)
1273 1273 and not self._skipped
1274 1274 and not self._debug
1275 1275 and self._out
1276 1276 ):
1277 1277 with open(self.errpath, 'wb') as f:
1278 1278 for line in self._out:
1279 1279 f.write(line)
1280 1280
1281 1281 vlog("# Ret was:", self._ret, '(%s)' % self.name)
1282 1282
1283 1283 def _run(self, env):
1284 1284 # This should be implemented in child classes to run tests.
1285 1285 raise unittest.SkipTest('unknown test type')
1286 1286
1287 1287 def abort(self):
1288 1288 """Terminate execution of this test."""
1289 1289 self._aborted = True
1290 1290
1291 1291 def _portmap(self, i):
1292 1292 offset = b'' if i == 0 else b'%d' % i
1293 1293 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
1294 1294
1295 1295 def _getreplacements(self):
1296 1296 """Obtain a mapping of text replacements to apply to test output.
1297 1297
1298 1298 Test output needs to be normalized so it can be compared to expected
1299 1299 output. This function defines how some of that normalization will
1300 1300 occur.
1301 1301 """
1302 1302 r = [
1303 1303 # This list should be parallel to defineport in _getenv
1304 1304 self._portmap(0),
1305 1305 self._portmap(1),
1306 1306 self._portmap(2),
1307 1307 (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
1308 1308 (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
1309 1309 ]
1310 1310 r.append((self._escapepath(self._testtmp), b'$TESTTMP'))
1311 1311
1312 1312 replacementfile = os.path.join(self._testdir, b'common-pattern.py')
1313 1313
1314 1314 if os.path.exists(replacementfile):
1315 1315 data = {}
1316 1316 with open(replacementfile, mode='rb') as source:
1317 1317 # the intermediate 'compile' step help with debugging
1318 1318 code = compile(source.read(), replacementfile, 'exec')
1319 1319 exec(code, data)
1320 1320 for value in data.get('substitutions', ()):
1321 1321 if len(value) != 2:
1322 1322 msg = 'malformatted substitution in %s: %r'
1323 1323 msg %= (replacementfile, value)
1324 1324 raise ValueError(msg)
1325 1325 r.append(value)
1326 1326 return r
1327 1327
1328 1328 def _escapepath(self, p):
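        # On Windows, build a regex that matches the path case-insensitively
        # and treats '/' and '\' as interchangeable separators; elsewhere a
        # plain re.escape() is enough.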
1329 1329 if os.name == 'nt':
1330 1330 return b''.join(
1331 1331 c.isalpha()
1332 1332 and b'[%s%s]' % (c.lower(), c.upper())
1333 1333 or c in b'/\\'
1334 1334 and br'[/\\]'
1335 1335 or c.isdigit()
1336 1336 and c
1337 1337 or b'\\' + c
1338 1338 for c in [p[i : i + 1] for i in range(len(p))]
1339 1339 )
1340 1340 else:
1341 1341 return re.escape(p)
1342 1342
1343 1343 def _localip(self):
1344 1344 if self._useipv6:
1345 1345 return b'::1'
1346 1346 else:
1347 1347 return b'127.0.0.1'
1348 1348
1349 1349 def _genrestoreenv(self, testenv):
1350 1350 """Generate a script that can be used by tests to restore the original
1351 1351 environment."""
1352 1352 # Put the restoreenv script inside self._threadtmp
1353 1353 scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
1354 1354 testenv['HGTEST_RESTOREENV'] = _bytes2sys(scriptpath)
1355 1355
1356 1356 # Only restore environment variable names that the shell allows
1357 1357 # us to export.
1358 1358 name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')
1359 1359
1360 1360 # Do not restore these variables; otherwise tests would fail.
1361 1361 reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}
1362 1362
1363 1363 with open(scriptpath, 'w') as envf:
1364 1364 for name, value in origenviron.items():
1365 1365 if not name_regex.match(name):
1366 1366 # Skip environment variables with unusual names not
1367 1367 # allowed by most shells.
1368 1368 continue
1369 1369 if name in reqnames:
1370 1370 continue
1371 1371 envf.write('%s=%s\n' % (name, shellquote(value)))
1372 1372
1373 1373 for name in testenv:
1374 1374 if name in origenviron or name in reqnames:
1375 1375 continue
1376 1376 envf.write('unset %s\n' % (name,))
1377 1377
1378 1378 def _getenv(self):
1379 1379 """Obtain environment variables to use during test execution."""
1380 1380
1381 1381 def defineport(i):
1382 1382 offset = '' if i == 0 else '%s' % i
1383 1383 env["HGPORT%s" % offset] = '%s' % (self._startport + i)
1384 1384
1385 1385 env = os.environ.copy()
1386 1386 env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
1387 1387 env['HGEMITWARNINGS'] = '1'
1388 1388 env['TESTTMP'] = _bytes2sys(self._testtmp)
1389 1389 uid_file = os.path.join(_bytes2sys(self._testtmp), 'UID')
1390 1390 env['HGTEST_UUIDFILE'] = uid_file
1391 1391 env['TESTNAME'] = self.name
1392 1392 env['HOME'] = _bytes2sys(self._testtmp)
1393 1393 if os.name == 'nt':
1394 1394 env['REALUSERPROFILE'] = env['USERPROFILE']
1395 1395 # py3.8+ ignores HOME: https://bugs.python.org/issue36264
1396 1396 env['USERPROFILE'] = env['HOME']
1397 1397         formatted_timeout = _bytes2sys(b"%d" % default_defaults['timeout'][1])
1398 1398         env['HGTEST_TIMEOUT_DEFAULT'] = formatted_timeout
1399 1399 env['HGTEST_TIMEOUT'] = _bytes2sys(b"%d" % self._timeout)
1400 1400 # This number should match portneeded in _getport
1401 1401 for port in xrange(3):
1402 1402 # This list should be parallel to _portmap in _getreplacements
1403 1403 defineport(port)
1404 1404 env["HGRCPATH"] = _bytes2sys(os.path.join(self._threadtmp, b'.hgrc'))
1405 1405 env["DAEMON_PIDS"] = _bytes2sys(
1406 1406 os.path.join(self._threadtmp, b'daemon.pids')
1407 1407 )
1408 1408 env["HGEDITOR"] = (
1409 1409 '"' + sysexecutable + '"' + ' -c "import sys; sys.exit(0)"'
1410 1410 )
1411 1411 env["HGUSER"] = "test"
1412 1412 env["HGENCODING"] = "ascii"
1413 1413 env["HGENCODINGMODE"] = "strict"
1414 1414 env["HGHOSTNAME"] = "test-hostname"
1415 1415 env['HGIPV6'] = str(int(self._useipv6))
1416 1416 # See contrib/catapipe.py for how to use this functionality.
1417 1417 if 'HGTESTCATAPULTSERVERPIPE' not in env:
1418 1418             # If HGTESTCATAPULTSERVERPIPE is not explicitly set, fall back to
1419 1419             # the non-test HGCATAPULTSERVERPIPE variable, or to os.devnull
1420 1420 env['HGTESTCATAPULTSERVERPIPE'] = env.get(
1421 1421 'HGCATAPULTSERVERPIPE', os.devnull
1422 1422 )
1423 1423
1424 1424 extraextensions = []
1425 1425 for opt in self._extraconfigopts:
1426 1426 section, key = opt.split('.', 1)
1427 1427 if section != 'extensions':
1428 1428 continue
1429 1429 name = key.split('=', 1)[0]
1430 1430 extraextensions.append(name)
1431 1431
1432 1432 if extraextensions:
1433 1433 env['HGTESTEXTRAEXTENSIONS'] = ' '.join(extraextensions)
1434 1434
1435 1435 # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
1436 1436 # IP addresses.
1437 1437 env['LOCALIP'] = _bytes2sys(self._localip())
1438 1438
1439 1439 # This has the same effect as Py_LegacyWindowsStdioFlag in exewrapper.c,
1440 1440 # but this is needed for testing python instances like dummyssh,
1441 1441 # dummysmtpd.py, and dumbhttp.py.
1442 1442 if PYTHON3 and os.name == 'nt':
1443 1443 env['PYTHONLEGACYWINDOWSSTDIO'] = '1'
1444 1444
1445 1445 # Modified HOME in test environment can confuse Rust tools. So set
1446 1446 # CARGO_HOME and RUSTUP_HOME automatically if a Rust toolchain is
1447 1447 # present and these variables aren't already defined.
1448 1448 cargo_home_path = os.path.expanduser('~/.cargo')
1449 1449 rustup_home_path = os.path.expanduser('~/.rustup')
1450 1450
1451 1451 if os.path.exists(cargo_home_path) and b'CARGO_HOME' not in osenvironb:
1452 1452 env['CARGO_HOME'] = cargo_home_path
1453 1453 if (
1454 1454 os.path.exists(rustup_home_path)
1455 1455 and b'RUSTUP_HOME' not in osenvironb
1456 1456 ):
1457 1457 env['RUSTUP_HOME'] = rustup_home_path
1458 1458
1459 1459 # Reset some environment variables to well-known values so that
1460 1460 # the tests produce repeatable output.
1461 1461 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
1462 1462 env['TZ'] = 'GMT'
1463 1463 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
1464 1464 env['COLUMNS'] = '80'
1465 1465 env['TERM'] = 'xterm'
1466 1466
1467 1467 dropped = [
1468 1468 'CDPATH',
1469 1469 'CHGDEBUG',
1470 1470 'EDITOR',
1471 1471 'GREP_OPTIONS',
1472 1472 'HG',
1473 1473 'HGMERGE',
1474 1474 'HGPLAIN',
1475 1475 'HGPLAINEXCEPT',
1476 1476 'HGPROF',
1477 1477 'http_proxy',
1478 1478 'no_proxy',
1479 1479 'NO_PROXY',
1480 1480 'PAGER',
1481 1481 'VISUAL',
1482 1482 ]
1483 1483
1484 1484 for k in dropped:
1485 1485 if k in env:
1486 1486 del env[k]
1487 1487
1488 1488 # unset env related to hooks
1489 1489 for k in list(env):
1490 1490 if k.startswith('HG_'):
1491 1491 del env[k]
1492 1492
1493 1493 if self._usechg:
1494 1494 env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
1495 1495 if self._chgdebug:
1496 1496 env['CHGDEBUG'] = 'true'
1497 1497
1498 1498 return env
1499 1499
1500 1500 def _createhgrc(self, path):
1501 1501 """Create an hgrc file for this test."""
1502 1502 with open(path, 'wb') as hgrc:
1503 1503 hgrc.write(b'[ui]\n')
1504 1504 hgrc.write(b'slash = True\n')
1505 1505 hgrc.write(b'interactive = False\n')
1506 1506 hgrc.write(b'detailed-exit-code = True\n')
1507 1507 hgrc.write(b'merge = internal:merge\n')
1508 1508 hgrc.write(b'mergemarkers = detailed\n')
1509 1509 hgrc.write(b'promptecho = True\n')
1510 1510 hgrc.write(b'timeout.warn=15\n')
1511 1511 hgrc.write(b'[defaults]\n')
1512 1512 hgrc.write(b'[devel]\n')
1513 1513 hgrc.write(b'all-warnings = true\n')
1514 1514 hgrc.write(b'default-date = 0 0\n')
1515 1515 hgrc.write(b'[largefiles]\n')
1516 1516 hgrc.write(
1517 1517 b'usercache = %s\n'
1518 1518 % (os.path.join(self._testtmp, b'.cache/largefiles'))
1519 1519 )
1520 1520 hgrc.write(b'[lfs]\n')
1521 1521 hgrc.write(
1522 1522 b'usercache = %s\n'
1523 1523 % (os.path.join(self._testtmp, b'.cache/lfs'))
1524 1524 )
1525 1525 hgrc.write(b'[web]\n')
1526 1526 hgrc.write(b'address = localhost\n')
1527 1527 hgrc.write(b'ipv6 = %r\n' % self._useipv6)
1528 1528 hgrc.write(b'server-header = testing stub value\n')
1529 1529
1530 1530 for opt in self._extraconfigopts:
1531 1531 section, key = _sys2bytes(opt).split(b'.', 1)
1532 1532 assert b'=' in key, (
1533 1533 'extra config opt %s must ' 'have an = for assignment' % opt
1534 1534 )
1535 1535 hgrc.write(b'[%s]\n%s\n' % (section, key))
1536 1536
1537 1537 def fail(self, msg):
1538 1538 # unittest differentiates between errored and failed.
1539 1539 # Failed is denoted by AssertionError (by default at least).
1540 1540 raise AssertionError(msg)
1541 1541
1542 1542 def _runcommand(self, cmd, env, normalizenewlines=False):
1543 1543 """Run command in a sub-process, capturing the output (stdout and
1544 1544 stderr).
1545 1545
1546 1546 Return a tuple (exitcode, output). output is None in debug mode.
1547 1547 """
1548 1548 if self._debug:
1549 1549 proc = subprocess.Popen(
1550 1550 _bytes2sys(cmd),
1551 1551 shell=True,
1552 1552 cwd=_bytes2sys(self._testtmp),
1553 1553 env=env,
1554 1554 )
1555 1555 ret = proc.wait()
1556 1556 return (ret, None)
1557 1557
1558 1558 proc = Popen4(cmd, self._testtmp, self._timeout, env)
1559 1559
1560 1560 def cleanup():
1561 1561 terminate(proc)
1562 1562 ret = proc.wait()
1563 1563 if ret == 0:
1564 1564 ret = signal.SIGTERM << 8
1565 1565 killdaemons(env['DAEMON_PIDS'])
1566 1566 return ret
1567 1567
1568 1568 proc.tochild.close()
1569 1569
1570 1570 try:
1571 1571 output = proc.fromchild.read()
1572 1572 except KeyboardInterrupt:
1573 1573 vlog('# Handling keyboard interrupt')
1574 1574 cleanup()
1575 1575 raise
1576 1576
1577 1577 ret = proc.wait()
1578 1578 if wifexited(ret):
1579 1579 ret = os.WEXITSTATUS(ret)
1580 1580
1581 1581 if proc.timeout:
1582 1582 ret = 'timeout'
1583 1583
1584 1584 if ret:
1585 1585 killdaemons(env['DAEMON_PIDS'])
1586 1586
1587 1587 for s, r in self._getreplacements():
1588 1588 output = re.sub(s, r, output)
1589 1589
1590 1590 if normalizenewlines:
1591 1591 output = output.replace(b'\r\n', b'\n')
1592 1592
1593 1593 return ret, output.splitlines(True)
1594 1594
1595 1595
1596 1596 class PythonTest(Test):
1597 1597 """A Python-based test."""
1598 1598
1599 1599 @property
1600 1600 def refpath(self):
1601 1601 return os.path.join(self._testdir, b'%s.out' % self.bname)
1602 1602
1603 1603 def _run(self, env):
1604 1604 # Quote the python(3) executable for Windows
1605 1605 cmd = b'"%s" "%s"' % (PYTHON, self.path)
1606 1606 vlog("# Running", cmd.decode("utf-8"))
1607 1607 normalizenewlines = os.name == 'nt'
1608 1608 result = self._runcommand(cmd, env, normalizenewlines=normalizenewlines)
1609 1609 if self._aborted:
1610 1610 raise KeyboardInterrupt()
1611 1611
1612 1612 return result
1613 1613
1614 1614
1615 1615 # Some glob patterns apply only in some circumstances, so the script
1616 1616 # might want to remove (glob) annotations that otherwise should be
1617 1617 # retained.
1618 1618 checkcodeglobpats = [
1619 1619 # On Windows it looks like \ doesn't require a (glob), but we know
1620 1620 # better.
1621 1621 re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
1622 1622 re.compile(br'^moving \S+/.*[^)]$'),
1623 1623 re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
1624 1624 # Not all platforms have 127.0.0.1 as loopback (though most do),
1625 1625 # so we always glob that too.
1626 1626 re.compile(br'.*\$LOCALIP.*$'),
1627 1627 ]
1628 1628
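          # Indexing a bytes object yields an int on Python 3; bchr converts such an
          # int back into a one-byte bytes value (chr already does this on Python 2).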
1629 1629 bchr = chr
1630 1630 if PYTHON3:
1631 1631 bchr = lambda x: bytes([x])
1632 1632
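          # Tri-state tracking whether an output mismatch may still be reported as a
          # warning instead of a failure: undecided, warn only, or definitely fail.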
1633 1633 WARN_UNDEFINED = 1
1634 1634 WARN_YES = 2
1635 1635 WARN_NO = 3
1636 1636
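          # Suffix marking an expected output line as optional in a .t test.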
1637 1637 MARK_OPTIONAL = b" (?)\n"
1638 1638
1639 1639
1640 1640 def isoptional(line):
1641 1641 return line.endswith(MARK_OPTIONAL)
1642 1642
1643 1643
1644 1644 class TTest(Test):
1645 1645 """A "t test" is a test backed by a .t file."""
1646 1646
1647 1647 SKIPPED_PREFIX = b'skipped: '
1648 1648 FAILED_PREFIX = b'hghave check failed: '
1649 1649 NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search
1650 1650
1651 1651 ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
1652 1652 ESCAPEMAP = {bchr(i): br'\x%02x' % i for i in range(256)}
1653 1653 ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
1654 1654
1655 1655 def __init__(self, path, *args, **kwds):
1656 1656 # accept an extra "case" parameter
1657 1657 case = kwds.pop('case', [])
1658 1658 self._case = case
1659 1659 self._allcases = {x for y in parsettestcases(path) for x in y}
1660 1660 super(TTest, self).__init__(path, *args, **kwds)
1661 1661 if case:
1662 1662 casepath = b'#'.join(case)
1663 1663 self.name = '%s#%s' % (self.name, _bytes2sys(casepath))
1664 1664 self.errpath = b'%s#%s.err' % (self.errpath[:-4], casepath)
1665 1665 self._tmpname += b'-%s' % casepath.replace(b'#', b'-')
1666 1666 self._have = {}
1667 1667
1668 1668 @property
1669 1669 def refpath(self):
1670 1670 return os.path.join(self._testdir, self.bname)
1671 1671
1672 1672 def _run(self, env):
1673 1673 with open(self.path, 'rb') as f:
1674 1674 lines = f.readlines()
1675 1675
1676 1676         # The .t file is both the reference output and the test input; keep
1677 1677         # the reference output updated with the test input. This avoids race
1678 1678         # conditions where the reference output does not match the actual test.
1679 1679 if self._refout is not None:
1680 1680 self._refout = lines
1681 1681
1682 1682 salt, script, after, expected = self._parsetest(lines)
1683 1683
1684 1684 # Write out the generated script.
1685 1685 fname = b'%s.sh' % self._testtmp
1686 1686 with open(fname, 'wb') as f:
1687 1687 for l in script:
1688 1688 f.write(l)
1689 1689
1690 1690 cmd = b'%s "%s"' % (self._shell, fname)
1691 1691 vlog("# Running", cmd.decode("utf-8"))
1692 1692
1693 1693 exitcode, output = self._runcommand(cmd, env)
1694 1694
1695 1695 if self._aborted:
1696 1696 raise KeyboardInterrupt()
1697 1697
1698 1698 # Do not merge output if skipped. Return hghave message instead.
1699 1699 # Similarly, with --debug, output is None.
1700 1700 if exitcode == self.SKIPPED_STATUS or output is None:
1701 1701 return exitcode, output
1702 1702
1703 1703 return self._processoutput(exitcode, output, salt, after, expected)
1704 1704
1705 1705 def _hghave(self, reqs):
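                  # Check the given feature requirements by running the hghave script
                  # in a subprocess; the (result, message) pair is cached per
                  # requirement string.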
1706 1706 allreqs = b' '.join(reqs)
1707 1707
1708 1708 self._detectslow(reqs)
1709 1709
1710 1710 if allreqs in self._have:
1711 1711 return self._have.get(allreqs)
1712 1712
1713 1713 # TODO do something smarter when all other uses of hghave are gone.
1714 1714 runtestdir = osenvironb[b'RUNTESTDIR']
1715 1715 tdir = runtestdir.replace(b'\\', b'/')
1716 1716 proc = Popen4(
1717 1717 b'%s -c "%s/hghave %s"' % (self._shell, tdir, allreqs),
1718 1718 self._testtmp,
1719 1719 0,
1720 1720 self._getenv(),
1721 1721 )
1722 1722 stdout, stderr = proc.communicate()
1723 1723 ret = proc.wait()
1724 1724 if wifexited(ret):
1725 1725 ret = os.WEXITSTATUS(ret)
1726 1726 if ret == 2:
1727 1727 print(stdout.decode('utf-8'))
1728 1728 sys.exit(1)
1729 1729
1730 1730 if ret != 0:
1731 1731 self._have[allreqs] = (False, stdout)
1732 1732 return False, stdout
1733 1733
1734 1734 self._have[allreqs] = (True, None)
1735 1735 return True, None
1736 1736
1737 1737 def _detectslow(self, reqs):
1738 1738 """update the timeout of slow test when appropriate"""
1739 1739 if b'slow' in reqs:
1740 1740 self._timeout = self._slowtimeout
1741 1741
1742 1742 def _iftest(self, args):
1743 1743 # implements "#if"
1744 1744 reqs = []
1745 1745 for arg in args:
1746 1746 if arg.startswith(b'no-') and arg[3:] in self._allcases:
1747 1747 if arg[3:] in self._case:
1748 1748 return False
1749 1749 elif arg in self._allcases:
1750 1750 if arg not in self._case:
1751 1751 return False
1752 1752 else:
1753 1753 reqs.append(arg)
1754 1754 self._detectslow(reqs)
1755 1755 return self._hghave(reqs)[0]
1756 1756
1757 1757 def _parsetest(self, lines):
1758 1758 # We generate a shell script which outputs unique markers to line
1759 1759 # up script results with our source. These markers include input
1760 1760 # line number and the last return code.
1761 1761 salt = b"SALT%d" % time.time()
1762 1762
1763 1763 def addsalt(line, inpython):
1764 1764 if inpython:
1765 1765 script.append(b'%s %d 0\n' % (salt, line))
1766 1766 else:
1767 1767 script.append(b'echo %s %d $?\n' % (salt, line))
1768 1768
1769 1769 activetrace = []
1770 1770 session = str(uuid.uuid4())
1771 1771 if PYTHON3:
1772 1772 session = session.encode('ascii')
1773 1773 hgcatapult = os.getenv('HGTESTCATAPULTSERVERPIPE') or os.getenv(
1774 1774 'HGCATAPULTSERVERPIPE'
1775 1775 )
1776 1776
1777 1777 def toggletrace(cmd=None):
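                      # Emit catapult trace markers into the generated script: end the
                      # active span, if any, then start a new one for the given
                      # command. No-op when catapult tracing is disabled.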
1778 1778 if not hgcatapult or hgcatapult == os.devnull:
1779 1779 return
1780 1780
1781 1781 if activetrace:
1782 1782 script.append(
1783 1783 b'echo END %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n'
1784 1784 % (session, activetrace[0])
1785 1785 )
1786 1786 if cmd is None:
1787 1787 return
1788 1788
1789 1789 if isinstance(cmd, str):
1790 1790 quoted = shellquote(cmd.strip())
1791 1791 else:
1792 1792 quoted = shellquote(cmd.strip().decode('utf8')).encode('utf8')
1793 1793 quoted = quoted.replace(b'\\', b'\\\\')
1794 1794 script.append(
1795 1795 b'echo START %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n'
1796 1796 % (session, quoted)
1797 1797 )
1798 1798 activetrace[0:] = [quoted]
1799 1799
1800 1800 script = []
1801 1801
1802 1802 # After we run the shell script, we re-unify the script output
1803 1803 # with non-active parts of the source, with synchronization by our
1804 1804 # SALT line number markers. The after table contains the non-active
1805 1805 # components, ordered by line number.
1806 1806 after = {}
1807 1807
1808 1808 # Expected shell script output.
1809 1809 expected = {}
1810 1810
1811 1811 pos = prepos = -1
1812 1812
1813 1813 # True or False when in a true or false conditional section
1814 1814 skipping = None
1815 1815
1816 1816 # We keep track of whether or not we're in a Python block so we
1817 1817 # can generate the surrounding doctest magic.
1818 1818 inpython = False
1819 1819
1820 1820 if self._debug:
1821 1821 script.append(b'set -x\n')
1822 1822 if self._hgcommand != b'hg':
1823 1823 script.append(b'alias hg="%s"\n' % self._hgcommand)
1824 1824 if os.getenv('MSYSTEM'):
1825 1825 script.append(b'alias pwd="pwd -W"\n')
1826 1826
1827 1827 if hgcatapult and hgcatapult != os.devnull:
1828 1828 if PYTHON3:
1829 1829 hgcatapult = hgcatapult.encode('utf8')
1830 1830 cataname = self.name.encode('utf8')
1831 1831 else:
1832 1832 cataname = self.name
1833 1833
1834 1834 # Kludge: use a while loop to keep the pipe from getting
1835 1835 # closed by our echo commands. The still-running file gets
1836 1836 # reaped at the end of the script, which causes the while
1837 1837 # loop to exit and closes the pipe. Sigh.
1838 1838 script.append(
1839 1839 b'rtendtracing() {\n'
1840 1840 b' echo END %(session)s %(name)s >> %(catapult)s\n'
1841 1841 b' rm -f "$TESTTMP/.still-running"\n'
1842 1842 b'}\n'
1843 1843 b'trap "rtendtracing" 0\n'
1844 1844 b'touch "$TESTTMP/.still-running"\n'
1845 1845 b'while [ -f "$TESTTMP/.still-running" ]; do sleep 1; done '
1846 1846 b'> %(catapult)s &\n'
1847 1847 b'HGCATAPULTSESSION=%(session)s ; export HGCATAPULTSESSION\n'
1848 1848 b'echo START %(session)s %(name)s >> %(catapult)s\n'
1849 1849 % {
1850 1850 b'name': cataname,
1851 1851 b'session': session,
1852 1852 b'catapult': hgcatapult,
1853 1853 }
1854 1854 )
1855 1855
1856 1856 if self._case:
1857 1857 casestr = b'#'.join(self._case)
1858 1858 if isinstance(casestr, str):
1859 1859 quoted = shellquote(casestr)
1860 1860 else:
1861 1861 quoted = shellquote(casestr.decode('utf8')).encode('utf8')
1862 1862 script.append(b'TESTCASE=%s\n' % quoted)
1863 1863 script.append(b'export TESTCASE\n')
1864 1864
1865 1865 n = 0
1866 1866 for n, l in enumerate(lines):
1867 1867 if not l.endswith(b'\n'):
1868 1868 l += b'\n'
1869 1869 if l.startswith(b'#require'):
1870 1870 lsplit = l.split()
1871 1871 if len(lsplit) < 2 or lsplit[0] != b'#require':
1872 1872 after.setdefault(pos, []).append(
1873 1873 b' !!! invalid #require\n'
1874 1874 )
1875 1875 if not skipping:
1876 1876 haveresult, message = self._hghave(lsplit[1:])
1877 1877 if not haveresult:
1878 1878 script = [b'echo "%s"\nexit 80\n' % message]
1879 1879 break
1880 1880 after.setdefault(pos, []).append(l)
1881 1881 elif l.startswith(b'#if'):
1882 1882 lsplit = l.split()
1883 1883 if len(lsplit) < 2 or lsplit[0] != b'#if':
1884 1884 after.setdefault(pos, []).append(b' !!! invalid #if\n')
1885 1885 if skipping is not None:
1886 1886 after.setdefault(pos, []).append(b' !!! nested #if\n')
1887 1887 skipping = not self._iftest(lsplit[1:])
1888 1888 after.setdefault(pos, []).append(l)
1889 1889 elif l.startswith(b'#else'):
1890 1890 if skipping is None:
1891 1891 after.setdefault(pos, []).append(b' !!! missing #if\n')
1892 1892 skipping = not skipping
1893 1893 after.setdefault(pos, []).append(l)
1894 1894 elif l.startswith(b'#endif'):
1895 1895 if skipping is None:
1896 1896 after.setdefault(pos, []).append(b' !!! missing #if\n')
1897 1897 skipping = None
1898 1898 after.setdefault(pos, []).append(l)
1899 1899 elif skipping:
1900 1900 after.setdefault(pos, []).append(l)
1901 1901 elif l.startswith(b' >>> '): # python inlines
1902 1902 after.setdefault(pos, []).append(l)
1903 1903 prepos = pos
1904 1904 pos = n
1905 1905 if not inpython:
1906 1906 # We've just entered a Python block. Add the header.
1907 1907 inpython = True
1908 1908 addsalt(prepos, False) # Make sure we report the exit code.
1909 1909 script.append(b'"%s" -m heredoctest <<EOF\n' % PYTHON)
1910 1910 addsalt(n, True)
1911 1911 script.append(l[2:])
1912 1912 elif l.startswith(b' ... '): # python inlines
1913 1913 after.setdefault(prepos, []).append(l)
1914 1914 script.append(l[2:])
1915 1915 elif l.startswith(b' $ '): # commands
1916 1916 if inpython:
1917 1917 script.append(b'EOF\n')
1918 1918 inpython = False
1919 1919 after.setdefault(pos, []).append(l)
1920 1920 prepos = pos
1921 1921 pos = n
1922 1922 addsalt(n, False)
1923 1923 rawcmd = l[4:]
1924 1924 cmd = rawcmd.split()
1925 1925 toggletrace(rawcmd)
1926 1926 if len(cmd) == 2 and cmd[0] == b'cd':
1927 1927 rawcmd = b'cd %s || exit 1\n' % cmd[1]
1928 1928 script.append(rawcmd)
1929 1929 elif l.startswith(b' > '): # continuations
1930 1930 after.setdefault(prepos, []).append(l)
1931 1931 script.append(l[4:])
1932 1932 elif l.startswith(b' '): # results
1933 1933 # Queue up a list of expected results.
1934 1934 expected.setdefault(pos, []).append(l[2:])
1935 1935 else:
1936 1936 if inpython:
1937 1937 script.append(b'EOF\n')
1938 1938 inpython = False
1939 1939 # Non-command/result. Queue up for merged output.
1940 1940 after.setdefault(pos, []).append(l)
1941 1941
1942 1942 if inpython:
1943 1943 script.append(b'EOF\n')
1944 1944 if skipping is not None:
1945 1945 after.setdefault(pos, []).append(b' !!! missing #endif\n')
1946 1946 addsalt(n + 1, False)
1947 1947 # Need to end any current per-command trace
1948 1948 if activetrace:
1949 1949 toggletrace()
1950 1950 return salt, script, after, expected
1951 1951
1952 1952 def _processoutput(self, exitcode, output, salt, after, expected):
1953 1953 # Merge the script output back into a unified test.
1954 1954 warnonly = WARN_UNDEFINED # 1: not yet; 2: yes; 3: for sure not
1955 1955 if exitcode != 0:
1956 1956 warnonly = WARN_NO
1957 1957
1958 1958 pos = -1
1959 1959 postout = []
1960 1960 for out_rawline in output:
1961 1961 out_line, cmd_line = out_rawline, None
1962 1962 if salt in out_rawline:
1963 1963 out_line, cmd_line = out_rawline.split(salt, 1)
1964 1964
1965 1965 pos, postout, warnonly = self._process_out_line(
1966 1966 out_line, pos, postout, expected, warnonly
1967 1967 )
1968 1968 pos, postout = self._process_cmd_line(cmd_line, pos, postout, after)
1969 1969
1970 1970 if pos in after:
1971 1971 postout += after.pop(pos)
1972 1972
1973 1973 if warnonly == WARN_YES:
1974 1974 exitcode = False # Set exitcode to warned.
1975 1975
1976 1976 return exitcode, postout
1977 1977
1978 1978 def _process_out_line(self, out_line, pos, postout, expected, warnonly):
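                  # Match one chunk of real output against the expected lines queued
                  # at the current position, appending merged lines to postout and
                  # updating the warn-only state.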
1979 1979 while out_line:
1980 1980 if not out_line.endswith(b'\n'):
1981 1981 out_line += b' (no-eol)\n'
1982 1982
1983 1983 # Find the expected output at the current position.
1984 1984 els = [None]
1985 1985 if expected.get(pos, None):
1986 1986 els = expected[pos]
1987 1987
1988 1988 optional = []
1989 1989 for i, el in enumerate(els):
1990 1990 r = False
1991 1991 if el:
1992 1992 r, exact = self.linematch(el, out_line)
1993 1993 if isinstance(r, str):
1994 1994 if r == '-glob':
1995 1995 out_line = ''.join(el.rsplit(' (glob)', 1))
1996 1996 r = '' # Warn only this line.
1997 1997 elif r == "retry":
1998 1998 postout.append(b' ' + el)
1999 1999 else:
2000 2000 log('\ninfo, unknown linematch result: %r\n' % r)
2001 2001 r = False
2002 2002 if r:
2003 2003 els.pop(i)
2004 2004 break
2005 2005 if el:
2006 2006 if isoptional(el):
2007 2007 optional.append(i)
2008 2008 else:
2009 2009 m = optline.match(el)
2010 2010 if m:
2011 2011 conditions = [c for c in m.group(2).split(b' ')]
2012 2012
2013 2013 if not self._iftest(conditions):
2014 2014 optional.append(i)
2015 2015 if exact:
2016 2016                         # Don't allow the line to be matched against a later
2017 2017                         # line in the output
2018 2018 els.pop(i)
2019 2019 break
2020 2020
2021 2021 if r:
2022 2022 if r == "retry":
2023 2023 continue
2024 2024 # clean up any optional leftovers
2025 2025 for i in optional:
2026 2026 postout.append(b' ' + els[i])
2027 2027 for i in reversed(optional):
2028 2028 del els[i]
2029 2029 postout.append(b' ' + el)
2030 2030 else:
2031 2031 if self.NEEDESCAPE(out_line):
2032 2032 out_line = TTest._stringescape(
2033 2033 b'%s (esc)\n' % out_line.rstrip(b'\n')
2034 2034 )
2035 2035 postout.append(b' ' + out_line) # Let diff deal with it.
2036 2036 if r != '': # If line failed.
2037 2037 warnonly = WARN_NO
2038 2038 elif warnonly == WARN_UNDEFINED:
2039 2039 warnonly = WARN_YES
2040 2040 break
2041 2041 else:
2042 2042 # clean up any optional leftovers
2043 2043 while expected.get(pos, None):
2044 2044 el = expected[pos].pop(0)
2045 2045 if el:
2046 2046 if not isoptional(el):
2047 2047 m = optline.match(el)
2048 2048 if m:
2049 2049 conditions = [c for c in m.group(2).split(b' ')]
2050 2050
2051 2051 if self._iftest(conditions):
2052 2052 # Don't append as optional line
2053 2053 continue
2054 2054 else:
2055 2055 continue
2056 2056 postout.append(b' ' + el)
2057 2057 return pos, postout, warnonly
2058 2058
2059 2059 def _process_cmd_line(self, cmd_line, pos, postout, after):
2060 2060 """process a "command" part of a line from unified test output"""
2061 2061 if cmd_line:
2062 2062 # Add on last return code.
2063 2063 ret = int(cmd_line.split()[1])
2064 2064 if ret != 0:
2065 2065 postout.append(b' [%d]\n' % ret)
2066 2066 if pos in after:
2067 2067 # Merge in non-active test bits.
2068 2068 postout += after.pop(pos)
2069 2069 pos = int(cmd_line.split()[0])
2070 2070 return pos, postout
2071 2071
2072 2072 @staticmethod
2073 2073 def rematch(el, l):
2074 2074 try:
2075 2075 # parse any flags at the beginning of the regex. Only 'i' is
2076 2076 # supported right now, but this should be easy to extend.
2077 2077 flags, el = re.match(br'^(\(\?i\))?(.*)', el).groups()[0:2]
2078 2078 flags = flags or b''
2079 2079 el = flags + b'(?:' + el + b')'
2080 2080 # use \Z to ensure that the regex matches to the end of the string
2081 2081 if os.name == 'nt':
2082 2082 return re.match(el + br'\r?\n\Z', l)
2083 2083 return re.match(el + br'\n\Z', l)
2084 2084 except re.error:
2085 2085 # el is an invalid regex
2086 2086 return False
2087 2087
2088 2088 @staticmethod
2089 2089 def globmatch(el, l):
2090 2090 # The only supported special characters are * and ? plus / which also
2091 2091         # matches \ on Windows. Escaping of these characters is supported.
2092 2092 if el + b'\n' == l:
2093 2093 if os.altsep:
2094 2094 # matching on "/" is not needed for this line
2095 2095 for pat in checkcodeglobpats:
2096 2096 if pat.match(el):
2097 2097 return True
2098 2098 return b'-glob'
2099 2099 return True
2100 2100 el = el.replace(b'$LOCALIP', b'*')
2101 2101 i, n = 0, len(el)
2102 2102 res = b''
2103 2103 while i < n:
2104 2104 c = el[i : i + 1]
2105 2105 i += 1
2106 2106 if c == b'\\' and i < n and el[i : i + 1] in b'*?\\/':
2107 2107 res += el[i - 1 : i + 1]
2108 2108 i += 1
2109 2109 elif c == b'*':
2110 2110 res += b'.*'
2111 2111 elif c == b'?':
2112 2112 res += b'.'
2113 2113 elif c == b'/' and os.altsep:
2114 2114 res += b'[/\\\\]'
2115 2115 else:
2116 2116 res += re.escape(c)
2117 2117 return TTest.rematch(res, l)
2118 2118
2119 2119 def linematch(self, el, l):
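                  # Compare one line of actual output against an expected line.
                  # Returns (matched, exact); matched may also be the marker 'retry'
                  # for optional lines, and exact=True means the expected line is
                  # consumed and must not be matched against later output.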
2120 2120 if el == l: # perfect match (fast)
2121 2121 return True, True
2122 2122 retry = False
2123 2123 if isoptional(el):
2124 2124 retry = "retry"
2125 2125 el = el[: -len(MARK_OPTIONAL)] + b"\n"
2126 2126 else:
2127 2127 m = optline.match(el)
2128 2128 if m:
2129 2129 conditions = [c for c in m.group(2).split(b' ')]
2130 2130
2131 2131 el = m.group(1) + b"\n"
2132 2132 if not self._iftest(conditions):
2133 2133 # listed feature missing, should not match
2134 2134 return "retry", False
2135 2135
2136 2136 if el.endswith(b" (esc)\n"):
2137 2137 if PYTHON3:
2138 2138 el = el[:-7].decode('unicode_escape') + '\n'
2139 2139 el = el.encode('latin-1')
2140 2140 else:
2141 2141 el = el[:-7].decode('string-escape') + '\n'
2142 2142 if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
2143 2143 return True, True
2144 2144 if el.endswith(b" (re)\n"):
2145 2145 return (TTest.rematch(el[:-6], l) or retry), False
2146 2146 if el.endswith(b" (glob)\n"):
2147 2147 # ignore '(glob)' added to l by 'replacements'
2148 2148 if l.endswith(b" (glob)\n"):
2149 2149 l = l[:-8] + b"\n"
2150 2150 return (TTest.globmatch(el[:-8], l) or retry), False
2151 2151 if os.altsep:
2152 2152 _l = l.replace(b'\\', b'/')
2153 2153 if el == _l or os.name == 'nt' and el[:-1] + b'\r\n' == _l:
2154 2154 return True, True
2155 2155 return retry, True
2156 2156
2157 2157 @staticmethod
2158 2158 def parsehghaveoutput(lines):
2159 2159 """Parse hghave log lines.
2160 2160
2161 2161 Return tuple of lists (missing, failed):
2162 2162 * the missing/unknown features
2163 2163 * the features for which existence check failed"""
2164 2164 missing = []
2165 2165 failed = []
2166 2166 for line in lines:
2167 2167 if line.startswith(TTest.SKIPPED_PREFIX):
2168 2168 line = line.splitlines()[0]
2169 2169 missing.append(_bytes2sys(line[len(TTest.SKIPPED_PREFIX) :]))
2170 2170 elif line.startswith(TTest.FAILED_PREFIX):
2171 2171 line = line.splitlines()[0]
2172 2172 failed.append(_bytes2sys(line[len(TTest.FAILED_PREFIX) :]))
2173 2173
2174 2174 return missing, failed
2175 2175
2176 2176 @staticmethod
2177 2177 def _escapef(m):
2178 2178 return TTest.ESCAPEMAP[m.group(0)]
2179 2179
2180 2180 @staticmethod
2181 2181 def _stringescape(s):
2182 2182 return TTest.ESCAPESUB(TTest._escapef, s)
2183 2183
2184 2184
2185 2185 iolock = threading.RLock()
2186 2186 firstlock = threading.RLock()
2187 2187 firsterror = False
2188 2188
2189 2189
2190 2190 class TestResult(unittest._TextTestResult):
2191 2191 """Holds results when executing via unittest."""
2192 2192
2193 2193 # Don't worry too much about accessing the non-public _TextTestResult.
2194 2194 # It is relatively common in Python testing tools.
2195 2195 def __init__(self, options, *args, **kwargs):
2196 2196 super(TestResult, self).__init__(*args, **kwargs)
2197 2197
2198 2198 self._options = options
2199 2199
2200 2200 # unittest.TestResult didn't have skipped until 2.7. We need to
2201 2201 # polyfill it.
2202 2202 self.skipped = []
2203 2203
2204 2204 # We have a custom "ignored" result that isn't present in any Python
2205 2205 # unittest implementation. It is very similar to skipped. It may make
2206 2206 # sense to map it into skip some day.
2207 2207 self.ignored = []
2208 2208
2209 2209 self.times = []
2210 2210 self._firststarttime = None
2211 2211 # Data stored for the benefit of generating xunit reports.
2212 2212 self.successes = []
2213 2213 self.faildata = {}
2214 2214
2215 2215 if options.color == 'auto':
2216 2216 isatty = self.stream.isatty()
2217 2217 # For some reason, redirecting stdout on Windows disables the ANSI
2218 2218 # color processing of stderr, which is what is used to print the
2219 2219 # output. Therefore, both must be tty on Windows to enable color.
2220 2220 if os.name == 'nt':
2221 2221 isatty = isatty and sys.stdout.isatty()
2222 2222 self.color = pygmentspresent and isatty
2223 2223 elif options.color == 'never':
2224 2224 self.color = False
2225 2225 else: # 'always', for testing purposes
2226 2226 self.color = pygmentspresent
2227 2227
2228 2228 def onStart(self, test):
2229 2229         """Can be overridden by a custom TestResult"""
2230 2230
2231 2231 def onEnd(self):
2232 2232         """Can be overridden by a custom TestResult"""
2233 2233
2234 2234 def addFailure(self, test, reason):
2235 2235 self.failures.append((test, reason))
2236 2236
2237 2237 if self._options.first:
2238 2238 self.stop()
2239 2239 else:
2240 2240 with iolock:
2241 2241 if reason == "timed out":
2242 2242 self.stream.write('t')
2243 2243 else:
2244 2244 if not self._options.nodiff:
2245 2245 self.stream.write('\n')
2246 2246 # Exclude the '\n' from highlighting to lex correctly
2247 2247 formatted = 'ERROR: %s output changed\n' % test
2248 2248 self.stream.write(highlightmsg(formatted, self.color))
2249 2249 self.stream.write('!')
2250 2250
2251 2251 self.stream.flush()
2252 2252
2253 2253 def addSuccess(self, test):
2254 2254 with iolock:
2255 2255 super(TestResult, self).addSuccess(test)
2256 2256 self.successes.append(test)
2257 2257
2258 2258 def addError(self, test, err):
2259 2259 super(TestResult, self).addError(test, err)
2260 2260 if self._options.first:
2261 2261 self.stop()
2262 2262
2263 2263 # Polyfill.
2264 2264 def addSkip(self, test, reason):
2265 2265 self.skipped.append((test, reason))
2266 2266 with iolock:
2267 2267 if self.showAll:
2268 2268 self.stream.writeln('skipped %s' % reason)
2269 2269 else:
2270 2270 self.stream.write('s')
2271 2271 self.stream.flush()
2272 2272
2273 2273 def addIgnore(self, test, reason):
2274 2274 self.ignored.append((test, reason))
2275 2275 with iolock:
2276 2276 if self.showAll:
2277 2277 self.stream.writeln('ignored %s' % reason)
2278 2278 else:
2279 2279 if reason not in ('not retesting', "doesn't match keyword"):
2280 2280 self.stream.write('i')
2281 2281 else:
2282 2282 self.testsRun += 1
2283 2283 self.stream.flush()
2284 2284
2285 2285 def addOutputMismatch(self, test, ret, got, expected):
2286 2286 """Record a mismatch in test output for a particular test."""
2287 2287 if self.shouldStop or firsterror:
2288 2288 # don't print, some other test case already failed and
2289 2289 # printed, we're just stale and probably failed due to our
2290 2290 # temp dir getting cleaned up.
2291 2291 return
2292 2292
2293 2293 accepted = False
2294 2294 lines = []
2295 2295
2296 2296 with iolock:
2297 2297 if self._options.nodiff:
2298 2298 pass
2299 2299 elif self._options.view:
2300 2300 v = self._options.view
2301 2301 subprocess.call(
2302 2302 r'"%s" "%s" "%s"'
2303 2303 % (v, _bytes2sys(test.refpath), _bytes2sys(test.errpath)),
2304 2304 shell=True,
2305 2305 )
2306 2306 else:
2307 2307 servefail, lines = getdiff(
2308 2308 expected, got, test.refpath, test.errpath
2309 2309 )
2310 2310 self.stream.write('\n')
2311 2311 for line in lines:
2312 2312 line = highlightdiff(line, self.color)
2313 2313 if PYTHON3:
2314 2314 self.stream.flush()
2315 2315 self.stream.buffer.write(line)
2316 2316 self.stream.buffer.flush()
2317 2317 else:
2318 2318 self.stream.write(line)
2319 2319 self.stream.flush()
2320 2320
2321 2321 if servefail:
2322 2322 raise test.failureException(
2323 2323 'server failed to start (HGPORT=%s)' % test._startport
2324 2324 )
2325 2325
2326 2326 # handle interactive prompt without releasing iolock
2327 2327 if self._options.interactive:
2328 2328 if test.readrefout() != expected:
2329 2329 self.stream.write(
2330 2330 'Reference output has changed (run again to prompt '
2331 2331 'changes)'
2332 2332 )
2333 2333 else:
2334 2334 self.stream.write('Accept this change? [y/N] ')
2335 2335 self.stream.flush()
2336 2336 answer = sys.stdin.readline().strip()
2337 2337 if answer.lower() in ('y', 'yes'):
2338 2338 if test.path.endswith(b'.t'):
2339 2339 rename(test.errpath, test.path)
2340 2340 else:
2341 2341 rename(test.errpath, b'%s.out' % test.path)
2342 2342 accepted = True
2343 2343 if not accepted:
2344 2344 self.faildata[test.name] = b''.join(lines)
2345 2345
2346 2346 return accepted
2347 2347
2348 2348 def startTest(self, test):
2349 2349 super(TestResult, self).startTest(test)
2350 2350
2351 2351         # os.times() reports the user time and system time spent by child
2352 2352         # processes along with the real elapsed time taken by a process.
2353 2353         # Its one limitation is that it only works on Linux and not on
2354 2354         # Windows, hence we fall back to another function for wall time
2355 2355         # calculations.
2356 2356 test.started_times = os.times()
2357 2357 # TODO use a monotonic clock once support for Python 2.7 is dropped.
2358 2358 test.started_time = time.time()
2359 2359 if self._firststarttime is None: # thread racy but irrelevant
2360 2360 self._firststarttime = test.started_time
2361 2361
2362 2362 def stopTest(self, test, interrupted=False):
2363 2363 super(TestResult, self).stopTest(test)
2364 2364
2365 2365 test.stopped_times = os.times()
2366 2366 stopped_time = time.time()
2367 2367
2368 2368 starttime = test.started_times
2369 2369 endtime = test.stopped_times
2370 2370 origin = self._firststarttime
2371 2371 self.times.append(
2372 2372 (
2373 2373 test.name,
2374 2374 endtime[2] - starttime[2], # user space CPU time
2375 2375 endtime[3] - starttime[3], # sys space CPU time
2376 2376 stopped_time - test.started_time, # real time
2377 2377 test.started_time - origin, # start date in run context
2378 2378 stopped_time - origin, # end date in run context
2379 2379 )
2380 2380 )
2381 2381
2382 2382 if interrupted:
2383 2383 with iolock:
2384 2384 self.stream.writeln(
2385 2385 'INTERRUPTED: %s (after %d seconds)'
2386 2386 % (test.name, self.times[-1][3])
2387 2387 )
2388 2388
2389 2389
2390 2390 def getTestResult():
2391 2391 """
2392 2392 Returns the relevant test result
2393 2393 """
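              # A custom result class can be injected by setting CUSTOM_TEST_RESULT
              # to the name of an importable module defining a TestResult class.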
2394 2394 if "CUSTOM_TEST_RESULT" in os.environ:
2395 2395 testresultmodule = __import__(os.environ["CUSTOM_TEST_RESULT"])
2396 2396 return testresultmodule.TestResult
2397 2397 else:
2398 2398 return TestResult
2399 2399
2400 2400
2401 2401 class TestSuite(unittest.TestSuite):
2402 2402 """Custom unittest TestSuite that knows how to execute Mercurial tests."""
2403 2403
2404 2404 def __init__(
2405 2405 self,
2406 2406 testdir,
2407 2407 jobs=1,
2408 2408 whitelist=None,
2409 2409 blacklist=None,
2410 2410 keywords=None,
2411 2411 loop=False,
2412 2412 runs_per_test=1,
2413 2413 loadtest=None,
2414 2414 showchannels=False,
2415 2415 *args,
2416 2416 **kwargs
2417 2417 ):
2418 2418 """Create a new instance that can run tests with a configuration.
2419 2419
2420 2420 testdir specifies the directory where tests are executed from. This
2421 2421 is typically the ``tests`` directory from Mercurial's source
2422 2422 repository.
2423 2423
2424 2424 jobs specifies the number of jobs to run concurrently. Each test
2425 2425 executes on its own thread. Tests actually spawn new processes, so
2426 2426 state mutation should not be an issue.
2427 2427
2428 2428 If there is only one job, it will use the main thread.
2429 2429
2430 2430 whitelist and blacklist denote tests that have been whitelisted and
2431 2431 blacklisted, respectively. These arguments don't belong in TestSuite.
2432 2432 Instead, whitelist and blacklist should be handled by the thing that
2433 2433 populates the TestSuite with tests. They are present to preserve
2434 2434 backwards compatible behavior which reports skipped tests as part
2435 2435 of the results.
2436 2436
2437 2437 keywords denotes key words that will be used to filter which tests
2438 2438 to execute. This arguably belongs outside of TestSuite.
2439 2439
2440 2440 loop denotes whether to loop over tests forever.
2441 2441 """
2442 2442 super(TestSuite, self).__init__(*args, **kwargs)
2443 2443
2444 2444 self._jobs = jobs
2445 2445 self._whitelist = whitelist
2446 2446 self._blacklist = blacklist
2447 2447 self._keywords = keywords
2448 2448 self._loop = loop
2449 2449 self._runs_per_test = runs_per_test
2450 2450 self._loadtest = loadtest
2451 2451 self._showchannels = showchannels
2452 2452
2453 2453 def run(self, result):
2454 2454 # We have a number of filters that need to be applied. We do this
2455 2455 # here instead of inside Test because it makes the running logic for
2456 2456 # Test simpler.
2457 2457 tests = []
2458 2458 num_tests = [0]
2459 2459 for test in self._tests:
2460 2460
2461 2461 def get():
2462 2462 num_tests[0] += 1
2463 2463 if getattr(test, 'should_reload', False):
2464 2464 return self._loadtest(test, num_tests[0])
2465 2465 return test
2466 2466
2467 2467 if not os.path.exists(test.path):
2468 2468 result.addSkip(test, "Doesn't exist")
2469 2469 continue
2470 2470
2471 2471 is_whitelisted = self._whitelist and (
2472 2472 test.relpath in self._whitelist or test.bname in self._whitelist
2473 2473 )
2474 2474 if not is_whitelisted:
2475 2475 is_blacklisted = self._blacklist and (
2476 2476 test.relpath in self._blacklist
2477 2477 or test.bname in self._blacklist
2478 2478 )
2479 2479 if is_blacklisted:
2480 2480 result.addSkip(test, 'blacklisted')
2481 2481 continue
2482 2482 if self._keywords:
2483 2483 with open(test.path, 'rb') as f:
2484 2484 t = f.read().lower() + test.bname.lower()
2485 2485 ignored = False
2486 2486 for k in self._keywords.lower().split():
2487 2487 if k not in t:
2488 2488 result.addIgnore(test, "doesn't match keyword")
2489 2489 ignored = True
2490 2490 break
2491 2491
2492 2492 if ignored:
2493 2493 continue
2494 2494 for _ in xrange(self._runs_per_test):
2495 2495 tests.append(get())
2496 2496
2497 2497 runtests = list(tests)
2498 2498 done = queue.Queue()
2499 2499 running = 0
2500 2500
2501 2501 channels = [""] * self._jobs
2502 2502
2503 2503 def job(test, result):
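                      # Claim the first free output channel for the progress display,
                      # run the test, and release the channel when the test finishes.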
2504 2504 for n, v in enumerate(channels):
2505 2505 if not v:
2506 2506 channel = n
2507 2507 break
2508 2508 else:
2509 2509 raise ValueError('Could not find output channel')
2510 2510 channels[channel] = "=" + test.name[5:].split(".")[0]
2511 2511 try:
2512 2512 test(result)
2513 2513 done.put(None)
2514 2514 except KeyboardInterrupt:
2515 2515 pass
2516 2516 except: # re-raises
2517 2517 done.put(('!', test, 'run-test raised an error, see traceback'))
2518 2518 raise
2519 2519 finally:
2520 2520 try:
2521 2521 channels[channel] = ''
2522 2522 except IndexError:
2523 2523 pass
2524 2524
2525 2525 def stat():
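                      # Render the --showchannels progress display: one character per
                      # channel, refreshed roughly once per second while tests run.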
2526 2526 count = 0
2527 2527 while channels:
2528 2528 d = '\n%03s ' % count
2529 2529 for n, v in enumerate(channels):
2530 2530 if v:
2531 2531 d += v[0]
2532 2532 channels[n] = v[1:] or '.'
2533 2533 else:
2534 2534 d += ' '
2535 2535 d += ' '
2536 2536 with iolock:
2537 2537 sys.stdout.write(d + ' ')
2538 2538 sys.stdout.flush()
2539 2539 for x in xrange(10):
2540 2540 if channels:
2541 2541 time.sleep(0.1)
2542 2542 count += 1
2543 2543
2544 2544 stoppedearly = False
2545 2545
2546 2546 if self._showchannels:
2547 2547 statthread = threading.Thread(target=stat, name="stat")
2548 2548 statthread.start()
2549 2549
2550 2550 try:
2551 2551 while tests or running:
2552 2552 if not done.empty() or running == self._jobs or not tests:
2553 2553 try:
2554 2554 done.get(True, 1)
2555 2555 running -= 1
2556 2556 if result and result.shouldStop:
2557 2557 stoppedearly = True
2558 2558 break
2559 2559 except queue.Empty:
2560 2560 continue
2561 2561 if tests and not running == self._jobs:
2562 2562 test = tests.pop(0)
2563 2563 if self._loop:
2564 2564 if getattr(test, 'should_reload', False):
2565 2565 num_tests[0] += 1
2566 2566 tests.append(self._loadtest(test, num_tests[0]))
2567 2567 else:
2568 2568 tests.append(test)
2569 2569 if self._jobs == 1:
2570 2570 job(test, result)
2571 2571 else:
2572 2572 t = threading.Thread(
2573 2573 target=job, name=test.name, args=(test, result)
2574 2574 )
2575 2575 t.start()
2576 2576 running += 1
2577 2577
2578 2578 # If we stop early we still need to wait on started tests to
2579 2579 # finish. Otherwise, there is a race between the test completing
2580 2580             # and the test's cleanup code running. This could result in the
2581 2581             # test reporting incorrect results.
2582 2582 if stoppedearly:
2583 2583 while running:
2584 2584 try:
2585 2585 done.get(True, 1)
2586 2586 running -= 1
2587 2587 except queue.Empty:
2588 2588 continue
2589 2589 except KeyboardInterrupt:
2590 2590 for test in runtests:
2591 2591 test.abort()
2592 2592
2593 2593 channels = []
2594 2594
2595 2595 return result
2596 2596
2597 2597
2598 2598 # Save the most recent 5 wall-clock runtimes of each test to a
2599 2599 # human-readable text file named .testtimes. Tests are sorted
2600 2600 # alphabetically, while times for each test are listed from oldest to
2601 2601 # newest.
2602 2602
2603 2603
2604 2604 def loadtimes(outputdir):
2605 2605 times = []
2606 2606 try:
2607 2607 with open(os.path.join(outputdir, b'.testtimes')) as fp:
2608 2608 for line in fp:
2609 2609 m = re.match('(.*?) ([0-9. ]+)', line)
2610 2610 times.append(
2611 2611 (m.group(1), [float(t) for t in m.group(2).split()])
2612 2612 )
2613 2613 except IOError as err:
2614 2614 if err.errno != errno.ENOENT:
2615 2615 raise
2616 2616 return times
2617 2617
2618 2618
2619 2619 def savetimes(outputdir, result):
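              # Merge this run's wall-clock times into .testtimes, keeping at most
              # `maxruns` recent entries per test; write to a temporary file first,
              # then rename it into place.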
2620 2620 saved = dict(loadtimes(outputdir))
2621 2621 maxruns = 5
2622 2622 skipped = {str(t[0]) for t in result.skipped}
2623 2623 for tdata in result.times:
2624 2624 test, real = tdata[0], tdata[3]
2625 2625 if test not in skipped:
2626 2626 ts = saved.setdefault(test, [])
2627 2627 ts.append(real)
2628 2628 ts[:] = ts[-maxruns:]
2629 2629
2630 2630 fd, tmpname = tempfile.mkstemp(
2631 2631 prefix=b'.testtimes', dir=outputdir, text=True
2632 2632 )
2633 2633 with os.fdopen(fd, 'w') as fp:
2634 2634 for name, ts in sorted(saved.items()):
2635 2635 fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts])))
2636 2636 timepath = os.path.join(outputdir, b'.testtimes')
2637 2637 try:
2638 2638 os.unlink(timepath)
2639 2639 except OSError:
2640 2640 pass
2641 2641 try:
2642 2642 os.rename(tmpname, timepath)
2643 2643 except OSError:
2644 2644 pass
2645 2645
2646 2646
2647 2647 class TextTestRunner(unittest.TextTestRunner):
2648 2648 """Custom unittest test runner that uses appropriate settings."""
2649 2649
2650 2650 def __init__(self, runner, *args, **kwargs):
2651 2651 super(TextTestRunner, self).__init__(*args, **kwargs)
2652 2652
2653 2653 self._runner = runner
2654 2654
2655 2655 self._result = getTestResult()(
2656 2656 self._runner.options, self.stream, self.descriptions, self.verbosity
2657 2657 )
2658 2658
2659 2659 def listtests(self, test):
2660 2660 test = sorted(test, key=lambda t: t.name)
2661 2661
2662 2662 self._result.onStart(test)
2663 2663
2664 2664 for t in test:
2665 2665 print(t.name)
2666 2666 self._result.addSuccess(t)
2667 2667
2668 2668 if self._runner.options.xunit:
2669 2669 with open(self._runner.options.xunit, "wb") as xuf:
2670 2670 self._writexunit(self._result, xuf)
2671 2671
2672 2672 if self._runner.options.json:
2673 2673 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2674 2674 with open(jsonpath, 'w') as fp:
2675 2675 self._writejson(self._result, fp)
2676 2676
2677 2677 return self._result
2678 2678
2679 2679 def run(self, test):
2680 2680 self._result.onStart(test)
2681 2681 test(self._result)
2682 2682
2683 2683 failed = len(self._result.failures)
2684 2684 skipped = len(self._result.skipped)
2685 2685 ignored = len(self._result.ignored)
2686 2686
2687 2687 with iolock:
2688 2688 self.stream.writeln('')
2689 2689
2690 2690 if not self._runner.options.noskips:
2691 2691 for test, msg in sorted(
2692 2692 self._result.skipped, key=lambda s: s[0].name
2693 2693 ):
2694 2694 formatted = 'Skipped %s: %s\n' % (test.name, msg)
2695 2695 msg = highlightmsg(formatted, self._result.color)
2696 2696 self.stream.write(msg)
2697 2697 for test, msg in sorted(
2698 2698 self._result.failures, key=lambda f: f[0].name
2699 2699 ):
2700 2700 formatted = 'Failed %s: %s\n' % (test.name, msg)
2701 2701 self.stream.write(highlightmsg(formatted, self._result.color))
2702 2702 for test, msg in sorted(
2703 2703 self._result.errors, key=lambda e: e[0].name
2704 2704 ):
2705 2705 self.stream.writeln('Errored %s: %s' % (test.name, msg))
2706 2706
2707 2707 if self._runner.options.xunit:
2708 2708 with open(self._runner.options.xunit, "wb") as xuf:
2709 2709 self._writexunit(self._result, xuf)
2710 2710
2711 2711 if self._runner.options.json:
2712 2712 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2713 2713 with open(jsonpath, 'w') as fp:
2714 2714 self._writejson(self._result, fp)
2715 2715
2716 2716 self._runner._checkhglib('Tested')
2717 2717
2718 2718 savetimes(self._runner._outputdir, self._result)
2719 2719
2720 2720 if failed and self._runner.options.known_good_rev:
2721 2721 self._bisecttests(t for t, m in self._result.failures)
2722 2722 self.stream.writeln(
2723 2723 '# Ran %d tests, %d skipped, %d failed.'
2724 2724 % (self._result.testsRun, skipped + ignored, failed)
2725 2725 )
2726 2726 if failed:
2727 2727 self.stream.writeln(
2728 2728 'python hash seed: %s' % os.environ['PYTHONHASHSEED']
2729 2729 )
2730 2730 if self._runner.options.time:
2731 2731 self.printtimes(self._result.times)
2732 2732
2733 2733 if self._runner.options.exceptions:
2734 2734 exceptions = aggregateexceptions(
2735 2735 os.path.join(self._runner._outputdir, b'exceptions')
2736 2736 )
2737 2737
2738 2738 self.stream.writeln('Exceptions Report:')
2739 2739 self.stream.writeln(
2740 2740 '%d total from %d frames'
2741 2741 % (exceptions['total'], len(exceptions['exceptioncounts']))
2742 2742 )
2743 2743 combined = exceptions['combined']
2744 2744 for key in sorted(combined, key=combined.get, reverse=True):
2745 2745 frame, line, exc = key
2746 2746 totalcount, testcount, leastcount, leasttest = combined[key]
2747 2747
2748 2748 self.stream.writeln(
2749 2749 '%d (%d tests)\t%s: %s (%s - %d total)'
2750 2750 % (
2751 2751 totalcount,
2752 2752 testcount,
2753 2753 frame,
2754 2754 exc,
2755 2755 leasttest,
2756 2756 leastcount,
2757 2757 )
2758 2758 )
2759 2759
2760 2760 self.stream.flush()
2761 2761
2762 2762 return self._result
2763 2763
2764 2764 def _bisecttests(self, tests):
2765 2765 bisectcmd = ['hg', 'bisect']
2766 2766 bisectrepo = self._runner.options.bisect_repo
2767 2767 if bisectrepo:
2768 2768 bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
2769 2769
2770 2770 def pread(args):
2771 2771 env = os.environ.copy()
2772 2772 env['HGPLAIN'] = '1'
2773 2773 p = subprocess.Popen(
2774 2774 args, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, env=env
2775 2775 )
2776 2776 data = p.stdout.read()
2777 2777 p.wait()
2778 2778 return data
2779 2779
2780 2780 for test in tests:
2781 2781             pread(bisectcmd + ['--reset'])
2782 2782 pread(bisectcmd + ['--bad', '.'])
2783 2783 pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
2784 2784 # TODO: we probably need to forward more options
2785 2785 # that alter hg's behavior inside the tests.
2786 2786 opts = ''
2787 2787 withhg = self._runner.options.with_hg
2788 2788 if withhg:
2789 2789 opts += ' --with-hg=%s ' % shellquote(_bytes2sys(withhg))
2790 2790 rtc = '%s %s %s %s' % (sysexecutable, sys.argv[0], opts, test)
2791 2791 data = pread(bisectcmd + ['--command', rtc])
2792 2792 m = re.search(
2793 2793 (
2794 2794 br'\nThe first (?P<goodbad>bad|good) revision '
2795 2795 br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
2796 2796 br'summary: +(?P<summary>[^\n]+)\n'
2797 2797 ),
2798 2798 data,
2799 2799 (re.MULTILINE | re.DOTALL),
2800 2800 )
2801 2801 if m is None:
2802 2802 self.stream.writeln(
2803 2803 'Failed to identify failure point for %s' % test
2804 2804 )
2805 2805 continue
2806 2806 dat = m.groupdict()
2807 2807 verb = 'broken' if dat['goodbad'] == b'bad' else 'fixed'
2808 2808 self.stream.writeln(
2809 2809 '%s %s by %s (%s)'
2810 2810 % (
2811 2811 test,
2812 2812 verb,
2813 2813 dat['node'].decode('ascii'),
2814 2814 dat['summary'].decode('utf8', 'ignore'),
2815 2815 )
2816 2816 )
2817 2817
2818 2818 def printtimes(self, times):
2819 2819 # iolock held by run
2820 2820 self.stream.writeln('# Producing time report')
2821 2821 times.sort(key=lambda t: (t[3]))
2822 2822 cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
2823 2823 self.stream.writeln(
2824 2824 '%-7s %-7s %-7s %-7s %-7s %s'
2825 2825 % ('start', 'end', 'cuser', 'csys', 'real', 'Test')
2826 2826 )
2827 2827 for tdata in times:
2828 2828 test = tdata[0]
2829 2829 cuser, csys, real, start, end = tdata[1:6]
2830 2830 self.stream.writeln(cols % (start, end, cuser, csys, real, test))
2831 2831
2832 2832 @staticmethod
2833 2833 def _writexunit(result, outf):
2834 2834 # See http://llg.cubic.org/docs/junit/ for a reference.
2835 2835 timesd = {t[0]: t[3] for t in result.times}
2836 2836 doc = minidom.Document()
2837 2837 s = doc.createElement('testsuite')
2838 2838 s.setAttribute('errors', "0") # TODO
2839 2839 s.setAttribute('failures', str(len(result.failures)))
2840 2840 s.setAttribute('name', 'run-tests')
2841 2841 s.setAttribute(
2842 2842 'skipped', str(len(result.skipped) + len(result.ignored))
2843 2843 )
2844 2844 s.setAttribute('tests', str(result.testsRun))
2845 2845 doc.appendChild(s)
2846 2846 for tc in result.successes:
2847 2847 t = doc.createElement('testcase')
2848 2848 t.setAttribute('name', tc.name)
2849 2849 tctime = timesd.get(tc.name)
2850 2850 if tctime is not None:
2851 2851 t.setAttribute('time', '%.3f' % tctime)
2852 2852 s.appendChild(t)
2853 2853 for tc, err in sorted(result.faildata.items()):
2854 2854 t = doc.createElement('testcase')
2855 2855 t.setAttribute('name', tc)
2856 2856 tctime = timesd.get(tc)
2857 2857 if tctime is not None:
2858 2858 t.setAttribute('time', '%.3f' % tctime)
2859 2859 # createCDATASection expects a unicode or it will
2860 2860 # convert using default conversion rules, which will
2861 2861 # fail if string isn't ASCII.
2862 2862 err = cdatasafe(err).decode('utf-8', 'replace')
2863 2863 cd = doc.createCDATASection(err)
2864 2864 # Use 'failure' here instead of 'error' to match errors = 0,
2865 2865 # failures = len(result.failures) in the testsuite element.
2866 2866 failelem = doc.createElement('failure')
2867 2867 failelem.setAttribute('message', 'output changed')
2868 2868 failelem.setAttribute('type', 'output-mismatch')
2869 2869 failelem.appendChild(cd)
2870 2870 t.appendChild(failelem)
2871 2871 s.appendChild(t)
2872 2872 for tc, message in result.skipped:
2873 2873 # According to the schema, 'skipped' has no attributes. So store
2874 2874 # the skip message as a text node instead.
2875 2875 t = doc.createElement('testcase')
2876 2876 t.setAttribute('name', tc.name)
2877 2877 binmessage = message.encode('utf-8')
2878 2878 message = cdatasafe(binmessage).decode('utf-8', 'replace')
2879 2879 cd = doc.createCDATASection(message)
2880 2880 skipelem = doc.createElement('skipped')
2881 2881 skipelem.appendChild(cd)
2882 2882 t.appendChild(skipelem)
2883 2883 s.appendChild(t)
2884 2884 outf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
2885 2885
2886 2886 @staticmethod
2887 2887 def _writejson(result, outf):
2888 2888 timesd = {}
2889 2889 for tdata in result.times:
2890 2890 test = tdata[0]
2891 2891 timesd[test] = tdata[1:]
2892 2892
2893 2893 outcome = {}
2894 2894 groups = [
2895 2895 ('success', ((tc, None) for tc in result.successes)),
2896 2896 ('failure', result.failures),
2897 2897 ('skip', result.skipped),
2898 2898 ]
2899 2899 for res, testcases in groups:
2900 2900 for tc, __ in testcases:
2901 2901 if tc.name in timesd:
2902 2902 diff = result.faildata.get(tc.name, b'')
2903 2903 try:
2904 2904 diff = diff.decode('unicode_escape')
2905 2905 except UnicodeDecodeError as e:
2906 2906 diff = '%r decoding diff, sorry' % e
2907 2907 tres = {
2908 2908 'result': res,
2909 2909 'time': ('%0.3f' % timesd[tc.name][2]),
2910 2910 'cuser': ('%0.3f' % timesd[tc.name][0]),
2911 2911 'csys': ('%0.3f' % timesd[tc.name][1]),
2912 2912 'start': ('%0.3f' % timesd[tc.name][3]),
2913 2913 'end': ('%0.3f' % timesd[tc.name][4]),
2914 2914 'diff': diff,
2915 2915 }
2916 2916 else:
2917 2917 # blacklisted test
2918 2918 tres = {'result': res}
2919 2919
2920 2920 outcome[tc.name] = tres
2921 2921 jsonout = json.dumps(
2922 2922 outcome, sort_keys=True, indent=4, separators=(',', ': ')
2923 2923 )
2924 2924 outf.writelines(("testreport =", jsonout))
2925 2925
2926 2926
2927 2927 def sorttests(testdescs, previoustimes, shuffle=False):
2928 2928 """Do an in-place sort of tests."""
2929 2929 if shuffle:
2930 2930 random.shuffle(testdescs)
2931 2931 return
2932 2932
2933 2933 if previoustimes:
2934 2934
2935 2935 def sortkey(f):
2936 2936 f = f['path']
2937 2937 if f in previoustimes:
2938 2938 # Use most recent time as estimate
2939 2939 return -(previoustimes[f][-1])
2940 2940 else:
2941 2941 # Default to a rather arbitrary value of 1 second for new tests
2942 2942 return -1.0
2943 2943
2944 2944 else:
2945 2945 # keywords for slow tests
2946 2946 slow = {
2947 2947 b'svn': 10,
2948 2948 b'cvs': 10,
2949 2949 b'hghave': 10,
2950 2950 b'largefiles-update': 10,
2951 2951 b'run-tests': 10,
2952 2952 b'corruption': 10,
2953 2953 b'race': 10,
2954 2954 b'i18n': 10,
2955 2955 b'check': 100,
2956 2956 b'gendoc': 100,
2957 2957 b'contrib-perf': 200,
2958 2958 b'merge-combination': 100,
2959 2959 }
2960 2960 perf = {}
2961 2961
2962 2962 def sortkey(f):
2963 2963 # run largest tests first, as they tend to take the longest
2964 2964 f = f['path']
2965 2965 try:
2966 2966 return perf[f]
2967 2967 except KeyError:
2968 2968 try:
2969 2969 val = -os.stat(f).st_size
2970 2970 except OSError as e:
2971 2971 if e.errno != errno.ENOENT:
2972 2972 raise
2973 2973 perf[f] = -1e9 # file does not exist, report it early
2974 2974 return -1e9
2975 2975 for kw, mul in slow.items():
2976 2976 if kw in f:
2977 2977 val *= mul
2978 2978 if f.endswith(b'.py'):
2979 2979 val /= 10.0
2980 2980 perf[f] = val / 1000.0
2981 2981 return perf[f]
2982 2982
2983 2983 testdescs.sort(key=sortkey)
2984 2984
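# Rough illustration of how the two sort keys above order tests (a sketch with
# hypothetical file names and numbers, not part of the harness): with recorded
# timings, the most recent duration is negated so longer tests are scheduled
# first, and tests without history fall back to -1.0; without timings, the
# negated file size is scaled by the "slow" keyword multipliers.
#
#   previoustimes = {b'test-example.t': [12.5, 14.0]}
#   sortkey({'path': b'test-example.t'})   # -> -14.0, runs before new tests (-1.0)
#
#   # keyword path: a 2000-byte test-check-foo.t matches b'check' (mul 100),
#   # so val = -2000 * 100 and the key is val / 1000.0 = -200.0, scheduling it
#   # well ahead of small, unmatched tests.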
2985 2985
2986 2986 class TestRunner(object):
2987 2987 """Holds context for executing tests.
2988 2988
2989 2989 Tests rely on a lot of state. This object holds it for them.
2990 2990 """
2991 2991
2992 2992 # Programs required to run tests.
2993 2993 REQUIREDTOOLS = [
2994 2994 b'diff',
2995 2995 b'grep',
2996 2996 b'unzip',
2997 2997 b'gunzip',
2998 2998 b'bunzip2',
2999 2999 b'sed',
3000 3000 ]
3001 3001
3002 3002 # Maps file extensions to test class.
3003 3003 TESTTYPES = [
3004 3004 (b'.py', PythonTest),
3005 3005 (b'.t', TTest),
3006 3006 ]
3007 3007
3008 3008 def __init__(self):
3009 3009 self.options = None
3010 3010 self._hgroot = None
3011 3011 self._testdir = None
3012 3012 self._outputdir = None
3013 3013 self._hgtmp = None
3014 3014 self._installdir = None
3015 3015 self._bindir = None
3016 3016 self._tmpbindir = None
3017 3017 self._pythondir = None
3018 3018 self._coveragefile = None
3019 3019 self._createdfiles = []
3020 3020 self._hgcommand = None
3021 3021 self._hgpath = None
3022 3022 self._portoffset = 0
3023 3023 self._ports = {}
3024 3024
3025 3025 def run(self, args, parser=None):
3026 3026 """Run the test suite."""
3027 3027 oldmask = os.umask(0o22)
3028 3028 try:
3029 3029 parser = parser or getparser()
3030 3030 options = parseargs(args, parser)
3031 3031 tests = [_sys2bytes(a) for a in options.tests]
3032 3032 if options.test_list is not None:
3033 3033 for listfile in options.test_list:
3034 3034 with open(listfile, 'rb') as f:
3035 3035 tests.extend(t for t in f.read().splitlines() if t)
3036 3036 self.options = options
3037 3037
3038 3038 self._checktools()
3039 3039 testdescs = self.findtests(tests)
3040 3040 if options.profile_runner:
3041 3041 import statprof
3042 3042
3043 3043 statprof.start()
3044 3044 result = self._run(testdescs)
3045 3045 if options.profile_runner:
3046 3046 statprof.stop()
3047 3047 statprof.display()
3048 3048 return result
3049 3049
3050 3050 finally:
3051 3051 os.umask(oldmask)
3052 3052
3053 3053 def _run(self, testdescs):
3054 3054 testdir = getcwdb()
3055 3055 self._testdir = osenvironb[b'TESTDIR'] = getcwdb()
3056 3056 # assume all tests in same folder for now
3057 3057 if testdescs:
3058 3058 pathname = os.path.dirname(testdescs[0]['path'])
3059 3059 if pathname:
3060 3060 testdir = os.path.join(testdir, pathname)
3061 3061 self._testdir = osenvironb[b'TESTDIR'] = testdir
3062 3062 if self.options.outputdir:
3063 3063 self._outputdir = canonpath(_sys2bytes(self.options.outputdir))
3064 3064 else:
3065 3065 self._outputdir = getcwdb()
3066 3066 if testdescs and pathname:
3067 3067 self._outputdir = os.path.join(self._outputdir, pathname)
3068 3068 previoustimes = {}
3069 3069 if self.options.order_by_runtime:
3070 3070 previoustimes = dict(loadtimes(self._outputdir))
3071 3071 sorttests(testdescs, previoustimes, shuffle=self.options.random)
3072 3072
3073 3073 if 'PYTHONHASHSEED' not in os.environ:
3074 3074 # use a random python hash seed all the time
3075 3075 # we do the randomness ourselves so we know which seed is used
3076 3076 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
3077 3077
3078 3078 # Rayon (Rust crate for multi-threading) will use all logical CPU cores
3079 3079 # by default, causing thrashing on high-cpu-count systems.
3080 3080 # Setting its limit to 3 during tests should still let us uncover
3081 3081 # multi-threading bugs while keeping the thrashing reasonable.
3082 3082 os.environ.setdefault("RAYON_NUM_THREADS", "3")
3083 3083
3084 3084 if self.options.tmpdir:
3085 3085 self.options.keep_tmpdir = True
3086 3086 tmpdir = _sys2bytes(self.options.tmpdir)
3087 3087 if os.path.exists(tmpdir):
3088 3088 # Meaning of tmpdir has changed since 1.3: we used to create
3089 3089 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
3090 3090 # tmpdir already exists.
3091 3091 print("error: temp dir %r already exists" % tmpdir)
3092 3092 return 1
3093 3093
3094 3094 os.makedirs(tmpdir)
3095 3095 else:
3096 3096 d = None
3097 3097 if os.name == 'nt':
3098 3098 # without this, we get the default temp dir location, but
3099 3099 # in all lowercase, which causes troubles with paths (issue3490)
3100 3100 d = osenvironb.get(b'TMP', None)
3101 3101 tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)
3102 3102
3103 3103 self._hgtmp = osenvironb[b'HGTMP'] = os.path.realpath(tmpdir)
3104 3104
3105 3105 if self.options.with_hg:
3106 3106 self._installdir = None
3107 3107 whg = self.options.with_hg
3108 3108 self._bindir = os.path.dirname(os.path.realpath(whg))
3109 3109 assert isinstance(self._bindir, bytes)
3110 3110 self._hgcommand = os.path.basename(whg)
3111 3111 self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
3112 3112 os.makedirs(self._tmpbindir)
3113 3113
3114 3114 normbin = os.path.normpath(os.path.abspath(whg))
3115 3115 normbin = normbin.replace(_sys2bytes(os.sep), b'/')
3116 3116
3117 3117 # Other Python scripts in the test harness need to
3118 3118 # `import mercurial`. If `hg` is a Python script, we assume
3119 3119 # the Mercurial modules are relative to its path and tell the tests
3120 3120 # to load Python modules from its directory.
3121 3121 with open(whg, 'rb') as fh:
3122 3122 initial = fh.read(1024)
3123 3123
3124 3124 if re.match(b'#!.*python', initial):
3125 3125 self._pythondir = self._bindir
3126 3126 # If it looks like our in-repo Rust binary, use the source root.
3127 3127 # This is a bit hacky. But rhg is still not supported outside the
3128 3128 # source directory. So until it is, do the simple thing.
3129 3129 elif re.search(b'/rust/target/[^/]+/hg', normbin):
3130 3130 self._pythondir = os.path.dirname(self._testdir)
3131 3131 # Fall back to the legacy behavior.
3132 3132 else:
3133 3133 self._pythondir = self._bindir
3134 3134
3135 3135 else:
3136 3136 self._installdir = os.path.join(self._hgtmp, b"install")
3137 3137 self._bindir = os.path.join(self._installdir, b"bin")
3138 3138 self._hgcommand = b'hg'
3139 3139 self._tmpbindir = self._bindir
3140 3140 self._pythondir = os.path.join(self._installdir, b"lib", b"python")
3141 3141
3142 3142 # Force the use of hg.exe instead of relying on MSYS to recognize that hg
3143 3143 # is a python script and feed it to python.exe. Legacy stdio is
3144 3144 # force-enabled by hg.exe, and this is a more realistic way to launch hg
3145 3145 # anyway.
3146 3146 if os.name == 'nt' and not self._hgcommand.endswith(b'.exe'):
3147 3147 self._hgcommand += b'.exe'
3148 3148
3149 3149 # set CHGHG, then replace "hg" command by "chg"
3150 3150 chgbindir = self._bindir
3151 3151 if self.options.chg or self.options.with_chg:
3152 3152 osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
3153 3153 else:
3154 3154 osenvironb.pop(b'CHGHG', None) # drop flag for hghave
3155 3155 if self.options.chg:
3156 3156 self._hgcommand = b'chg'
3157 3157 elif self.options.with_chg:
3158 3158 chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
3159 3159 self._hgcommand = os.path.basename(self.options.with_chg)
3160 3160
3161 3161 # configure fallback and replace "hg" command by "rhg"
3162 3162 rhgbindir = self._bindir
3163 3163 if self.options.rhg or self.options.with_rhg:
3164 3164 # Affects hghave.py
3165 3165 osenvironb[b'RHG_INSTALLED_AS_HG'] = b'1'
3166 3166 # Affects configuration. Alternatives would be setting configuration through
3167 3167 # `$HGRCPATH` but some tests override that, or changing `_hgcommand` to include
3168 3168 # `--config` but that disrupts tests that print command lines and check expected
3169 3169 # output.
3170 3170 osenvironb[b'RHG_ON_UNSUPPORTED'] = b'fallback'
3171 3171 osenvironb[b'RHG_FALLBACK_EXECUTABLE'] = os.path.join(
3172 3172 self._bindir, self._hgcommand
3173 3173 )
3174 3174 if self.options.rhg:
3175 3175 self._hgcommand = b'rhg'
3176 3176 elif self.options.with_rhg:
3177 3177 rhgbindir = os.path.dirname(os.path.realpath(self.options.with_rhg))
3178 3178 self._hgcommand = os.path.basename(self.options.with_rhg)
3179 3179
3180 3180 osenvironb[b"BINDIR"] = self._bindir
3181 3181 osenvironb[b"PYTHON"] = PYTHON
3182 3182
3183 3183 fileb = _sys2bytes(__file__)
3184 3184 runtestdir = os.path.abspath(os.path.dirname(fileb))
3185 3185 osenvironb[b'RUNTESTDIR'] = runtestdir
3186 3186 if PYTHON3:
3187 3187 sepb = _sys2bytes(os.pathsep)
3188 3188 else:
3189 3189 sepb = os.pathsep
3190 3190 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
3191 3191 if os.path.islink(__file__):
3192 3192 # the test helpers likely live next to the symlink's target
3193 3193 realfile = os.path.realpath(fileb)
3194 3194 realdir = os.path.abspath(os.path.dirname(realfile))
3195 3195 path.insert(2, realdir)
3196 3196 if chgbindir != self._bindir:
3197 3197 path.insert(1, chgbindir)
3198 3198 if rhgbindir != self._bindir:
3199 3199 path.insert(1, rhgbindir)
3200 3200 if self._testdir != runtestdir:
3201 3201 path = [self._testdir] + path
3202 3202 if self._tmpbindir != self._bindir:
3203 3203 path = [self._tmpbindir] + path
3204 3204 osenvironb[b"PATH"] = sepb.join(path)
3205 3205
3206 3206 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
3207 3207 # can run .../tests/run-tests.py test-foo where test-foo
3208 3208 # adds an extension to HGRC. Also include the run-tests.py directory to
3209 3209 # import modules like heredoctest.
3210 3210 pypath = [self._pythondir, self._testdir, runtestdir]
3211 3211 # We have to augment PYTHONPATH, rather than simply replacing
3212 3212 # it, in case external libraries are only available via current
3213 3213 # PYTHONPATH. (In particular, the Subversion bindings on OS X
3214 3214 # are in /opt/subversion.)
3215 3215 oldpypath = osenvironb.get(IMPL_PATH)
3216 3216 if oldpypath:
3217 3217 pypath.append(oldpypath)
3218 3218 osenvironb[IMPL_PATH] = sepb.join(pypath)
3219 3219
3220 3220 if self.options.pure:
3221 3221 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
3222 3222 os.environ["HGMODULEPOLICY"] = "py"
3223 3223 if self.options.rust:
3224 3224 os.environ["HGMODULEPOLICY"] = "rust+c"
3225 3225 if self.options.no_rust:
3226 3226 current_policy = os.environ.get("HGMODULEPOLICY", "")
3227 3227 if current_policy.startswith("rust+"):
3228 3228 os.environ["HGMODULEPOLICY"] = current_policy[len("rust+") :]
3229 3229 os.environ.pop("HGWITHRUSTEXT", None)
3230 3230
3231 3231 if self.options.allow_slow_tests:
3232 3232 os.environ["HGTEST_SLOW"] = "slow"
3233 3233 elif 'HGTEST_SLOW' in os.environ:
3234 3234 del os.environ['HGTEST_SLOW']
3235 3235
3236 3236 self._coveragefile = os.path.join(self._testdir, b'.coverage')
3237 3237
3238 3238 if self.options.exceptions:
3239 3239 exceptionsdir = os.path.join(self._outputdir, b'exceptions')
3240 3240 try:
3241 3241 os.makedirs(exceptionsdir)
3242 3242 except OSError as e:
3243 3243 if e.errno != errno.EEXIST:
3244 3244 raise
3245 3245
3246 3246 # Remove all existing exception reports.
3247 3247 for f in os.listdir(exceptionsdir):
3248 3248 os.unlink(os.path.join(exceptionsdir, f))
3249 3249
3250 3250 osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir
3251 3251 logexceptions = os.path.join(self._testdir, b'logexceptions.py')
3252 3252 self.options.extra_config_opt.append(
3253 3253 'extensions.logexceptions=%s' % logexceptions.decode('utf-8')
3254 3254 )
3255 3255
3256 3256 vlog("# Using TESTDIR", _bytes2sys(self._testdir))
3257 3257 vlog("# Using RUNTESTDIR", _bytes2sys(osenvironb[b'RUNTESTDIR']))
3258 3258 vlog("# Using HGTMP", _bytes2sys(self._hgtmp))
3259 3259 vlog("# Using PATH", os.environ["PATH"])
3260 3260 vlog(
3261 3261 "# Using",
3262 3262 _bytes2sys(IMPL_PATH),
3263 3263 _bytes2sys(osenvironb[IMPL_PATH]),
3264 3264 )
3265 3265 vlog("# Writing to directory", _bytes2sys(self._outputdir))
3266 3266
3267 3267 try:
3268 3268 return self._runtests(testdescs) or 0
3269 3269 finally:
3270 3270 time.sleep(0.1)
3271 3271 self._cleanup()
3272 3272
3273 3273 def findtests(self, args):
3274 3274 """Finds possible test files from arguments.
3275 3275
3276 3276 If you wish to inject custom tests into the test harness, this would
3277 3277 be a good function to monkeypatch or override in a derived class.
3278 3278 """
3279 3279 if not args:
3280 3280 if self.options.changed:
3281 3281 proc = Popen4(
3282 3282 b'hg st --rev "%s" -man0 .'
3283 3283 % _sys2bytes(self.options.changed),
3284 3284 None,
3285 3285 0,
3286 3286 )
3287 3287 stdout, stderr = proc.communicate()
3288 3288 args = stdout.strip(b'\0').split(b'\0')
3289 3289 else:
3290 3290 args = os.listdir(b'.')
3291 3291
3292 3292 expanded_args = []
3293 3293 for arg in args:
3294 3294 if os.path.isdir(arg):
3295 3295 if not arg.endswith(b'/'):
3296 3296 arg += b'/'
3297 3297 expanded_args.extend([arg + a for a in os.listdir(arg)])
3298 3298 else:
3299 3299 expanded_args.append(arg)
3300 3300 args = expanded_args
3301 3301
3302 3302 testcasepattern = re.compile(br'([\w-]+\.t|py)(?:#([a-zA-Z0-9_\-.#]+))')
3303 3303 tests = []
3304 3304 for t in args:
3305 3305 case = []
3306 3306
3307 3307 if not (
3308 3308 os.path.basename(t).startswith(b'test-')
3309 3309 and (t.endswith(b'.py') or t.endswith(b'.t'))
3310 3310 ):
3311 3311
3312 3312 m = testcasepattern.match(os.path.basename(t))
3313 3313 if m is not None:
3314 3314 t_basename, casestr = m.groups()
3315 3315 t = os.path.join(os.path.dirname(t), t_basename)
3316 3316 if casestr:
3317 3317 case = casestr.split(b'#')
3318 3318 else:
3319 3319 continue
3320 3320
3321 3321 if t.endswith(b'.t'):
3322 3322 # .t file may contain multiple test cases
3323 3323 casedimensions = parsettestcases(t)
3324 3324 if casedimensions:
3325 3325 cases = []
3326 3326
3327 3327 def addcases(case, casedimensions):
3328 3328 if not casedimensions:
3329 3329 cases.append(case)
3330 3330 else:
3331 3331 for c in casedimensions[0]:
3332 3332 addcases(case + [c], casedimensions[1:])
3333 3333
3334 3334 addcases([], casedimensions)
3335 3335 if case and case in cases:
3336 3336 cases = [case]
3337 3337 elif case:
3338 3338 # Ignore invalid cases
3339 3339 cases = []
3340 3340 else:
3341 3341 pass
3342 3342 tests += [{'path': t, 'case': c} for c in sorted(cases)]
3343 3343 else:
3344 3344 tests.append({'path': t})
3345 3345 else:
3346 3346 tests.append({'path': t})
3347 3347
3348 3348 if self.options.retest:
3349 3349 retest_args = []
3350 3350 for test in tests:
3351 3351 errpath = self._geterrpath(test)
3352 3352 if os.path.exists(errpath):
3353 3353 retest_args.append(test)
3354 3354 tests = retest_args
3355 3355 return tests
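# Sketch of the test-case syntax handled by testcasepattern above (names are
# hypothetical): an argument such as b'test-example.t#caseA#caseB' is split into
# the base file b'test-example.t' and the case list [b'caseA', b'caseB']; the
# requested combination is kept only if it matches one of the case dimensions
# that parsettestcases() finds declared inside the .t file, otherwise it is
# silently ignored.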
3356 3356
3357 3357 def _runtests(self, testdescs):
3358 3358 def _reloadtest(test, i):
3359 3359 # convert a test back to its description dict
3360 3360 desc = {'path': test.path}
3361 3361 case = getattr(test, '_case', [])
3362 3362 if case:
3363 3363 desc['case'] = case
3364 3364 return self._gettest(desc, i)
3365 3365
3366 3366 try:
3367 3367 if self.options.restart:
3368 3368 orig = list(testdescs)
3369 3369 while testdescs:
3370 3370 desc = testdescs[0]
3371 3371 errpath = self._geterrpath(desc)
3372 3372 if os.path.exists(errpath):
3373 3373 break
3374 3374 testdescs.pop(0)
3375 3375 if not testdescs:
3376 3376 print("running all tests")
3377 3377 testdescs = orig
3378 3378
3379 3379 tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]
3380 3380 num_tests = len(tests) * self.options.runs_per_test
3381 3381
3382 3382 jobs = min(num_tests, self.options.jobs)
3383 3383
3384 3384 failed = False
3385 3385 kws = self.options.keywords
3386 3386 if kws is not None and PYTHON3:
3387 3387 kws = kws.encode('utf-8')
3388 3388
3389 3389 suite = TestSuite(
3390 3390 self._testdir,
3391 3391 jobs=jobs,
3392 3392 whitelist=self.options.whitelisted,
3393 3393 blacklist=self.options.blacklist,
3394 3394 keywords=kws,
3395 3395 loop=self.options.loop,
3396 3396 runs_per_test=self.options.runs_per_test,
3397 3397 showchannels=self.options.showchannels,
3398 3398 tests=tests,
3399 3399 loadtest=_reloadtest,
3400 3400 )
3401 3401 verbosity = 1
3402 3402 if self.options.list_tests:
3403 3403 verbosity = 0
3404 3404 elif self.options.verbose:
3405 3405 verbosity = 2
3406 3406 runner = TextTestRunner(self, verbosity=verbosity)
3407 3407
3408 3408 if self.options.list_tests:
3409 3409 result = runner.listtests(suite)
3410 3410 else:
3411 3411 if self._installdir:
3412 3412 self._installhg()
3413 3413 self._checkhglib("Testing")
3414 3414 else:
3415 3415 self._usecorrectpython()
3416 3416 if self.options.chg:
3417 3417 assert self._installdir
3418 3418 self._installchg()
3419 3419 if self.options.rhg:
3420 3420 assert self._installdir
3421 3421 self._installrhg()
3422 3422
3423 3423 log(
3424 3424 'running %d tests using %d parallel processes'
3425 3425 % (num_tests, jobs)
3426 3426 )
3427 3427
3428 3428 result = runner.run(suite)
3429 3429
3430 3430 if result.failures or result.errors:
3431 3431 failed = True
3432 3432
3433 3433 result.onEnd()
3434 3434
3435 3435 if self.options.anycoverage:
3436 3436 self._outputcoverage()
3437 3437 except KeyboardInterrupt:
3438 3438 failed = True
3439 3439 print("\ninterrupted!")
3440 3440
3441 3441 if failed:
3442 3442 return 1
3443 3443
3444 3444 def _geterrpath(self, test):
3445 3445 # test['path'] is a relative path
3446 3446 if 'case' in test:
3447 3447 # for multiple dimensions test cases
3448 3448 casestr = b'#'.join(test['case'])
3449 3449 errpath = b'%s#%s.err' % (test['path'], casestr)
3450 3450 else:
3451 3451 errpath = b'%s.err' % test['path']
3452 3452 if self.options.outputdir:
3453 3453 self._outputdir = canonpath(_sys2bytes(self.options.outputdir))
3454 3454 errpath = os.path.join(self._outputdir, errpath)
3455 3455 return errpath
3456 3456
3457 3457 def _getport(self, count):
3458 3458 port = self._ports.get(count) # do we have a cached entry?
3459 3459 if port is None:
3460 3460 portneeded = 3
3461 3461 # above 100 tries we just give up and let the test report the failure
3462 3462 for tries in xrange(100):
3463 3463 allfree = True
3464 3464 port = self.options.port + self._portoffset
3465 3465 for idx in xrange(portneeded):
3466 3466 if not checkportisavailable(port + idx):
3467 3467 allfree = False
3468 3468 break
3469 3469 self._portoffset += portneeded
3470 3470 if allfree:
3471 3471 break
3472 3472 self._ports[count] = port
3473 3473 return port
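# Worked example for _getport() above (the base value is hypothetical): if
# self.options.port is 20000, the first test (count 0) probes ports 20000-20002
# and, when all three are free, caches 20000; self._portoffset has advanced by
# 3, so the next test starts probing at 20003. Each test therefore owns a
# private block of three consecutive ports.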
3474 3474
3475 3475 def _gettest(self, testdesc, count):
3476 3476 """Obtain a Test by looking at its filename.
3477 3477
3478 3478 Returns a Test instance. The Test may not be runnable if it doesn't
3479 3479 map to a known type.
3480 3480 """
3481 3481 path = testdesc['path']
3482 3482 lctest = path.lower()
3483 3483 testcls = Test
3484 3484
3485 3485 for ext, cls in self.TESTTYPES:
3486 3486 if lctest.endswith(ext):
3487 3487 testcls = cls
3488 3488 break
3489 3489
3490 3490 refpath = os.path.join(getcwdb(), path)
3491 3491 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
3492 3492
3493 3493 # extra keyword parameters. 'case' is used by .t tests
3494 3494 kwds = {k: testdesc[k] for k in ['case'] if k in testdesc}
3495 3495
3496 3496 t = testcls(
3497 3497 refpath,
3498 3498 self._outputdir,
3499 3499 tmpdir,
3500 3500 keeptmpdir=self.options.keep_tmpdir,
3501 3501 debug=self.options.debug,
3502 3502 first=self.options.first,
3503 3503 timeout=self.options.timeout,
3504 3504 startport=self._getport(count),
3505 3505 extraconfigopts=self.options.extra_config_opt,
3506 3506 shell=self.options.shell,
3507 3507 hgcommand=self._hgcommand,
3508 3508 usechg=bool(self.options.with_chg or self.options.chg),
3509 3509 chgdebug=self.options.chg_debug,
3510 3510 useipv6=useipv6,
3511 3511 **kwds
3512 3512 )
3513 3513 t.should_reload = True
3514 3514 return t
3515 3515
3516 3516 def _cleanup(self):
3517 3517 """Clean up state from this test invocation."""
3518 3518 if self.options.keep_tmpdir:
3519 3519 return
3520 3520
3521 3521 vlog("# Cleaning up HGTMP", _bytes2sys(self._hgtmp))
3522 3522 shutil.rmtree(self._hgtmp, True)
3523 3523 for f in self._createdfiles:
3524 3524 try:
3525 3525 os.remove(f)
3526 3526 except OSError:
3527 3527 pass
3528 3528
3529 3529 def _usecorrectpython(self):
3530 3530 """Configure the environment to use the appropriate Python in tests."""
3531 3531 # Tests must use the same interpreter as us or bad things will happen.
3532 3532 if sys.platform == 'win32':
3533 pyexename = b'python.exe'
3533 pyexe_names = [b'python', b'python.exe']
3534 elif sys.version_info[0] < 3:
3535 pyexe_names = [b'python', b'python2']
3534 3536 else:
3535 pyexename = b'python3' # XXX this is wrong with python2...
3537 pyexe_names = [b'python', b'python3']
3536 3538
3537 3539 # os.symlink() is a thing with py3 on Windows, but it requires
3538 3540 # Administrator rights.
3539 3541 if getattr(os, 'symlink', None) and os.name != 'nt':
3540 3542 msg = "# Making python executable in test path a symlink to '%s'"
3541 3543 msg %= sysexecutable
3542 3544 vlog(msg)
3543 for pyexename in [pyexename]:
3545 for pyexename in pyexe_names:
3544 3546 mypython = os.path.join(self._tmpbindir, pyexename)
3545 3547 try:
3546 3548 if os.readlink(mypython) == sysexecutable:
3547 3549 continue
3548 3550 os.unlink(mypython)
3549 3551 except OSError as err:
3550 3552 if err.errno != errno.ENOENT:
3551 3553 raise
3552 3554 if self._findprogram(pyexename) != sysexecutable:
3553 3555 try:
3554 3556 os.symlink(sysexecutable, mypython)
3555 3557 self._createdfiles.append(mypython)
3556 3558 except OSError as err:
3557 3559 # child processes may race, which is harmless
3558 3560 if err.errno != errno.EEXIST:
3559 3561 raise
3560 3562 else:
3561 3563 # Windows doesn't have `python3.exe`, and MSYS cannot understand the
3562 3564 # reparse point with that name provided by Microsoft. Create a
3563 3565 # simple script on PATH with that name that delegates to the py3
3564 3566 # launcher so the shebang lines work.
3565 3567 if os.getenv('MSYSTEM'):
3566 3568 with open(osenvironb[b'RUNTESTDIR'] + b'/python3', 'wb') as f:
3567 3569 f.write(b'#!/bin/sh\n')
3568 3570 f.write(b'py -3.%d "$@"\n' % sys.version_info[1])
3571 if os.getenv('MSYSTEM'):
3572 with open(osenvironb[b'RUNTESTDIR'] + b'/python2', 'wb') as f:
3573 f.write(b'#!/bin/sh\n')
3574 f.write(b'py -2.%d "$@"\n' % sys.version_info[1])
3569 3575
3570 3576 exedir, exename = os.path.split(sysexecutable)
3571 msg = "# Modifying search path to find %s as %s in '%s'"
3572 msg %= (exename, pyexename, exedir)
3573 vlog(msg)
3577 for pyexename in pyexe_names:
3578 msg = "# Modifying search path to find %s as %s in '%s'"
3579 msg %= (exename, pyexename, exedir)
3580 vlog(msg)
3574 3581 path = os.environ['PATH'].split(os.pathsep)
3575 3582 while exedir in path:
3576 3583 path.remove(exedir)
3577 3584
3578 3585 # Binaries installed by pip into the user area like pylint.exe may
3579 3586 # not be in PATH by default.
3580 3587 extra_paths = [exedir]
3581 3588 vi = sys.version_info
3582 3589 appdata = os.environ.get('APPDATA')
3583 3590 if appdata is not None:
3584 3591 scripts_dir = os.path.join(
3585 3592 appdata,
3586 3593 'Python',
3587 3594 'Python%d%d' % (vi[0], vi[1]),
3588 3595 'Scripts',
3589 3596 )
3590 3597
3591 3598 if vi.major == 2:
3592 3599 scripts_dir = os.path.join(
3593 3600 appdata,
3594 3601 'Python',
3595 3602 'Scripts',
3596 3603 )
3597 3604
3598 3605 extra_paths.append(scripts_dir)
3599 3606
3600 3607 os.environ['PATH'] = os.pathsep.join(extra_paths + path)
3601 if not self._findprogram(pyexename):
3602 print("WARNING: Cannot find %s in search path" % pyexename)
3608 for pyexename in pyexe_names:
3609 if not self._findprogram(pyexename):
3610 print("WARNING: Cannot find %s in search path" % pyexename)
3603 3611
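# Rough summary of what _usecorrectpython() provides after the change above (a
# sketch of intent, not extra harness code): on platforms with working
# symlinks, every name in pyexe_names is linked to the interpreter running the
# harness, e.g.
#
#   <tmpbindir>/python  -> sysexecutable
#   <tmpbindir>/python3 -> sysexecutable   (python2 instead under Python 2)
#
# On Windows/MSYS, small `python3`/`python2` wrapper scripts delegating to the
# `py` launcher are written and PATH is adjusted so the same names resolve to
# the current interpreter.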
3604 3612 def _installhg(self):
3605 3613 """Install hg into the test environment.
3606 3614
3607 3615 This will also configure hg with the appropriate testing settings.
3608 3616 """
3609 3617 vlog("# Performing temporary installation of HG")
3610 3618 installerrs = os.path.join(self._hgtmp, b"install.err")
3611 3619 compiler = ''
3612 3620 if self.options.compiler:
3613 3621 compiler = '--compiler ' + self.options.compiler
3614 3622 setup_opts = b""
3615 3623 if self.options.pure:
3616 3624 setup_opts = b"--pure"
3617 3625 elif self.options.rust:
3618 3626 setup_opts = b"--rust"
3619 3627 elif self.options.no_rust:
3620 3628 setup_opts = b"--no-rust"
3621 3629
3622 3630 # Run installer in hg root
3623 3631 script = os.path.realpath(sys.argv[0])
3624 3632 exe = sysexecutable
3625 3633 if PYTHON3:
3626 3634 compiler = _sys2bytes(compiler)
3627 3635 script = _sys2bytes(script)
3628 3636 exe = _sys2bytes(exe)
3629 3637 hgroot = os.path.dirname(os.path.dirname(script))
3630 3638 self._hgroot = hgroot
3631 3639 os.chdir(hgroot)
3632 3640 nohome = b'--home=""'
3633 3641 if os.name == 'nt':
3634 3642 # The --home="" trick works only on OSes where os.sep == '/'
3635 3643 # because of a distutils convert_path() fast-path. Avoid it at
3636 3644 # least on Windows for now, deal with .pydistutils.cfg bugs
3637 3645 # when they happen.
3638 3646 nohome = b''
3639 3647 cmd = (
3640 3648 b'"%(exe)s" setup.py %(setup_opts)s clean --all'
3641 3649 b' build %(compiler)s --build-base="%(base)s"'
3642 3650 b' install --force --prefix="%(prefix)s"'
3643 3651 b' --install-lib="%(libdir)s"'
3644 3652 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
3645 3653 % {
3646 3654 b'exe': exe,
3647 3655 b'setup_opts': setup_opts,
3648 3656 b'compiler': compiler,
3649 3657 b'base': os.path.join(self._hgtmp, b"build"),
3650 3658 b'prefix': self._installdir,
3651 3659 b'libdir': self._pythondir,
3652 3660 b'bindir': self._bindir,
3653 3661 b'nohome': nohome,
3654 3662 b'logfile': installerrs,
3655 3663 }
3656 3664 )
3657 3665
3658 3666 # setuptools requires install directories to exist.
3659 3667 def makedirs(p):
3660 3668 try:
3661 3669 os.makedirs(p)
3662 3670 except OSError as e:
3663 3671 if e.errno != errno.EEXIST:
3664 3672 raise
3665 3673
3666 3674 makedirs(self._pythondir)
3667 3675 makedirs(self._bindir)
3668 3676
3669 3677 vlog("# Running", cmd.decode("utf-8"))
3670 3678 if subprocess.call(_bytes2sys(cmd), shell=True) == 0:
3671 3679 if not self.options.verbose:
3672 3680 try:
3673 3681 os.remove(installerrs)
3674 3682 except OSError as e:
3675 3683 if e.errno != errno.ENOENT:
3676 3684 raise
3677 3685 else:
3678 3686 with open(installerrs, 'rb') as f:
3679 3687 for line in f:
3680 3688 if PYTHON3:
3681 3689 sys.stdout.buffer.write(line)
3682 3690 else:
3683 3691 sys.stdout.write(line)
3684 3692 sys.exit(1)
3685 3693 os.chdir(self._testdir)
3686 3694
3687 3695 self._usecorrectpython()
3688 3696
3689 3697 hgbat = os.path.join(self._bindir, b'hg.bat')
3690 3698 if os.path.isfile(hgbat):
3691 3699 # hg.bat expects to be put in bin/scripts while the run-tests.py
3692 3700 # installation layout puts it in bin/ directly. Fix it
3693 3701 with open(hgbat, 'rb') as f:
3694 3702 data = f.read()
3695 3703 if br'"%~dp0..\python" "%~dp0hg" %*' in data:
3696 3704 data = data.replace(
3697 3705 br'"%~dp0..\python" "%~dp0hg" %*',
3698 3706 b'"%~dp0python" "%~dp0hg" %*',
3699 3707 )
3700 3708 with open(hgbat, 'wb') as f:
3701 3709 f.write(data)
3702 3710 else:
3703 3711 print('WARNING: cannot fix hg.bat reference to python.exe')
3704 3712
3705 3713 if self.options.anycoverage:
3706 3714 custom = os.path.join(
3707 3715 osenvironb[b'RUNTESTDIR'], b'sitecustomize.py'
3708 3716 )
3709 3717 target = os.path.join(self._pythondir, b'sitecustomize.py')
3710 3718 vlog('# Installing coverage trigger to %s' % target)
3711 3719 shutil.copyfile(custom, target)
3712 3720 rc = os.path.join(self._testdir, b'.coveragerc')
3713 3721 vlog('# Installing coverage rc to %s' % rc)
3714 3722 osenvironb[b'COVERAGE_PROCESS_START'] = rc
3715 3723 covdir = os.path.join(self._installdir, b'..', b'coverage')
3716 3724 try:
3717 3725 os.mkdir(covdir)
3718 3726 except OSError as e:
3719 3727 if e.errno != errno.EEXIST:
3720 3728 raise
3721 3729
3722 3730 osenvironb[b'COVERAGE_DIR'] = covdir
3723 3731
3724 3732 def _checkhglib(self, verb):
3725 3733 """Ensure that the 'mercurial' package imported by python is
3726 3734 the one we expect it to be. If not, print a warning to stderr."""
3727 3735 if (self._bindir == self._pythondir) and (
3728 3736 self._bindir != self._tmpbindir
3729 3737 ):
3730 3738 # The pythondir has been inferred from --with-hg flag.
3731 3739 # We cannot expect anything sensible here.
3732 3740 return
3733 3741 expecthg = os.path.join(self._pythondir, b'mercurial')
3734 3742 actualhg = self._gethgpath()
3735 3743 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
3736 3744 sys.stderr.write(
3737 3745 'warning: %s with unexpected mercurial lib: %s\n'
3738 3746 ' (expected %s)\n' % (verb, actualhg, expecthg)
3739 3747 )
3740 3748
3741 3749 def _gethgpath(self):
3742 3750 """Return the path to the mercurial package that is actually found by
3743 3751 the current Python interpreter."""
3744 3752 if self._hgpath is not None:
3745 3753 return self._hgpath
3746 3754
3747 3755 cmd = b'"%s" -c "import mercurial; print (mercurial.__path__[0])"'
3748 3756 cmd = cmd % PYTHON
3749 3757 if PYTHON3:
3750 3758 cmd = _bytes2sys(cmd)
3751 3759
3752 3760 p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
3753 3761 out, err = p.communicate()
3754 3762
3755 3763 self._hgpath = out.strip()
3756 3764
3757 3765 return self._hgpath
3758 3766
3759 3767 def _installchg(self):
3760 3768 """Install chg into the test environment"""
3761 3769 vlog('# Performing temporary installation of CHG')
3762 3770 assert os.path.dirname(self._bindir) == self._installdir
3763 3771 assert self._hgroot, 'must be called after _installhg()'
3764 3772 cmd = b'"%(make)s" clean install PREFIX="%(prefix)s"' % {
3765 3773 b'make': b'make', # TODO: switch by option or environment?
3766 3774 b'prefix': self._installdir,
3767 3775 }
3768 3776 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
3769 3777 vlog("# Running", cmd)
3770 3778 proc = subprocess.Popen(
3771 3779 cmd,
3772 3780 shell=True,
3773 3781 cwd=cwd,
3774 3782 stdin=subprocess.PIPE,
3775 3783 stdout=subprocess.PIPE,
3776 3784 stderr=subprocess.STDOUT,
3777 3785 )
3778 3786 out, _err = proc.communicate()
3779 3787 if proc.returncode != 0:
3780 3788 if PYTHON3:
3781 3789 sys.stdout.buffer.write(out)
3782 3790 else:
3783 3791 sys.stdout.write(out)
3784 3792 sys.exit(1)
3785 3793
3786 3794 def _installrhg(self):
3787 3795 """Install rhg into the test environment"""
3788 3796 vlog('# Performing temporary installation of rhg')
3789 3797 assert os.path.dirname(self._bindir) == self._installdir
3790 3798 assert self._hgroot, 'must be called after _installhg()'
3791 3799 cmd = b'"%(make)s" install-rhg PREFIX="%(prefix)s"' % {
3792 3800 b'make': b'make', # TODO: switch by option or environment?
3793 3801 b'prefix': self._installdir,
3794 3802 }
3795 3803 cwd = self._hgroot
3796 3804 vlog("# Running", cmd)
3797 3805 proc = subprocess.Popen(
3798 3806 cmd,
3799 3807 shell=True,
3800 3808 cwd=cwd,
3801 3809 stdin=subprocess.PIPE,
3802 3810 stdout=subprocess.PIPE,
3803 3811 stderr=subprocess.STDOUT,
3804 3812 )
3805 3813 out, _err = proc.communicate()
3806 3814 if proc.returncode != 0:
3807 3815 if PYTHON3:
3808 3816 sys.stdout.buffer.write(out)
3809 3817 else:
3810 3818 sys.stdout.write(out)
3811 3819 sys.exit(1)
3812 3820
3813 3821 def _outputcoverage(self):
3814 3822 """Produce code coverage output."""
3815 3823 import coverage
3816 3824
3817 3825 coverage = coverage.coverage
3818 3826
3819 3827 vlog('# Producing coverage report')
3820 3828 # chdir is the easiest way to get short, relative paths in the
3821 3829 # output.
3822 3830 os.chdir(self._hgroot)
3823 3831 covdir = os.path.join(_bytes2sys(self._installdir), '..', 'coverage')
3824 3832 cov = coverage(data_file=os.path.join(covdir, 'cov'))
3825 3833
3826 3834 # Map install directory paths back to source directory.
3827 3835 cov.config.paths['srcdir'] = ['.', _bytes2sys(self._pythondir)]
3828 3836
3829 3837 cov.combine()
3830 3838
3831 3839 omit = [
3832 3840 _bytes2sys(os.path.join(x, b'*'))
3833 3841 for x in [self._bindir, self._testdir]
3834 3842 ]
3835 3843 cov.report(ignore_errors=True, omit=omit)
3836 3844
3837 3845 if self.options.htmlcov:
3838 3846 htmldir = os.path.join(_bytes2sys(self._outputdir), 'htmlcov')
3839 3847 cov.html_report(directory=htmldir, omit=omit)
3840 3848 if self.options.annotate:
3841 3849 adir = os.path.join(_bytes2sys(self._outputdir), 'annotated')
3842 3850 if not os.path.isdir(adir):
3843 3851 os.mkdir(adir)
3844 3852 cov.annotate(directory=adir, omit=omit)
3845 3853
3846 3854 def _findprogram(self, program):
3847 3855 """Search PATH for a executable program"""
3848 3856 dpb = _sys2bytes(os.defpath)
3849 3857 sepb = _sys2bytes(os.pathsep)
3850 3858 for p in osenvironb.get(b'PATH', dpb).split(sepb):
3851 3859 name = os.path.join(p, program)
3852 3860 if os.name == 'nt' or os.access(name, os.X_OK):
3853 3861 return _bytes2sys(name)
3854 3862 return None
3855 3863
3856 3864 def _checktools(self):
3857 3865 """Ensure tools required to run tests are present."""
3858 3866 for p in self.REQUIREDTOOLS:
3859 3867 if os.name == 'nt' and not p.endswith(b'.exe'):
3860 3868 p += b'.exe'
3861 3869 found = self._findprogram(p)
3862 3870 p = p.decode("utf-8")
3863 3871 if found:
3864 3872 vlog("# Found prerequisite", p, "at", found)
3865 3873 else:
3866 3874 print("WARNING: Did not find prerequisite tool: %s " % p)
3867 3875
3868 3876
3869 3877 def aggregateexceptions(path):
3870 3878 exceptioncounts = collections.Counter()
3871 3879 testsbyfailure = collections.defaultdict(set)
3872 3880 failuresbytest = collections.defaultdict(set)
3873 3881
3874 3882 for f in os.listdir(path):
3875 3883 with open(os.path.join(path, f), 'rb') as fh:
3876 3884 data = fh.read().split(b'\0')
3877 3885 if len(data) != 5:
3878 3886 continue
3879 3887
3880 3888 exc, mainframe, hgframe, hgline, testname = data
3881 3889 exc = exc.decode('utf-8')
3882 3890 mainframe = mainframe.decode('utf-8')
3883 3891 hgframe = hgframe.decode('utf-8')
3884 3892 hgline = hgline.decode('utf-8')
3885 3893 testname = testname.decode('utf-8')
3886 3894
3887 3895 key = (hgframe, hgline, exc)
3888 3896 exceptioncounts[key] += 1
3889 3897 testsbyfailure[key].add(testname)
3890 3898 failuresbytest[testname].add(key)
3891 3899
3892 3900 # Find test having fewest failures for each failure.
3893 3901 leastfailing = {}
3894 3902 for key, tests in testsbyfailure.items():
3895 3903 fewesttest = None
3896 3904 fewestcount = 99999999
3897 3905 for test in sorted(tests):
3898 3906 if len(failuresbytest[test]) < fewestcount:
3899 3907 fewesttest = test
3900 3908 fewestcount = len(failuresbytest[test])
3901 3909
3902 3910 leastfailing[key] = (fewestcount, fewesttest)
3903 3911
3904 3912 # Create a combined counter so we can sort by total occurrences and
3905 3913 # impacted tests.
3906 3914 combined = {}
3907 3915 for key in exceptioncounts:
3908 3916 combined[key] = (
3909 3917 exceptioncounts[key],
3910 3918 len(testsbyfailure[key]),
3911 3919 leastfailing[key][0],
3912 3920 leastfailing[key][1],
3913 3921 )
3914 3922
3915 3923 return {
3916 3924 'exceptioncounts': exceptioncounts,
3917 3925 'total': sum(exceptioncounts.values()),
3918 3926 'combined': combined,
3919 3927 'leastfailing': leastfailing,
3920 3928 'byfailure': testsbyfailure,
3921 3929 'bytest': failuresbytest,
3922 3930 }
3923 3931
3924 3932
3925 3933 if __name__ == '__main__':
3926 3934 runner = TestRunner()
3927 3935
3928 3936 try:
3929 3937 import msvcrt
3930 3938
3931 3939 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
3932 3940 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
3933 3941 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
3934 3942 except ImportError:
3935 3943 pass
3936 3944
3937 3945 sys.exit(runner.run(sys.argv[1:]))
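For orientation, a minimal programmatic use of the harness that mirrors the
`__main__` block above might look like the following sketch; the test name is
hypothetical, and in practice run-tests.py is simply invoked from the command
line with the same arguments.

    runner = TestRunner()
    # parse options and run a single (hypothetical) test, as `__main__` does
    sys.exit(runner.run(['test-example.t']))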
@@ -1,65 +1,65 b''
1 #!/usr/bin/env python3
1 #!/usr/bin/env python
2 2 """
3 3 Tests the behavior of filelog w.r.t. data starting with '\1\n'
4 4 """
5 5 from __future__ import absolute_import, print_function
6 6
7 7 from mercurial.node import hex
8 8 from mercurial import (
9 9 hg,
10 10 ui as uimod,
11 11 )
12 12
13 13 myui = uimod.ui.load()
14 14 repo = hg.repository(myui, path=b'.', create=True)
15 15
16 16 fl = repo.file(b'foobar')
17 17
18 18
19 19 def addrev(text, renamed=False):
20 20 if renamed:
21 21 # data doesn't matter. Just make sure filelog.renamed() returns True
22 22 meta = {b'copyrev': hex(repo.nullid), b'copy': b'bar'}
23 23 else:
24 24 meta = {}
25 25
26 26 lock = t = None
27 27 try:
28 28 lock = repo.lock()
29 29 t = repo.transaction(b'commit')
30 30 node = fl.add(text, meta, t, 0, repo.nullid, repo.nullid)
31 31 return node
32 32 finally:
33 33 if t:
34 34 t.close()
35 35 if lock:
36 36 lock.release()
37 37
38 38
39 39 def error(text):
40 40 print('ERROR: ' + text)
41 41
42 42
43 43 textwith = b'\1\nfoo'
44 44 without = b'foo'
45 45
46 46 node = addrev(textwith)
47 47 if not textwith == fl.read(node):
48 48 error('filelog.read for data starting with \\1\\n')
49 49 if fl.cmp(node, textwith) or not fl.cmp(node, without):
50 50 error('filelog.cmp for data starting with \\1\\n')
51 51 if fl.size(0) != len(textwith):
52 52 error(
53 53 'FIXME: This is a known failure of filelog.size for data starting '
54 54 'with \\1\\n'
55 55 )
56 56
57 57 node = addrev(textwith, renamed=True)
58 58 if not textwith == fl.read(node):
59 59 error('filelog.read for a renaming + data starting with \\1\\n')
60 60 if fl.cmp(node, textwith) or not fl.cmp(node, without):
61 61 error('filelog.cmp for a renaming + data starting with \\1\\n')
62 62 if fl.size(1) != len(textwith):
63 63 error('filelog.size for a renaming + data starting with \\1\\n')
64 64
65 65 print('OK.')
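Context for why the test above singles out data beginning with '\1\n' (stated
as an assumption about filelog's storage convention, not a quote from it):
filelog frames copy metadata between '\1\n' delimiters, so ordinary file data
that happens to start with those bytes has to be disambiguated, and derived
quantities such as size() can then disagree with the logical data length, which
is the known failure the test tolerates. A rough sketch of the assumed framing:

    # assumed revision layout when copy metadata is present (simplified):
    #   b'\1\n' + b'copy: bar\ncopyrev: <hex>\n' + b'\1\n' + filedata
    # plain data that itself begins with b'\1\n' gets an empty metadata block
    # prepended so readers do not mistake it for a metadata header.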
@@ -1,416 +1,416 b''
1 #!/usr/bin/env python3
1 #!/usr/bin/env python
2 2 from __future__ import absolute_import, print_function
3 3
4 4 import hashlib
5 5 import os
6 6 import random
7 7 import shutil
8 8 import stat
9 9 import struct
10 10 import sys
11 11 import tempfile
12 12 import time
13 13 import unittest
14 14
15 15 import silenttestrunner
16 16
17 17 # Load the local remotefilelog, not the system one
18 18 sys.path[0:0] = [os.path.join(os.path.dirname(__file__), '..')]
19 19 from mercurial.node import sha1nodeconstants
20 20 from mercurial import policy
21 21
22 22 if not policy._packageprefs.get(policy.policy, (False, False))[1]:
23 23 if __name__ == '__main__':
24 24 msg = "skipped: pure module not available with module policy:"
25 25 print(msg, policy.policy, file=sys.stderr)
26 26 sys.exit(80)
27 27
28 28 from mercurial import (
29 29 pycompat,
30 30 ui as uimod,
31 31 )
32 32 from hgext.remotefilelog import (
33 33 basepack,
34 34 constants,
35 35 datapack,
36 36 )
37 37
38 38
39 39 class datapacktestsbase(object):
40 40 def __init__(self, datapackreader, paramsavailable):
41 41 self.datapackreader = datapackreader
42 42 self.paramsavailable = paramsavailable
43 43
44 44 def setUp(self):
45 45 self.tempdirs = []
46 46
47 47 def tearDown(self):
48 48 for d in self.tempdirs:
49 49 shutil.rmtree(d)
50 50
51 51 def makeTempDir(self):
52 52 tempdir = pycompat.bytestr(tempfile.mkdtemp())
53 53 self.tempdirs.append(tempdir)
54 54 return tempdir
55 55
56 56 def getHash(self, content):
57 57 return hashlib.sha1(content).digest()
58 58
59 59 def getFakeHash(self):
60 60 return b''.join(
61 61 pycompat.bytechr(random.randint(0, 255)) for _ in range(20)
62 62 )
63 63
64 64 def createPack(self, revisions=None, packdir=None):
65 65 if revisions is None:
66 66 revisions = [
67 67 (
68 68 b"filename",
69 69 self.getFakeHash(),
70 70 sha1nodeconstants.nullid,
71 71 b"content",
72 72 )
73 73 ]
74 74
75 75 if packdir is None:
76 76 packdir = self.makeTempDir()
77 77
78 78 packer = datapack.mutabledatapack(uimod.ui(), packdir, version=2)
79 79
80 80 for args in revisions:
81 81 filename, node, base, content = args[0:4]
82 82 # meta is optional
83 83 meta = None
84 84 if len(args) > 4:
85 85 meta = args[4]
86 86 packer.add(filename, node, base, content, metadata=meta)
87 87
88 88 path = packer.close()
89 89 return self.datapackreader(path)
90 90
91 91 def _testAddSingle(self, content):
92 92 """Test putting a simple blob into a pack and reading it out."""
93 93 filename = b"foo"
94 94 node = self.getHash(content)
95 95
96 96 revisions = [(filename, node, sha1nodeconstants.nullid, content)]
97 97 pack = self.createPack(revisions)
98 98 if self.paramsavailable:
99 99 self.assertEqual(
100 100 pack.params.fanoutprefix, basepack.SMALLFANOUTPREFIX
101 101 )
102 102
103 103 chain = pack.getdeltachain(filename, node)
104 104 self.assertEqual(content, chain[0][4])
105 105
106 106 def testAddSingle(self):
107 107 self._testAddSingle(b'')
108 108
109 109 def testAddSingleEmpty(self):
110 110 self._testAddSingle(b'abcdef')
111 111
112 112 def testAddMultiple(self):
113 113 """Test putting multiple unrelated blobs into a pack and reading them
114 114 out.
115 115 """
116 116 revisions = []
117 117 for i in range(10):
118 118 filename = b"foo%d" % i
119 119 content = b"abcdef%d" % i
120 120 node = self.getHash(content)
121 121 revisions.append((filename, node, self.getFakeHash(), content))
122 122
123 123 pack = self.createPack(revisions)
124 124
125 125 for filename, node, base, content in revisions:
126 126 entry = pack.getdelta(filename, node)
127 127 self.assertEqual((content, filename, base, {}), entry)
128 128
129 129 chain = pack.getdeltachain(filename, node)
130 130 self.assertEqual(content, chain[0][4])
131 131
132 132 def testAddDeltas(self):
133 133 """Test putting multiple delta blobs into a pack and read the chain."""
134 134 revisions = []
135 135 filename = b"foo"
136 136 lastnode = sha1nodeconstants.nullid
137 137 for i in range(10):
138 138 content = b"abcdef%d" % i
139 139 node = self.getHash(content)
140 140 revisions.append((filename, node, lastnode, content))
141 141 lastnode = node
142 142
143 143 pack = self.createPack(revisions)
144 144
145 145 entry = pack.getdelta(filename, revisions[0][1])
146 146 realvalue = (revisions[0][3], filename, revisions[0][2], {})
147 147 self.assertEqual(entry, realvalue)
148 148
149 149 # Test that the chain for the final entry has all the others
150 150 chain = pack.getdeltachain(filename, node)
151 151 for i in range(10):
152 152 content = b"abcdef%d" % i
153 153 self.assertEqual(content, chain[-i - 1][4])
154 154
155 155 def testPackMany(self):
156 156 """Pack many related and unrelated objects."""
157 157 # Build a random pack file
158 158 revisions = []
159 159 blobs = {}
160 160 random.seed(0)
161 161 for i in range(100):
162 162 filename = b"filename-%d" % i
163 163 filerevs = []
164 164 for j in range(random.randint(1, 100)):
165 165 content = b"content-%d" % j
166 166 node = self.getHash(content)
167 167 lastnode = sha1nodeconstants.nullid
168 168 if len(filerevs) > 0:
169 169 lastnode = filerevs[random.randint(0, len(filerevs) - 1)]
170 170 filerevs.append(node)
171 171 blobs[(filename, node, lastnode)] = content
172 172 revisions.append((filename, node, lastnode, content))
173 173
174 174 pack = self.createPack(revisions)
175 175
176 176 # Verify the pack contents
177 177 for (filename, node, lastnode), content in sorted(blobs.items()):
178 178 chain = pack.getdeltachain(filename, node)
179 179 for entry in chain:
180 180 expectedcontent = blobs[(entry[0], entry[1], entry[3])]
181 181 self.assertEqual(entry[4], expectedcontent)
182 182
183 183 def testPackMetadata(self):
184 184 revisions = []
185 185 for i in range(100):
186 186 filename = b'%d.txt' % i
187 187 content = b'put-something-here \n' * i
188 188 node = self.getHash(content)
189 189 meta = {
190 190 constants.METAKEYFLAG: i ** 4,
191 191 constants.METAKEYSIZE: len(content),
192 192 b'Z': b'random_string',
193 193 b'_': b'\0' * i,
194 194 }
195 195 revisions.append(
196 196 (filename, node, sha1nodeconstants.nullid, content, meta)
197 197 )
198 198 pack = self.createPack(revisions)
199 199 for name, node, x, content, origmeta in revisions:
200 200 parsedmeta = pack.getmeta(name, node)
201 201 # flag == 0 should be optimized out
202 202 if origmeta[constants.METAKEYFLAG] == 0:
203 203 del origmeta[constants.METAKEYFLAG]
204 204 self.assertEqual(parsedmeta, origmeta)
205 205
206 206 def testGetMissing(self):
207 207 """Test the getmissing() api."""
208 208 revisions = []
209 209 filename = b"foo"
210 210 lastnode = sha1nodeconstants.nullid
211 211 for i in range(10):
212 212 content = b"abcdef%d" % i
213 213 node = self.getHash(content)
214 214 revisions.append((filename, node, lastnode, content))
215 215 lastnode = node
216 216
217 217 pack = self.createPack(revisions)
218 218
219 219 missing = pack.getmissing([(b"foo", revisions[0][1])])
220 220 self.assertFalse(missing)
221 221
222 222 missing = pack.getmissing(
223 223 [(b"foo", revisions[0][1]), (b"foo", revisions[1][1])]
224 224 )
225 225 self.assertFalse(missing)
226 226
227 227 fakenode = self.getFakeHash()
228 228 missing = pack.getmissing(
229 229 [(b"foo", revisions[0][1]), (b"foo", fakenode)]
230 230 )
231 231 self.assertEqual(missing, [(b"foo", fakenode)])
232 232
233 233 def testAddThrows(self):
234 234 pack = self.createPack()
235 235
236 236 try:
237 237 pack.add(b'filename', sha1nodeconstants.nullid, b'contents')
238 238 self.assertTrue(False, "datapack.add should throw")
239 239 except RuntimeError:
240 240 pass
241 241
242 242 def testBadVersionThrows(self):
243 243 pack = self.createPack()
244 244 path = pack.path + b'.datapack'
245 245 with open(path, 'rb') as f:
246 246 raw = f.read()
247 247 raw = struct.pack('!B', 255) + raw[1:]
248 248 os.chmod(path, os.stat(path).st_mode | stat.S_IWRITE)
249 249 with open(path, 'wb+') as f:
250 250 f.write(raw)
251 251
252 252 try:
253 253 self.datapackreader(pack.path)
254 254 self.assertTrue(False, "bad version number should have thrown")
255 255 except RuntimeError:
256 256 pass
257 257
258 258 def testMissingDeltabase(self):
259 259 fakenode = self.getFakeHash()
260 260 revisions = [(b"filename", fakenode, self.getFakeHash(), b"content")]
261 261 pack = self.createPack(revisions)
262 262 chain = pack.getdeltachain(b"filename", fakenode)
263 263 self.assertEqual(len(chain), 1)
264 264
265 265 def testLargePack(self):
266 266 """Test creating and reading from a large pack with over X entries.
267 267 This causes it to use a 2^16 fanout table instead."""
268 268 revisions = []
269 269 blobs = {}
270 270 total = basepack.SMALLFANOUTCUTOFF + 1
271 271 for i in pycompat.xrange(total):
272 272 filename = b"filename-%d" % i
273 273 content = filename
274 274 node = self.getHash(content)
275 275 blobs[(filename, node)] = content
276 276 revisions.append(
277 277 (filename, node, sha1nodeconstants.nullid, content)
278 278 )
279 279
280 280 pack = self.createPack(revisions)
281 281 if self.paramsavailable:
282 282 self.assertEqual(
283 283 pack.params.fanoutprefix, basepack.LARGEFANOUTPREFIX
284 284 )
285 285
286 286 for (filename, node), content in blobs.items():
287 287 actualcontent = pack.getdeltachain(filename, node)[0][4]
288 288 self.assertEqual(actualcontent, content)
289 289
290 290 def testPacksCache(self):
291 291 """Test that we remember the most recent packs while fetching the delta
292 292 chain."""
293 293
294 294 packdir = self.makeTempDir()
295 295 deltachains = []
296 296
297 297 numpacks = 10
298 298 revisionsperpack = 100
299 299
300 300 for i in range(numpacks):
301 301 chain = []
302 302 revision = (
303 303 b'%d' % i,
304 304 self.getFakeHash(),
305 305 sha1nodeconstants.nullid,
306 306 b"content",
307 307 )
308 308
309 309 for _ in range(revisionsperpack):
310 310 chain.append(revision)
311 311 revision = (
312 312 b'%d' % i,
313 313 self.getFakeHash(),
314 314 revision[1],
315 315 self.getFakeHash(),
316 316 )
317 317
318 318 self.createPack(chain, packdir)
319 319 deltachains.append(chain)
320 320
321 321 class testdatapackstore(datapack.datapackstore):
322 322 # Ensures that we are not keeping everything in the cache.
323 323 DEFAULTCACHESIZE = numpacks // 2
324 324
325 325 store = testdatapackstore(uimod.ui(), packdir)
326 326
327 327 random.shuffle(deltachains)
328 328 for randomchain in deltachains:
329 329 revision = random.choice(randomchain)
330 330 chain = store.getdeltachain(revision[0], revision[1])
331 331
332 332 mostrecentpack = next(iter(store.packs), None)
333 333 self.assertEqual(
334 334 mostrecentpack.getdeltachain(revision[0], revision[1]), chain
335 335 )
336 336
337 337 self.assertEqual(randomchain.index(revision) + 1, len(chain))
338 338
339 339 # perf test off by default since it's slow
340 340 def _testIndexPerf(self):
341 341 random.seed(0)
342 342 print("Multi-get perf test")
343 343 packsizes = [
344 344 100,
345 345 10000,
346 346 100000,
347 347 500000,
348 348 1000000,
349 349 3000000,
350 350 ]
351 351 lookupsizes = [
352 352 10,
353 353 100,
354 354 1000,
355 355 10000,
356 356 100000,
357 357 1000000,
358 358 ]
359 359 for packsize in packsizes:
360 360 revisions = []
361 361 for i in pycompat.xrange(packsize):
362 362 filename = b"filename-%d" % i
363 363 content = b"content-%d" % i
364 364 node = self.getHash(content)
365 365 revisions.append(
366 366 (filename, node, sha1nodeconstants.nullid, content)
367 367 )
368 368
369 369 path = self.createPack(revisions).path
370 370
371 371 # Perf of large multi-get
372 372 import gc
373 373
374 374 gc.disable()
375 375 pack = self.datapackreader(path)
376 376 for lookupsize in lookupsizes:
377 377 if lookupsize > packsize:
378 378 continue
379 379 random.shuffle(revisions)
380 380 findnodes = [(rev[0], rev[1]) for rev in revisions]
381 381
382 382 start = time.time()
383 383 pack.getmissing(findnodes[:lookupsize])
384 384 elapsed = time.time() - start
385 385 print(
386 386 "%s pack %d lookups = %0.04f"
387 387 % (
388 388 ('%d' % packsize).rjust(7),
389 389 ('%d' % lookupsize).rjust(7),
390 390 elapsed,
391 391 )
392 392 )
393 393
394 394 print("")
395 395 gc.enable()
396 396
397 397 # The perf test is meant to produce output, so we always fail the test
398 398 # so the user sees the output.
399 399 raise RuntimeError("perf test always fails")
400 400
401 401
402 402 class datapacktests(datapacktestsbase, unittest.TestCase):
403 403 def __init__(self, *args, **kwargs):
404 404 datapacktestsbase.__init__(self, datapack.datapack, True)
405 405 unittest.TestCase.__init__(self, *args, **kwargs)
406 406
407 407
408 408 # TODO:
409 409 # datapack store:
410 410 # - getmissing
411 411 # - GC two packs into one
412 412
413 413 if __name__ == '__main__':
414 414 if pycompat.iswindows:
415 415 sys.exit(80) # Skip on Windows
416 416 silenttestrunner.main(__name__)
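As a compact reference for the tuples the datapack tests above construct
(values below are illustrative and built only from helpers already shown):

    # each entry passed to createPack() / packer.add() is
    #   (filename, node, deltabase, content[, metadata])
    content = b'abcdef'
    revision = (b'foo', hashlib.sha1(content).digest(),
                sha1nodeconstants.nullid, content)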
@@ -1,313 +1,313 b''
1 #!/usr/bin/env python3
1 #!/usr/bin/env python
2 2 from __future__ import absolute_import
3 3
4 4 import hashlib
5 5 import os
6 6 import random
7 7 import shutil
8 8 import stat
9 9 import struct
10 10 import sys
11 11 import tempfile
12 12 import unittest
13 13
14 14 import silenttestrunner
15 15
16 16 from mercurial.node import sha1nodeconstants
17 17 from mercurial import (
18 18 pycompat,
19 19 ui as uimod,
20 20 )
21 21
22 22 # Load the local remotefilelog, not the system one
23 23 sys.path[0:0] = [os.path.join(os.path.dirname(__file__), '..')]
24 24 from hgext.remotefilelog import (
25 25 basepack,
26 26 historypack,
27 27 )
28 28
29 29
30 30 class histpacktests(unittest.TestCase):
31 31 def setUp(self):
32 32 self.tempdirs = []
33 33
34 34 def tearDown(self):
35 35 for d in self.tempdirs:
36 36 shutil.rmtree(d)
37 37
38 38 def makeTempDir(self):
39 39 tempdir = tempfile.mkdtemp()
40 40 self.tempdirs.append(tempdir)
41 41 return pycompat.fsencode(tempdir)
42 42
43 43 def getHash(self, content):
44 44 return hashlib.sha1(content).digest()
45 45
46 46 def getFakeHash(self):
47 47 return b''.join(
48 48 pycompat.bytechr(random.randint(0, 255)) for _ in range(20)
49 49 )
50 50
51 51 def createPack(self, revisions=None):
52 52 """Creates and returns a historypack containing the specified revisions.
53 53
54 54 `revisions` is a list of tuples, where each tuple contains a filename,
55 55 node, p1node, p2node, linknode, and copyfrom.
56 56 """
57 57 if revisions is None:
58 58 revisions = [
59 59 (
60 60 b"filename",
61 61 self.getFakeHash(),
62 62 sha1nodeconstants.nullid,
63 63 sha1nodeconstants.nullid,
64 64 self.getFakeHash(),
65 65 None,
66 66 )
67 67 ]
68 68
69 69 packdir = pycompat.fsencode(self.makeTempDir())
70 70 packer = historypack.mutablehistorypack(uimod.ui(), packdir, version=2)
71 71
72 72 for filename, node, p1, p2, linknode, copyfrom in revisions:
73 73 packer.add(filename, node, p1, p2, linknode, copyfrom)
74 74
75 75 path = packer.close()
76 76 return historypack.historypack(path)
77 77
78 78 def testAddSingle(self):
79 79 """Test putting a single entry into a pack and reading it out."""
80 80 filename = b"foo"
81 81 node = self.getFakeHash()
82 82 p1 = self.getFakeHash()
83 83 p2 = self.getFakeHash()
84 84 linknode = self.getFakeHash()
85 85
86 86 revisions = [(filename, node, p1, p2, linknode, None)]
87 87 pack = self.createPack(revisions)
88 88
89 89 actual = pack.getancestors(filename, node)[node]
90 90 self.assertEqual(p1, actual[0])
91 91 self.assertEqual(p2, actual[1])
92 92 self.assertEqual(linknode, actual[2])
93 93
94 94 def testAddMultiple(self):
95 95 """Test putting multiple unrelated revisions into a pack and reading
96 96 them out.
97 97 """
98 98 revisions = []
99 99 for i in range(10):
100 100 filename = b"foo-%d" % i
101 101 node = self.getFakeHash()
102 102 p1 = self.getFakeHash()
103 103 p2 = self.getFakeHash()
104 104 linknode = self.getFakeHash()
105 105 revisions.append((filename, node, p1, p2, linknode, None))
106 106
107 107 pack = self.createPack(revisions)
108 108
109 109 for filename, node, p1, p2, linknode, copyfrom in revisions:
110 110 actual = pack.getancestors(filename, node)[node]
111 111 self.assertEqual(p1, actual[0])
112 112 self.assertEqual(p2, actual[1])
113 113 self.assertEqual(linknode, actual[2])
114 114 self.assertEqual(copyfrom, actual[3])
115 115
116 116 def testAddAncestorChain(self):
117 117 """Test putting multiple revisions in into a pack and read the ancestor
118 118 chain.
119 119 """
120 120 revisions = []
121 121 filename = b"foo"
122 122 lastnode = sha1nodeconstants.nullid
123 123 for i in range(10):
124 124 node = self.getFakeHash()
125 125 revisions.append(
126 126 (
127 127 filename,
128 128 node,
129 129 lastnode,
130 130 sha1nodeconstants.nullid,
131 131 sha1nodeconstants.nullid,
132 132 None,
133 133 )
134 134 )
135 135 lastnode = node
136 136
137 137 # revisions must be added in topological order, newest first
138 138 revisions = list(reversed(revisions))
139 139 pack = self.createPack(revisions)
140 140
141 141 # Test that the chain has all the entries
142 142 ancestors = pack.getancestors(revisions[0][0], revisions[0][1])
143 143 for filename, node, p1, p2, linknode, copyfrom in revisions:
144 144 ap1, ap2, alinknode, acopyfrom = ancestors[node]
145 145 self.assertEqual(ap1, p1)
146 146 self.assertEqual(ap2, p2)
147 147 self.assertEqual(alinknode, linknode)
148 148 self.assertEqual(acopyfrom, copyfrom)
149 149
150 150 def testPackMany(self):
151 151 """Pack many related and unrelated ancestors."""
152 152 # Build a random pack file
153 153 allentries = {}
154 154 ancestorcounts = {}
155 155 revisions = []
156 156 random.seed(0)
157 157 for i in range(100):
158 158 filename = b"filename-%d" % i
159 159 entries = []
160 160 p2 = sha1nodeconstants.nullid
161 161 linknode = sha1nodeconstants.nullid
162 162 for j in range(random.randint(1, 100)):
163 163 node = self.getFakeHash()
164 164 p1 = sha1nodeconstants.nullid
165 165 if len(entries) > 0:
166 166 p1 = entries[random.randint(0, len(entries) - 1)]
167 167 entries.append(node)
168 168 revisions.append((filename, node, p1, p2, linknode, None))
169 169 allentries[(filename, node)] = (p1, p2, linknode)
170 170 if p1 == sha1nodeconstants.nullid:
171 171 ancestorcounts[(filename, node)] = 1
172 172 else:
173 173 newcount = ancestorcounts[(filename, p1)] + 1
174 174 ancestorcounts[(filename, node)] = newcount
175 175
176 176 # Must add file entries in reverse topological order
177 177 revisions = list(reversed(revisions))
178 178 pack = self.createPack(revisions)
179 179
180 180 # Verify the pack contents
181 181 for (filename, node) in allentries:
182 182 ancestors = pack.getancestors(filename, node)
183 183 self.assertEqual(ancestorcounts[(filename, node)], len(ancestors))
184 184 for anode, (ap1, ap2, alinknode, copyfrom) in ancestors.items():
185 185 ep1, ep2, elinknode = allentries[(filename, anode)]
186 186 self.assertEqual(ap1, ep1)
187 187 self.assertEqual(ap2, ep2)
188 188 self.assertEqual(alinknode, elinknode)
189 189 self.assertEqual(copyfrom, None)
190 190
191 191 def testGetNodeInfo(self):
192 192 revisions = []
193 193 filename = b"foo"
194 194 lastnode = sha1nodeconstants.nullid
195 195 for i in range(10):
196 196 node = self.getFakeHash()
197 197 revisions.append(
198 198 (
199 199 filename,
200 200 node,
201 201 lastnode,
202 202 sha1nodeconstants.nullid,
203 203 sha1nodeconstants.nullid,
204 204 None,
205 205 )
206 206 )
207 207 lastnode = node
208 208
209 209 pack = self.createPack(revisions)
210 210
211 211 # Test that getnodeinfo returns the expected results
212 212 for filename, node, p1, p2, linknode, copyfrom in revisions:
213 213 ap1, ap2, alinknode, acopyfrom = pack.getnodeinfo(filename, node)
214 214 self.assertEqual(ap1, p1)
215 215 self.assertEqual(ap2, p2)
216 216 self.assertEqual(alinknode, linknode)
217 217 self.assertEqual(acopyfrom, copyfrom)
218 218
219 219 def testGetMissing(self):
220 220 """Test the getmissing() api."""
221 221 revisions = []
222 222 filename = b"foo"
223 223 for i in range(10):
224 224 node = self.getFakeHash()
225 225 p1 = self.getFakeHash()
226 226 p2 = self.getFakeHash()
227 227 linknode = self.getFakeHash()
228 228 revisions.append((filename, node, p1, p2, linknode, None))
229 229
230 230 pack = self.createPack(revisions)
231 231
232 232 missing = pack.getmissing([(filename, revisions[0][1])])
233 233 self.assertFalse(missing)
234 234
235 235 missing = pack.getmissing(
236 236 [(filename, revisions[0][1]), (filename, revisions[1][1])]
237 237 )
238 238 self.assertFalse(missing)
239 239
240 240 fakenode = self.getFakeHash()
241 241 missing = pack.getmissing(
242 242 [(filename, revisions[0][1]), (filename, fakenode)]
243 243 )
244 244 self.assertEqual(missing, [(filename, fakenode)])
245 245
246 246 # Test getmissing on a non-existent filename
247 247 missing = pack.getmissing([(b"bar", fakenode)])
248 248 self.assertEqual(missing, [(b"bar", fakenode)])
249 249
250 250 def testAddThrows(self):
251 251 pack = self.createPack()
252 252
253 253 try:
254 254 pack.add(
255 255 b'filename',
256 256 sha1nodeconstants.nullid,
257 257 sha1nodeconstants.nullid,
258 258 sha1nodeconstants.nullid,
259 259 sha1nodeconstants.nullid,
260 260 None,
261 261 )
262 262 self.assertTrue(False, "historypack.add should throw")
263 263 except RuntimeError:
264 264 pass
265 265
266 266 def testBadVersionThrows(self):
267 267 pack = self.createPack()
268 268 path = pack.path + b'.histpack'
269 269 with open(path, 'rb') as f:
270 270 raw = f.read()
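        # overwrite the single-byte version field at the start of the pack
        # with an unsupported value so reopening it must fail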
271 271 raw = struct.pack('!B', 255) + raw[1:]
272 272 os.chmod(path, os.stat(path).st_mode | stat.S_IWRITE)
273 273 with open(path, 'wb+') as f:
274 274 f.write(raw)
275 275
276 276 try:
277 277 historypack.historypack(pack.path)
278 278 self.assertTrue(False, "bad version number should have thrown")
279 279 except RuntimeError:
280 280 pass
281 281
282 282 def testLargePack(self):
283 283 """Test creating and reading from a large pack with more than
284 284 SMALLFANOUTCUTOFF entries, which forces it to use a 2^16 fanout table instead."""
285 285 total = basepack.SMALLFANOUTCUTOFF + 1
286 286 revisions = []
287 287 for i in pycompat.xrange(total):
288 288 filename = b"foo-%d" % i
289 289 node = self.getFakeHash()
290 290 p1 = self.getFakeHash()
291 291 p2 = self.getFakeHash()
292 292 linknode = self.getFakeHash()
293 293 revisions.append((filename, node, p1, p2, linknode, None))
294 294
295 295 pack = self.createPack(revisions)
296 296 self.assertEqual(pack.params.fanoutprefix, basepack.LARGEFANOUTPREFIX)
297 297
298 298 for filename, node, p1, p2, linknode, copyfrom in revisions:
299 299 actual = pack.getancestors(filename, node)[node]
300 300 self.assertEqual(p1, actual[0])
301 301 self.assertEqual(p2, actual[1])
302 302 self.assertEqual(linknode, actual[2])
303 303 self.assertEqual(copyfrom, actual[3])
304 304
305 305
306 306 # TODO:
307 307 # histpack store:
308 308 # - repack two packs into one
309 309
310 310 if __name__ == '__main__':
311 311 if pycompat.iswindows:
312 312 sys.exit(80) # Skip on Windows
313 313 silenttestrunner.main(__name__)
@@ -1,2038 +1,2069 b''
1 1 This file tests the behavior of run-tests.py itself.
2 2
3 3 Avoid interference from actual test env:
4 4
5 5 $ . "$TESTDIR/helper-runtests.sh"
6 6
7 7 Smoke test with install
8 8 ============
9 9 $ "$PYTHON" $TESTDIR/run-tests.py $HGTEST_RUN_TESTS_PURE -l
10 10 running 0 tests using 0 parallel processes
11 11
12 12 # Ran 0 tests, 0 skipped, 0 failed.
13 13
14 14 Define a helper to avoid the install step
15 15 =============
16 16 $ rt()
17 17 > {
18 18 > "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` -j1 "$@"
19 19 > }
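(--with-hg points run-tests.py at an existing hg script so no install step runs,
and -j1 keeps everything in a single process for deterministic output.)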
20 20
21 21 error paths
22 22
23 23 #if symlink
24 24 $ ln -s `which true` hg
25 25 $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
26 26 warning: --with-hg should specify an hg script
27 27 running 0 tests using 0 parallel processes
28 28
29 29 # Ran 0 tests, 0 skipped, 0 failed.
30 30 $ rm hg
31 31 #endif
32 32
33 33 #if execbit
34 34 $ touch hg
35 35 $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
36 36 usage: run-tests.py [options] [tests]
37 37 run-tests.py: error: --with-hg must specify an executable hg script
38 38 [2]
39 39 $ rm hg
40 40 #endif
41 41
42 42 Features for testing optional lines
43 43 ===================================
44 44
45 45 $ cat > hghaveaddon.py <<EOF
46 46 > import hghave
47 47 > @hghave.check("custom", "custom hghave feature")
48 48 > def has_custom():
49 49 > return True
50 50 > @hghave.check("missing", "missing hghave feature")
51 51 > def has_missing():
52 52 > return False
53 53 > EOF
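The addon registers `custom` as an available feature and `missing` as an
unavailable one; later tests rely on them through the `(custom !)`,
`(no-custom !)` and `(missing !)` conditional output annotations.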
54 54
55 55 an empty test
56 56 =======================
57 57
58 58 $ touch test-empty.t
59 59 $ rt
60 60 running 1 tests using 1 parallel processes
61 61 .
62 62 # Ran 1 tests, 0 skipped, 0 failed.
63 63 $ rm test-empty.t
64 64
65 65 a successful test
66 66 =======================
67 67
68 68 $ cat > test-success.t << EOF
69 69 > $ echo babar
70 70 > babar
71 71 > $ echo xyzzy
72 72 > dont_print (?)
73 73 > nothing[42]line (re) (?)
74 74 > never*happens (glob) (?)
75 75 > more_nothing (?)
76 76 > xyzzy
77 77 > nor this (?)
78 78 > $ printf 'abc\ndef\nxyz\n'
79 79 > 123 (?)
80 80 > abc
81 81 > def (?)
82 82 > 456 (?)
83 83 > xyz
84 84 > $ printf 'zyx\nwvu\ntsr\n'
85 85 > abc (?)
86 86 > zyx (custom !)
87 87 > wvu
88 88 > no_print (no-custom !)
89 89 > tsr (no-missing !)
90 90 > missing (missing !)
91 91 > EOF
92 92
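(Roughly: in the expected output above, `(?)` marks an optional line, `(re)` and
`(glob)` request regex or glob matching, and `(feature !)` / `(no-feature !)`
lines are only expected when the named hghave feature is, or is not, present.)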
93 93 $ rt
94 94 running 1 tests using 1 parallel processes
95 95 .
96 96 # Ran 1 tests, 0 skipped, 0 failed.
97 97
98 98 failing test
99 99 ==================
100 100
101 101 test churn with globs
102 102 $ cat > test-failure.t <<EOF
103 103 > $ echo "bar-baz"; echo "bar-bad"; echo foo
104 104 > bar*bad (glob)
105 105 > bar*baz (glob)
106 106 > | fo (re)
107 107 > EOF
108 108 $ rt test-failure.t
109 109 running 1 tests using 1 parallel processes
110 110
111 111 --- $TESTTMP/test-failure.t
112 112 +++ $TESTTMP/test-failure.t.err
113 113 @@ -1,4 +1,4 @@
114 114 $ echo "bar-baz"; echo "bar-bad"; echo foo
115 115 + bar*baz (glob)
116 116 bar*bad (glob)
117 117 - bar*baz (glob)
118 118 - | fo (re)
119 119 + foo
120 120
121 121 ERROR: test-failure.t output changed
122 122 !
123 123 Failed test-failure.t: output changed
124 124 # Ran 1 tests, 0 skipped, 1 failed.
125 125 python hash seed: * (glob)
126 126 [1]
127 127
128 128 test how multiple globs get matched against lines in output
129 129 $ cat > test-failure-globs.t <<EOF
130 130 > $ echo "context"; echo "context"; \
131 131 > echo "key: 1"; echo "value: not a"; \
132 132 > echo "key: 2"; echo "value: not b"; \
133 133 > echo "key: 3"; echo "value: c"; \
134 134 > echo "key: 4"; echo "value: d"
135 135 > context
136 136 > context
137 137 > key: 1
138 138 > value: a
139 139 > key: 2
140 140 > value: b
141 141 > key: 3
142 142 > value: * (glob)
143 143 > key: 4
144 144 > value: * (glob)
145 145 > EOF
146 146 $ rt test-failure-globs.t
147 147 running 1 tests using 1 parallel processes
148 148
149 149 --- $TESTTMP/test-failure-globs.t
150 150 +++ $TESTTMP/test-failure-globs.t.err
151 151 @@ -2,9 +2,9 @@
152 152 context
153 153 context
154 154 key: 1
155 155 - value: a
156 156 + value: not a
157 157 key: 2
158 158 - value: b
159 159 + value: not b
160 160 key: 3
161 161 value: * (glob)
162 162 key: 4
163 163
164 164 ERROR: test-failure-globs.t output changed
165 165 !
166 166 Failed test-failure-globs.t: output changed
167 167 # Ran 1 tests, 0 skipped, 1 failed.
168 168 python hash seed: * (glob)
169 169 [1]
170 170 $ rm test-failure-globs.t
171 171
172 172 test diff colorisation
173 173
174 174 #if no-windows pygments
175 175 $ rt test-failure.t --color always
176 176 running 1 tests using 1 parallel processes
177 177
178 178 \x1b[38;5;124m--- $TESTTMP/test-failure.t\x1b[39m (esc)
179 179 \x1b[38;5;34m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc)
180 180 \x1b[38;5;90;01m@@ -1,4 +1,4 @@\x1b[39;00m (esc)
181 181 $ echo "bar-baz"; echo "bar-bad"; echo foo
182 182 \x1b[38;5;34m+ bar*baz (glob)\x1b[39m (esc)
183 183 bar*bad (glob)
184 184 \x1b[38;5;124m- bar*baz (glob)\x1b[39m (esc)
185 185 \x1b[38;5;124m- | fo (re)\x1b[39m (esc)
186 186 \x1b[38;5;34m+ foo\x1b[39m (esc)
187 187
188 188 \x1b[38;5;88mERROR: \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m output changed\x1b[39m (esc)
189 189 !
190 190 \x1b[38;5;88mFailed \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m: output changed\x1b[39m (esc)
191 191 # Ran 1 tests, 0 skipped, 1 failed.
192 192 python hash seed: * (glob)
193 193 [1]
194 194
195 195 $ rt test-failure.t 2> tmp.log
196 196 running 1 tests using 1 parallel processes
197 197 [1]
198 198 $ cat tmp.log
199 199
200 200 --- $TESTTMP/test-failure.t
201 201 +++ $TESTTMP/test-failure.t.err
202 202 @@ -1,4 +1,4 @@
203 203 $ echo "bar-baz"; echo "bar-bad"; echo foo
204 204 + bar*baz (glob)
205 205 bar*bad (glob)
206 206 - bar*baz (glob)
207 207 - | fo (re)
208 208 + foo
209 209
210 210 ERROR: test-failure.t output changed
211 211 !
212 212 Failed test-failure.t: output changed
213 213 # Ran 1 tests, 0 skipped, 1 failed.
214 214 python hash seed: * (glob)
215 215 #endif
216 216
217 217 $ cat > test-failure.t << EOF
218 218 > $ true
219 219 > should go away (true !)
220 220 > $ true
221 221 > should stay (false !)
222 222 >
223 223 > Should remove first line, not second or third
224 224 > $ echo 'testing'
225 225 > baz*foo (glob) (true !)
226 226 > foobar*foo (glob) (false !)
227 227 > te*ting (glob) (true !)
228 228 >
229 229 > Should keep first two lines, remove third and last
230 230 > $ echo 'testing'
231 231 > test.ng (re) (true !)
232 232 > foo.ar (re) (false !)
233 233 > b.r (re) (true !)
234 234 > missing (?)
235 235 > awol (true !)
236 236 >
237 237 > The "missing" line should stay, even though awol is dropped
238 238 > $ echo 'testing'
239 239 > test.ng (re) (true !)
240 240 > foo.ar (?)
241 241 > awol
242 242 > missing (?)
243 243 > EOF
244 244 $ rt test-failure.t
245 245 running 1 tests using 1 parallel processes
246 246
247 247 --- $TESTTMP/test-failure.t
248 248 +++ $TESTTMP/test-failure.t.err
249 249 @@ -1,11 +1,9 @@
250 250 $ true
251 251 - should go away (true !)
252 252 $ true
253 253 should stay (false !)
254 254
255 255 Should remove first line, not second or third
256 256 $ echo 'testing'
257 257 - baz*foo (glob) (true !)
258 258 foobar*foo (glob) (false !)
259 259 te*ting (glob) (true !)
260 260
261 261 foo.ar (re) (false !)
262 262 missing (?)
263 263 @@ -13,13 +11,10 @@
264 264 $ echo 'testing'
265 265 test.ng (re) (true !)
266 266 foo.ar (re) (false !)
267 267 - b.r (re) (true !)
268 268 missing (?)
269 269 - awol (true !)
270 270
271 271 The "missing" line should stay, even though awol is dropped
272 272 $ echo 'testing'
273 273 test.ng (re) (true !)
274 274 foo.ar (?)
275 275 - awol
276 276 missing (?)
277 277
278 278 ERROR: test-failure.t output changed
279 279 !
280 280 Failed test-failure.t: output changed
281 281 # Ran 1 tests, 0 skipped, 1 failed.
282 282 python hash seed: * (glob)
283 283 [1]
284 284
285 285 basic failing test
286 286 $ cat > test-failure.t << EOF
287 287 > $ echo babar
288 288 > rataxes
289 289 > This is a noop statement so that
290 290 > this test is still more bytes than success.
291 291 > pad pad pad pad............................................................
292 292 > pad pad pad pad............................................................
293 293 > pad pad pad pad............................................................
294 294 > pad pad pad pad............................................................
295 295 > pad pad pad pad............................................................
296 296 > pad pad pad pad............................................................
297 297 > EOF
298 298
299 299 >>> fh = open('test-failure-unicode.t', 'wb')
300 300 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
301 301 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
302 302
303 303 $ rt
304 304 running 3 tests using 1 parallel processes
305 305
306 306 --- $TESTTMP/test-failure.t
307 307 +++ $TESTTMP/test-failure.t.err
308 308 @@ -1,5 +1,5 @@
309 309 $ echo babar
310 310 - rataxes
311 311 + babar
312 312 This is a noop statement so that
313 313 this test is still more bytes than success.
314 314 pad pad pad pad............................................................
315 315
316 316 ERROR: test-failure.t output changed
317 317 !.
318 318 --- $TESTTMP/test-failure-unicode.t
319 319 +++ $TESTTMP/test-failure-unicode.t.err
320 320 @@ -1,2 +1,2 @@
321 321 $ echo babar\xce\xb1 (esc)
322 322 - l\xce\xb5\xce\xb5t (esc)
323 323 + babar\xce\xb1 (esc)
324 324
325 325 ERROR: test-failure-unicode.t output changed
326 326 !
327 327 Failed test-failure-unicode.t: output changed
328 328 Failed test-failure.t: output changed
329 329 # Ran 3 tests, 0 skipped, 2 failed.
330 330 python hash seed: * (glob)
331 331 [1]
332 332
333 333 test --outputdir
334 334 $ mkdir output
335 335 $ rt --outputdir output
336 336 running 3 tests using 1 parallel processes
337 337
338 338 --- $TESTTMP/test-failure.t
339 339 +++ $TESTTMP/output/test-failure.t.err
340 340 @@ -1,5 +1,5 @@
341 341 $ echo babar
342 342 - rataxes
343 343 + babar
344 344 This is a noop statement so that
345 345 this test is still more bytes than success.
346 346 pad pad pad pad............................................................
347 347
348 348 ERROR: test-failure.t output changed
349 349 !.
350 350 --- $TESTTMP/test-failure-unicode.t
351 351 +++ $TESTTMP/output/test-failure-unicode.t.err
352 352 @@ -1,2 +1,2 @@
353 353 $ echo babar\xce\xb1 (esc)
354 354 - l\xce\xb5\xce\xb5t (esc)
355 355 + babar\xce\xb1 (esc)
356 356
357 357 ERROR: test-failure-unicode.t output changed
358 358 !
359 359 Failed test-failure-unicode.t: output changed
360 360 Failed test-failure.t: output changed
361 361 # Ran 3 tests, 0 skipped, 2 failed.
362 362 python hash seed: * (glob)
363 363 [1]
364 364 $ ls -a output
365 365 .
366 366 ..
367 367 .testtimes
368 368 test-failure-unicode.t.err
369 369 test-failure.t.err
370 370
371 371 test --xunit support
372 372 $ rt --xunit=xunit.xml
373 373 running 3 tests using 1 parallel processes
374 374
375 375 --- $TESTTMP/test-failure.t
376 376 +++ $TESTTMP/test-failure.t.err
377 377 @@ -1,5 +1,5 @@
378 378 $ echo babar
379 379 - rataxes
380 380 + babar
381 381 This is a noop statement so that
382 382 this test is still more bytes than success.
383 383 pad pad pad pad............................................................
384 384
385 385 ERROR: test-failure.t output changed
386 386 !.
387 387 --- $TESTTMP/test-failure-unicode.t
388 388 +++ $TESTTMP/test-failure-unicode.t.err
389 389 @@ -1,2 +1,2 @@
390 390 $ echo babar\xce\xb1 (esc)
391 391 - l\xce\xb5\xce\xb5t (esc)
392 392 + babar\xce\xb1 (esc)
393 393
394 394 ERROR: test-failure-unicode.t output changed
395 395 !
396 396 Failed test-failure-unicode.t: output changed
397 397 Failed test-failure.t: output changed
398 398 # Ran 3 tests, 0 skipped, 2 failed.
399 399 python hash seed: * (glob)
400 400 [1]
401 401 $ cat xunit.xml
402 402 <?xml version="1.0" encoding="utf-8"?>
403 403 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
404 404 <testcase name="test-success.t" time="*"/> (glob)
405 405 <testcase name="test-failure-unicode.t" time="*"> (glob)
406 406 <failure message="output changed" type="output-mismatch"><![CDATA[--- $TESTTMP/test-failure-unicode.t (py38 !)
407 407 <failure message="output changed" type="output-mismatch"> (no-py38 !)
408 408 <![CDATA[--- $TESTTMP/test-failure-unicode.t (no-py38 !)
409 409 +++ $TESTTMP/test-failure-unicode.t.err
410 410 @@ -1,2 +1,2 @@
411 411 $ echo babar\xce\xb1 (esc)
412 412 - l\xce\xb5\xce\xb5t (esc)
413 413 + babar\xce\xb1 (esc)
414 414 ]]></failure> (py38 !)
415 415 ]]> </failure> (no-py38 !)
416 416 </testcase>
417 417 <testcase name="test-failure.t" time="*"> (glob)
418 418 <failure message="output changed" type="output-mismatch"><![CDATA[--- $TESTTMP/test-failure.t (py38 !)
419 419 <failure message="output changed" type="output-mismatch"> (no-py38 !)
420 420 <![CDATA[--- $TESTTMP/test-failure.t (no-py38 !)
421 421 +++ $TESTTMP/test-failure.t.err
422 422 @@ -1,5 +1,5 @@
423 423 $ echo babar
424 424 - rataxes
425 425 + babar
426 426 This is a noop statement so that
427 427 this test is still more bytes than success.
428 428 pad pad pad pad............................................................
429 429 ]]></failure> (py38 !)
430 430 ]]> </failure> (no-py38 !)
431 431 </testcase>
432 432 </testsuite>
433 433
434 434 $ cat .testtimes
435 435 test-empty.t * (glob)
436 436 test-failure-globs.t * (glob)
437 437 test-failure-unicode.t * (glob)
438 438 test-failure.t * (glob)
439 439 test-success.t * (glob)
440 440
441 441 $ rt --list-tests
442 442 test-failure-unicode.t
443 443 test-failure.t
444 444 test-success.t
445 445
446 446 $ rt --list-tests --json
447 447 test-failure-unicode.t
448 448 test-failure.t
449 449 test-success.t
450 450 $ cat report.json
451 451 testreport ={
452 452 "test-failure-unicode.t": {
453 453 "result": "success"
454 454 },
455 455 "test-failure.t": {
456 456 "result": "success"
457 457 },
458 458 "test-success.t": {
459 459 "result": "success"
460 460 }
461 461 } (no-eol)
462 462
463 463 $ rt --list-tests --xunit=xunit.xml
464 464 test-failure-unicode.t
465 465 test-failure.t
466 466 test-success.t
467 467 $ cat xunit.xml
468 468 <?xml version="1.0" encoding="utf-8"?>
469 469 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
470 470 <testcase name="test-failure-unicode.t"/>
471 471 <testcase name="test-failure.t"/>
472 472 <testcase name="test-success.t"/>
473 473 </testsuite>
474 474
475 475 $ rt --list-tests test-failure* --json --xunit=xunit.xml --outputdir output
476 476 test-failure-unicode.t
477 477 test-failure.t
478 478 $ cat output/report.json
479 479 testreport ={
480 480 "test-failure-unicode.t": {
481 481 "result": "success"
482 482 },
483 483 "test-failure.t": {
484 484 "result": "success"
485 485 }
486 486 } (no-eol)
487 487 $ cat xunit.xml
488 488 <?xml version="1.0" encoding="utf-8"?>
489 489 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
490 490 <testcase name="test-failure-unicode.t"/>
491 491 <testcase name="test-failure.t"/>
492 492 </testsuite>
493 493
494 494 $ rm test-failure-unicode.t
495 495
496 496 test for --retest
497 497 ====================
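(--retest reruns only the tests that left a .err file behind from a previous
failing run, which is why just test-failure.t is selected here.)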
498 498
499 499 $ rt --retest
500 500 running 1 tests using 1 parallel processes
501 501
502 502 --- $TESTTMP/test-failure.t
503 503 +++ $TESTTMP/test-failure.t.err
504 504 @@ -1,5 +1,5 @@
505 505 $ echo babar
506 506 - rataxes
507 507 + babar
508 508 This is a noop statement so that
509 509 this test is still more bytes than success.
510 510 pad pad pad pad............................................................
511 511
512 512 ERROR: test-failure.t output changed
513 513 !
514 514 Failed test-failure.t: output changed
515 515 # Ran 1 tests, 0 skipped, 1 failed.
516 516 python hash seed: * (glob)
517 517 [1]
518 518
519 519 --retest works with --outputdir
520 520 $ rm -r output
521 521 $ mkdir output
522 522 $ mv test-failure.t.err output
523 523 $ rt --retest --outputdir output
524 524 running 1 tests using 1 parallel processes
525 525
526 526 --- $TESTTMP/test-failure.t
527 527 +++ $TESTTMP/output/test-failure.t.err
528 528 @@ -1,5 +1,5 @@
529 529 $ echo babar
530 530 - rataxes
531 531 + babar
532 532 This is a noop statement so that
533 533 this test is still more bytes than success.
534 534 pad pad pad pad............................................................
535 535
536 536 ERROR: test-failure.t output changed
537 537 !
538 538 Failed test-failure.t: output changed
539 539 # Ran 1 tests, 0 skipped, 1 failed.
540 540 python hash seed: * (glob)
541 541 [1]
542 542
543 543 Selecting Tests To Run
544 544 ======================
545 545
546 546 successful
547 547
548 548 $ rt test-success.t
549 549 running 1 tests using 1 parallel processes
550 550 .
551 551 # Ran 1 tests, 0 skipped, 0 failed.
552 552
553 553 success w/ keyword
554 554 $ rt -k xyzzy
555 555 running 2 tests using 1 parallel processes
556 556 .
557 557 # Ran 2 tests, 1 skipped, 0 failed.
558 558
559 559 failed
560 560
561 561 $ rt test-failure.t
562 562 running 1 tests using 1 parallel processes
563 563
564 564 --- $TESTTMP/test-failure.t
565 565 +++ $TESTTMP/test-failure.t.err
566 566 @@ -1,5 +1,5 @@
567 567 $ echo babar
568 568 - rataxes
569 569 + babar
570 570 This is a noop statement so that
571 571 this test is still more bytes than success.
572 572 pad pad pad pad............................................................
573 573
574 574 ERROR: test-failure.t output changed
575 575 !
576 576 Failed test-failure.t: output changed
577 577 # Ran 1 tests, 0 skipped, 1 failed.
578 578 python hash seed: * (glob)
579 579 [1]
580 580
581 581 failure w/ keyword
582 582 $ rt -k rataxes
583 583 running 2 tests using 1 parallel processes
584 584
585 585 --- $TESTTMP/test-failure.t
586 586 +++ $TESTTMP/test-failure.t.err
587 587 @@ -1,5 +1,5 @@
588 588 $ echo babar
589 589 - rataxes
590 590 + babar
591 591 This is a noop statement so that
592 592 this test is still more bytes than success.
593 593 pad pad pad pad............................................................
594 594
595 595 ERROR: test-failure.t output changed
596 596 !
597 597 Failed test-failure.t: output changed
598 598 # Ran 2 tests, 1 skipped, 1 failed.
599 599 python hash seed: * (glob)
600 600 [1]
601 601
602 602 Verify that when a process fails to start we show a useful message
603 603 ==================================================================
604 604
605 605 $ cat > test-serve-fail.t <<EOF
606 606 > $ echo 'abort: child process failed to start blah'
607 607 > EOF
608 608 $ rt test-serve-fail.t
609 609 running 1 tests using 1 parallel processes
610 610
611 611 --- $TESTTMP/test-serve-fail.t
612 612 +++ $TESTTMP/test-serve-fail.t.err
613 613 @@ -1* +1,2 @@ (glob)
614 614 $ echo 'abort: child process failed to start blah'
615 615 + abort: child process failed to start blah
616 616
617 617 ERROR: test-serve-fail.t output changed
618 618 !
619 619 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
620 620 # Ran 1 tests, 0 skipped, 1 failed.
621 621 python hash seed: * (glob)
622 622 [1]
623 623 $ rm test-serve-fail.t
624 624
625 625 Verify that we can try other ports
626 626 ===================================
627 627
628 628 Extensions aren't inherited by the invoked run-tests.py. An extension
629 629 introducing a repository requirement could cause this to fail. So we force
630 630 HGRCPATH to get a clean environment.
631 631
632 632 $ HGRCPATH= hg init inuse
633 633 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
634 634 $ cat blocks.pid >> $DAEMON_PIDS
635 635 $ cat > test-serve-inuse.t <<EOF
636 636 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
637 637 > $ cat hg.pid >> \$DAEMON_PIDS
638 638 > EOF
639 639 $ rt test-serve-inuse.t
640 640 running 1 tests using 1 parallel processes
641 641 .
642 642 # Ran 1 tests, 0 skipped, 0 failed.
643 643 $ rm test-serve-inuse.t
644 644 $ killdaemons.py $DAEMON_PIDS
645 645
646 646 Running In Debug Mode
647 647 ======================
648 648
649 649 $ rt --debug 2>&1 | grep -v pwd
650 650 running 2 tests using 1 parallel processes
651 651 + alias hg=hg.exe (windows !)
652 652 + echo *SALT* 0 0 (glob)
653 653 *SALT* 0 0 (glob)
654 654 + echo babar
655 655 babar
656 656 + echo *SALT* 10 0 (glob)
657 657 *SALT* 10 0 (glob)
658 658 .+ alias hg=hg.exe (windows !)
659 659 *+ echo *SALT* 0 0 (glob)
660 660 *SALT* 0 0 (glob)
661 661 + echo babar
662 662 babar
663 663 + echo *SALT* 2 0 (glob)
664 664 *SALT* 2 0 (glob)
665 665 + echo xyzzy
666 666 xyzzy
667 667 + echo *SALT* 9 0 (glob)
668 668 *SALT* 9 0 (glob)
669 669 + printf *abc\ndef\nxyz\n* (glob)
670 670 abc
671 671 def
672 672 xyz
673 673 + echo *SALT* 15 0 (glob)
674 674 *SALT* 15 0 (glob)
675 675 + printf *zyx\nwvu\ntsr\n* (glob)
676 676 zyx
677 677 wvu
678 678 tsr
679 679 + echo *SALT* 22 0 (glob)
680 680 *SALT* 22 0 (glob)
681 681 .
682 682 # Ran 2 tests, 0 skipped, 0 failed.
683 683
684 684 Parallel runs
685 685 ==============
686 686
687 687 (duplicate the failing test to get predictable output)
688 688 $ cp test-failure.t test-failure-copy.t
689 689
690 690 $ rt --jobs 2 test-failure*.t -n
691 691 running 2 tests using 2 parallel processes
692 692 !!
693 693 Failed test-failure*.t: output changed (glob)
694 694 Failed test-failure*.t: output changed (glob)
695 695 # Ran 2 tests, 0 skipped, 2 failed.
696 696 python hash seed: * (glob)
697 697 [1]
698 698
699 699 failures in parallel with --first should only print one failure
700 700 $ rt --jobs 2 --first test-failure*.t
701 701 running 2 tests using 2 parallel processes
702 702
703 703 --- $TESTTMP/test-failure*.t (glob)
704 704 +++ $TESTTMP/test-failure*.t.err (glob)
705 705 @@ -1,5 +1,5 @@
706 706 $ echo babar
707 707 - rataxes
708 708 + babar
709 709 This is a noop statement so that
710 710 this test is still more bytes than success.
711 711 pad pad pad pad............................................................
712 712
713 713 Failed test-failure*.t: output changed (glob)
714 714 Failed test-failure*.t: output changed (glob)
715 715 # Ran 2 tests, 0 skipped, 2 failed.
716 716 python hash seed: * (glob)
717 717 [1]
718 718
719 719
720 720 (delete the duplicated test file)
721 721 $ rm test-failure-copy.t
722 722
723 723 multiple runs per test should be parallelized
724 724
725 725 $ rt --jobs 2 --runs-per-test 2 test-success.t
726 726 running 2 tests using 2 parallel processes
727 727 ..
728 728 # Ran 2 tests, 0 skipped, 0 failed.
729 729
730 730 Interactive run
731 731 ===============
732 732
733 733 (backup the failing test)
734 734 $ cp test-failure.t backup
735 735
736 736 Refuse the fix
737 737
738 738 $ echo 'n' | rt -i
739 739 running 2 tests using 1 parallel processes
740 740
741 741 --- $TESTTMP/test-failure.t
742 742 +++ $TESTTMP/test-failure.t.err
743 743 @@ -1,5 +1,5 @@
744 744 $ echo babar
745 745 - rataxes
746 746 + babar
747 747 This is a noop statement so that
748 748 this test is still more bytes than success.
749 749 pad pad pad pad............................................................
750 750 Accept this change? [y/N]
751 751 ERROR: test-failure.t output changed
752 752 !.
753 753 Failed test-failure.t: output changed
754 754 # Ran 2 tests, 0 skipped, 1 failed.
755 755 python hash seed: * (glob)
756 756 [1]
757 757
758 758 $ cat test-failure.t
759 759 $ echo babar
760 760 rataxes
761 761 This is a noop statement so that
762 762 this test is still more bytes than success.
763 763 pad pad pad pad............................................................
764 764 pad pad pad pad............................................................
765 765 pad pad pad pad............................................................
766 766 pad pad pad pad............................................................
767 767 pad pad pad pad............................................................
768 768 pad pad pad pad............................................................
769 769
770 770 Interactive with custom view
771 771
772 772 $ echo 'n' | rt -i --view echo
773 773 running 2 tests using 1 parallel processes
774 774 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
775 775 Accept this change? [y/N]* (glob)
776 776 ERROR: test-failure.t output changed
777 777 !.
778 778 Failed test-failure.t: output changed
779 779 # Ran 2 tests, 0 skipped, 1 failed.
780 780 python hash seed: * (glob)
781 781 [1]
782 782
783 783 View the fix
784 784
785 785 $ echo 'y' | rt --view echo
786 786 running 2 tests using 1 parallel processes
787 787 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
788 788
789 789 ERROR: test-failure.t output changed
790 790 !.
791 791 Failed test-failure.t: output changed
792 792 # Ran 2 tests, 0 skipped, 1 failed.
793 793 python hash seed: * (glob)
794 794 [1]
795 795
796 796 Accept the fix
797 797
798 798 $ cat >> test-failure.t <<EOF
799 799 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
800 800 > saved backup bundle to \$TESTTMP/foo.hg
801 801 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
802 802 > saved backup bundle to $TESTTMP\\foo.hg
803 803 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
804 804 > saved backup bundle to \$TESTTMP/*.hg (glob)
805 805 > EOF
806 806 $ echo 'y' | rt -i 2>&1
807 807 running 2 tests using 1 parallel processes
808 808
809 809 --- $TESTTMP/test-failure.t
810 810 +++ $TESTTMP/test-failure.t.err
811 811 @@ -1,5 +1,5 @@
812 812 $ echo babar
813 813 - rataxes
814 814 + babar
815 815 This is a noop statement so that
816 816 this test is still more bytes than success.
817 817 pad pad pad pad............................................................
818 818 @@ -11,6 +11,6 @@
819 819 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
820 820 saved backup bundle to $TESTTMP/foo.hg
821 821 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
822 822 - saved backup bundle to $TESTTMP\foo.hg
823 823 + saved backup bundle to $TESTTMP/foo.hg
824 824 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
825 825 saved backup bundle to $TESTTMP/*.hg (glob)
826 826 Accept this change? [y/N] ..
827 827 # Ran 2 tests, 0 skipped, 0 failed.
828 828
829 829 $ sed -e 's,(glob)$,&<,g' test-failure.t
830 830 $ echo babar
831 831 babar
832 832 This is a noop statement so that
833 833 this test is still more bytes than success.
834 834 pad pad pad pad............................................................
835 835 pad pad pad pad............................................................
836 836 pad pad pad pad............................................................
837 837 pad pad pad pad............................................................
838 838 pad pad pad pad............................................................
839 839 pad pad pad pad............................................................
840 840 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
841 841 saved backup bundle to $TESTTMP/foo.hg
842 842 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
843 843 saved backup bundle to $TESTTMP/foo.hg
844 844 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
845 845 saved backup bundle to $TESTTMP/*.hg (glob)<
846 846
847 847 $ rm test-failure.t
848 848
849 849 Race condition - the test file is modified while the test is running
850 850
851 851 $ TESTRACEDIR=`pwd`
852 852 $ export TESTRACEDIR
853 853 $ cat > test-race.t <<EOF
854 854 > $ echo 1
855 855 > $ echo "# a new line" >> $TESTRACEDIR/test-race.t
856 856 > EOF
857 857
858 858 $ rt -i test-race.t
859 859 running 1 tests using 1 parallel processes
860 860
861 861 --- $TESTTMP/test-race.t
862 862 +++ $TESTTMP/test-race.t.err
863 863 @@ -1,2 +1,3 @@
864 864 $ echo 1
865 865 + 1
866 866 $ echo "# a new line" >> $TESTTMP/test-race.t
867 867 Reference output has changed (run again to prompt changes)
868 868 ERROR: test-race.t output changed
869 869 !
870 870 Failed test-race.t: output changed
871 871 # Ran 1 tests, 0 skipped, 1 failed.
872 872 python hash seed: * (glob)
873 873 [1]
874 874
875 875 $ rm test-race.t
876 876
877 877 When "#testcases" is used in .t files
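(Each name listed on a #testcases line yields a separate run of the file with the
matching #if blocks enabled, so the prompt below is answered once per case.)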
878 878
879 879 $ cat >> test-cases.t <<EOF
880 880 > #testcases a b
881 881 > #if a
882 882 > $ echo 1
883 883 > #endif
884 884 > #if b
885 885 > $ echo 2
886 886 > #endif
887 887 > EOF
888 888
889 889 $ cat <<EOF | rt -i test-cases.t 2>&1
890 890 > y
891 891 > y
892 892 > EOF
893 893 running 2 tests using 1 parallel processes
894 894
895 895 --- $TESTTMP/test-cases.t
896 896 +++ $TESTTMP/test-cases.t#a.err
897 897 @@ -1,6 +1,7 @@
898 898 #testcases a b
899 899 #if a
900 900 $ echo 1
901 901 + 1
902 902 #endif
903 903 #if b
904 904 $ echo 2
905 905 Accept this change? [y/N] .
906 906 --- $TESTTMP/test-cases.t
907 907 +++ $TESTTMP/test-cases.t#b.err
908 908 @@ -5,4 +5,5 @@
909 909 #endif
910 910 #if b
911 911 $ echo 2
912 912 + 2
913 913 #endif
914 914 Accept this change? [y/N] .
915 915 # Ran 2 tests, 0 skipped, 0 failed.
916 916
917 917 $ cat test-cases.t
918 918 #testcases a b
919 919 #if a
920 920 $ echo 1
921 921 1
922 922 #endif
923 923 #if b
924 924 $ echo 2
925 925 2
926 926 #endif
927 927
928 928 $ cat >> test-cases.t <<'EOF'
929 929 > #if a
930 930 > $ NAME=A
931 931 > #else
932 932 > $ NAME=B
933 933 > #endif
934 934 > $ echo $NAME
935 935 > A (a !)
936 936 > B (b !)
937 937 > EOF
938 938 $ rt test-cases.t
939 939 running 2 tests using 1 parallel processes
940 940 ..
941 941 # Ran 2 tests, 0 skipped, 0 failed.
942 942
943 943 When using multiple dimensions of "#testcases" in .t files
944 944
945 945 $ cat > test-cases.t <<'EOF'
946 946 > #testcases a b
947 947 > #testcases c d
948 948 > #if a d
949 949 > $ echo $TESTCASE
950 950 > a#d
951 951 > #endif
952 952 > #if b c
953 953 > $ echo yes
954 954 > no
955 955 > #endif
956 956 > EOF
957 957 $ rt test-cases.t
958 958 running 4 tests using 1 parallel processes
959 959 ..
960 960 --- $TESTTMP/test-cases.t
961 961 +++ $TESTTMP/test-cases.t#b#c.err
962 962 @@ -6,5 +6,5 @@
963 963 #endif
964 964 #if b c
965 965 $ echo yes
966 966 - no
967 967 + yes
968 968 #endif
969 969
970 970 ERROR: test-cases.t#b#c output changed
971 971 !.
972 972 Failed test-cases.t#b#c: output changed
973 973 # Ran 4 tests, 0 skipped, 1 failed.
974 974 python hash seed: * (glob)
975 975 [1]
976 976
977 977 $ rt --retest
978 978 running 1 tests using 1 parallel processes
979 979
980 980 --- $TESTTMP/test-cases.t
981 981 +++ $TESTTMP/test-cases.t#b#c.err
982 982 @@ -6,5 +6,5 @@
983 983 #endif
984 984 #if b c
985 985 $ echo yes
986 986 - no
987 987 + yes
988 988 #endif
989 989
990 990 ERROR: test-cases.t#b#c output changed
991 991 !
992 992 Failed test-cases.t#b#c: output changed
993 993 # Ran 1 tests, 0 skipped, 1 failed.
994 994 python hash seed: * (glob)
995 995 [1]
996 996 $ rm test-cases.t#b#c.err
997 997 $ rm test-cases.t
998 998
999 999 (reinstall)
1000 1000 $ mv backup test-failure.t
1001 1001
1002 1002 No Diff
1003 1003 ===============
1004 1004
1005 1005 $ rt --nodiff
1006 1006 running 2 tests using 1 parallel processes
1007 1007 !.
1008 1008 Failed test-failure.t: output changed
1009 1009 # Ran 2 tests, 0 skipped, 1 failed.
1010 1010 python hash seed: * (glob)
1011 1011 [1]
1012 1012
1013 1013 test --tmpdir support
1014 1014 $ rt --tmpdir=$TESTTMP/keep test-success.t
1015 1015 running 1 tests using 1 parallel processes
1016 1016
1017 1017 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t
1018 1018 Keeping threadtmp dir: $TESTTMP/keep/child1
1019 1019 .
1020 1020 # Ran 1 tests, 0 skipped, 0 failed.
1021 1021
1022 1022 timeouts
1023 1023 ========
1024 1024 $ cat > test-timeout.t <<EOF
1025 1025 > $ sleep 2
1026 1026 > $ echo pass
1027 1027 > pass
1028 1028 > EOF
1029 1029 > echo '#require slow' > test-slow-timeout.t
1030 1030 > cat test-timeout.t >> test-slow-timeout.t
1031 1031 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
1032 1032 running 2 tests using 1 parallel processes
1033 1033 st
1034 1034 Skipped test-slow-timeout.t: missing feature: allow slow tests (use --allow-slow-tests)
1035 1035 Failed test-timeout.t: timed out
1036 1036 # Ran 1 tests, 1 skipped, 1 failed.
1037 1037 python hash seed: * (glob)
1038 1038 [1]
1039 1039 $ rt --timeout=1 --slowtimeout=3 \
1040 1040 > test-timeout.t test-slow-timeout.t --allow-slow-tests
1041 1041 running 2 tests using 1 parallel processes
1042 1042 .t
1043 1043 Failed test-timeout.t: timed out
1044 1044 # Ran 2 tests, 0 skipped, 1 failed.
1045 1045 python hash seed: * (glob)
1046 1046 [1]
1047 1047 $ rm test-timeout.t test-slow-timeout.t
1048 1048
1049 1049 test for --time
1050 1050 ==================
1051 1051
1052 1052 $ rt test-success.t --time
1053 1053 running 1 tests using 1 parallel processes
1054 1054 .
1055 1055 # Ran 1 tests, 0 skipped, 0 failed.
1056 1056 # Producing time report
1057 1057 start end cuser csys real Test
1058 1058 \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} test-success.t (re)
1059 1059
1060 1060 test for --time with --jobs enabled
1061 1061 ====================================
1062 1062
1063 1063 $ rt test-success.t --time --jobs 2
1064 1064 running 1 tests using 1 parallel processes
1065 1065 .
1066 1066 # Ran 1 tests, 0 skipped, 0 failed.
1067 1067 # Producing time report
1068 1068 start end cuser csys real Test
1069 1069 \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} test-success.t (re)
1070 1070
1071 1071 Skips
1072 1072 ================
1073 1073 $ cat > test-skip.t <<EOF
1074 1074 > $ echo xyzzy
1075 1075 > #if true
1076 1076 > #require false
1077 1077 > #end
1078 1078 > EOF
1079 1079 $ cat > test-noskip.t <<EOF
1080 1080 > #if false
1081 1081 > #require false
1082 1082 > #endif
1083 1083 > EOF
1084 1084 $ rt --nodiff
1085 1085 running 4 tests using 1 parallel processes
1086 1086 !.s.
1087 1087 Skipped test-skip.t: missing feature: nail clipper
1088 1088 Failed test-failure.t: output changed
1089 1089 # Ran 3 tests, 1 skipped, 1 failed.
1090 1090 python hash seed: * (glob)
1091 1091 [1]
1092 1092
1093 1093 $ rm test-noskip.t
1094 1094 $ rt --keyword xyzzy
1095 1095 running 3 tests using 1 parallel processes
1096 1096 .s
1097 1097 Skipped test-skip.t: missing feature: nail clipper
1098 1098 # Ran 2 tests, 2 skipped, 0 failed.
1099 1099
1100 1100 Skips with xml
1101 1101 $ rt --keyword xyzzy \
1102 1102 > --xunit=xunit.xml
1103 1103 running 3 tests using 1 parallel processes
1104 1104 .s
1105 1105 Skipped test-skip.t: missing feature: nail clipper
1106 1106 # Ran 2 tests, 2 skipped, 0 failed.
1107 1107 $ cat xunit.xml
1108 1108 <?xml version="1.0" encoding="utf-8"?>
1109 1109 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
1110 1110 <testcase name="test-success.t" time="*"/> (glob)
1111 1111 <testcase name="test-skip.t">
1112 1112 <skipped><![CDATA[missing feature: nail clipper]]></skipped> (py38 !)
1113 1113 <skipped> (no-py38 !)
1114 1114 <![CDATA[missing feature: nail clipper]]> </skipped> (no-py38 !)
1115 1115 </testcase>
1116 1116 </testsuite>
1117 1117
1118 1118 Missing skips or blacklisted skips don't count as executed:
1119 1119 $ mkdir tests
1120 1120 $ echo tests/test-failure.t > blacklist
1121 1121 $ cp test-failure.t tests
1122 1122 $ rt --blacklist=blacklist --json\
1123 1123 > tests/test-failure.t tests/test-bogus.t
1124 1124 running 2 tests using 1 parallel processes
1125 1125 ss
1126 1126 Skipped test-bogus.t: Doesn't exist
1127 1127 Skipped test-failure.t: blacklisted
1128 1128 # Ran 0 tests, 2 skipped, 0 failed.
1129 1129 $ cat tests/report.json
1130 1130 testreport ={
1131 1131 "test-bogus.t": {
1132 1132 "result": "skip"
1133 1133 },
1134 1134 "test-failure.t": {
1135 1135 "result": "skip"
1136 1136 }
1137 1137 } (no-eol)
1138 1138 $ rm -r tests
1139 1139 $ echo test-failure.t > blacklist
1140 1140
1141 1141 Whitelist trumps blacklist
1142 1142 $ echo test-failure.t > whitelist
1143 1143 $ rt --blacklist=blacklist --whitelist=whitelist --json\
1144 1144 > test-failure.t test-bogus.t
1145 1145 running 2 tests using 1 parallel processes
1146 1146 s
1147 1147 --- $TESTTMP/test-failure.t
1148 1148 +++ $TESTTMP/test-failure.t.err
1149 1149 @@ -1,5 +1,5 @@
1150 1150 $ echo babar
1151 1151 - rataxes
1152 1152 + babar
1153 1153 This is a noop statement so that
1154 1154 this test is still more bytes than success.
1155 1155 pad pad pad pad............................................................
1156 1156
1157 1157 ERROR: test-failure.t output changed
1158 1158 !
1159 1159 Skipped test-bogus.t: Doesn't exist
1160 1160 Failed test-failure.t: output changed
1161 1161 # Ran 1 tests, 1 skipped, 1 failed.
1162 1162 python hash seed: * (glob)
1163 1163 [1]
1164 1164
1165 1165 Ensure that --test-list causes only the tests listed in that file to
1166 1166 be executed.
1167 1167 $ echo test-success.t >> onlytest
1168 1168 $ rt --test-list=onlytest
1169 1169 running 1 tests using 1 parallel processes
1170 1170 .
1171 1171 # Ran 1 tests, 0 skipped, 0 failed.
1172 1172 $ echo test-bogus.t >> anothertest
1173 1173 $ rt --test-list=onlytest --test-list=anothertest
1174 1174 running 2 tests using 1 parallel processes
1175 1175 s.
1176 1176 Skipped test-bogus.t: Doesn't exist
1177 1177 # Ran 1 tests, 1 skipped, 0 failed.
1178 1178 $ rm onlytest anothertest
1179 1179
1180 1180 test for --json
1181 1181 ==================
1182 1182
1183 1183 $ rt --json
1184 1184 running 3 tests using 1 parallel processes
1185 1185
1186 1186 --- $TESTTMP/test-failure.t
1187 1187 +++ $TESTTMP/test-failure.t.err
1188 1188 @@ -1,5 +1,5 @@
1189 1189 $ echo babar
1190 1190 - rataxes
1191 1191 + babar
1192 1192 This is a noop statement so that
1193 1193 this test is still more bytes than success.
1194 1194 pad pad pad pad............................................................
1195 1195
1196 1196 ERROR: test-failure.t output changed
1197 1197 !.s
1198 1198 Skipped test-skip.t: missing feature: nail clipper
1199 1199 Failed test-failure.t: output changed
1200 1200 # Ran 2 tests, 1 skipped, 1 failed.
1201 1201 python hash seed: * (glob)
1202 1202 [1]
1203 1203
1204 1204 $ cat report.json
1205 1205 testreport ={
1206 1206 "test-failure.t": [\{] (re)
1207 1207 "csys": "\s*\d+\.\d{3,4}", ? (re)
1208 1208 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1209 1209 "diff": "---.+\+\+\+.+", ? (re)
1210 1210 "end": "\s*\d+\.\d{3,4}", ? (re)
1211 1211 "result": "failure", ? (re)
1212 1212 "start": "\s*\d+\.\d{3,4}", ? (re)
1213 1213 "time": "\s*\d+\.\d{3,4}" (re)
1214 1214 }, ? (re)
1215 1215 "test-skip.t": {
1216 1216 "csys": "\s*\d+\.\d{3,4}", ? (re)
1217 1217 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1218 1218 "diff": "", ? (re)
1219 1219 "end": "\s*\d+\.\d{3,4}", ? (re)
1220 1220 "result": "skip", ? (re)
1221 1221 "start": "\s*\d+\.\d{3,4}", ? (re)
1222 1222 "time": "\s*\d+\.\d{3,4}" (re)
1223 1223 }, ? (re)
1224 1224 "test-success.t": [\{] (re)
1225 1225 "csys": "\s*\d+\.\d{3,4}", ? (re)
1226 1226 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1227 1227 "diff": "", ? (re)
1228 1228 "end": "\s*\d+\.\d{3,4}", ? (re)
1229 1229 "result": "success", ? (re)
1230 1230 "start": "\s*\d+\.\d{3,4}", ? (re)
1231 1231 "time": "\s*\d+\.\d{3,4}" (re)
1232 1232 }
1233 1233 } (no-eol)
1234 1234 --json with --outputdir
1235 1235
1236 1236 $ rm report.json
1237 1237 $ rm -r output
1238 1238 $ mkdir output
1239 1239 $ rt --json --outputdir output
1240 1240 running 3 tests using 1 parallel processes
1241 1241
1242 1242 --- $TESTTMP/test-failure.t
1243 1243 +++ $TESTTMP/output/test-failure.t.err
1244 1244 @@ -1,5 +1,5 @@
1245 1245 $ echo babar
1246 1246 - rataxes
1247 1247 + babar
1248 1248 This is a noop statement so that
1249 1249 this test is still more bytes than success.
1250 1250 pad pad pad pad............................................................
1251 1251
1252 1252 ERROR: test-failure.t output changed
1253 1253 !.s
1254 1254 Skipped test-skip.t: missing feature: nail clipper
1255 1255 Failed test-failure.t: output changed
1256 1256 # Ran 2 tests, 1 skipped, 1 failed.
1257 1257 python hash seed: * (glob)
1258 1258 [1]
1259 1259 $ f report.json
1260 1260 report.json: file not found
1261 1261 $ cat output/report.json
1262 1262 testreport ={
1263 1263 "test-failure.t": [\{] (re)
1264 1264 "csys": "\s*\d+\.\d{3,4}", ? (re)
1265 1265 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1266 1266 "diff": "---.+\+\+\+.+", ? (re)
1267 1267 "end": "\s*\d+\.\d{3,4}", ? (re)
1268 1268 "result": "failure", ? (re)
1269 1269 "start": "\s*\d+\.\d{3,4}", ? (re)
1270 1270 "time": "\s*\d+\.\d{3,4}" (re)
1271 1271 }, ? (re)
1272 1272 "test-skip.t": {
1273 1273 "csys": "\s*\d+\.\d{3,4}", ? (re)
1274 1274 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1275 1275 "diff": "", ? (re)
1276 1276 "end": "\s*\d+\.\d{3,4}", ? (re)
1277 1277 "result": "skip", ? (re)
1278 1278 "start": "\s*\d+\.\d{3,4}", ? (re)
1279 1279 "time": "\s*\d+\.\d{3,4}" (re)
1280 1280 }, ? (re)
1281 1281 "test-success.t": [\{] (re)
1282 1282 "csys": "\s*\d+\.\d{3,4}", ? (re)
1283 1283 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1284 1284 "diff": "", ? (re)
1285 1285 "end": "\s*\d+\.\d{3,4}", ? (re)
1286 1286 "result": "success", ? (re)
1287 1287 "start": "\s*\d+\.\d{3,4}", ? (re)
1288 1288 "time": "\s*\d+\.\d{3,4}" (re)
1289 1289 }
1290 1290 } (no-eol)
1291 1291 $ ls -a output
1292 1292 .
1293 1293 ..
1294 1294 .testtimes
1295 1295 report.json
1296 1296 test-failure.t.err
1297 1297
1298 1298 Test that failed tests accepted through the interactive prompt are properly reported:
1299 1299
1300 1300 $ cp test-failure.t backup
1301 1301 $ echo y | rt --json -i
1302 1302 running 3 tests using 1 parallel processes
1303 1303
1304 1304 --- $TESTTMP/test-failure.t
1305 1305 +++ $TESTTMP/test-failure.t.err
1306 1306 @@ -1,5 +1,5 @@
1307 1307 $ echo babar
1308 1308 - rataxes
1309 1309 + babar
1310 1310 This is a noop statement so that
1311 1311 this test is still more bytes than success.
1312 1312 pad pad pad pad............................................................
1313 1313 Accept this change? [y/N] ..s
1314 1314 Skipped test-skip.t: missing feature: nail clipper
1315 1315 # Ran 2 tests, 1 skipped, 0 failed.
1316 1316
1317 1317 $ cat report.json
1318 1318 testreport ={
1319 1319 "test-failure.t": [\{] (re)
1320 1320 "csys": "\s*\d+\.\d{3,4}", ? (re)
1321 1321 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1322 1322 "diff": "", ? (re)
1323 1323 "end": "\s*\d+\.\d{3,4}", ? (re)
1324 1324 "result": "success", ? (re)
1325 1325 "start": "\s*\d+\.\d{3,4}", ? (re)
1326 1326 "time": "\s*\d+\.\d{3,4}" (re)
1327 1327 }, ? (re)
1328 1328 "test-skip.t": {
1329 1329 "csys": "\s*\d+\.\d{3,4}", ? (re)
1330 1330 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1331 1331 "diff": "", ? (re)
1332 1332 "end": "\s*\d+\.\d{3,4}", ? (re)
1333 1333 "result": "skip", ? (re)
1334 1334 "start": "\s*\d+\.\d{3,4}", ? (re)
1335 1335 "time": "\s*\d+\.\d{3,4}" (re)
1336 1336 }, ? (re)
1337 1337 "test-success.t": [\{] (re)
1338 1338 "csys": "\s*\d+\.\d{3,4}", ? (re)
1339 1339 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1340 1340 "diff": "", ? (re)
1341 1341 "end": "\s*\d+\.\d{3,4}", ? (re)
1342 1342 "result": "success", ? (re)
1343 1343 "start": "\s*\d+\.\d{3,4}", ? (re)
1344 1344 "time": "\s*\d+\.\d{3,4}" (re)
1345 1345 }
1346 1346 } (no-eol)
1347 1347 $ mv backup test-failure.t
1348 1348
1349 1349 backslash on end of line with glob matching is handled properly
1350 1350
1351 1351 $ cat > test-glob-backslash.t << EOF
1352 1352 > $ echo 'foo bar \\'
1353 1353 > foo * \ (glob)
1354 1354 > EOF
1355 1355
1356 1356 $ rt test-glob-backslash.t
1357 1357 running 1 tests using 1 parallel processes
1358 1358 .
1359 1359 # Ran 1 tests, 0 skipped, 0 failed.
1360 1360
1361 1361 $ rm -f test-glob-backslash.t
1362 1362
1363 1363 Test globbing of local IP addresses
1364 1364 $ echo 172.16.18.1
1365 1365 $LOCALIP (glob)
1366 1366 $ echo dead:beef::1
1367 1367 $LOCALIP (glob)
1368 1368
1369 1369 Add support for external test formatter
1370 1370 =======================================
1371 1371
1372 1372 $ CUSTOM_TEST_RESULT=basic_test_result "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` -j1 "$@" test-success.t test-failure.t
1373 1373 running 2 tests using 1 parallel processes
1374 1374
1375 1375 # Ran 2 tests, 0 skipped, 0 failed.
1376 1376 ON_START! <__main__.TestSuite tests=[<__main__.TTest testMethod=test-failure.t>, <__main__.TTest testMethod=test-success.t>]>
1377 1377 FAILURE! test-failure.t output changed
1378 1378 SUCCESS! test-success.t
1379 1379 ON_END!
1380 1380
1381 1381 Test reusability for third party tools
1382 1382 ======================================
1383 1383
1384 1384 $ mkdir "$TESTTMP"/anothertests
1385 1385 $ cd "$TESTTMP"/anothertests
1386 1386
1387 1387 test that `run-tests.py` can execute hghave, even when it is not run from
1388 1388 within the Mercurial source tree.
1389 1389
1390 1390 $ cat > test-hghave.t <<EOF
1391 1391 > #require true
1392 1392 > $ echo foo
1393 1393 > foo
1394 1394 > EOF
1395 1395 $ rt test-hghave.t
1396 1396 running 1 tests using 1 parallel processes
1397 1397 .
1398 1398 # Ran 1 tests, 0 skipped, 0 failed.
1399 1399
1400 1400 test that RUNTESTDIR refers to the directory in which the currently
1401 1401 running `run-tests.py` is placed.
1402 1402
1403 1403 $ cat > test-runtestdir.t <<EOF
1404 1404 > - $TESTDIR, in which test-run-tests.t is placed
1405 1405 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
1406 1406 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
1407 1407 >
1408 1408 > #if windows
1409 1409 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
1410 1410 > #else
1411 1411 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
1412 1412 > #endif
1413 1413 > If this prints a path, that means RUNTESTDIR didn't equal
1414 1414 > TESTDIR as it should have.
1415 1415 > $ test "\$RUNTESTDIR" = "$TESTDIR" || echo "\$RUNTESTDIR"
1416 1416 > This should print the start of check-code. If this passes but the
1417 1417 > previous check failed, that means we found a copy of check-code at whatever
1418 1418 > RUNTESTDIR ended up containing, even though it doesn't match TESTDIR.
1419 1419 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py | sed 's@.!.*python3@#!USRBINENVPY@'
1420 1420 > #!USRBINENVPY
1421 1421 > #
1422 1422 > # check-code - a style and portability checker for Mercurial
1423 1423 > EOF
1424 1424 $ rt test-runtestdir.t
1425 1425 running 1 tests using 1 parallel processes
1426 1426 .
1427 1427 # Ran 1 tests, 0 skipped, 0 failed.
1428 1428
1429 1429 #if execbit
1430 1430
1431 1431 test that TESTDIR is included in PATH
1432 1432
1433 1433 $ cat > custom-command.sh <<EOF
1434 1434 > #!/bin/sh
1435 1435 > echo "hello world"
1436 1436 > EOF
1437 1437 $ chmod +x custom-command.sh
1438 1438 $ cat > test-testdir-path.t <<EOF
1439 1439 > $ custom-command.sh
1440 1440 > hello world
1441 1441 > EOF
1442 1442 $ rt test-testdir-path.t
1443 1443 running 1 tests using 1 parallel processes
1444 1444 .
1445 1445 # Ran 1 tests, 0 skipped, 0 failed.
1446 1446
1447 1447 #endif
1448 1448
1449 1449 test support for --allow-slow-tests
1450 1450 $ cat > test-very-slow-test.t <<EOF
1451 1451 > #require slow
1452 1452 > $ echo pass
1453 1453 > pass
1454 1454 > EOF
1455 1455 $ rt test-very-slow-test.t
1456 1456 running 1 tests using 1 parallel processes
1457 1457 s
1458 1458 Skipped test-very-slow-test.t: missing feature: allow slow tests (use --allow-slow-tests)
1459 1459 # Ran 0 tests, 1 skipped, 0 failed.
1460 1460 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
1461 1461 running 1 tests using 1 parallel processes
1462 1462 .
1463 1463 # Ran 1 tests, 0 skipped, 0 failed.
1464 1464
1465 1465 support for running a test outside the current directory
1466 1466 $ mkdir nonlocal
1467 1467 $ cat > nonlocal/test-is-not-here.t << EOF
1468 1468 > $ echo pass
1469 1469 > pass
1470 1470 > EOF
1471 1471 $ rt nonlocal/test-is-not-here.t
1472 1472 running 1 tests using 1 parallel processes
1473 1473 .
1474 1474 # Ran 1 tests, 0 skipped, 0 failed.
1475 1475
1476 1476 support for automatically discovering tests when the argument is a folder
1477 1477 $ mkdir tmp && cd tmp
1478 1478
1479 1479 $ cat > test-uno.t << EOF
1480 1480 > $ echo line
1481 1481 > line
1482 1482 > EOF
1483 1483
1484 1484 $ cp test-uno.t test-dos.t
1485 1485 $ cd ..
1486 1486 $ cp -R tmp tmpp
1487 1487 $ cp tmp/test-uno.t test-solo.t
1488 1488
1489 1489 $ rt tmp/ test-solo.t tmpp
1490 1490 running 5 tests using 1 parallel processes
1491 1491 .....
1492 1492 # Ran 5 tests, 0 skipped, 0 failed.
1493 1493 $ rm -rf tmp tmpp
1494 1494
1495 1495 support for running run-tests.py from another directory
1496 1496 $ mkdir tmp && cd tmp
1497 1497
1498 1498 $ cat > useful-file.sh << EOF
1499 1499 > important command
1500 1500 > EOF
1501 1501
1502 1502 $ cat > test-folder.t << EOF
1503 1503 > $ cat \$TESTDIR/useful-file.sh
1504 1504 > important command
1505 1505 > EOF
1506 1506
1507 1507 $ cat > test-folder-fail.t << EOF
1508 1508 > $ cat \$TESTDIR/useful-file.sh
1509 1509 > important commando
1510 1510 > EOF
1511 1511
1512 1512 $ cd ..
1513 1513 $ rt tmp/test-*.t
1514 1514 running 2 tests using 1 parallel processes
1515 1515
1516 1516 --- $TESTTMP/anothertests/tmp/test-folder-fail.t
1517 1517 +++ $TESTTMP/anothertests/tmp/test-folder-fail.t.err
1518 1518 @@ -1,2 +1,2 @@
1519 1519 $ cat $TESTDIR/useful-file.sh
1520 1520 - important commando
1521 1521 + important command
1522 1522
1523 1523 ERROR: test-folder-fail.t output changed
1524 1524 !.
1525 1525 Failed test-folder-fail.t: output changed
1526 1526 # Ran 2 tests, 0 skipped, 1 failed.
1527 1527 python hash seed: * (glob)
1528 1528 [1]
1529 1529
1530 1530 support for bisecting failed tests automatically
1531 1531 $ hg init bisect
1532 1532 $ cd bisect
1533 1533 $ cat >> test-bisect.t <<EOF
1534 1534 > $ echo pass
1535 1535 > pass
1536 1536 > EOF
1537 1537 $ hg add test-bisect.t
1538 1538 $ hg ci -m 'good'
1539 1539 $ cat >> test-bisect.t <<EOF
1540 1540 > $ echo pass
1541 1541 > fail
1542 1542 > EOF
1543 1543 $ hg ci -m 'bad'
1544 1544 $ rt --known-good-rev=0 test-bisect.t
1545 1545 running 1 tests using 1 parallel processes
1546 1546
1547 1547 --- $TESTTMP/anothertests/bisect/test-bisect.t
1548 1548 +++ $TESTTMP/anothertests/bisect/test-bisect.t.err
1549 1549 @@ -1,4 +1,4 @@
1550 1550 $ echo pass
1551 1551 pass
1552 1552 $ echo pass
1553 1553 - fail
1554 1554 + pass
1555 1555
1556 1556 ERROR: test-bisect.t output changed
1557 1557 !
1558 1558 Failed test-bisect.t: output changed
1559 1559 test-bisect.t broken by 72cbf122d116 (bad)
1560 1560 # Ran 1 tests, 0 skipped, 1 failed.
1561 1561 python hash seed: * (glob)
1562 1562 [1]
1563 1563
1564 1564 $ cd ..
1565 1565
1566 1566 support bisecting a separate repo
1567 1567
1568 1568 $ hg init bisect-dependent
1569 1569 $ cd bisect-dependent
1570 1570 $ cat > test-bisect-dependent.t <<EOF
1571 1571 > $ tail -1 \$TESTDIR/../bisect/test-bisect.t
1572 1572 > pass
1573 1573 > EOF
1574 1574 $ hg commit -Am dependent test-bisect-dependent.t
1575 1575
1576 1576 $ rt --known-good-rev=0 test-bisect-dependent.t
1577 1577 running 1 tests using 1 parallel processes
1578 1578
1579 1579 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1580 1580 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1581 1581 @@ -1,2 +1,2 @@
1582 1582 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1583 1583 - pass
1584 1584 + fail
1585 1585
1586 1586 ERROR: test-bisect-dependent.t output changed
1587 1587 !
1588 1588 Failed test-bisect-dependent.t: output changed
1589 1589 Failed to identify failure point for test-bisect-dependent.t
1590 1590 # Ran 1 tests, 0 skipped, 1 failed.
1591 1591 python hash seed: * (glob)
1592 1592 [1]
1593 1593
1594 1594 $ rt --bisect-repo=../test-bisect test-bisect-dependent.t
1595 1595 usage: run-tests.py [options] [tests]
1596 1596 run-tests.py: error: --bisect-repo cannot be used without --known-good-rev
1597 1597 [2]
1598 1598
1599 1599 $ rt --known-good-rev=0 --bisect-repo=../bisect test-bisect-dependent.t
1600 1600 running 1 tests using 1 parallel processes
1601 1601
1602 1602 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1603 1603 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1604 1604 @@ -1,2 +1,2 @@
1605 1605 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1606 1606 - pass
1607 1607 + fail
1608 1608
1609 1609 ERROR: test-bisect-dependent.t output changed
1610 1610 !
1611 1611 Failed test-bisect-dependent.t: output changed
1612 1612 test-bisect-dependent.t broken by 72cbf122d116 (bad)
1613 1613 # Ran 1 tests, 0 skipped, 1 failed.
1614 1614 python hash seed: * (glob)
1615 1615 [1]
1616 1616
1617 1617 $ cd ..
1618 1618
1619 1619 Test a broken #if statement doesn't break run-tests threading.
1620 1620 ==============================================================
1621 1621 $ mkdir broken
1622 1622 $ cd broken
1623 1623 $ cat > test-broken.t <<EOF
1624 1624 > true
1625 1625 > #if notarealhghavefeature
1626 1626 > $ false
1627 1627 > #endif
1628 1628 > EOF
1629 1629 $ for f in 1 2 3 4 ; do
1630 1630 > cat > test-works-$f.t <<EOF
1631 1631 > This is test case $f
1632 1632 > $ sleep 1
1633 1633 > EOF
1634 1634 > done
1635 1635 $ rt -j 2
1636 1636 running 5 tests using 2 parallel processes
1637 1637 ....
1638 1638 # Ran 5 tests, 0 skipped, 0 failed.
1639 1639 skipped: unknown feature: notarealhghavefeature
1640 1640
1641 1641 $ cd ..
1642 1642 $ rm -rf broken
1643 1643
1644 1644 Test cases in .t files
1645 1645 ======================
1646 1646 $ mkdir cases
1647 1647 $ cd cases
1648 1648 $ cat > test-cases-abc.t <<'EOF'
1649 1649 > #testcases A B C
1650 1650 > $ V=B
1651 1651 > #if A
1652 1652 > $ V=A
1653 1653 > #endif
1654 1654 > #if C
1655 1655 > $ V=C
1656 1656 > #endif
1657 1657 > $ echo $V | sed 's/A/C/'
1658 1658 > C
1659 1659 > #if C
1660 1660 > $ [ $V = C ]
1661 1661 > #endif
1662 1662 > #if A
1663 1663 > $ [ $V = C ]
1664 1664 > [1]
1665 1665 > #endif
1666 1666 > #if no-C
1667 1667 > $ [ $V = C ]
1668 1668 > [1]
1669 1669 > #endif
1670 1670 > $ [ $V = D ]
1671 1671 > [1]
1672 1672 > EOF
1673 1673 $ rt
1674 1674 running 3 tests using 1 parallel processes
1675 1675 .
1676 1676 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1677 1677 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1678 1678 @@ -7,7 +7,7 @@
1679 1679 $ V=C
1680 1680 #endif
1681 1681 $ echo $V | sed 's/A/C/'
1682 1682 - C
1683 1683 + B
1684 1684 #if C
1685 1685 $ [ $V = C ]
1686 1686 #endif
1687 1687
1688 1688 ERROR: test-cases-abc.t#B output changed
1689 1689 !.
1690 1690 Failed test-cases-abc.t#B: output changed
1691 1691 # Ran 3 tests, 0 skipped, 1 failed.
1692 1692 python hash seed: * (glob)
1693 1693 [1]
1694 1694
1695 1695 --restart works
1696 1696
1697 1697 $ rt --restart
1698 1698 running 2 tests using 1 parallel processes
1699 1699
1700 1700 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1701 1701 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1702 1702 @@ -7,7 +7,7 @@
1703 1703 $ V=C
1704 1704 #endif
1705 1705 $ echo $V | sed 's/A/C/'
1706 1706 - C
1707 1707 + B
1708 1708 #if C
1709 1709 $ [ $V = C ]
1710 1710 #endif
1711 1711
1712 1712 ERROR: test-cases-abc.t#B output changed
1713 1713 !.
1714 1714 Failed test-cases-abc.t#B: output changed
1715 1715 # Ran 2 tests, 0 skipped, 1 failed.
1716 1716 python hash seed: * (glob)
1717 1717 [1]
1718 1718
1719 1719 --restart works with outputdir
1720 1720
1721 1721 $ mkdir output
1722 1722 $ mv test-cases-abc.t#B.err output
1723 1723 $ rt --restart --outputdir output
1724 1724 running 2 tests using 1 parallel processes
1725 1725
1726 1726 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1727 1727 +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t#B.err
1728 1728 @@ -7,7 +7,7 @@
1729 1729 $ V=C
1730 1730 #endif
1731 1731 $ echo $V | sed 's/A/C/'
1732 1732 - C
1733 1733 + B
1734 1734 #if C
1735 1735 $ [ $V = C ]
1736 1736 #endif
1737 1737
1738 1738 ERROR: test-cases-abc.t#B output changed
1739 1739 !.
1740 1740 Failed test-cases-abc.t#B: output changed
1741 1741 # Ran 2 tests, 0 skipped, 1 failed.
1742 1742 python hash seed: * (glob)
1743 1743 [1]
1744 1744
1745 1745 Test TESTCASE variable
1746 1746
1747 1747 $ cat > test-cases-ab.t <<'EOF'
1748 1748 > $ dostuff() {
1749 1749 > > echo "In case $TESTCASE"
1750 1750 > > }
1751 1751 > #testcases A B
1752 1752 > #if A
1753 1753 > $ dostuff
1754 1754 > In case A
1755 1755 > #endif
1756 1756 > #if B
1757 1757 > $ dostuff
1758 1758 > In case B
1759 1759 > #endif
1760 1760 > EOF
1761 1761 $ rt test-cases-ab.t
1762 1762 running 2 tests using 1 parallel processes
1763 1763 ..
1764 1764 # Ran 2 tests, 0 skipped, 0 failed.
1765 1765
1766 1766 Support running a specific test case
1767 1767
1768 1768 $ rt "test-cases-abc.t#B"
1769 1769 running 1 tests using 1 parallel processes
1770 1770
1771 1771 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1772 1772 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1773 1773 @@ -7,7 +7,7 @@
1774 1774 $ V=C
1775 1775 #endif
1776 1776 $ echo $V | sed 's/A/C/'
1777 1777 - C
1778 1778 + B
1779 1779 #if C
1780 1780 $ [ $V = C ]
1781 1781 #endif
1782 1782
1783 1783 ERROR: test-cases-abc.t#B output changed
1784 1784 !
1785 1785 Failed test-cases-abc.t#B: output changed
1786 1786 # Ran 1 tests, 0 skipped, 1 failed.
1787 1787 python hash seed: * (glob)
1788 1788 [1]
1789 1789
1790 1790 Support running multiple test cases in the same file
1791 1791
1792 1792 $ rt test-cases-abc.t#B test-cases-abc.t#C
1793 1793 running 2 tests using 1 parallel processes
1794 1794
1795 1795 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1796 1796 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1797 1797 @@ -7,7 +7,7 @@
1798 1798 $ V=C
1799 1799 #endif
1800 1800 $ echo $V | sed 's/A/C/'
1801 1801 - C
1802 1802 + B
1803 1803 #if C
1804 1804 $ [ $V = C ]
1805 1805 #endif
1806 1806
1807 1807 ERROR: test-cases-abc.t#B output changed
1808 1808 !.
1809 1809 Failed test-cases-abc.t#B: output changed
1810 1810 # Ran 2 tests, 0 skipped, 1 failed.
1811 1811 python hash seed: * (glob)
1812 1812 [1]
1813 1813
1814 1814 Support ignoring invalid test cases
1815 1815
1816 1816 $ rt test-cases-abc.t#B test-cases-abc.t#D
1817 1817 running 1 tests using 1 parallel processes
1818 1818
1819 1819 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1820 1820 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1821 1821 @@ -7,7 +7,7 @@
1822 1822 $ V=C
1823 1823 #endif
1824 1824 $ echo $V | sed 's/A/C/'
1825 1825 - C
1826 1826 + B
1827 1827 #if C
1828 1828 $ [ $V = C ]
1829 1829 #endif
1830 1830
1831 1831 ERROR: test-cases-abc.t#B output changed
1832 1832 !
1833 1833 Failed test-cases-abc.t#B: output changed
1834 1834 # Ran 1 tests, 0 skipped, 1 failed.
1835 1835 python hash seed: * (glob)
1836 1836 [1]
1837 1837
1838 1838 Support running complex test case names
1839 1839
1840 1840 $ cat > test-cases-advanced-cases.t <<'EOF'
1841 1841 > #testcases simple case-with-dashes casewith_-.chars
1842 1842 > $ echo $TESTCASE
1843 1843 > simple
1844 1844 > EOF
1845 1845
1846 1846 $ cat test-cases-advanced-cases.t
1847 1847 #testcases simple case-with-dashes casewith_-.chars
1848 1848 $ echo $TESTCASE
1849 1849 simple
1850 1850
1851 1851 $ rt test-cases-advanced-cases.t
1852 1852 running 3 tests using 1 parallel processes
1853 1853
1854 1854 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1855 1855 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
1856 1856 @@ -1,3 +1,3 @@
1857 1857 #testcases simple case-with-dashes casewith_-.chars
1858 1858 $ echo $TESTCASE
1859 1859 - simple
1860 1860 + case-with-dashes
1861 1861
1862 1862 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1863 1863 !
1864 1864 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1865 1865 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
1866 1866 @@ -1,3 +1,3 @@
1867 1867 #testcases simple case-with-dashes casewith_-.chars
1868 1868 $ echo $TESTCASE
1869 1869 - simple
1870 1870 + casewith_-.chars
1871 1871
1872 1872 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1873 1873 !.
1874 1874 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1875 1875 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1876 1876 # Ran 3 tests, 0 skipped, 2 failed.
1877 1877 python hash seed: * (glob)
1878 1878 [1]
1879 1879
1880 1880 $ rt "test-cases-advanced-cases.t#case-with-dashes"
1881 1881 running 1 tests using 1 parallel processes
1882 1882
1883 1883 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1884 1884 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
1885 1885 @@ -1,3 +1,3 @@
1886 1886 #testcases simple case-with-dashes casewith_-.chars
1887 1887 $ echo $TESTCASE
1888 1888 - simple
1889 1889 + case-with-dashes
1890 1890
1891 1891 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1892 1892 !
1893 1893 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1894 1894 # Ran 1 tests, 0 skipped, 1 failed.
1895 1895 python hash seed: * (glob)
1896 1896 [1]
1897 1897
1898 1898 $ rt "test-cases-advanced-cases.t#casewith_-.chars"
1899 1899 running 1 tests using 1 parallel processes
1900 1900
1901 1901 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1902 1902 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
1903 1903 @@ -1,3 +1,3 @@
1904 1904 #testcases simple case-with-dashes casewith_-.chars
1905 1905 $ echo $TESTCASE
1906 1906 - simple
1907 1907 + casewith_-.chars
1908 1908
1909 1909 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1910 1910 !
1911 1911 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1912 1912 # Ran 1 tests, 0 skipped, 1 failed.
1913 1913 python hash seed: * (glob)
1914 1914 [1]
1915 1915
1916 1916 Test automatic pattern replacement
1917 1917 ==================================
1918 1918
1919 1919 $ cat << EOF >> common-pattern.py
1920 1920 > substitutions = [
1921 1921 > (br'foo-(.*)\\b',
1922 1922 > br'\$XXX=\\1\$'),
1923 1923 > (br'bar\\n',
1924 1924 > br'\$YYY$\\n'),
1925 1925 > ]
1926 1926 > EOF
1927 1927
1928 1928 $ cat << EOF >> test-substitution.t
1929 1929 > $ echo foo-12
1930 1930 > \$XXX=12$
1931 1931 > $ echo foo-42
1932 1932 > \$XXX=42$
1933 1933 > $ echo bar prior
1934 1934 > bar prior
1935 1935 > $ echo lastbar
1936 1936 > last\$YYY$
1937 1937 > $ echo foo-bar foo-baz
1938 1938 > EOF
1939 1939
1940 1940 $ rt test-substitution.t
1941 1941 running 1 tests using 1 parallel processes
1942 1942
1943 1943 --- $TESTTMP/anothertests/cases/test-substitution.t
1944 1944 +++ $TESTTMP/anothertests/cases/test-substitution.t.err
1945 1945 @@ -7,3 +7,4 @@
1946 1946 $ echo lastbar
1947 1947 last$YYY$
1948 1948 $ echo foo-bar foo-baz
1949 1949 + $XXX=bar foo-baz$
1950 1950
1951 1951 ERROR: test-substitution.t output changed
1952 1952 !
1953 1953 Failed test-substitution.t: output changed
1954 1954 # Ran 1 tests, 0 skipped, 1 failed.
1955 1955 python hash seed: * (glob)
1956 1956 [1]
1957 1957
1958 1958 --extra-config-opt works
1959 1959
1960 1960 $ cat << EOF >> test-config-opt.t
1961 1961 > $ hg init test-config-opt
1962 1962 > $ hg -R test-config-opt purge
1963 1963 > $ echo "HGTESTEXTRAEXTENSIONS: \$HGTESTEXTRAEXTENSIONS"
1964 1964 > HGTESTEXTRAEXTENSIONS: purge
1965 1965 > EOF
1966 1966
1967 1967 $ rt --extra-config-opt extensions.purge= \
1968 1968 > --extra-config-opt not.an.extension=True test-config-opt.t
1969 1969 running 1 tests using 1 parallel processes
1970 1970 .
1971 1971 # Ran 1 tests, 0 skipped, 0 failed.
1972 1972
1973 1973 Test conditional output matching
1974 1974 ================================
1975 1975
1976 1976 $ cat << EOF >> test-conditional-matching.t
1977 1977 > #testcases foo bar
1978 1978 > $ echo richtig
1979 1979 > richtig (true !)
1980 1980 > $ echo falsch
1981 1981 > falsch (false !)
1982 1982 > #if foo
1983 1983 > $ echo arthur
1984 1984 > arthur (bar !)
1985 1985 > #endif
1986 1986 > $ echo celeste
1987 1987 > celeste (foo !)
1988 1988 > $ echo zephir
1989 1989 > zephir (bar !)
1990 1990 > EOF
1991 1991
1992 1992 $ rt test-conditional-matching.t
1993 1993 running 2 tests using 1 parallel processes
1994 1994
1995 1995 --- $TESTTMP/anothertests/cases/test-conditional-matching.t
1996 1996 +++ $TESTTMP/anothertests/cases/test-conditional-matching.t#bar.err
1997 1997 @@ -3,11 +3,13 @@
1998 1998 richtig (true !)
1999 1999 $ echo falsch
2000 2000 falsch (false !)
2001 2001 + falsch
2002 2002 #if foo
2003 2003 $ echo arthur
2004 2004 arthur \(bar !\) (re)
2005 2005 #endif
2006 2006 $ echo celeste
2007 2007 celeste \(foo !\) (re)
2008 2008 + celeste
2009 2009 $ echo zephir
2010 2010 zephir \(bar !\) (re)
2011 2011
2012 2012 ERROR: test-conditional-matching.t#bar output changed
2013 2013 !
2014 2014 --- $TESTTMP/anothertests/cases/test-conditional-matching.t
2015 2015 +++ $TESTTMP/anothertests/cases/test-conditional-matching.t#foo.err
2016 2016 @@ -3,11 +3,14 @@
2017 2017 richtig (true !)
2018 2018 $ echo falsch
2019 2019 falsch (false !)
2020 2020 + falsch
2021 2021 #if foo
2022 2022 $ echo arthur
2023 2023 arthur \(bar !\) (re)
2024 2024 + arthur
2025 2025 #endif
2026 2026 $ echo celeste
2027 2027 celeste \(foo !\) (re)
2028 2028 $ echo zephir
2029 2029 zephir \(bar !\) (re)
2030 2030 + zephir
2031 2031
2032 2032 ERROR: test-conditional-matching.t#foo output changed
2033 2033 !
2034 2034 Failed test-conditional-matching.t#bar: output changed
2035 2035 Failed test-conditional-matching.t#foo: output changed
2036 2036 # Ran 2 tests, 0 skipped, 2 failed.
2037 2037 python hash seed: * (glob)
2038 2038 [1]
2039
2040 Test that a proper "python" has been set up
2041 ===========================================
2042
2043 (with a small check-code workaround)
2044 $ printf "#!/usr/bi" > test-py3.tmp
2045 $ printf "n/en" >> test-py3.tmp
2046 $ cat << EOF >> test-py3.tmp
2047 > v python3
2048 > import sys
2049 > print('.'.join(str(x) for x in sys.version_info))
2050 > EOF
2051 $ mv test-py3.tmp test-py3.py
2052 $ chmod +x test-py3.py
2053
2054 (with a small check-code workaround)
2055 $ printf "#!/usr/bi" > test-py.tmp
2056 $ printf "n/en" >> test-py.tmp
2057 $ cat << EOF >> test-py.tmp
2058 > v python
2059 > import sys
2060 > print('.'.join(str(x) for x in sys.version_info))
2061 > EOF
2062 $ mv test-py.tmp test-py.py
2063 $ chmod +x test-py.py
2064
2065 $ ./test-py3.py
2066 3.* (glob)
2067 $ ./test-py.py
2068 2.* (glob) (no-py3 !)
2069 3.* (glob) (py3 !)
@@ -1,50 +1,50 b''
1 #!/usr/bin/env python3
1 #!/usr/bin/env python
2 2 from __future__ import absolute_import, print_function
3 3
4 4 import sys
5 5
6 6 from mercurial import (
7 7 commands,
8 8 localrepo,
9 9 ui as uimod,
10 10 )
11 11
12 12 print_ = print
13 13
14 14
15 15 def print(*args, **kwargs):
16 16 """print() wrapper that flushes stdout buffers to avoid py3 buffer issues
17 17
18 18 We could also just write directly to sys.stdout.buffer the way the
19 19 ui object will, but this was easier for porting the test.
20 20 """
21 21 print_(*args, **kwargs)
22 22 sys.stdout.flush()
23 23
24 24
25 25 u = uimod.ui.load()
26 26
27 27 print('% creating repo')
28 28 repo = localrepo.instance(u, b'.', create=True)
29 29
30 30 f = open('test.py', 'w')
31 31 try:
32 32 f.write('foo\n')
33 33 finally:
34 34 f.close()
35 35
36 36 print('% add and commit')
37 37 commands.add(u, repo, b'test.py')
38 38 commands.commit(u, repo, message=b'*')
39 39 commands.status(u, repo, clean=True)
40 40
41 41
42 42 print('% change')
43 43 f = open('test.py', 'w')
44 44 try:
45 45 f.write('bar\n')
46 46 finally:
47 47 f.close()
48 48
49 49 # this would return clean instead of changed before the fix
50 50 commands.status(u, repo, clean=True, modified=True)
@@ -1,365 +1,365 b''
1 #!/usr/bin/env python3
1 #!/usr/bin/env python
2 2 """
3 3 Tests the buffering behavior of stdio streams in `mercurial.utils.procutil`.
4 4 """
5 5 from __future__ import absolute_import
6 6
7 7 import contextlib
8 8 import errno
9 9 import os
10 10 import signal
11 11 import subprocess
12 12 import sys
13 13 import tempfile
14 14 import unittest
15 15
16 16 from mercurial import pycompat, util
17 17
18 18
19 19 if pycompat.ispy3:
20 20
21 21 def set_noninheritable(fd):
22 22 # On Python 3, file descriptors are non-inheritable by default.
23 23 pass
24 24
25 25
26 26 else:
27 27 if pycompat.iswindows:
28 28 # unused
29 29 set_noninheritable = None
30 30 else:
31 31 import fcntl
32 32
33 33 def set_noninheritable(fd):
34 34 old = fcntl.fcntl(fd, fcntl.F_GETFD)
35 35 fcntl.fcntl(fd, fcntl.F_SETFD, old | fcntl.FD_CLOEXEC)
36 36
37 37
38 38 TEST_BUFFERING_CHILD_SCRIPT = r'''
39 39 import os
40 40
41 41 from mercurial import dispatch
42 42 from mercurial.utils import procutil
43 43
44 44 dispatch.initstdio()
45 45 procutil.{stream}.write(b'aaa')
46 46 os.write(procutil.{stream}.fileno(), b'[written aaa]')
47 47 procutil.{stream}.write(b'bbb\n')
48 48 os.write(procutil.{stream}.fileno(), b'[written bbb\\n]')
49 49 '''
50 50 UNBUFFERED = b'aaa[written aaa]bbb\n[written bbb\\n]'
51 51 LINE_BUFFERED = b'[written aaa]aaabbb\n[written bbb\\n]'
52 52 FULLY_BUFFERED = b'[written aaa][written bbb\\n]aaabbb\n'
53 53
54 54
55 55 TEST_LARGE_WRITE_CHILD_SCRIPT = r'''
56 56 import os
57 57 import signal
58 58 import sys
59 59
60 60 from mercurial import dispatch
61 61 from mercurial.utils import procutil
62 62
63 63 signal.signal(signal.SIGINT, lambda *x: None)
64 64 dispatch.initstdio()
65 65 write_result = procutil.{stream}.write(b'x' * 1048576)
66 66 with os.fdopen(
67 67 os.open({write_result_fn!r}, os.O_WRONLY | getattr(os, 'O_TEMPORARY', 0)),
68 68 'w',
69 69 ) as write_result_f:
70 70 write_result_f.write(str(write_result))
71 71 '''
72 72
73 73
74 74 TEST_BROKEN_PIPE_CHILD_SCRIPT = r'''
75 75 import os
76 76 import pickle
77 77
78 78 from mercurial import dispatch
79 79 from mercurial.utils import procutil
80 80
81 81 dispatch.initstdio()
82 82 procutil.stdin.read(1) # wait until parent process closed pipe
83 83 try:
84 84 procutil.{stream}.write(b'test')
85 85 procutil.{stream}.flush()
86 86 except EnvironmentError as e:
87 87 with os.fdopen(
88 88 os.open(
89 89 {err_fn!r},
90 90 os.O_WRONLY
91 91 | getattr(os, 'O_BINARY', 0)
92 92 | getattr(os, 'O_TEMPORARY', 0),
93 93 ),
94 94 'wb',
95 95 ) as err_f:
96 96 pickle.dump(e, err_f)
97 97 # Exit early to suppress further broken pipe errors at interpreter shutdown.
98 98 os._exit(0)
99 99 '''
100 100
101 101
102 102 @contextlib.contextmanager
103 103 def _closing(fds):
104 104 try:
105 105 yield
106 106 finally:
107 107 for fd in fds:
108 108 try:
109 109 os.close(fd)
110 110 except EnvironmentError:
111 111 pass
112 112
113 113
114 114 # In the following, we set the FDs non-inheritable mainly to make it possible
115 115 # for tests to close the receiving end of the pipe / PTYs.
116 116
117 117
118 118 @contextlib.contextmanager
119 119 def _devnull():
120 120 devnull = os.open(os.devnull, os.O_WRONLY)
121 121 # We don't have a receiving end, so it's not worth the effort on Python 2
122 122 # on Windows to make the FD non-inheritable.
123 123 with _closing([devnull]):
124 124 yield (None, devnull)
125 125
126 126
127 127 @contextlib.contextmanager
128 128 def _pipes():
129 129 rwpair = os.pipe()
130 130 # Pipes are already non-inheritable on Windows.
131 131 if not pycompat.iswindows:
132 132 set_noninheritable(rwpair[0])
133 133 set_noninheritable(rwpair[1])
134 134 with _closing(rwpair):
135 135 yield rwpair
136 136
137 137
138 138 @contextlib.contextmanager
139 139 def _ptys():
140 140 if pycompat.iswindows:
141 141 raise unittest.SkipTest("PTYs are not supported on Windows")
142 142 import pty
143 143 import tty
144 144
145 145 rwpair = pty.openpty()
146 146 set_noninheritable(rwpair[0])
147 147 set_noninheritable(rwpair[1])
148 148 with _closing(rwpair):
149 149 tty.setraw(rwpair[0])
150 150 yield rwpair
151 151
152 152
153 153 def _readall(fd, buffer_size, initial_buf=None):
154 154 buf = initial_buf or []
155 155 while True:
156 156 try:
157 157 s = os.read(fd, buffer_size)
158 158 except OSError as e:
159 159 if e.errno == errno.EIO:
160 160 # If the child-facing PTY got closed, reading from the
161 161 # parent-facing PTY raises EIO.
162 162 break
163 163 raise
164 164 if not s:
165 165 break
166 166 buf.append(s)
167 167 return b''.join(buf)
168 168
169 169
170 170 class TestStdio(unittest.TestCase):
171 171 def _test(
172 172 self,
173 173 child_script,
174 174 stream,
175 175 rwpair_generator,
176 176 check_output,
177 177 python_args=[],
178 178 post_child_check=None,
179 179 stdin_generator=None,
180 180 ):
181 181 assert stream in ('stdout', 'stderr')
182 182 if stdin_generator is None:
183 183 stdin_generator = open(os.devnull, 'rb')
184 184 with rwpair_generator() as (
185 185 stream_receiver,
186 186 child_stream,
187 187 ), stdin_generator as child_stdin:
188 188 proc = subprocess.Popen(
189 189 [sys.executable] + python_args + ['-c', child_script],
190 190 stdin=child_stdin,
191 191 stdout=child_stream if stream == 'stdout' else None,
192 192 stderr=child_stream if stream == 'stderr' else None,
193 193 )
194 194 try:
195 195 os.close(child_stream)
196 196 if stream_receiver is not None:
197 197 check_output(stream_receiver, proc)
198 198 except: # re-raises
199 199 proc.terminate()
200 200 raise
201 201 finally:
202 202 retcode = proc.wait()
203 203 self.assertEqual(retcode, 0)
204 204 if post_child_check is not None:
205 205 post_child_check()
206 206
207 207 def _test_buffering(
208 208 self, stream, rwpair_generator, expected_output, python_args=[]
209 209 ):
210 210 def check_output(stream_receiver, proc):
211 211 self.assertEqual(_readall(stream_receiver, 1024), expected_output)
212 212
213 213 self._test(
214 214 TEST_BUFFERING_CHILD_SCRIPT.format(stream=stream),
215 215 stream,
216 216 rwpair_generator,
217 217 check_output,
218 218 python_args,
219 219 )
220 220
221 221 def test_buffering_stdout_devnull(self):
222 222 self._test_buffering('stdout', _devnull, None)
223 223
224 224 def test_buffering_stdout_pipes(self):
225 225 self._test_buffering('stdout', _pipes, FULLY_BUFFERED)
226 226
227 227 def test_buffering_stdout_ptys(self):
228 228 self._test_buffering('stdout', _ptys, LINE_BUFFERED)
229 229
230 230 def test_buffering_stdout_devnull_unbuffered(self):
231 231 self._test_buffering('stdout', _devnull, None, python_args=['-u'])
232 232
233 233 def test_buffering_stdout_pipes_unbuffered(self):
234 234 self._test_buffering('stdout', _pipes, UNBUFFERED, python_args=['-u'])
235 235
236 236 def test_buffering_stdout_ptys_unbuffered(self):
237 237 self._test_buffering('stdout', _ptys, UNBUFFERED, python_args=['-u'])
238 238
239 239 if not pycompat.ispy3 and not pycompat.iswindows:
240 240 # On Python 2 on non-Windows, we manually open stdout in line-buffered
241 241 # mode if connected to a TTY. We should check if Python was configured
242 242 # to use unbuffered stdout, but it's hard to do that.
243 243 test_buffering_stdout_ptys_unbuffered = unittest.expectedFailure(
244 244 test_buffering_stdout_ptys_unbuffered
245 245 )
246 246
247 247 def _test_large_write(self, stream, rwpair_generator, python_args=[]):
248 248 if not pycompat.ispy3 and pycompat.isdarwin:
249 249 # Python 2 doesn't always retry on EINTR, but the libc might retry.
250 250 # So far, it was observed only on macOS that EINTR is raised at the
251 251 # Python level. As Python 2 support will be dropped soon-ish, we
252 252 # won't attempt to fix it.
253 253 raise unittest.SkipTest("raises EINTR on macOS")
254 254
255 255 def check_output(stream_receiver, proc):
256 256 if not pycompat.iswindows:
257 257 # On Unix, we can provoke a partial write() by interrupting it
258 258 # by a signal handler as soon as a bit of data was written.
259 259 # We test that write() is called until all data is written.
260 260 buf = [os.read(stream_receiver, 1)]
261 261 proc.send_signal(signal.SIGINT)
262 262 else:
263 263 # On Windows, there doesn't seem to be a way to cause partial
264 264 # writes.
265 265 buf = []
266 266 self.assertEqual(
267 267 _readall(stream_receiver, 131072, buf), b'x' * 1048576
268 268 )
269 269
270 270 def post_child_check():
271 271 write_result_str = write_result_f.read()
272 272 if pycompat.ispy3:
273 273 # On Python 3, we test that the correct number of bytes is
274 274 # claimed to have been written.
275 275 expected_write_result_str = '1048576'
276 276 else:
277 277 # On Python 2, we only check that the large write does not
278 278 # crash.
279 279 expected_write_result_str = 'None'
280 280 self.assertEqual(write_result_str, expected_write_result_str)
281 281
282 282 with tempfile.NamedTemporaryFile('r') as write_result_f:
283 283 self._test(
284 284 TEST_LARGE_WRITE_CHILD_SCRIPT.format(
285 285 stream=stream, write_result_fn=write_result_f.name
286 286 ),
287 287 stream,
288 288 rwpair_generator,
289 289 check_output,
290 290 python_args,
291 291 post_child_check=post_child_check,
292 292 )
293 293
294 294 def test_large_write_stdout_devnull(self):
295 295 self._test_large_write('stdout', _devnull)
296 296
297 297 def test_large_write_stdout_pipes(self):
298 298 self._test_large_write('stdout', _pipes)
299 299
300 300 def test_large_write_stdout_ptys(self):
301 301 self._test_large_write('stdout', _ptys)
302 302
303 303 def test_large_write_stdout_devnull_unbuffered(self):
304 304 self._test_large_write('stdout', _devnull, python_args=['-u'])
305 305
306 306 def test_large_write_stdout_pipes_unbuffered(self):
307 307 self._test_large_write('stdout', _pipes, python_args=['-u'])
308 308
309 309 def test_large_write_stdout_ptys_unbuffered(self):
310 310 self._test_large_write('stdout', _ptys, python_args=['-u'])
311 311
312 312 def test_large_write_stderr_devnull(self):
313 313 self._test_large_write('stderr', _devnull)
314 314
315 315 def test_large_write_stderr_pipes(self):
316 316 self._test_large_write('stderr', _pipes)
317 317
318 318 def test_large_write_stderr_ptys(self):
319 319 self._test_large_write('stderr', _ptys)
320 320
321 321 def test_large_write_stderr_devnull_unbuffered(self):
322 322 self._test_large_write('stderr', _devnull, python_args=['-u'])
323 323
324 324 def test_large_write_stderr_pipes_unbuffered(self):
325 325 self._test_large_write('stderr', _pipes, python_args=['-u'])
326 326
327 327 def test_large_write_stderr_ptys_unbuffered(self):
328 328 self._test_large_write('stderr', _ptys, python_args=['-u'])
329 329
330 330 def _test_broken_pipe(self, stream):
331 331 assert stream in ('stdout', 'stderr')
332 332
333 333 def check_output(stream_receiver, proc):
334 334 os.close(stream_receiver)
335 335 proc.stdin.write(b'x')
336 336 proc.stdin.close()
337 337
338 338 def post_child_check():
339 339 err = util.pickle.load(err_f)
340 340 self.assertEqual(err.errno, errno.EPIPE)
341 341 self.assertEqual(err.strerror, "Broken pipe")
342 342
343 343 with tempfile.NamedTemporaryFile('rb') as err_f:
344 344 self._test(
345 345 TEST_BROKEN_PIPE_CHILD_SCRIPT.format(
346 346 stream=stream, err_fn=err_f.name
347 347 ),
348 348 stream,
349 349 _pipes,
350 350 check_output,
351 351 post_child_check=post_child_check,
352 352 stdin_generator=util.nullcontextmanager(subprocess.PIPE),
353 353 )
354 354
355 355 def test_broken_pipe_stdout(self):
356 356 self._test_broken_pipe('stdout')
357 357
358 358 def test_broken_pipe_stderr(self):
359 359 self._test_broken_pipe('stderr')
360 360
361 361
362 362 if __name__ == '__main__':
363 363 import silenttestrunner
364 364
365 365 silenttestrunner.main(__name__)
@@ -1,223 +1,223 b''
1 #!/usr/bin/env python3
1 #!/usr/bin/env python
2 2
3 3 from __future__ import absolute_import, print_function
4 4
5 5 __doc__ = """Tiny HTTP Proxy.
6 6
7 7 This module implements GET, HEAD, POST, PUT and DELETE methods
8 8 on BaseHTTPServer, and behaves as an HTTP proxy. The CONNECT
9 9 method is also implemented experimentally, but has not been
10 10 tested yet.
11 11
12 12 Any help will be greatly appreciated. SUZUKI Hisao
13 13 """
14 14
15 15 __version__ = "0.2.1"
16 16
17 17 import optparse
18 18 import os
19 19 import select
20 20 import socket
21 21 import sys
22 22
23 23 from mercurial import (
24 24 pycompat,
25 25 util,
26 26 )
27 27
28 28 httpserver = util.httpserver
29 29 socketserver = util.socketserver
30 30 urlreq = util.urlreq
31 31
32 32 if os.environ.get('HGIPV6', '0') == '1':
33 33 family = socket.AF_INET6
34 34 else:
35 35 family = socket.AF_INET
36 36
37 37
38 38 class ProxyHandler(httpserver.basehttprequesthandler):
39 39 __base = httpserver.basehttprequesthandler
40 40 __base_handle = __base.handle
41 41
42 42 server_version = "TinyHTTPProxy/" + __version__
43 43 rbufsize = 0 # self.rfile Be unbuffered
44 44
45 45 def handle(self):
46 46 (ip, port) = self.client_address
47 47 allowed = getattr(self, 'allowed_clients', None)
48 48 if allowed is not None and ip not in allowed:
49 49 self.raw_requestline = self.rfile.readline()
50 50 if self.parse_request():
51 51 self.send_error(403)
52 52 else:
53 53 self.__base_handle()
54 54
55 55 def log_request(self, code='-', size='-'):
56 56 xheaders = [h for h in self.headers.items() if h[0].startswith('x-')]
57 57 self.log_message(
58 58 '"%s" %s %s%s',
59 59 self.requestline,
60 60 str(code),
61 61 str(size),
62 62 ''.join([' %s:%s' % h for h in sorted(xheaders)]),
63 63 )
64 64 # Flush for Windows, so output isn't lost on TerminateProcess()
65 65 sys.stdout.flush()
66 66 sys.stderr.flush()
67 67
68 68 def _connect_to(self, netloc, soc):
69 69 i = netloc.find(':')
70 70 if i >= 0:
71 71 host_port = netloc[:i], int(netloc[i + 1 :])
72 72 else:
73 73 host_port = netloc, 80
74 74 print("\t" "connect to %s:%d" % host_port)
75 75 try:
76 76 soc.connect(host_port)
77 77 except socket.error as arg:
78 78 try:
79 79 msg = arg[1]
80 80 except (IndexError, TypeError):
81 81 msg = arg
82 82 self.send_error(404, msg)
83 83 return 0
84 84 return 1
85 85
86 86 def do_CONNECT(self):
87 87 soc = socket.socket(family, socket.SOCK_STREAM)
88 88 try:
89 89 if self._connect_to(self.path, soc):
90 90 self.log_request(200)
91 91 self.wfile.write(
92 92 pycompat.bytestr(self.protocol_version)
93 93 + b" 200 Connection established\r\n"
94 94 )
95 95 self.wfile.write(
96 96 b"Proxy-agent: %s\r\n"
97 97 % pycompat.bytestr(self.version_string())
98 98 )
99 99 self.wfile.write(b"\r\n")
100 100 self._read_write(soc, 300)
101 101 finally:
102 102 print("\t" "bye")
103 103 soc.close()
104 104 self.connection.close()
105 105
106 106 def do_GET(self):
107 107 (scm, netloc, path, params, query, fragment) = urlreq.urlparse(
108 108 self.path, 'http'
109 109 )
110 110 if scm != 'http' or fragment or not netloc:
111 111 self.send_error(400, "bad url %s" % self.path)
112 112 return
113 113 soc = socket.socket(family, socket.SOCK_STREAM)
114 114 try:
115 115 if self._connect_to(netloc, soc):
116 116 self.log_request()
117 117 url = urlreq.urlunparse(('', '', path, params, query, ''))
118 118 soc.send(
119 119 b"%s %s %s\r\n"
120 120 % (
121 121 pycompat.bytestr(self.command),
122 122 pycompat.bytestr(url),
123 123 pycompat.bytestr(self.request_version),
124 124 )
125 125 )
126 126 self.headers['Connection'] = 'close'
127 127 del self.headers['Proxy-Connection']
128 128 for key, val in self.headers.items():
129 129 soc.send(
130 130 b"%s: %s\r\n"
131 131 % (pycompat.bytestr(key), pycompat.bytestr(val))
132 132 )
133 133 soc.send(b"\r\n")
134 134 self._read_write(soc)
135 135 finally:
136 136 print("\t" "bye")
137 137 soc.close()
138 138 self.connection.close()
139 139
140 140 def _read_write(self, soc, max_idling=20):
141 141 iw = [self.connection, soc]
142 142 ow = []
143 143 count = 0
144 144 while True:
145 145 count += 1
146 146 (ins, _, exs) = select.select(iw, ow, iw, 3)
147 147 if exs:
148 148 break
149 149 if ins:
150 150 for i in ins:
151 151 if i is soc:
152 152 out = self.connection
153 153 else:
154 154 out = soc
155 155 try:
156 156 data = i.recv(8192)
157 157 except socket.error:
158 158 break
159 159 if data:
160 160 out.send(data)
161 161 count = 0
162 162 else:
163 163 print("\t" "idle", count)
164 164 if count == max_idling:
165 165 break
166 166
167 167 do_HEAD = do_GET
168 168 do_POST = do_GET
169 169 do_PUT = do_GET
170 170 do_DELETE = do_GET
171 171
172 172
173 173 class ThreadingHTTPServer(socketserver.ThreadingMixIn, httpserver.httpserver):
174 174 def __init__(self, *args, **kwargs):
175 175 httpserver.httpserver.__init__(self, *args, **kwargs)
176 176 a = open("proxy.pid", "w")
177 177 a.write(str(os.getpid()) + "\n")
178 178 a.close()
179 179
180 180
181 181 def runserver(port=8000, bind=""):
182 182 server_address = (bind, port)
183 183 ProxyHandler.protocol_version = "HTTP/1.0"
184 184 httpd = ThreadingHTTPServer(server_address, ProxyHandler)
185 185 sa = httpd.socket.getsockname()
186 186 print("Serving HTTP on", sa[0], "port", sa[1], "...")
187 187 try:
188 188 httpd.serve_forever()
189 189 except KeyboardInterrupt:
190 190 print("\nKeyboard interrupt received, exiting.")
191 191 httpd.server_close()
192 192 sys.exit(0)
193 193
194 194
195 195 if __name__ == '__main__':
196 196 argv = sys.argv
197 197 if argv[1:] and argv[1] in ('-h', '--help'):
198 198 print(argv[0], "[port [allowed_client_name ...]]")
199 199 else:
200 200 if argv[2:]:
201 201 allowed = []
202 202 for name in argv[2:]:
203 203 client = socket.gethostbyname(name)
204 204 allowed.append(client)
205 205 print("Accept: %s (%s)" % (client, name))
206 206 ProxyHandler.allowed_clients = allowed
207 207 del argv[2:]
208 208 else:
209 209 print("Any clients will be served...")
210 210
211 211 parser = optparse.OptionParser()
212 212 parser.add_option(
213 213 '-b',
214 214 '--bind',
215 215 metavar='ADDRESS',
216 216 help='Specify alternate bind address ' '[default: all interfaces]',
217 217 default='',
218 218 )
219 219 (options, args) = parser.parse_args()
220 220 port = 8000
221 221 if len(args) == 1:
222 222 port = int(args[0])
223 223 runserver(port, options.bind)