##// END OF EJS Templates
run-tests: allow #require inside #if...
Jun Wu -
r36695:a247a0e8 default
parent child Browse files
Show More
@@ -1,3109 +1,3110 b''
1 1 #!/usr/bin/env python
2 2 #
3 3 # run-tests.py - Run a set of tests on Mercurial
4 4 #
5 5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 # Modifying this script is tricky because it has many modes:
11 11 # - serial (default) vs parallel (-jN, N > 1)
12 12 # - no coverage (default) vs coverage (-c, -C, -s)
13 13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 14 # - tests are a mix of shell scripts and Python scripts
15 15 #
16 16 # If you change this script, it is recommended that you ensure you
17 17 # haven't broken it by running it in various modes with a representative
18 18 # sample of test scripts. For example:
19 19 #
20 20 # 1) serial, no coverage, temp install:
21 21 # ./run-tests.py test-s*
22 22 # 2) serial, no coverage, local hg:
23 23 # ./run-tests.py --local test-s*
24 24 # 3) serial, coverage, temp install:
25 25 # ./run-tests.py -c test-s*
26 26 # 4) serial, coverage, local hg:
27 27 # ./run-tests.py -c --local test-s* # unsupported
28 28 # 5) parallel, no coverage, temp install:
29 29 # ./run-tests.py -j2 test-s*
30 30 # 6) parallel, no coverage, local hg:
31 31 # ./run-tests.py -j2 --local test-s*
32 32 # 7) parallel, coverage, temp install:
33 33 # ./run-tests.py -j2 -c test-s* # currently broken
34 34 # 8) parallel, coverage, local install:
35 35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 36 # 9) parallel, custom tmp dir:
37 37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 38 # 10) parallel, pure, tests that call run-tests:
39 39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 40 #
41 41 # (You could use any subset of the tests: test-s* happens to match
42 42 # enough that it's worth doing parallel runs, few enough that it
43 43 # completes fairly quickly, includes both shell and Python scripts, and
44 44 # includes some scripts that run daemon processes.)
45 45
46 46 from __future__ import absolute_import, print_function
47 47
48 48 import argparse
49 49 import collections
50 50 import difflib
51 51 import distutils.version as version
52 52 import errno
53 53 import json
54 54 import os
55 55 import random
56 56 import re
57 57 import shutil
58 58 import signal
59 59 import socket
60 60 import subprocess
61 61 import sys
62 62 import sysconfig
63 63 import tempfile
64 64 import threading
65 65 import time
66 66 import unittest
67 67 import xml.dom.minidom as minidom
68 68
69 69 try:
70 70 import Queue as queue
71 71 except ImportError:
72 72 import queue
73 73
74 74 try:
75 75 import shlex
76 76 shellquote = shlex.quote
77 77 except (ImportError, AttributeError):
78 78 import pipes
79 79 shellquote = pipes.quote
80 80
if os.environ.get('RTUNICODEPEDANTRY', False):
    # Python 2 only: make implicit bytes<->unicode coercion an error so
    # sloppy mixing is caught early.  On Python 3 there is no builtin
    # reload(), so the NameError silently disables this knob.
    try:
        reload(sys)
        sys.setdefaultencoding("undefined")
    except NameError:
        pass

# Snapshot of the environment at startup, so tests can be given a clean,
# reproducible environment later.
origenviron = os.environ.copy()
# Bytes view of the environment where available (Python 3 on POSIX);
# falls back to os.environ elsewhere.
osenvironb = getattr(os, 'environb', os.environ)
# Serializes subprocess creation across worker threads (see Popen4).
processlock = threading.Lock()
91 91
pygmentspresent = False
# ANSI color is unsupported prior to Windows 10
if os.name != 'nt':
    # Color support is strictly optional: probe for pygments and fall
    # back to plain output when it is missing.
    try: # is pygments installed
        import pygments
        import pygments.lexers as lexers
        import pygments.lexer as lexer
        import pygments.formatters as formatters
        import pygments.token as token
        import pygments.style as style
        pygmentspresent = True
        difflexer = lexers.DiffLexer()
        terminal256formatter = formatters.Terminal256Formatter()
    except ImportError:
        pass

if pygmentspresent:
    class TestRunnerStyle(style.Style):
        """Pygments style mapping our custom result tokens to colors."""
        default_style = ""
        skipped = token.string_to_tokentype("Token.Generic.Skipped")
        failed = token.string_to_tokentype("Token.Generic.Failed")
        skippedname = token.string_to_tokentype("Token.Generic.SName")
        failedname = token.string_to_tokentype("Token.Generic.FName")
        styles = {
            skipped: '#e5e5e5',
            skippedname: '#00ffff',
            failed: '#7f0000',
            failedname: '#ff0000',
        }

    class TestRunnerLexer(lexer.RegexLexer):
        """Lexer recognizing "Skipped"/"Failed"/"ERROR:" summary lines so
        test names and reasons can be colorized independently."""
        testpattern = r'[\w-]+\.(t|py)( \(case [\w-]+\))?'
        tokens = {
            'root': [
                (r'^Skipped', token.Generic.Skipped, 'skipped'),
                (r'^Failed ', token.Generic.Failed, 'failed'),
                (r'^ERROR: ', token.Generic.Failed, 'failed'),
            ],
            'skipped': [
                (testpattern, token.Generic.SName),
                (r':.*', token.Generic.Skipped),
            ],
            'failed': [
                (testpattern, token.Generic.FName),
                (r'(:| ).*', token.Generic.Failed),
            ]
        }

    runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
    runnerlexer = TestRunnerLexer()
142 142
143 143 if sys.version_info > (3, 5, 0):
144 144 PYTHON3 = True
145 145 xrange = range # we use xrange in one place, and we'd rather not use range
146 146 def _bytespath(p):
147 147 if p is None:
148 148 return p
149 149 return p.encode('utf-8')
150 150
151 151 def _strpath(p):
152 152 if p is None:
153 153 return p
154 154 return p.decode('utf-8')
155 155
156 156 elif sys.version_info >= (3, 0, 0):
157 157 print('%s is only supported on Python 3.5+ and 2.7, not %s' %
158 158 (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
159 159 sys.exit(70) # EX_SOFTWARE from `man 3 sysexit`
160 160 else:
161 161 PYTHON3 = False
162 162
163 163 # In python 2.x, path operations are generally done using
164 164 # bytestrings by default, so we don't have to do any extra
165 165 # fiddling there. We define the wrapper functions anyway just to
166 166 # help keep code consistent between platforms.
167 167 def _bytespath(p):
168 168 return p
169 169
170 170 _strpath = _bytespath
171 171
# For Windows support: os.WIFEXITED does not exist there, so treat every
# status as "not a normal exit".
wifexited = getattr(os, "WIFEXITED", lambda x: False)

# Whether to use IPv6
def checksocketfamily(name, port=20058):
    """return true if we can listen on localhost using family=name

    name should be either 'AF_INET', or 'AF_INET6'.
    port being used is okay - EADDRINUSE is considered as successful.
    """
    family = getattr(socket, name, None)
    if family is None:
        # Platform does not even define this address family.
        return False
    try:
        sock = socket.socket(family, socket.SOCK_STREAM)
        sock.bind(('localhost', port))
        sock.close()
        return True
    except socket.error as exc:
        if exc.errno == errno.EADDRINUSE:
            # Someone else is listening there, which proves the family works.
            return True
        if exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
            return False
        raise
199 199
# useipv6 will be set by parseargs
useipv6 = None

def checkportisavailable(port):
    """return true if a port seems free to bind on localhost"""
    family = socket.AF_INET6 if useipv6 else socket.AF_INET
    try:
        sock = socket.socket(family, socket.SOCK_STREAM)
        sock.bind(('localhost', port))
        sock.close()
        return True
    except socket.error as exc:
        # These errnos merely mean "not available"; anything else is a
        # real failure worth surfacing.
        benign = (errno.EADDRINUSE, errno.EADDRNOTAVAIL,
                  errno.EPROTONOSUPPORT)
        if exc.errno not in benign:
            raise
        return False
219 219
closefds = os.name == 'posix'
def Popen4(cmd, wd, timeout, env=None):
    """Spawn *cmd* through the shell in directory *wd*.

    Returns a subprocess.Popen object dressed up with the old
    popen2.Popen4-style aliases (fromchild/tochild/childerr; stderr is
    merged into stdout).  When *timeout* is non-zero, a watchdog thread
    marks p.timeout and calls terminate() on the process once the
    timeout elapses.
    """
    # Fix: hold the lock via a context manager so it is released even if
    # subprocess.Popen raises.  The previous bare acquire()/release()
    # pair would leave processlock held forever on failure, deadlocking
    # every subsequent Popen4() call in the harness.
    with processlock:
        p = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=wd, env=env,
                             close_fds=closefds,
                             stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)

    p.fromchild = p.stdout
    p.tochild = p.stdin
    p.childerr = p.stderr

    p.timeout = False
    if timeout:
        def t():
            # Poll until the deadline or until some other code has
            # collected the child's exit status (returncode is only set
            # once poll()/wait() is called elsewhere).
            start = time.time()
            while time.time() - start < timeout and p.returncode is None:
                time.sleep(.1)
            p.timeout = True
            if p.returncode is None:
                terminate(p)
        threading.Thread(target=t).start()

    return p
245 245
# Interpreter path as bytes, with forward slashes for shell friendliness.
PYTHON = _bytespath(sys.executable.replace('\\', '/'))
# Environment variable naming the interpreter's module search path.
IMPL_PATH = b'PYTHONPATH'
if 'java' in sys.platform:
    IMPL_PATH = b'JYTHONPATH'

# Option defaults, each paired with the environment variable that may
# override it (resolved in getparser()).
defaults = {
    'jobs': ('HGTEST_JOBS', 1),
    'timeout': ('HGTEST_TIMEOUT', 180),
    'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500),
    'port': ('HGTEST_PORT', 20059),
    'shell': ('HGTEST_SHELL', 'sh'),
}
258 258
def canonpath(path):
    """Return *path* with '~' expanded and symlinks/relative parts resolved."""
    expanded = os.path.expanduser(path)
    return os.path.realpath(expanded)
261 261
def parselistfiles(files, listtype, warn=True):
    """Parse blacklist/whitelist files into a {test name: file name} dict.

    Each file may contain one test name per line; '#' starts a comment
    and blank lines are ignored.  Missing files are skipped (with a
    warning unless *warn* is False); other IOErrors propagate.
    """
    entries = dict()
    for filename in files:
        try:
            path = os.path.expanduser(os.path.expandvars(filename))
            f = open(path, "rb")
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            if warn:
                print("warning: no such %s file: %s" % (listtype, filename))
            continue

        # Fix: close the file even if reading raises (the old code only
        # closed it on the success path), and iterate lazily instead of
        # materializing all lines with readlines().
        with f:
            for line in f:
                line = line.split(b'#', 1)[0].strip()
                if line:
                    entries[line] = filename

    return entries
282 282
def parsettestcases(path):
    """read a .t test file, return a set of test case names

    If path does not exist, return an empty set.
    """
    cases = set()
    try:
        with open(path, 'rb') as fp:
            for line in fp:
                # len(b'#testcases ') == 11; everything after it is a
                # whitespace-separated list of case names.
                if line.startswith(b'#testcases '):
                    cases.update(line[11:].split())
    except IOError as ex:
        if ex.errno != errno.ENOENT:
            raise
    return cases
298 298
def getparser():
    """Obtain the OptionParser used by the CLI.

    Also folds $HGTEST_* environment overrides into the module-level
    'defaults' table (mutating it in place) and installs those values as
    parser defaults.
    """
    parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]')

    # -- which tests to run ------------------------------------------------
    selgroup = parser.add_argument_group('Test Selection')
    selgroup.add_argument('--allow-slow-tests', action='store_true',
        help='allow extremely slow tests')
    selgroup.add_argument("--blacklist", action="append",
        help="skip tests listed in the specified blacklist file")
    selgroup.add_argument("--changed",
        help="run tests that are changed in parent rev or working directory")
    selgroup.add_argument("-k", "--keywords",
        help="run tests matching keywords")
    selgroup.add_argument("-r", "--retest", action="store_true",
        help="retest failed tests")
    selgroup.add_argument("--test-list", action="append",
        help="read tests to run from the specified file")
    selgroup.add_argument("--whitelist", action="append",
        help="always run tests listed in the specified whitelist file")
    selgroup.add_argument('tests', metavar='TESTS', nargs='*',
        help='Tests to run')

    # -- how the harness behaves -------------------------------------------
    hargroup = parser.add_argument_group('Test Harness Behavior')
    hargroup.add_argument('--bisect-repo',
        metavar='bisect_repo',
        help=("Path of a repo to bisect. Use together with "
              "--known-good-rev"))
    hargroup.add_argument("-d", "--debug", action="store_true",
        help="debug mode: write output of test scripts to console"
             " rather than capturing and diffing it (disables timeout)")
    hargroup.add_argument("-f", "--first", action="store_true",
        help="exit on the first test failure")
    hargroup.add_argument("-i", "--interactive", action="store_true",
        help="prompt to accept changed output")
    hargroup.add_argument("-j", "--jobs", type=int,
        help="number of jobs to run in parallel"
             " (default: $%s or %d)" % defaults['jobs'])
    hargroup.add_argument("--keep-tmpdir", action="store_true",
        help="keep temporary directory after running tests")
    hargroup.add_argument('--known-good-rev',
        metavar="known_good_rev",
        help=("Automatically bisect any failures using this "
              "revision as a known-good revision."))
    hargroup.add_argument("--list-tests", action="store_true",
        help="list tests instead of running them")
    hargroup.add_argument("--loop", action="store_true",
        help="loop tests repeatedly")
    hargroup.add_argument('--random', action="store_true",
        help='run tests in random order')
    hargroup.add_argument('--order-by-runtime', action="store_true",
        help='run slowest tests first, according to .testtimes')
    hargroup.add_argument("-p", "--port", type=int,
        help="port on which servers should listen"
             " (default: $%s or %d)" % defaults['port'])
    hargroup.add_argument('--profile-runner', action='store_true',
        help='run statprof on run-tests')
    hargroup.add_argument("-R", "--restart", action="store_true",
        help="restart at last error")
    hargroup.add_argument("--runs-per-test", type=int, dest="runs_per_test",
        help="run each test N times (default=1)", default=1)
    hargroup.add_argument("--shell",
        help="shell to use (default: $%s or %s)" % defaults['shell'])
    hargroup.add_argument('--showchannels', action='store_true',
        help='show scheduling channels')
    hargroup.add_argument("--slowtimeout", type=int,
        help="kill errant slow tests after SLOWTIMEOUT seconds"
             " (default: $%s or %d)" % defaults['slowtimeout'])
    hargroup.add_argument("-t", "--timeout", type=int,
        help="kill errant tests after TIMEOUT seconds"
             " (default: $%s or %d)" % defaults['timeout'])
    hargroup.add_argument("--tmpdir",
        help="run tests in the given temporary directory"
             " (implies --keep-tmpdir)")
    hargroup.add_argument("-v", "--verbose", action="store_true",
        help="output verbose messages")

    # -- which Mercurial to test -------------------------------------------
    hgconf = parser.add_argument_group('Mercurial Configuration')
    hgconf.add_argument("--chg", action="store_true",
        help="install and use chg wrapper in place of hg")
    hgconf.add_argument("--compiler",
        help="compiler to build with")
    hgconf.add_argument('--extra-config-opt', action="append", default=[],
        help='set the given config opt in the test hgrc')
    hgconf.add_argument("-l", "--local", action="store_true",
        help="shortcut for --with-hg=<testdir>/../hg, "
             "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set")
    hgconf.add_argument("--ipv6", action="store_true",
        help="prefer IPv6 to IPv4 for network related tests")
    hgconf.add_argument("--pure", action="store_true",
        help="use pure Python code instead of C extensions")
    hgconf.add_argument("-3", "--py3k-warnings", action="store_true",
        help="enable Py3k warnings on Python 2.7+")
    hgconf.add_argument("--with-chg", metavar="CHG",
        help="use specified chg wrapper in place of hg")
    hgconf.add_argument("--with-hg",
        metavar="HG",
        help="test using specified hg script rather than a "
             "temporary installation")
    # This option should be deleted once test-check-py3-compat.t and other
    # Python 3 tests run with Python 3.
    hgconf.add_argument("--with-python3", metavar="PYTHON3",
        help="Python 3 interpreter (if running under Python 2)"
             " (TEMPORARY)")

    # -- how results are reported ------------------------------------------
    repgroup = parser.add_argument_group('Results Reporting')
    repgroup.add_argument("-C", "--annotate", action="store_true",
        help="output files annotated with coverage")
    repgroup.add_argument("--color", choices=["always", "auto", "never"],
        default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
        help="colorisation: always|auto|never (default: auto)")
    repgroup.add_argument("-c", "--cover", action="store_true",
        help="print a test coverage report")
    repgroup.add_argument('--exceptions', action='store_true',
        help='log all exceptions and generate an exception report')
    repgroup.add_argument("-H", "--htmlcov", action="store_true",
        help="create an HTML report of the coverage of the files")
    repgroup.add_argument("--json", action="store_true",
        help="store test result data in 'report.json' file")
    repgroup.add_argument("--outputdir",
        help="directory to write error logs to (default=test directory)")
    repgroup.add_argument("-n", "--nodiff", action="store_true",
        help="skip showing test changes")
    repgroup.add_argument("-S", "--noskips", action="store_true",
        help="don't report skip tests verbosely")
    repgroup.add_argument("--time", action="store_true",
        help="time how long each test takes")
    repgroup.add_argument("--view",
        help="external diff viewer")
    repgroup.add_argument("--xunit",
        help="record xunit results at specified path")

    # Resolve environment overrides, coercing each back to the type of
    # its built-in default, then install them as parser defaults.
    for option, (envvar, default) in defaults.items():
        defaults[option] = type(default)(os.environ.get(envvar, default))
    parser.set_defaults(**defaults)

    return parser
435 435
def parseargs(args, parser):
    """Parse arguments with our OptionParser and validate results.

    Normalizes paths (--with-hg/--with-chg/--tmpdir), resolves --local,
    validates mutually exclusive options, and sets the module globals
    'useipv6' and 'verbose'.  Invalid combinations abort via
    parser.error().
    """
    options = parser.parse_args(args)

    # jython is always pure
    if 'java' in sys.platform or '__pypy__' in sys.modules:
        options.pure = True

    # --- hg / chg binary resolution ---------------------------------------
    if options.with_hg:
        options.with_hg = canonpath(_bytespath(options.with_hg))
        if not (os.path.isfile(options.with_hg) and
                os.access(options.with_hg, os.X_OK)):
            parser.error('--with-hg must specify an executable hg script')
        if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
            sys.stderr.write('warning: --with-hg should specify an hg script\n')
    if options.local:
        testdir = os.path.dirname(_bytespath(canonpath(sys.argv[0])))
        reporootdir = os.path.dirname(testdir)
        pathandattrs = [(b'hg', 'with_hg')]
        if options.chg:
            pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
        for relpath, attr in pathandattrs:
            binpath = os.path.join(reporootdir, relpath)
            if os.name != 'nt' and not os.access(binpath, os.X_OK):
                parser.error('--local specified, but %r not found or '
                             'not executable' % binpath)
            setattr(options, attr, binpath)

    if (options.chg or options.with_chg) and os.name == 'nt':
        parser.error('chg does not work on %s' % os.name)
    if options.with_chg:
        options.chg = False # no installation to temporary location
        options.with_chg = canonpath(_bytespath(options.with_chg))
        if not (os.path.isfile(options.with_chg) and
                os.access(options.with_chg, os.X_OK)):
            parser.error('--with-chg must specify a chg executable')
    if options.chg and options.with_hg:
        # chg shares installation location with hg
        parser.error('--chg does not work when --with-hg is specified '
                     '(use --with-chg instead)')

    if options.color == 'always' and not pygmentspresent:
        sys.stderr.write('warning: --color=always ignored because '
                         'pygments is not installed\n')

    if options.bisect_repo and not options.known_good_rev:
        parser.error("--bisect-repo cannot be used without --known-good-rev")

    # --- networking -------------------------------------------------------
    global useipv6
    if options.ipv6:
        useipv6 = checksocketfamily('AF_INET6')
    else:
        # only use IPv6 if IPv4 is unavailable and IPv6 is available
        useipv6 = ((not checksocketfamily('AF_INET'))
                   and checksocketfamily('AF_INET6'))

    # --- coverage ---------------------------------------------------------
    options.anycoverage = options.cover or options.annotate or options.htmlcov
    if options.anycoverage:
        try:
            import coverage
            covver = version.StrictVersion(coverage.__version__).version
            if covver < (3, 3):
                parser.error('coverage options require coverage 3.3 or later')
        except ImportError:
            parser.error('coverage options now require the coverage package')

    if options.anycoverage and options.local:
        # this needs some path mangling somewhere, I guess
        parser.error("sorry, coverage options do not work when --local "
                     "is specified")

    if options.anycoverage and options.with_hg:
        parser.error("sorry, coverage options do not work when --with-hg "
                     "is specified")

    global verbose
    if options.verbose:
        verbose = ''

    if options.tmpdir:
        options.tmpdir = canonpath(options.tmpdir)

    # --- scheduling and timeouts ------------------------------------------
    if options.jobs < 1:
        parser.error('--jobs must be positive')
    if options.interactive and options.debug:
        parser.error("-i/--interactive and -d/--debug are incompatible")
    if options.debug:
        if options.timeout != defaults['timeout']:
            sys.stderr.write(
                'warning: --timeout option ignored with --debug\n')
        if options.slowtimeout != defaults['slowtimeout']:
            sys.stderr.write(
                'warning: --slowtimeout option ignored with --debug\n')
        options.timeout = 0
        options.slowtimeout = 0

    # --- Python 2/3 interplay ---------------------------------------------
    if options.py3k_warnings:
        if PYTHON3:
            parser.error(
                '--py3k-warnings can only be used on Python 2.7')
    if options.with_python3:
        if PYTHON3:
            parser.error('--with-python3 cannot be used when executing with '
                         'Python 3')

        options.with_python3 = canonpath(options.with_python3)
        # Verify Python3 executable is acceptable.
        proc = subprocess.Popen([options.with_python3, b'--version'],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        out, _err = proc.communicate()
        ret = proc.wait()
        if ret != 0:
            parser.error('could not determine version of python 3')
        if not out.startswith('Python '):
            parser.error('unexpected output from python3 --version: %s' %
                         out)
        vers = version.LooseVersion(out[len('Python '):])
        if vers < version.LooseVersion('3.5.0'):
            parser.error('--with-python3 version must be 3.5.0 or greater; '
                         'got %s' % out)

    # --- black/white lists ------------------------------------------------
    if options.blacklist:
        options.blacklist = parselistfiles(options.blacklist, 'blacklist')
    if options.whitelist:
        options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
    else:
        options.whitelisted = {}

    if options.showchannels:
        options.nodiff = True

    return options
568 568
def rename(src, dst):
    """Move *src* to *dst*, overwriting any existing destination.

    Unlike os.rename() this works across filesystems and with an open
    destination, at the cost of atomicity (copy then delete).
    """
    shutil.copy(src, dst)
    os.remove(src)
575 575
# On Python 3 the outputs are bytes, so route unified_diff through
# difflib.diff_bytes.
_unified_diff = difflib.unified_diff
if PYTHON3:
    import functools
    _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)

def getdiff(expected, output, ref, err):
    """Diff expected vs actual output lines.

    Returns (servefail, lines) where servefail flags the tell-tale
    "child process failed to start" abort and lines is the unified diff.
    """
    servefail = False
    lines = []
    for line in _unified_diff(expected, output, ref, err):
        if line.startswith((b'+++', b'---')):
            # Normalize path separators and strip trailing blanks in the
            # file headers so diffs compare cleanly across platforms.
            line = line.replace(b'\\', b'/')
            if line.endswith(b' \n'):
                line = line[:-2] + b'\n'
        lines.append(line)
        if not servefail and line.startswith(
            b'+ abort: child process failed to start'):
            servefail = True

    return servefail, lines
595 595
# False disables verbose logging entirely; parseargs() replaces it with a
# (possibly empty) string prefix when -v is given.
verbose = False
def vlog(*msg):
    """Log only when in verbose mode."""
    if verbose is False:
        return None
    return log(*msg)
603 603
# Bytes that break XML even in a CDATA block: control characters 0-31
# sans \t, \n and \r
CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")

# Match feature conditionalized output lines in the form, capturing the feature
# list in group 2, and the preceeding line output in group 1:
#
#   output..output (feature !)\n
optline = re.compile(b'(.*) \((.+?) !\)\n$')

def cdatasafe(data):
    """Make a string safe to include in a CDATA block.

    Certain control characters are illegal in a CDATA block, and
    there's no way to include a ]]> in a CDATA either. This function
    replaces illegal bytes with ? and adds a space between the ]] so
    that it won't break the CDATA block.
    """
    cleaned = CDATA_EVIL.sub(b'?', data)
    return cleaned.replace(b']]>', b'] ]>')
623 623
def log(*msg):
    """Log something to stdout.

    Arguments are strings to print.
    """
    # iolock serializes output across worker threads so messages do not
    # interleave.
    with iolock:
        if verbose:
            # In verbose mode 'verbose' holds a string prefix.
            print(verbose, end=' ')
        for part in msg:
            print(part, end=' ')
        print()
        sys.stdout.flush()
636 636
def highlightdiff(line, color):
    """Return *line* (bytes) colorized as diff output when *color* is true."""
    if not color:
        return line
    assert pygmentspresent
    highlighted = pygments.highlight(line.decode('latin1'), difflexer,
                                     terminal256formatter)
    return highlighted.encode('latin1')
643 643
def highlightmsg(msg, color):
    """Return *msg* (str) colorized with the result lexer when *color* is true."""
    if not color:
        return msg
    assert pygmentspresent
    return pygments.highlight(msg, runnerlexer, runnerformatter)
649 649
def terminate(proc):
    """Terminate subprocess"""
    vlog('# Terminating process %d' % proc.pid)
    # The process may already be gone; a failed terminate is not an error.
    try:
        proc.terminate()
    except OSError:
        pass
657 657
def killdaemons(pidfile):
    """Kill daemons listed in *pidfile*, removing the file afterwards."""
    # Imported lazily; the killdaemons helper module lives alongside this
    # script in the tests directory.
    import killdaemons as kdmod
    return kdmod.killdaemons(pidfile, tryhard=False, remove=True,
                             logfn=vlog)
662 662
663 663 class Test(unittest.TestCase):
664 664 """Encapsulates a single, runnable test.
665 665
666 666 While this class conforms to the unittest.TestCase API, it differs in that
667 667 instances need to be instantiated manually. (Typically, unittest.TestCase
668 668 classes are instantiated automatically by scanning modules.)
669 669 """
670 670
671 671 # Status code reserved for skipped tests (used by hghave).
672 672 SKIPPED_STATUS = 80
673 673
674 674 def __init__(self, path, outputdir, tmpdir, keeptmpdir=False,
675 675 debug=False,
676 676 first=False,
677 677 timeout=None,
678 678 startport=None, extraconfigopts=None,
679 679 py3kwarnings=False, shell=None, hgcommand=None,
680 680 slowtimeout=None, usechg=False,
681 681 useipv6=False):
682 682 """Create a test from parameters.
683 683
684 684 path is the full path to the file defining the test.
685 685
686 686 tmpdir is the main temporary directory to use for this test.
687 687
688 688 keeptmpdir determines whether to keep the test's temporary directory
689 689 after execution. It defaults to removal (False).
690 690
691 691 debug mode will make the test execute verbosely, with unfiltered
692 692 output.
693 693
694 694 timeout controls the maximum run time of the test. It is ignored when
695 695 debug is True. See slowtimeout for tests with #require slow.
696 696
697 697 slowtimeout overrides timeout if the test has #require slow.
698 698
699 699 startport controls the starting port number to use for this test. Each
700 700 test will reserve 3 port numbers for execution. It is the caller's
701 701 responsibility to allocate a non-overlapping port range to Test
702 702 instances.
703 703
704 704 extraconfigopts is an iterable of extra hgrc config options. Values
705 705 must have the form "key=value" (something understood by hgrc). Values
706 706 of the form "foo.key=value" will result in "[foo] key=value".
707 707
708 708 py3kwarnings enables Py3k warnings.
709 709
710 710 shell is the shell to execute tests in.
711 711 """
712 712 if timeout is None:
713 713 timeout = defaults['timeout']
714 714 if startport is None:
715 715 startport = defaults['port']
716 716 if slowtimeout is None:
717 717 slowtimeout = defaults['slowtimeout']
718 718 self.path = path
719 719 self.bname = os.path.basename(path)
720 720 self.name = _strpath(self.bname)
721 721 self._testdir = os.path.dirname(path)
722 722 self._outputdir = outputdir
723 723 self._tmpname = os.path.basename(path)
724 724 self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)
725 725
726 726 self._threadtmp = tmpdir
727 727 self._keeptmpdir = keeptmpdir
728 728 self._debug = debug
729 729 self._first = first
730 730 self._timeout = timeout
731 731 self._slowtimeout = slowtimeout
732 732 self._startport = startport
733 733 self._extraconfigopts = extraconfigopts or []
734 734 self._py3kwarnings = py3kwarnings
735 735 self._shell = _bytespath(shell)
736 736 self._hgcommand = hgcommand or b'hg'
737 737 self._usechg = usechg
738 738 self._useipv6 = useipv6
739 739
740 740 self._aborted = False
741 741 self._daemonpids = []
742 742 self._finished = None
743 743 self._ret = None
744 744 self._out = None
745 745 self._skipped = None
746 746 self._testtmp = None
747 747 self._chgsockdir = None
748 748
749 749 self._refout = self.readrefout()
750 750
751 751 def readrefout(self):
752 752 """read reference output"""
753 753 # If we're not in --debug mode and reference output file exists,
754 754 # check test output against it.
755 755 if self._debug:
756 756 return None # to match "out is None"
757 757 elif os.path.exists(self.refpath):
758 758 with open(self.refpath, 'rb') as f:
759 759 return f.read().splitlines(True)
760 760 else:
761 761 return []
762 762
763 763 # needed to get base class __repr__ running
764 764 @property
765 765 def _testMethodName(self):
766 766 return self.name
767 767
768 768 def __str__(self):
769 769 return self.name
770 770
771 771 def shortDescription(self):
772 772 return self.name
773 773
774 774 def setUp(self):
775 775 """Tasks to perform before run()."""
776 776 self._finished = False
777 777 self._ret = None
778 778 self._out = None
779 779 self._skipped = None
780 780
781 781 try:
782 782 os.mkdir(self._threadtmp)
783 783 except OSError as e:
784 784 if e.errno != errno.EEXIST:
785 785 raise
786 786
787 787 name = self._tmpname
788 788 self._testtmp = os.path.join(self._threadtmp, name)
789 789 os.mkdir(self._testtmp)
790 790
791 791 # Remove any previous output files.
792 792 if os.path.exists(self.errpath):
793 793 try:
794 794 os.remove(self.errpath)
795 795 except OSError as e:
796 796 # We might have raced another test to clean up a .err
797 797 # file, so ignore ENOENT when removing a previous .err
798 798 # file.
799 799 if e.errno != errno.ENOENT:
800 800 raise
801 801
802 802 if self._usechg:
803 803 self._chgsockdir = os.path.join(self._threadtmp,
804 804 b'%s.chgsock' % name)
805 805 os.mkdir(self._chgsockdir)
806 806
    def run(self, result):
        """Run this test and report results against a TestResult instance."""
        # This function is extremely similar to unittest.TestCase.run(). Once
        # we require Python 2.7 (or at least its version of unittest), this
        # function can largely go away.
        self._result = result
        result.startTest(self)
        try:
            try:
                self.setUp()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                return

            success = False
            try:
                self.runTest()
            except KeyboardInterrupt:
                self._aborted = True
                raise
            except unittest.SkipTest as e:
                result.addSkip(self, str(e))
                # The base class will have already counted this as a
                # test we "ran", but we want to exclude skipped tests
                # from those we count towards those run.
                result.testsRun -= 1
            except self.failureException as e:
                # This differs from unittest in that we don't capture
                # the stack trace. This is for historical reasons and
                # this decision could be revisited in the future,
                # especially for PythonTest instances.
                if result.addFailure(self, str(e)):
                    success = True
            except Exception:
                result.addError(self, sys.exc_info())
            else:
                success = True

            try:
                self.tearDown()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                success = False

            if success:
                result.addSuccess(self)
        finally:
            # Always notify the result object, even when aborted, so the
            # runner's bookkeeping stays consistent.
            result.stopTest(self, interrupted=self._aborted)

    def runTest(self):
        """Run this test instance.

        This will return a tuple describing the result of the test.
        """
        env = self._getenv()
        self._genrestoreenv(env)
        self._daemonpids.append(env['DAEMON_PIDS'])
        self._createhgrc(env['HGRCPATH'])

        vlog('# Test', self.name)

        ret, out = self._run(env)
        self._finished = True
        self._ret = ret
        self._out = out

        def describe(ret):
            # Human-readable summary of a child exit status.
            if ret < 0:
                return 'killed by signal: %d' % -ret
            return 'returned error code %d' % ret

        self._skipped = False

        if ret == self.SKIPPED_STATUS:
            if out is None: # Debug mode, nothing to parse.
                missing = ['unknown']
                failed = None
            else:
                missing, failed = TTest.parsehghaveoutput(out)

            if not missing:
                missing = ['skipped']

            if failed:
                self.fail('hg have failed checking for %s' % failed[-1])
            else:
                self._skipped = True
                raise unittest.SkipTest(missing[-1])
        elif ret == 'timeout':
            self.fail('timed out')
        elif ret is False:
            self.fail('no result code from test')
        elif out != self._refout:
            # Diff generation may rely on written .err file.
            if (ret != 0 or out != self._refout) and not self._skipped \
                and not self._debug:
                with open(self.errpath, 'wb') as f:
                    for line in out:
                        f.write(line)

            # The result object handles diff calculation for us.
            # firstlock serializes the mismatch report so only one test
            # gets to print/prompt at a time.
            with firstlock:
                if self._result.addOutputMismatch(self, ret, out, self._refout):
                    # change was accepted, skip failing
                    return
                if self._first:
                    global firsterror
                    firsterror = True

            if ret:
                msg = 'output changed and ' + describe(ret)
            else:
                msg = 'output changed'

            self.fail(msg)
        elif ret:
            self.fail(describe(ret))

    def tearDown(self):
        """Tasks to perform after run()."""
        for entry in self._daemonpids:
            killdaemons(entry)
        self._daemonpids = []

        if self._keeptmpdir:
            log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
                (self._testtmp.decode('utf-8'),
                 self._threadtmp.decode('utf-8')))
        else:
            shutil.rmtree(self._testtmp, True)
            shutil.rmtree(self._threadtmp, True)

        if self._usechg:
            # chgservers will stop automatically after they find the socket
            # files are deleted
            shutil.rmtree(self._chgsockdir, True)

        # Preserve failing output in the .err file for later inspection.
        if (self._ret != 0 or self._out != self._refout) and not self._skipped \
            and not self._debug and self._out:
            with open(self.errpath, 'wb') as f:
                for line in self._out:
                    f.write(line)

        vlog("# Ret was:", self._ret, '(%s)' % self.name)

    def _run(self, env):
        # This should be implemented in child classes to run tests.
        raise unittest.SkipTest('unknown test type')

    def abort(self):
        """Terminate execution of this test."""
        self._aborted = True

    def _portmap(self, i):
        # Replacement pair mapping the concrete ":<port>" in output back to
        # the ":$HGPORT<i>" placeholder used in expected test output.
        offset = b'' if i == 0 else b'%d' % i
        return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)

    def _getreplacements(self):
        """Obtain a mapping of text replacements to apply to test output.

        Test output needs to be normalized so it can be compared to expected
        output. This function defines how some of that normalization will
        occur.
        """
        r = [
            # This list should be parallel to defineport in _getenv
            self._portmap(0),
            self._portmap(1),
            self._portmap(2),
            (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
            (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
        ]
        r.append((self._escapepath(self._testtmp), b'$TESTTMP'))

        # A test directory may ship extra substitutions in common-pattern.py;
        # it is exec'd and must define 'substitutions' as (pattern, repl)
        # pairs.
        replacementfile = os.path.join(self._testdir, b'common-pattern.py')

        if os.path.exists(replacementfile):
            data = {}
            with open(replacementfile, mode='rb') as source:
                # the intermediate 'compile' step help with debugging
                code = compile(source.read(), replacementfile, 'exec')
                exec(code, data)
            for value in data.get('substitutions', ()):
                if len(value) != 2:
                    msg = 'malformatted substitution in %s: %r'
                    msg %= (replacementfile, value)
                    raise ValueError(msg)
                r.append(value)
        return r

    def _escapepath(self, p):
        # On Windows, build a case-insensitive regex for the path that also
        # tolerates either path separator; elsewhere a plain re.escape works.
        # NOTE(review): iterating a bytes object yields ints on Python 3,
        # which have no .isalpha() -- this branch looks Python 2 oriented;
        # confirm behavior on Windows under Python 3.
        if os.name == 'nt':
            return (
                (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
                    c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
                    for c in p))
            )
        else:
            return re.escape(p)

    def _localip(self):
        # Loopback address matching the configured IP family.
        if self._useipv6:
            return b'::1'
        else:
            return b'127.0.0.1'
1018 1018
    def _genrestoreenv(self, testenv):
        """Generate a script that can be used by tests to restore the original
        environment."""
        # Put the restoreenv script inside self._threadtmp
        scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
        testenv['HGTEST_RESTOREENV'] = scriptpath

        # Only restore environment variable names that the shell allows
        # us to export.
        name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')

        # Do not restore these variables; otherwise tests would fail.
        reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}

        with open(scriptpath, 'w') as envf:
            for name, value in origenviron.items():
                if not name_regex.match(name):
                    # Skip environment variables with unusual names not
                    # allowed by most shells.
                    continue
                if name in reqnames:
                    continue
                envf.write('%s=%s\n' % (name, shellquote(value)))

            # Variables the harness added on top of the original environment
            # must be unset to get back to a pristine state.
            for name in testenv:
                if name in origenviron or name in reqnames:
                    continue
                envf.write('unset %s\n' % (name,))

    def _getenv(self):
        """Obtain environment variables to use during test execution."""
        def defineport(i):
            # Export HGPORT, HGPORT1, HGPORT2, ... for the test script.
            offset = '' if i == 0 else '%s' % i
            env["HGPORT%s" % offset] = '%s' % (self._startport + i)
        env = os.environ.copy()
        env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
        env['HGEMITWARNINGS'] = '1'
        env['TESTTMP'] = self._testtmp
        env['TESTNAME'] = self.name
        env['HOME'] = self._testtmp
        # This number should match portneeded in _getport
        for port in xrange(3):
            # This list should be parallel to _portmap in _getreplacements
            defineport(port)
        env["HGRCPATH"] = os.path.join(self._threadtmp, b'.hgrc')
        env["DAEMON_PIDS"] = os.path.join(self._threadtmp, b'daemon.pids')
        env["HGEDITOR"] = ('"' + sys.executable + '"'
                           + ' -c "import sys; sys.exit(0)"')
        env["HGMERGE"] = "internal:merge"
        env["HGUSER"] = "test"
        env["HGENCODING"] = "ascii"
        env["HGENCODINGMODE"] = "strict"
        env['HGIPV6'] = str(int(self._useipv6))

        # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
        # IP addresses.
        env['LOCALIP'] = self._localip()

        # Reset some environment variables to well-known values so that
        # the tests produce repeatable output.
        env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
        env['TZ'] = 'GMT'
        env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
        env['COLUMNS'] = '80'
        env['TERM'] = 'xterm'

        # Strip variables that could leak the user's configuration or
        # proxies into the tests.
        for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
                  'HGPLAIN HGPLAINEXCEPT EDITOR VISUAL PAGER ' +
                  'NO_PROXY CHGDEBUG').split():
            if k in env:
                del env[k]

        # unset env related to hooks
        for k in list(env):
            if k.startswith('HG_'):
                del env[k]

        if self._usechg:
            env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')

        return env
1100 1100
    def _createhgrc(self, path):
        """Create an hgrc file for this test."""
        with open(path, 'wb') as hgrc:
            hgrc.write(b'[ui]\n')
            hgrc.write(b'slash = True\n')
            hgrc.write(b'interactive = False\n')
            hgrc.write(b'mergemarkers = detailed\n')
            hgrc.write(b'promptecho = True\n')
            hgrc.write(b'[defaults]\n')
            hgrc.write(b'[devel]\n')
            hgrc.write(b'all-warnings = true\n')
            hgrc.write(b'default-date = 0 0\n')
            hgrc.write(b'[largefiles]\n')
            hgrc.write(b'usercache = %s\n' %
                       (os.path.join(self._testtmp, b'.cache/largefiles')))
            hgrc.write(b'[lfs]\n')
            hgrc.write(b'usercache = %s\n' %
                       (os.path.join(self._testtmp, b'.cache/lfs')))
            hgrc.write(b'[web]\n')
            hgrc.write(b'address = localhost\n')
            hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii'))

            # Honor extra config options of the form section.key=value.
            for opt in self._extraconfigopts:
                section, key = opt.encode('utf-8').split(b'.', 1)
                assert b'=' in key, ('extra config opt %s must '
                                     'have an = for assignment' % opt)
                hgrc.write(b'[%s]\n%s\n' % (section, key))

    def fail(self, msg):
        # unittest differentiates between errored and failed.
        # Failed is denoted by AssertionError (by default at least).
        raise AssertionError(msg)

    def _runcommand(self, cmd, env, normalizenewlines=False):
        """Run command in a sub-process, capturing the output (stdout and
        stderr).

        Return a tuple (exitcode, output). output is None in debug mode.
        """
        if self._debug:
            # In debug mode the child inherits our stdio; nothing to capture.
            proc = subprocess.Popen(cmd, shell=True, cwd=self._testtmp,
                                    env=env)
            ret = proc.wait()
            return (ret, None)

        proc = Popen4(cmd, self._testtmp, self._timeout, env)
        def cleanup():
            terminate(proc)
            ret = proc.wait()
            if ret == 0:
                # The child exited cleanly only because we terminated it;
                # report a SIGTERM-style status instead.
                ret = signal.SIGTERM << 8
            killdaemons(env['DAEMON_PIDS'])
            return ret

        output = ''
        proc.tochild.close()

        try:
            output = proc.fromchild.read()
        except KeyboardInterrupt:
            vlog('# Handling keyboard interrupt')
            cleanup()
            raise

        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)

        if proc.timeout:
            ret = 'timeout'

        if ret:
            killdaemons(env['DAEMON_PIDS'])

        # Normalize ports, temp paths, etc. so the output can be compared
        # against the recorded reference output.
        for s, r in self._getreplacements():
            output = re.sub(s, r, output)

        if normalizenewlines:
            output = output.replace('\r\n', '\n')

        return ret, output.splitlines(True)
1182 1182
class PythonTest(Test):
    """A Python-based test."""

    @property
    def refpath(self):
        # Expected output lives next to the test as a ".out" file.
        return os.path.join(self._testdir, b'%s.out' % self.bname)

    def _run(self, env):
        py3kswitch = self._py3kwarnings and b' -3' or b''
        cmd = b'%s%s "%s"' % (PYTHON, py3kswitch, self.path)
        vlog("# Running", cmd)
        # Windows needs \r\n -> \n normalization for output comparison.
        normalizenewlines = os.name == 'nt'
        result = self._runcommand(cmd, env,
                                  normalizenewlines=normalizenewlines)
        if self._aborted:
            raise KeyboardInterrupt()

        return result
1201 1201
# Some glob patterns apply only in some circumstances, so the script
# might want to remove (glob) annotations that otherwise should be
# retained.
checkcodeglobpats = [
    # On Windows it looks like \ doesn't require a (glob), but we know
    # better.
    re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
    re.compile(br'^moving \S+/.*[^)]$'),
    re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
    # Not all platforms have 127.0.0.1 as loopback (though most do),
    # so we always glob that too.
    re.compile(br'.*\$LOCALIP.*$'),
]

# bchr(i) yields the single byte with ordinal i as a bytes object:
# Python 2's chr() already returns a byte str, Python 3 needs bytes([i]).
bchr = chr
if PYTHON3:
    bchr = lambda x: bytes([x])
1219 1219
class TTest(Test):
    """A "t test" is a test backed by a .t file."""

    # Prefixes emitted by the hghave helper when a feature is missing or
    # when the feature check itself failed; parsehghaveoutput() scans for
    # these in skipped-test output.
    SKIPPED_PREFIX = b'skipped: '
    FAILED_PREFIX = b'hghave check failed: '
    # Detects bytes that must be (esc)-escaped before storage in a .t file.
    NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search

    ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
    ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
    ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})

    def __init__(self, path, *args, **kwds):
        # accept an extra "case" parameter
        case = kwds.pop('case', None)
        self._case = case
        self._allcases = parsettestcases(path)
        super(TTest, self).__init__(path, *args, **kwds)
        if case:
            # Make the name, .err path, and temp dir unique per test case.
            self.name = '%s (case %s)' % (self.name, _strpath(case))
            self.errpath = b'%s.%s.err' % (self.errpath[:-4], case)
            self._tmpname += b'-%s' % case
        # Memoization cache for _hghave results, keyed by the joined
        # requirement string.
        self._have = {}

    @property
    def refpath(self):
        # The .t file itself holds the expected output.
        return os.path.join(self._testdir, self.bname)

    def _run(self, env):
        with open(self.path, 'rb') as f:
            lines = f.readlines()

        # .t file is both reference output and the test input, keep reference
        # output updated with the the test input. This avoids some race
        # conditions where the reference output does not match the actual test.
        if self._refout is not None:
            self._refout = lines

        salt, script, after, expected = self._parsetest(lines)

        # Write out the generated script.
        fname = b'%s.sh' % self._testtmp
        with open(fname, 'wb') as f:
            for l in script:
                f.write(l)

        cmd = b'%s "%s"' % (self._shell, fname)
        vlog("# Running", cmd)

        exitcode, output = self._runcommand(cmd, env)

        if self._aborted:
            raise KeyboardInterrupt()

        # Do not merge output if skipped. Return hghave message instead.
        # Similarly, with --debug, output is None.
        if exitcode == self.SKIPPED_STATUS or output is None:
            return exitcode, output

        return self._processoutput(exitcode, output, salt, after, expected)

    def _hghave(self, reqs):
        """Run the hghave helper for the given feature list.

        Returns (True, None) when all features are present, otherwise
        (False, message-bytes). Results are memoized in self._have.
        """
        allreqs = b' '.join(reqs)
        if allreqs in self._have:
            return self._have.get(allreqs)

        # TODO do something smarter when all other uses of hghave are gone.
        runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
        tdir = runtestdir.replace(b'\\', b'/')
        proc = Popen4(b'%s -c "%s/hghave %s"' %
                      (self._shell, tdir, allreqs),
                      self._testtmp, 0, self._getenv())
        stdout, stderr = proc.communicate()
        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)
        if ret == 2:
            # hghave itself failed (e.g. bad usage); print its message and
            # abort the whole runner.
            print(stdout.decode('utf-8'))
            sys.exit(1)

        if ret != 0:
            self._have[allreqs] = (False, stdout)
            return False, stdout

        # A "slow" requirement bumps this test's timeout.
        if b'slow' in reqs:
            self._timeout = self._slowtimeout

        self._have[allreqs] = (True, None)
        return True, None

    def _iftest(self, args):
        # implements "#if"
        # Arguments that name a declared test case (or its "no-" negation)
        # are matched against the currently-running case; all remaining
        # arguments are treated as hghave feature requirements.
        reqs = []
        for arg in args:
            if arg.startswith(b'no-') and arg[3:] in self._allcases:
                if arg[3:] == self._case:
                    return False
            elif arg in self._allcases:
                if arg != self._case:
                    return False
            else:
                reqs.append(arg)
        return self._hghave(reqs)[0]
1322 1322
    def _parsetest(self, lines):
        # We generate a shell script which outputs unique markers to line
        # up script results with our source. These markers include input
        # line number and the last return code.
        salt = b"SALT%d" % time.time()
        def addsalt(line, inpython):
            if inpython:
                script.append(b'%s %d 0\n' % (salt, line))
            else:
                script.append(b'echo %s %d $?\n' % (salt, line))

        script = []

        # After we run the shell script, we re-unify the script output
        # with non-active parts of the source, with synchronization by our
        # SALT line number markers. The after table contains the non-active
        # components, ordered by line number.
        after = {}

        # Expected shell script output.
        expected = {}

        pos = prepos = -1

        # True or False when in a true or false conditional section
        skipping = None

        # We keep track of whether or not we're in a Python block so we
        # can generate the surrounding doctest magic.
        inpython = False

        if self._debug:
            script.append(b'set -x\n')
        if self._hgcommand != b'hg':
            script.append(b'alias hg="%s"\n' % self._hgcommand)
        if os.getenv('MSYSTEM'):
            script.append(b'alias pwd="pwd -W"\n')
        if self._case:
            # Export the current test case name to the generated script.
            if isinstance(self._case, str):
                quoted = shellquote(self._case)
            else:
                quoted = shellquote(self._case.decode('utf8')).encode('utf8')
            script.append(b'TESTCASE=%s\n' % quoted)
            script.append(b'export TESTCASE\n')

        n = 0
        for n, l in enumerate(lines):
            if not l.endswith(b'\n'):
                l += b'\n'
            if l.startswith(b'#require'):
                lsplit = l.split()
                if len(lsplit) < 2 or lsplit[0] != b'#require':
                    after.setdefault(pos, []).append('  !!! invalid #require\n')
                # #require is honored even inside an #if block, but its
                # feature check is only evaluated when the enclosing
                # conditional is active (skipping is None or False).
                if not skipping:
                    haveresult, message = self._hghave(lsplit[1:])
                    if not haveresult:
                        # Missing requirement: replace the whole script with
                        # one that reports the skip (exit 80 == skipped).
                        script = [b'echo "%s"\nexit 80\n' % message]
                        break
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#if'):
                lsplit = l.split()
                if len(lsplit) < 2 or lsplit[0] != b'#if':
                    after.setdefault(pos, []).append('  !!! invalid #if\n')
                if skipping is not None:
                    after.setdefault(pos, []).append('  !!! nested #if\n')
                skipping = not self._iftest(lsplit[1:])
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#else'):
                if skipping is None:
                    after.setdefault(pos, []).append('  !!! missing #if\n')
                skipping = not skipping
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#endif'):
                if skipping is None:
                    after.setdefault(pos, []).append('  !!! missing #if\n')
                skipping = None
                after.setdefault(pos, []).append(l)
            elif skipping:
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'  >>> '): # python inlines
                after.setdefault(pos, []).append(l)
                prepos = pos
                pos = n
                if not inpython:
                    # We've just entered a Python block. Add the header.
                    inpython = True
                    addsalt(prepos, False) # Make sure we report the exit code.
                    script.append(b'%s -m heredoctest <<EOF\n' % PYTHON)
                addsalt(n, True)
                script.append(l[2:])
            elif l.startswith(b'  ... '): # python inlines
                after.setdefault(prepos, []).append(l)
                script.append(l[2:])
            elif l.startswith(b'  $ '): # commands
                if inpython:
                    script.append(b'EOF\n')
                    inpython = False
                after.setdefault(pos, []).append(l)
                prepos = pos
                pos = n
                addsalt(n, False)
                cmd = l[4:].split()
                # A bare "cd" must abort the test if it fails, otherwise
                # subsequent commands would run in the wrong directory.
                if len(cmd) == 2 and cmd[0] == b'cd':
                    l = b'  $ cd %s || exit 1\n' % cmd[1]
                script.append(l[4:])
            elif l.startswith(b'  > '): # continuations
                after.setdefault(prepos, []).append(l)
                script.append(l[4:])
            elif l.startswith(b'  '): # results
                # Queue up a list of expected results.
                expected.setdefault(pos, []).append(l[2:])
            else:
                if inpython:
                    script.append(b'EOF\n')
                    inpython = False
                # Non-command/result. Queue up for merged output.
                after.setdefault(pos, []).append(l)

        if inpython:
            script.append(b'EOF\n')
        if skipping is not None:
            after.setdefault(pos, []).append('  !!! missing #endif\n')
        addsalt(n + 1, False)

        return salt, script, after, expected
1447 1448
    def _processoutput(self, exitcode, output, salt, after, expected):
        # Merge the script output back into a unified test.
        warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
        if exitcode != 0:
            warnonly = 3

        pos = -1
        postout = []
        for l in output:
            # Split off the SALT marker (if any); lout is actual output,
            # lcmd carries "<source line> <return code>".
            lout, lcmd = l, None
            if salt in l:
                lout, lcmd = l.split(salt, 1)

            while lout:
                if not lout.endswith(b'\n'):
                    lout += b' (no-eol)\n'

                # Find the expected output at the current position.
                els = [None]
                if expected.get(pos, None):
                    els = expected[pos]

                i = 0
                optional = []
                while i < len(els):
                    el = els[i]

                    r = self.linematch(el, lout)
                    if isinstance(r, str):
                        if r == '-glob':
                            lout = ''.join(el.rsplit(' (glob)', 1))
                            r = '' # Warn only this line.
                        elif r == "retry":
                            postout.append(b'  ' + el)
                            els.pop(i)
                            break
                        else:
                            log('\ninfo, unknown linematch result: %r\n' % r)
                            r = False
                    if r:
                        els.pop(i)
                        break
                    if el:
                        # Track (?)-optional and feature-conditional lines
                        # so they can be dropped if never matched.
                        if el.endswith(b" (?)\n"):
                            optional.append(i)
                        else:
                            m = optline.match(el)
                            if m:
                                conditions = [
                                    c for c in m.group(2).split(b' ')]

                                if not self._iftest(conditions):
                                    optional.append(i)

                    i += 1

                if r:
                    if r == "retry":
                        continue
                    # clean up any optional leftovers
                    for i in optional:
                        postout.append(b'  ' + els[i])
                    for i in reversed(optional):
                        del els[i]
                    postout.append(b'  ' + el)
                else:
                    if self.NEEDESCAPE(lout):
                        lout = TTest._stringescape(b'%s (esc)\n' %
                                                   lout.rstrip(b'\n'))
                    postout.append(b'  ' + lout) # Let diff deal with it.
                    if r != '': # If line failed.
                        warnonly = 3 # for sure not
                    elif warnonly == 1: # Is "not yet" and line is warn only.
                        warnonly = 2 # Yes do warn.
                break
            else:
                # while-else: only reached when lout was empty to begin
                # with; clean up any optional leftovers
                while expected.get(pos, None):
                    el = expected[pos].pop(0)
                    if el:
                        if not el.endswith(b" (?)\n"):
                            m = optline.match(el)
                            if m:
                                conditions = [c for c in m.group(2).split(b' ')]

                                if self._iftest(conditions):
                                    # Don't append as optional line
                                    continue
                            else:
                                continue
                    postout.append(b'  ' + el)

            if lcmd:
                # Add on last return code.
                ret = int(lcmd.split()[1])
                if ret != 0:
                    postout.append(b'  [%d]\n' % ret)
                if pos in after:
                    # Merge in non-active test bits.
                    postout += after.pop(pos)
                pos = int(lcmd.split()[0])

        if pos in after:
            postout += after.pop(pos)

        if warnonly == 2:
            exitcode = False # Set exitcode to warned.

        return exitcode, postout
1557 1558
1558 1559 @staticmethod
1559 1560 def rematch(el, l):
1560 1561 try:
1561 1562 el = b'(?:' + el + b')'
1562 1563 # use \Z to ensure that the regex matches to the end of the string
1563 1564 if os.name == 'nt':
1564 1565 return re.match(el + br'\r?\n\Z', l)
1565 1566 return re.match(el + br'\n\Z', l)
1566 1567 except re.error:
1567 1568 # el is an invalid regex
1568 1569 return False
1569 1570
    @staticmethod
    def globmatch(el, l):
        # The only supported special characters are * and ? plus / which also
        # matches \ on windows. Escaping of these characters is supported.
        if el + b'\n' == l:
            if os.altsep:
                # matching on "/" is not needed for this line
                for pat in checkcodeglobpats:
                    if pat.match(el):
                        return True
                # Exact match that did not need globbing: signal that the
                # (glob) annotation could be removed.
                return b'-glob'
            return True
        el = el.replace(b'$LOCALIP', b'*')
        # Translate the glob byte-by-byte into an equivalent regex.
        i, n = 0, len(el)
        res = b''
        while i < n:
            c = el[i:i + 1]
            i += 1
            if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
                # Escaped glob metacharacter: keep it literal.
                res += el[i - 1:i + 1]
                i += 1
            elif c == b'*':
                res += b'.*'
            elif c == b'?':
                res += b'.'
            elif c == b'/' and os.altsep:
                # "/" also matches "\" on Windows.
                res += b'[/\\\\]'
            else:
                res += re.escape(c)
        return TTest.rematch(res, l)

    def linematch(self, el, l):
        """Compare expected line el against output line l.

        Returns True on a match, False/"retry"/"-glob" otherwise (string
        results carry extra meaning for _processoutput).
        """
        retry = False
        if el == l: # perfect match (fast)
            return True
        if el:
            if el.endswith(b" (?)\n"):
                retry = "retry"
                el = el[:-5] + b"\n"
            else:
                m = optline.match(el)
                if m:
                    conditions = [c for c in m.group(2).split(b' ')]

                    el = m.group(1) + b"\n"
                    if not self._iftest(conditions):
                        retry = "retry" # Not required by listed features

            if el.endswith(b" (esc)\n"):
                if PYTHON3:
                    el = el[:-7].decode('unicode_escape') + '\n'
                    el = el.encode('utf-8')
                else:
                    el = el[:-7].decode('string-escape') + '\n'
            if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
                return True
            if el.endswith(b" (re)\n"):
                return TTest.rematch(el[:-6], l) or retry
            if el.endswith(b" (glob)\n"):
                # ignore '(glob)' added to l by 'replacements'
                if l.endswith(b" (glob)\n"):
                    l = l[:-8] + b"\n"
                return TTest.globmatch(el[:-8], l) or retry
            if os.altsep:
                # Retry with path separators normalized to "/".
                _l = l.replace(b'\\', b'/')
                if el == _l or os.name == 'nt' and el[:-1] + b'\r\n' == _l:
                    return True
        return retry
1638 1639
1639 1640 @staticmethod
1640 1641 def parsehghaveoutput(lines):
1641 1642 '''Parse hghave log lines.
1642 1643
1643 1644 Return tuple of lists (missing, failed):
1644 1645 * the missing/unknown features
1645 1646 * the features for which existence check failed'''
1646 1647 missing = []
1647 1648 failed = []
1648 1649 for line in lines:
1649 1650 if line.startswith(TTest.SKIPPED_PREFIX):
1650 1651 line = line.splitlines()[0]
1651 1652 missing.append(line[len(TTest.SKIPPED_PREFIX):].decode('utf-8'))
1652 1653 elif line.startswith(TTest.FAILED_PREFIX):
1653 1654 line = line.splitlines()[0]
1654 1655 failed.append(line[len(TTest.FAILED_PREFIX):].decode('utf-8'))
1655 1656
1656 1657 return missing, failed
1657 1658
    @staticmethod
    def _escapef(m):
        # re.sub() callback: map one unprintable byte to its escaped form.
        return TTest.ESCAPEMAP[m.group(0)]

    @staticmethod
    def _stringescape(s):
        # Escape all unprintable bytes in s so it is safe for a .t file.
        return TTest.ESCAPESUB(TTest._escapef, s)
1665 1666
# iolock serializes writes to the shared output stream across test threads.
iolock = threading.RLock()
# firstlock/firsterror coordinate failure reporting: once the first test
# has failed and printed, later (possibly racy) failures stay quiet.
firstlock = threading.RLock()
firsterror = False
1669 1670
class TestResult(unittest._TextTestResult):
    """Holds results when executing via unittest."""
    # Don't worry too much about accessing the non-public _TextTestResult.
    # It is relatively common in Python testing tools.
    def __init__(self, options, *args, **kwargs):
        super(TestResult, self).__init__(*args, **kwargs)

        self._options = options

        # unittest.TestResult didn't have skipped until 2.7. We need to
        # polyfill it.
        self.skipped = []

        # We have a custom "ignored" result that isn't present in any Python
        # unittest implementation. It is very similar to skipped. It may make
        # sense to map it into skip some day.
        self.ignored = []

        self.times = []
        self._firststarttime = None
        # Data stored for the benefit of generating xunit reports.
        self.successes = []
        self.faildata = {}

        if options.color == 'auto':
            self.color = pygmentspresent and self.stream.isatty()
        elif options.color == 'never':
            self.color = False
        else: # 'always', for testing purposes
            self.color = pygmentspresent

    def addFailure(self, test, reason):
        self.failures.append((test, reason))

        if self._options.first:
            # --first: stop the whole run at the first failure.
            self.stop()
        else:
            with iolock:
                if reason == "timed out":
                    self.stream.write('t')
                else:
                    if not self._options.nodiff:
                        self.stream.write('\n')
                    # Exclude the '\n' from highlighting to lex correctly
                    formatted = 'ERROR: %s output changed\n' % test
                    self.stream.write(highlightmsg(formatted, self.color))
                    self.stream.write('!')

                self.stream.flush()

    def addSuccess(self, test):
        with iolock:
            super(TestResult, self).addSuccess(test)
        self.successes.append(test)

    def addError(self, test, err):
        super(TestResult, self).addError(test, err)
        if self._options.first:
            self.stop()

    # Polyfill.
    def addSkip(self, test, reason):
        self.skipped.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('skipped %s' % reason)
            else:
                self.stream.write('s')
                self.stream.flush()

    def addIgnore(self, test, reason):
        self.ignored.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('ignored %s' % reason)
            else:
                if reason not in ('not retesting', "doesn't match keyword"):
                    self.stream.write('i')
                else:
                    # Retest/keyword filtering still counts as a run test.
                    self.testsRun += 1
                self.stream.flush()
1751 1752
    def addOutputMismatch(self, test, ret, got, expected):
        """Record a mismatch in test output for a particular test.

        Prints the diff (or spawns the --view tool), and with --interactive
        offers to accept the new output as the reference. Returns True when
        the change was accepted.  *ret* (the test's exit code) is currently
        unused here.
        """
        if self.shouldStop or firsterror:
            # don't print, some other test case already failed and
            # printed, we're just stale and probably failed due to our
            # temp dir getting cleaned up.
            return

        accepted = False
        lines = []

        with iolock:
            if self._options.nodiff:
                pass
            elif self._options.view:
                # External diff viewer; os.system wants bytes on Python 3.
                v = self._options.view
                if PYTHON3:
                    v = _bytespath(v)
                os.system(b"%s %s %s" %
                          (v, test.refpath, test.errpath))
            else:
                servefail, lines = getdiff(expected, got,
                                           test.refpath, test.errpath)
                self.stream.write('\n')
                for line in lines:
                    line = highlightdiff(line, self.color)
                    if PYTHON3:
                        # Diff lines are bytes: go through the underlying
                        # binary buffer, flushing the text layer first so
                        # output stays ordered.
                        self.stream.flush()
                        self.stream.buffer.write(line)
                        self.stream.buffer.flush()
                    else:
                        self.stream.write(line)
                        self.stream.flush()

                if servefail:
                    raise test.failureException(
                        'server failed to start (HGPORT=%s)' % test._startport)

            # handle interactive prompt without releasing iolock
            if self._options.interactive:
                if test.readrefout() != expected:
                    # The .t file changed on disk since the run started;
                    # accepting now would clobber those edits.
                    self.stream.write(
                        'Reference output has changed (run again to prompt '
                        'changes)')
                else:
                    self.stream.write('Accept this change? [n] ')
                    answer = sys.stdin.readline().strip()
                    if answer.lower() in ('y', 'yes'):
                        if test.path.endswith(b'.t'):
                            rename(test.errpath, test.path)
                        else:
                            rename(test.errpath, '%s.out' % test.path)
                        accepted = True
            if not accepted:
                self.faildata[test.name] = b''.join(lines)

        return accepted
1809 1810
    def startTest(self, test):
        """Begin *test*: snapshot process times for later reporting."""
        super(TestResult, self).startTest(test)

        # os.times module computes the user time and system time spent by
        # child's processes along with real elapsed time taken by a process.
        # This module has one limitation. It can only work for Linux user
        # and not for Windows.
        test.started = os.times()
        # Index 4 of os.times() is the elapsed wall-clock time.
        if self._firststarttime is None: # thread racy but irrelevant
            self._firststarttime = test.started[4]
1820 1821
1821 1822 def stopTest(self, test, interrupted=False):
1822 1823 super(TestResult, self).stopTest(test)
1823 1824
1824 1825 test.stopped = os.times()
1825 1826
1826 1827 starttime = test.started
1827 1828 endtime = test.stopped
1828 1829 origin = self._firststarttime
1829 1830 self.times.append((test.name,
1830 1831 endtime[2] - starttime[2], # user space CPU time
1831 1832 endtime[3] - starttime[3], # sys space CPU time
1832 1833 endtime[4] - starttime[4], # real time
1833 1834 starttime[4] - origin, # start date in run context
1834 1835 endtime[4] - origin, # end date in run context
1835 1836 ))
1836 1837
1837 1838 if interrupted:
1838 1839 with iolock:
1839 1840 self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
1840 1841 test.name, self.times[-1][3]))
1841 1842
class TestSuite(unittest.TestSuite):
    """Custom unittest TestSuite that knows how to execute Mercurial tests."""

    def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
                 retest=False, keywords=None, loop=False, runs_per_test=1,
                 loadtest=None, showchannels=False,
                 *args, **kwargs):
        """Create a new instance that can run tests with a configuration.

        testdir specifies the directory where tests are executed from. This
        is typically the ``tests`` directory from Mercurial's source
        repository.

        jobs specifies the number of jobs to run concurrently. Each test
        executes on its own thread. Tests actually spawn new processes, so
        state mutation should not be an issue.

        If there is only one job, it will use the main thread.

        whitelist and blacklist denote tests that have been whitelisted and
        blacklisted, respectively. These arguments don't belong in TestSuite.
        Instead, whitelist and blacklist should be handled by the thing that
        populates the TestSuite with tests. They are present to preserve
        backwards compatible behavior which reports skipped tests as part
        of the results.

        retest denotes whether to retest failed tests. This arguably belongs
        outside of TestSuite.

        keywords denotes key words that will be used to filter which tests
        to execute. This arguably belongs outside of TestSuite.

        loop denotes whether to loop over tests forever.
        """
        super(TestSuite, self).__init__(*args, **kwargs)

        self._jobs = jobs
        self._whitelist = whitelist
        self._blacklist = blacklist
        self._retest = retest
        self._keywords = keywords
        self._loop = loop
        self._runs_per_test = runs_per_test
        self._loadtest = loadtest
        self._showchannels = showchannels

    def run(self, result):
        # We have a number of filters that need to be applied. We do this
        # here instead of inside Test because it makes the running logic for
        # Test simpler.
        tests = []
        num_tests = [0]
        for test in self._tests:
            # get() lazily reloads the test object when it is scheduled
            # more than once (--runs-per-test / --loop).
            def get():
                num_tests[0] += 1
                if getattr(test, 'should_reload', False):
                    return self._loadtest(test, num_tests[0])
                return test
            if not os.path.exists(test.path):
                result.addSkip(test, "Doesn't exist")
                continue

            # A whitelisted test bypasses the blacklist, retest and keyword
            # filters below.
            if not (self._whitelist and test.bname in self._whitelist):
                if self._blacklist and test.bname in self._blacklist:
                    result.addSkip(test, 'blacklisted')
                    continue

                if self._retest and not os.path.exists(test.errpath):
                    result.addIgnore(test, 'not retesting')
                    continue

                if self._keywords:
                    # Match keywords against the test body and its name.
                    with open(test.path, 'rb') as f:
                        t = f.read().lower() + test.bname.lower()
                    ignored = False
                    for k in self._keywords.lower().split():
                        if k not in t:
                            result.addIgnore(test, "doesn't match keyword")
                            ignored = True
                            break

                    if ignored:
                        continue
            for _ in xrange(self._runs_per_test):
                tests.append(get())

        runtests = list(tests)
        done = queue.Queue()
        running = 0

        # One "output channel" per job; used only for --showchannels.
        channels = [""] * self._jobs

        def job(test, result):
            # Claim a free channel for the duration of this test.
            for n, v in enumerate(channels):
                if not v:
                    channel = n
                    break
            else:
                raise ValueError('Could not find output channel')
            channels[channel] = "=" + test.name[5:].split(".")[0]
            try:
                test(result)
                done.put(None)
            except KeyboardInterrupt:
                pass
            except: # re-raises
                done.put(('!', test, 'run-test raised an error, see traceback'))
                raise
            finally:
                try:
                    channels[channel] = ''
                except IndexError:
                    pass

        def stat():
            # Background thread: repeatedly draw one character per channel
            # until the main loop empties `channels`.
            count = 0
            while channels:
                d = '\n%03s ' % count
                for n, v in enumerate(channels):
                    if v:
                        d += v[0]
                        channels[n] = v[1:] or '.'
                    else:
                        d += ' '
                    d += ' '
                with iolock:
                    sys.stdout.write(d + ' ')
                    sys.stdout.flush()
                for x in xrange(10):
                    if channels:
                        time.sleep(.1)
                count += 1

        stoppedearly = False

        if self._showchannels:
            statthread = threading.Thread(target=stat, name="stat")
            statthread.start()

        try:
            while tests or running:
                # Reap a finished job when all workers are busy or nothing
                # is left to start; the 1-second timeout keeps us responsive.
                if not done.empty() or running == self._jobs or not tests:
                    try:
                        done.get(True, 1)
                        running -= 1
                        if result and result.shouldStop:
                            stoppedearly = True
                            break
                    except queue.Empty:
                        continue
                if tests and not running == self._jobs:
                    test = tests.pop(0)
                    if self._loop:
                        # --loop: requeue the test (reloaded if necessary).
                        if getattr(test, 'should_reload', False):
                            num_tests[0] += 1
                            tests.append(
                                self._loadtest(test, num_tests[0]))
                        else:
                            tests.append(test)
                    if self._jobs == 1:
                        job(test, result)
                    else:
                        t = threading.Thread(target=job, name=test.name,
                                             args=(test, result))
                        t.start()
                    running += 1

            # If we stop early we still need to wait on started tests to
            # finish. Otherwise, there is a race between the test completing
            # and the test's cleanup code running. This could result in the
            # test reporting incorrect.
            if stoppedearly:
                while running:
                    try:
                        done.get(True, 1)
                        running -= 1
                    except queue.Empty:
                        continue
        except KeyboardInterrupt:
            for test in runtests:
                test.abort()

        # Emptying `channels` tells the stat thread to exit.
        channels = []

        return result
2027 2028
2028 2029 # Save the most recent 5 wall-clock runtimes of each test to a
2029 2030 # human-readable text file named .testtimes. Tests are sorted
2030 2031 # alphabetically, while times for each test are listed from oldest to
2031 2032 # newest.
2032 2033
def loadtimes(outputdir):
    """Load per-test wall-clock runtimes from ``.testtimes`` in *outputdir*.

    Returns a list of ``(testname, [runtime, ...])`` pairs, times ordered
    oldest to newest. A missing file yields an empty list. Lines that do
    not match the expected ``name time [time ...]`` shape (e.g. blank or
    truncated lines) are skipped instead of crashing with an
    AttributeError on ``m.group()``.
    """
    times = []
    try:
        with open(os.path.join(outputdir, b'.testtimes')) as fp:
            for line in fp:
                m = re.match('(.*?) ([0-9. ]+)', line)
                if m is None:
                    # Corrupt or hand-edited line; ignore it.
                    continue
                times.append((m.group(1),
                              [float(t) for t in m.group(2).split()]))
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
    return times
2045 2046
def savetimes(outputdir, result):
    """Merge this run's wall-clock times into ``.testtimes`` in *outputdir*.

    At most the five most recent runtimes per test are kept; skipped tests
    keep their previous history. The file is rewritten via a temp file and
    an atomic rename.
    """
    maxruns = 5
    saved = dict(loadtimes(outputdir))
    skipped = set(str(entry[0]) for entry in result.skipped)
    for tdata in result.times:
        test, real = tdata[0], tdata[3]
        if test in skipped:
            continue
        history = saved.setdefault(test, [])
        history.append(real)
        # Trim to the most recent runs only.
        history[:] = history[-maxruns:]

    fd, tmpname = tempfile.mkstemp(prefix=b'.testtimes',
                                   dir=outputdir, text=True)
    with os.fdopen(fd, 'w') as fp:
        for name, history in sorted(saved.items()):
            samples = ' '.join('%.3f' % (t,) for t in history)
            fp.write('%s %s\n' % (name, samples))
    timepath = os.path.join(outputdir, b'.testtimes')
    try:
        os.unlink(timepath)
    except OSError:
        pass
    try:
        os.rename(tmpname, timepath)
    except OSError:
        pass
2071 2072
class TextTestRunner(unittest.TextTestRunner):
    """Custom unittest test runner that uses appropriate settings."""

    def __init__(self, runner, *args, **kwargs):
        super(TextTestRunner, self).__init__(*args, **kwargs)

        # The owning TestRunner; provides options and output directories.
        self._runner = runner

    def listtests(self, test):
        """Print test names instead of running them (--list-tests).

        Every test is reported as a success so the xunit/json reports
        still enumerate them.
        """
        result = TestResult(self._runner.options, self.stream,
                            self.descriptions, 0)
        test = sorted(test, key=lambda t: t.name)
        for t in test:
            print(t.name)
            result.addSuccess(t)

        if self._runner.options.xunit:
            with open(self._runner.options.xunit, "wb") as xuf:
                self._writexunit(result, xuf)

        if self._runner.options.json:
            jsonpath = os.path.join(self._runner._outputdir, b'report.json')
            with open(jsonpath, 'w') as fp:
                self._writejson(result, fp)

        return result

    def run(self, test):
        """Execute *test* (normally a TestSuite) and report the outcome.

        Writes the human-readable summary, optional xunit/json reports,
        saved runtimes, and — when requested — bisection and exception
        reports. Returns the populated TestResult.
        """
        result = TestResult(self._runner.options, self.stream,
                            self.descriptions, self.verbosity)

        test(result)

        failed = len(result.failures)
        skipped = len(result.skipped)
        ignored = len(result.ignored)

        # Hold iolock for the whole report so late worker output cannot
        # interleave with it.
        with iolock:
            self.stream.writeln('')

            if not self._runner.options.noskips:
                for test, msg in result.skipped:
                    formatted = 'Skipped %s: %s\n' % (test.name, msg)
                    self.stream.write(highlightmsg(formatted, result.color))
            for test, msg in result.failures:
                formatted = 'Failed %s: %s\n' % (test.name, msg)
                self.stream.write(highlightmsg(formatted, result.color))
            for test, msg in result.errors:
                self.stream.writeln('Errored %s: %s' % (test.name, msg))

            if self._runner.options.xunit:
                with open(self._runner.options.xunit, "wb") as xuf:
                    self._writexunit(result, xuf)

            if self._runner.options.json:
                jsonpath = os.path.join(self._runner._outputdir, b'report.json')
                with open(jsonpath, 'w') as fp:
                    self._writejson(result, fp)

            self._runner._checkhglib('Tested')

            savetimes(self._runner._outputdir, result)

            if failed and self._runner.options.known_good_rev:
                self._bisecttests(t for t, m in result.failures)
            self.stream.writeln(
                '# Ran %d tests, %d skipped, %d failed.'
                % (result.testsRun, skipped + ignored, failed))
            if failed:
                self.stream.writeln('python hash seed: %s' %
                                    os.environ['PYTHONHASHSEED'])
            if self._runner.options.time:
                self.printtimes(result.times)

            if self._runner.options.exceptions:
                exceptions = aggregateexceptions(
                    os.path.join(self._runner._outputdir, b'exceptions'))

                self.stream.writeln('Exceptions Report:')
                self.stream.writeln('%d total from %d frames' %
                                    (exceptions['total'],
                                     len(exceptions['exceptioncounts'])))
                combined = exceptions['combined']
                for key in sorted(combined, key=combined.get, reverse=True):
                    frame, line, exc = key
                    totalcount, testcount, leastcount, leasttest = combined[key]

                    self.stream.writeln('%d (%d tests)\t%s: %s (%s - %d total)'
                                        % (totalcount,
                                           testcount,
                                           frame, exc,
                                           leasttest, leastcount))

            self.stream.flush()

        return result

    def _bisecttests(self, tests):
        """Run ``hg bisect`` for each failed test to find the culprit rev.

        Requires --known-good-rev; results are written to the stream.
        """
        bisectcmd = ['hg', 'bisect']
        bisectrepo = self._runner.options.bisect_repo
        if bisectrepo:
            bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
        def pread(args):
            # Run a command with HGPLAIN set and return its combined output.
            env = os.environ.copy()
            env['HGPLAIN'] = '1'
            p = subprocess.Popen(args, stderr=subprocess.STDOUT,
                                 stdout=subprocess.PIPE, env=env)
            data = p.stdout.read()
            p.wait()
            return data
        for test in tests:
            pread(bisectcmd + ['--reset']),
            pread(bisectcmd + ['--bad', '.'])
            pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
            # TODO: we probably need to forward more options
            # that alter hg's behavior inside the tests.
            opts = ''
            withhg = self._runner.options.with_hg
            if withhg:
                opts += ' --with-hg=%s ' % shellquote(_strpath(withhg))
            rtc = '%s %s %s %s' % (sys.executable, sys.argv[0], opts,
                                   test)
            data = pread(bisectcmd + ['--command', rtc])
            # Parse the revision out of bisect's final report.
            m = re.search(
                (br'\nThe first (?P<goodbad>bad|good) revision '
                 br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
                 br'summary: +(?P<summary>[^\n]+)\n'),
                data, (re.MULTILINE | re.DOTALL))
            if m is None:
                self.stream.writeln(
                    'Failed to identify failure point for %s' % test)
                continue
            dat = m.groupdict()
            verb = 'broken' if dat['goodbad'] == 'bad' else 'fixed'
            self.stream.writeln(
                '%s %s by %s (%s)' % (
                    test, verb, dat['node'], dat['summary']))

    def printtimes(self, times):
        """Print the --time report, slowest (by real time) last."""
        # iolock held by run
        self.stream.writeln('# Producing time report')
        times.sort(key=lambda t: (t[3]))
        cols = '%7.3f %7.3f %7.3f %7.3f %7.3f   %s'
        self.stream.writeln('%-7s %-7s %-7s %-7s %-7s   %s' %
                            ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
        for tdata in times:
            test = tdata[0]
            cuser, csys, real, start, end = tdata[1:6]
            self.stream.writeln(cols % (start, end, cuser, csys, real, test))

    @staticmethod
    def _writexunit(result, outf):
        """Write *result* to *outf* as xunit/JUnit XML."""
        # See http://llg.cubic.org/docs/junit/ for a reference.
        timesd = dict((t[0], t[3]) for t in result.times)
        doc = minidom.Document()
        s = doc.createElement('testsuite')
        s.setAttribute('name', 'run-tests')
        s.setAttribute('tests', str(result.testsRun))
        s.setAttribute('errors', "0") # TODO
        s.setAttribute('failures', str(len(result.failures)))
        s.setAttribute('skipped', str(len(result.skipped) +
                                      len(result.ignored)))
        doc.appendChild(s)
        for tc in result.successes:
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            tctime = timesd.get(tc.name)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            s.appendChild(t)
        for tc, err in sorted(result.faildata.items()):
            t = doc.createElement('testcase')
            t.setAttribute('name', tc)
            tctime = timesd.get(tc)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            # createCDATASection expects a unicode or it will
            # convert using default conversion rules, which will
            # fail if string isn't ASCII.
            err = cdatasafe(err).decode('utf-8', 'replace')
            cd = doc.createCDATASection(err)
            # Use 'failure' here instead of 'error' to match errors = 0,
            # failures = len(result.failures) in the testsuite element.
            failelem = doc.createElement('failure')
            failelem.setAttribute('message', 'output changed')
            failelem.setAttribute('type', 'output-mismatch')
            failelem.appendChild(cd)
            t.appendChild(failelem)
            s.appendChild(t)
        for tc, message in result.skipped:
            # According to the schema, 'skipped' has no attributes. So store
            # the skip message as a text node instead.
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            binmessage = message.encode('utf-8')
            message = cdatasafe(binmessage).decode('utf-8', 'replace')
            cd = doc.createCDATASection(message)
            skipelem = doc.createElement('skipped')
            skipelem.appendChild(cd)
            t.appendChild(skipelem)
        outf.write(doc.toprettyxml(indent='  ', encoding='utf-8'))

    @staticmethod
    def _writejson(result, outf):
        """Write *result* to *outf* as a ``testreport =`` JSON blob."""
        timesd = {}
        for tdata in result.times:
            test = tdata[0]
            timesd[test] = tdata[1:]

        outcome = {}
        groups = [('success', ((tc, None)
                   for tc in result.successes)),
                  ('failure', result.failures),
                  ('skip', result.skipped)]
        for res, testcases in groups:
            for tc, __ in testcases:
                if tc.name in timesd:
                    diff = result.faildata.get(tc.name, b'')
                    try:
                        diff = diff.decode('unicode_escape')
                    except UnicodeDecodeError as e:
                        diff = '%r decoding diff, sorry' % e
                    tres = {'result': res,
                            'time': ('%0.3f' % timesd[tc.name][2]),
                            'cuser': ('%0.3f' % timesd[tc.name][0]),
                            'csys': ('%0.3f' % timesd[tc.name][1]),
                            'start': ('%0.3f' % timesd[tc.name][3]),
                            'end': ('%0.3f' % timesd[tc.name][4]),
                            'diff': diff,
                            }
                else:
                    # blacklisted test
                    tres = {'result': res}

                outcome[tc.name] = tres
        jsonout = json.dumps(outcome, sort_keys=True, indent=4,
                             separators=(',', ': '))
        outf.writelines(("testreport =", jsonout))
2311 2312
def sorttests(testdescs, previoustimes, shuffle=False):
    """Sort *testdescs* in place, scheduling slow tests first.

    With *shuffle* the order is randomized instead. When *previoustimes*
    (a path -> runtimes mapping) is available, the most recent recorded
    runtime is the estimate; otherwise file size, scaled by per-keyword
    heuristics, is used as a proxy.
    """
    if shuffle:
        random.shuffle(testdescs)
        return

    if previoustimes:
        def sortkey(f):
            f = f['path']
            if f in previoustimes:
                # Use most recent time as estimate
                return -previoustimes[f][-1]
            # Default to a rather arbitrary value of 1 second for new tests
            return -1.0
    else:
        # keywords for slow tests
        slow = {b'svn': 10,
                b'cvs': 10,
                b'hghave': 10,
                b'largefiles-update': 10,
                b'run-tests': 10,
                b'corruption': 10,
                b'race': 10,
                b'i18n': 10,
                b'check': 100,
                b'gendoc': 100,
                b'contrib-perf': 200,
                }
        perf = {}

        def sortkey(f):
            # run largest tests first, as they tend to take the longest
            f = f['path']
            if f in perf:
                return perf[f]
            try:
                val = -os.stat(f).st_size
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
                perf[f] = -1e9  # file does not exist, tell early
                return -1e9
            for kw, mul in slow.items():
                if kw in f:
                    val *= mul
            if f.endswith(b'.py'):
                val /= 10.0
            perf[f] = val / 1000.0
            return perf[f]

    testdescs.sort(key=sortkey)
2365 2366
class TestRunner(object):
    """Holds context for executing tests.

    Tests rely on a lot of state. This object holds it for them.
    """

    # Programs required to run tests.
    # _checktools() verifies these are reachable before any test starts.
    REQUIREDTOOLS = [
        b'diff',
        b'grep',
        b'unzip',
        b'gunzip',
        b'bunzip2',
        b'sed',
    ]

    # Maps file extensions to test class.
    # Used to instantiate the right Test subclass for each test file.
    TESTTYPES = [
        (b'.py', PythonTest),
        (b'.t', TTest),
    ]
2387 2388
2388 2389 def __init__(self):
2389 2390 self.options = None
2390 2391 self._hgroot = None
2391 2392 self._testdir = None
2392 2393 self._outputdir = None
2393 2394 self._hgtmp = None
2394 2395 self._installdir = None
2395 2396 self._bindir = None
2396 2397 self._tmpbinddir = None
2397 2398 self._pythondir = None
2398 2399 self._coveragefile = None
2399 2400 self._createdfiles = []
2400 2401 self._hgcommand = None
2401 2402 self._hgpath = None
2402 2403 self._portoffset = 0
2403 2404 self._ports = {}
2404 2405
2405 2406 def run(self, args, parser=None):
2406 2407 """Run the test suite."""
2407 2408 oldmask = os.umask(0o22)
2408 2409 try:
2409 2410 parser = parser or getparser()
2410 2411 options = parseargs(args, parser)
2411 2412 tests = [_bytespath(a) for a in options.tests]
2412 2413 if options.test_list is not None:
2413 2414 for listfile in options.test_list:
2414 2415 with open(listfile, 'rb') as f:
2415 2416 tests.extend(t for t in f.read().splitlines() if t)
2416 2417 self.options = options
2417 2418
2418 2419 self._checktools()
2419 2420 testdescs = self.findtests(tests)
2420 2421 if options.profile_runner:
2421 2422 import statprof
2422 2423 statprof.start()
2423 2424 result = self._run(testdescs)
2424 2425 if options.profile_runner:
2425 2426 statprof.stop()
2426 2427 statprof.display()
2427 2428 return result
2428 2429
2429 2430 finally:
2430 2431 os.umask(oldmask)
2431 2432
    def _run(self, testdescs):
        """Set up the test environment and execute *testdescs*.

        Establishes TESTDIR/HGTMP/BINDIR/PATH/PYTHONPATH and the various
        HGTEST_* environment variables, then delegates to _runtests().
        Returns the exit code (0 on success).
        """
        self._testdir = osenvironb[b'TESTDIR'] = getattr(
            os, 'getcwdb', os.getcwd)()
        # assume all tests in same folder for now
        if testdescs:
            pathname = os.path.dirname(testdescs[0]['path'])
            if pathname:
                osenvironb[b'TESTDIR'] = os.path.join(osenvironb[b'TESTDIR'],
                                                      pathname)
        if self.options.outputdir:
            self._outputdir = canonpath(_bytespath(self.options.outputdir))
        else:
            self._outputdir = self._testdir
            if testdescs and pathname:
                self._outputdir = os.path.join(self._outputdir, pathname)

        # Order the tests: by recorded runtime when --order-by-runtime,
        # by heuristics (or randomly) otherwise.
        previoustimes = {}
        if self.options.order_by_runtime:
            previoustimes = dict(loadtimes(self._outputdir))
        sorttests(testdescs, previoustimes, shuffle=self.options.random)

        if 'PYTHONHASHSEED' not in os.environ:
            # use a random python hash seed all the time
            # we do the randomness ourself to know what seed is used
            os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))

        if self.options.tmpdir:
            self.options.keep_tmpdir = True
            tmpdir = _bytespath(self.options.tmpdir)
            if os.path.exists(tmpdir):
                # Meaning of tmpdir has changed since 1.3: we used to create
                # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
                # tmpdir already exists.
                print("error: temp dir %r already exists" % tmpdir)
                return 1

            os.makedirs(tmpdir)
        else:
            d = None
            if os.name == 'nt':
                # without this, we get the default temp dir location, but
                # in all lowercase, which causes troubles with paths (issue3490)
                d = osenvironb.get(b'TMP', None)
            tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)

        self._hgtmp = osenvironb[b'HGTMP'] = (
            os.path.realpath(tmpdir))

        if self.options.with_hg:
            # Use an existing hg binary instead of installing a fresh one.
            self._installdir = None
            whg = self.options.with_hg
            self._bindir = os.path.dirname(os.path.realpath(whg))
            assert isinstance(self._bindir, bytes)
            self._hgcommand = os.path.basename(whg)
            self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
            os.makedirs(self._tmpbindir)

            normbin = os.path.normpath(os.path.abspath(whg))
            normbin = normbin.replace(os.sep.encode('ascii'), b'/')

            # Other Python scripts in the test harness need to
            # `import mercurial`. If `hg` is a Python script, we assume
            # the Mercurial modules are relative to its path and tell the tests
            # to load Python modules from its directory.
            with open(whg, 'rb') as fh:
                initial = fh.read(1024)

            if re.match(b'#!.*python', initial):
                self._pythondir = self._bindir
            # If it looks like our in-repo Rust binary, use the source root.
            # This is a bit hacky. But rhg is still not supported outside the
            # source directory. So until it is, do the simple thing.
            elif re.search(b'/rust/target/[^/]+/hg', normbin):
                self._pythondir = os.path.dirname(self._testdir)
            # Fall back to the legacy behavior.
            else:
                self._pythondir = self._bindir

        else:
            # Temp-install mode: hg is installed into HGTMP later.
            self._installdir = os.path.join(self._hgtmp, b"install")
            self._bindir = os.path.join(self._installdir, b"bin")
            self._hgcommand = b'hg'
            self._tmpbindir = self._bindir
            self._pythondir = os.path.join(self._installdir, b"lib", b"python")

        # set CHGHG, then replace "hg" command by "chg"
        chgbindir = self._bindir
        if self.options.chg or self.options.with_chg:
            osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
        else:
            osenvironb.pop(b'CHGHG', None) # drop flag for hghave
        if self.options.chg:
            self._hgcommand = b'chg'
        elif self.options.with_chg:
            chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
            self._hgcommand = os.path.basename(self.options.with_chg)

        osenvironb[b"BINDIR"] = self._bindir
        osenvironb[b"PYTHON"] = PYTHON

        if self.options.with_python3:
            osenvironb[b'PYTHON3'] = self.options.with_python3

        # Build the PATH the tests will see: bin dirs first, then the
        # harness directory, then the inherited PATH.
        fileb = _bytespath(__file__)
        runtestdir = os.path.abspath(os.path.dirname(fileb))
        osenvironb[b'RUNTESTDIR'] = runtestdir
        if PYTHON3:
            sepb = _bytespath(os.pathsep)
        else:
            sepb = os.pathsep
        path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
        if os.path.islink(__file__):
            # test helper will likely be at the end of the symlink
            realfile = os.path.realpath(fileb)
            realdir = os.path.abspath(os.path.dirname(realfile))
            path.insert(2, realdir)
        if chgbindir != self._bindir:
            path.insert(1, chgbindir)
        if self._testdir != runtestdir:
            path = [self._testdir] + path
        if self._tmpbindir != self._bindir:
            path = [self._tmpbindir] + path
        osenvironb[b"PATH"] = sepb.join(path)

        # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
        # can run .../tests/run-tests.py test-foo where test-foo
        # adds an extension to HGRC. Also include run-test.py directory to
        # import modules like heredoctest.
        pypath = [self._pythondir, self._testdir, runtestdir]
        # We have to augment PYTHONPATH, rather than simply replacing
        # it, in case external libraries are only available via current
        # PYTHONPATH. (In particular, the Subversion bindings on OS X
        # are in /opt/subversion.)
        oldpypath = osenvironb.get(IMPL_PATH)
        if oldpypath:
            pypath.append(oldpypath)
        osenvironb[IMPL_PATH] = sepb.join(pypath)

        if self.options.pure:
            os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
            os.environ["HGMODULEPOLICY"] = "py"

        if self.options.allow_slow_tests:
            os.environ["HGTEST_SLOW"] = "slow"
        elif 'HGTEST_SLOW' in os.environ:
            del os.environ['HGTEST_SLOW']

        self._coveragefile = os.path.join(self._testdir, b'.coverage')

        if self.options.exceptions:
            # Set up a clean directory where the logexceptions extension
            # will drop per-exception report files.
            exceptionsdir = os.path.join(self._outputdir, b'exceptions')
            try:
                os.makedirs(exceptionsdir)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise

            # Remove all existing exception reports.
            for f in os.listdir(exceptionsdir):
                os.unlink(os.path.join(exceptionsdir, f))

            osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir
            logexceptions = os.path.join(self._testdir, b'logexceptions.py')
            self.options.extra_config_opt.append(
                'extensions.logexceptions=%s' % logexceptions.decode('utf-8'))

        vlog("# Using TESTDIR", self._testdir)
        vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
        vlog("# Using HGTMP", self._hgtmp)
        vlog("# Using PATH", os.environ["PATH"])
        vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
        vlog("# Writing to directory", self._outputdir)

        try:
            return self._runtests(testdescs) or 0
        finally:
            # Give straggling child processes a moment before cleanup.
            time.sleep(.1)
            self._cleanup()
2609 2610
2610 2611 def findtests(self, args):
2611 2612 """Finds possible test files from arguments.
2612 2613
2613 2614 If you wish to inject custom tests into the test harness, this would
2614 2615 be a good function to monkeypatch or override in a derived class.
2615 2616 """
2616 2617 if not args:
2617 2618 if self.options.changed:
2618 2619 proc = Popen4('hg st --rev "%s" -man0 .' %
2619 2620 self.options.changed, None, 0)
2620 2621 stdout, stderr = proc.communicate()
2621 2622 args = stdout.strip(b'\0').split(b'\0')
2622 2623 else:
2623 2624 args = os.listdir(b'.')
2624 2625
2625 2626 expanded_args = []
2626 2627 for arg in args:
2627 2628 if os.path.isdir(arg):
2628 2629 if not arg.endswith(b'/'):
2629 2630 arg += b'/'
2630 2631 expanded_args.extend([arg + a for a in os.listdir(arg)])
2631 2632 else:
2632 2633 expanded_args.append(arg)
2633 2634 args = expanded_args
2634 2635
2635 2636 tests = []
2636 2637 for t in args:
2637 2638 if not (os.path.basename(t).startswith(b'test-')
2638 2639 and (t.endswith(b'.py') or t.endswith(b'.t'))):
2639 2640 continue
2640 2641 if t.endswith(b'.t'):
2641 2642 # .t file may contain multiple test cases
2642 2643 cases = sorted(parsettestcases(t))
2643 2644 if cases:
2644 2645 tests += [{'path': t, 'case': c} for c in sorted(cases)]
2645 2646 else:
2646 2647 tests.append({'path': t})
2647 2648 else:
2648 2649 tests.append({'path': t})
2649 2650 return tests
2650 2651
    def _runtests(self, testdescs):
        """Build and execute the test suite for ``testdescs``.

        Returns 1 when any test failed (or on keyboard interrupt),
        otherwise falls through and returns None.
        """
        def _reloadtest(test, i):
            # convert a test back to its description dict
            desc = {'path': test.path}
            case = getattr(test, '_case', None)
            if case:
                desc['case'] = case
            return self._gettest(desc, i)

        try:
            if self.options.restart:
                # --restart: skip tests that ran cleanly last time, i.e.
                # drop leading descriptions until one with a leftover .err
                # file is found.
                orig = list(testdescs)
                while testdescs:
                    desc = testdescs[0]
                    # desc['path'] is a relative path
                    if 'case' in desc:
                        errpath = b'%s.%s.err' % (desc['path'], desc['case'])
                    else:
                        errpath = b'%s.err' % desc['path']
                    errpath = os.path.join(self._outputdir, errpath)
                    if os.path.exists(errpath):
                        break
                    testdescs.pop(0)
                if not testdescs:
                    # No previous failure found: restart from scratch.
                    print("running all tests")
                    testdescs = orig

            tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]

            failed = False
            kws = self.options.keywords
            if kws is not None and PYTHON3:
                kws = kws.encode('utf-8')

            suite = TestSuite(self._testdir,
                              jobs=self.options.jobs,
                              whitelist=self.options.whitelisted,
                              blacklist=self.options.blacklist,
                              retest=self.options.retest,
                              keywords=kws,
                              loop=self.options.loop,
                              runs_per_test=self.options.runs_per_test,
                              showchannels=self.options.showchannels,
                              tests=tests, loadtest=_reloadtest)
            verbosity = 1
            if self.options.verbose:
                verbosity = 2
            runner = TextTestRunner(self, verbosity=verbosity)

            if self.options.list_tests:
                result = runner.listtests(suite)
            else:
                # A temporary install dir implies we must install hg first;
                # otherwise just make sure tests see the right Python.
                if self._installdir:
                    self._installhg()
                    self._checkhglib("Testing")
                else:
                    self._usecorrectpython()
                if self.options.chg:
                    assert self._installdir
                    self._installchg()

                result = runner.run(suite)

                if result.failures:
                    failed = True

                if self.options.anycoverage:
                    self._outputcoverage()
        except KeyboardInterrupt:
            failed = True
            print("\ninterrupted!")

        if failed:
            return 1
2725 2726
2726 2727 def _getport(self, count):
2727 2728 port = self._ports.get(count) # do we have a cached entry?
2728 2729 if port is None:
2729 2730 portneeded = 3
2730 2731 # above 100 tries we just give up and let test reports failure
2731 2732 for tries in xrange(100):
2732 2733 allfree = True
2733 2734 port = self.options.port + self._portoffset
2734 2735 for idx in xrange(portneeded):
2735 2736 if not checkportisavailable(port + idx):
2736 2737 allfree = False
2737 2738 break
2738 2739 self._portoffset += portneeded
2739 2740 if allfree:
2740 2741 break
2741 2742 self._ports[count] = port
2742 2743 return port
2743 2744
    def _gettest(self, testdesc, count):
        """Obtain a Test by looking at its filename.

        Returns a Test instance. The Test may not be runnable if it doesn't
        map to a known type.
        """
        path = testdesc['path']
        lctest = path.lower()
        testcls = Test

        # Pick the most specific test class for the file extension; fall
        # back to the generic Test class when nothing matches.
        for ext, cls in self.TESTTYPES:
            if lctest.endswith(ext):
                testcls = cls
                break

        refpath = os.path.join(self._testdir, path)
        # Each test slot gets its own scratch directory under HGTMP.
        tmpdir = os.path.join(self._hgtmp, b'child%d' % count)

        # extra keyword parameters. 'case' is used by .t tests
        kwds = dict((k, testdesc[k]) for k in ['case'] if k in testdesc)

        t = testcls(refpath, self._outputdir, tmpdir,
                    keeptmpdir=self.options.keep_tmpdir,
                    debug=self.options.debug,
                    first=self.options.first,
                    timeout=self.options.timeout,
                    startport=self._getport(count),
                    extraconfigopts=self.options.extra_config_opt,
                    py3kwarnings=self.options.py3k_warnings,
                    shell=self.options.shell,
                    hgcommand=self._hgcommand,
                    usechg=bool(self.options.with_chg or self.options.chg),
                    useipv6=useipv6, **kwds)
        # NOTE(review): presumably consumed by the loadtest/_reloadtest
        # plumbing in _runtests so the runner can re-create this test —
        # confirm against TestSuite.
        t.should_reload = True
        return t
2779 2780
2780 2781 def _cleanup(self):
2781 2782 """Clean up state from this test invocation."""
2782 2783 if self.options.keep_tmpdir:
2783 2784 return
2784 2785
2785 2786 vlog("# Cleaning up HGTMP", self._hgtmp)
2786 2787 shutil.rmtree(self._hgtmp, True)
2787 2788 for f in self._createdfiles:
2788 2789 try:
2789 2790 os.remove(f)
2790 2791 except OSError:
2791 2792 pass
2792 2793
    def _usecorrectpython(self):
        """Configure the environment to use the appropriate Python in tests."""
        # Tests must use the same interpreter as us or bad things will happen.
        pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
        if getattr(os, 'symlink', None):
            # Preferred strategy: drop a 'python' symlink to our interpreter
            # into the test bin directory.
            vlog("# Making python executable in test path a symlink to '%s'" %
                 sys.executable)
            mypython = os.path.join(self._tmpbindir, pyexename)
            try:
                # If the symlink already points at us, nothing to do.
                if os.readlink(mypython) == sys.executable:
                    return
                os.unlink(mypython)
            except OSError as err:
                # ENOENT just means the link didn't exist yet.
                if err.errno != errno.ENOENT:
                    raise
            if self._findprogram(pyexename) != sys.executable:
                try:
                    os.symlink(sys.executable, mypython)
                    self._createdfiles.append(mypython)
                except OSError as err:
                    # child processes may race, which is harmless
                    if err.errno != errno.EEXIST:
                        raise
        else:
            # No symlink support: rewrite PATH so the directory containing
            # our interpreter is searched first.
            exedir, exename = os.path.split(sys.executable)
            vlog("# Modifying search path to find %s as %s in '%s'" %
                 (exename, pyexename, exedir))
            path = os.environ['PATH'].split(os.pathsep)
            while exedir in path:
                path.remove(exedir)
            os.environ['PATH'] = os.pathsep.join([exedir] + path)
            if not self._findprogram(pyexename):
                print("WARNING: Cannot find %s in search path" % pyexename)
2826 2827
    def _installhg(self):
        """Install hg into the test environment.

        This will also configure hg with the appropriate testing settings.
        """
        vlog("# Performing temporary installation of HG")
        installerrs = os.path.join(self._hgtmp, b"install.err")
        compiler = ''
        if self.options.compiler:
            compiler = '--compiler ' + self.options.compiler
        if self.options.pure:
            pure = b"--pure"
        else:
            pure = b""

        # Run installer in hg root
        script = os.path.realpath(sys.argv[0])
        exe = sys.executable
        if PYTHON3:
            compiler = _bytespath(compiler)
            script = _bytespath(script)
            exe = _bytespath(exe)
        hgroot = os.path.dirname(os.path.dirname(script))
        self._hgroot = hgroot
        os.chdir(hgroot)
        nohome = b'--home=""'
        if os.name == 'nt':
            # The --home="" trick works only on OS where os.sep == '/'
            # because of a distutils convert_path() fast-path. Avoid it at
            # least on Windows for now, deal with .pydistutils.cfg bugs
            # when they happen.
            nohome = b''
        # Build and install into the private prefix, capturing all output
        # in install.err for replay on failure.
        cmd = (b'%(exe)s setup.py %(pure)s clean --all'
               b' build %(compiler)s --build-base="%(base)s"'
               b' install --force --prefix="%(prefix)s"'
               b' --install-lib="%(libdir)s"'
               b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
               % {b'exe': exe, b'pure': pure,
                  b'compiler': compiler,
                  b'base': os.path.join(self._hgtmp, b"build"),
                  b'prefix': self._installdir, b'libdir': self._pythondir,
                  b'bindir': self._bindir,
                  b'nohome': nohome, b'logfile': installerrs})

        # setuptools requires install directories to exist.
        def makedirs(p):
            try:
                os.makedirs(p)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise
        makedirs(self._pythondir)
        makedirs(self._bindir)

        vlog("# Running", cmd)
        if os.system(cmd) == 0:
            if not self.options.verbose:
                try:
                    os.remove(installerrs)
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise
        else:
            # Installation failed: replay the captured log, then bail out.
            with open(installerrs, 'rb') as f:
                for line in f:
                    if PYTHON3:
                        sys.stdout.buffer.write(line)
                    else:
                        sys.stdout.write(line)
            sys.exit(1)
        os.chdir(self._testdir)

        self._usecorrectpython()

        if self.options.py3k_warnings and not self.options.anycoverage:
            vlog("# Updating hg command to enable Py3k Warnings switch")
            # Append ' -3' to the shebang line of the installed hg script.
            with open(os.path.join(self._bindir, 'hg'), 'rb') as f:
                lines = [line.rstrip() for line in f]
                lines[0] += ' -3'
            with open(os.path.join(self._bindir, 'hg'), 'wb') as f:
                for line in lines:
                    f.write(line + '\n')

        hgbat = os.path.join(self._bindir, b'hg.bat')
        if os.path.isfile(hgbat):
            # hg.bat expects to be put in bin/scripts while run-tests.py
            # installation layout put it in bin/ directly. Fix it
            with open(hgbat, 'rb') as f:
                data = f.read()
            if b'"%~dp0..\python" "%~dp0hg" %*' in data:
                data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
                                    b'"%~dp0python" "%~dp0hg" %*')
                with open(hgbat, 'wb') as f:
                    f.write(data)
            else:
                print('WARNING: cannot fix hg.bat reference to python.exe')

        if self.options.anycoverage:
            # Arrange for every spawned Python to start coverage tracking
            # via sitecustomize + COVERAGE_PROCESS_START.
            custom = os.path.join(self._testdir, 'sitecustomize.py')
            target = os.path.join(self._pythondir, 'sitecustomize.py')
            vlog('# Installing coverage trigger to %s' % target)
            shutil.copyfile(custom, target)
            rc = os.path.join(self._testdir, '.coveragerc')
            vlog('# Installing coverage rc to %s' % rc)
            os.environ['COVERAGE_PROCESS_START'] = rc
            covdir = os.path.join(self._installdir, '..', 'coverage')
            try:
                os.mkdir(covdir)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise

            os.environ['COVERAGE_DIR'] = covdir
2940 2941
2941 2942 def _checkhglib(self, verb):
2942 2943 """Ensure that the 'mercurial' package imported by python is
2943 2944 the one we expect it to be. If not, print a warning to stderr."""
2944 2945 if ((self._bindir == self._pythondir) and
2945 2946 (self._bindir != self._tmpbindir)):
2946 2947 # The pythondir has been inferred from --with-hg flag.
2947 2948 # We cannot expect anything sensible here.
2948 2949 return
2949 2950 expecthg = os.path.join(self._pythondir, b'mercurial')
2950 2951 actualhg = self._gethgpath()
2951 2952 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
2952 2953 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
2953 2954 ' (expected %s)\n'
2954 2955 % (verb, actualhg, expecthg))
2955 2956 def _gethgpath(self):
2956 2957 """Return the path to the mercurial package that is actually found by
2957 2958 the current Python interpreter."""
2958 2959 if self._hgpath is not None:
2959 2960 return self._hgpath
2960 2961
2961 2962 cmd = b'%s -c "import mercurial; print (mercurial.__path__[0])"'
2962 2963 cmd = cmd % PYTHON
2963 2964 if PYTHON3:
2964 2965 cmd = _strpath(cmd)
2965 2966 pipe = os.popen(cmd)
2966 2967 try:
2967 2968 self._hgpath = _bytespath(pipe.read().strip())
2968 2969 finally:
2969 2970 pipe.close()
2970 2971
2971 2972 return self._hgpath
2972 2973
    def _installchg(self):
        """Install chg into the test environment"""
        vlog('# Performing temporary installation of CHG')
        assert os.path.dirname(self._bindir) == self._installdir
        assert self._hgroot, 'must be called after _installhg()'
        cmd = (b'"%(make)s" clean install PREFIX="%(prefix)s"'
               % {b'make': 'make', # TODO: switch by option or environment?
                  b'prefix': self._installdir})
        # chg's Makefile lives in contrib/chg of the source tree.
        cwd = os.path.join(self._hgroot, b'contrib', b'chg')
        vlog("# Running", cmd)
        proc = subprocess.Popen(cmd, shell=True, cwd=cwd,
                                stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        out, _err = proc.communicate()
        if proc.returncode != 0:
            # Surface the captured build output before aborting the run.
            if PYTHON3:
                sys.stdout.buffer.write(out)
            else:
                sys.stdout.write(out)
            sys.exit(1)
2993 2994
    def _outputcoverage(self):
        """Produce code coverage output."""
        import coverage
        coverage = coverage.coverage

        vlog('# Producing coverage report')
        # chdir is the easiest way to get short, relative paths in the
        # output.
        os.chdir(self._hgroot)
        covdir = os.path.join(self._installdir, '..', 'coverage')
        cov = coverage(data_file=os.path.join(covdir, 'cov'))

        # Map install directory paths back to source directory.
        cov.config.paths['srcdir'] = ['.', self._pythondir]

        # Merge the per-process data files written by test children.
        cov.combine()

        # Don't report on the test harness or the installed scripts.
        omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
        cov.report(ignore_errors=True, omit=omit)

        if self.options.htmlcov:
            htmldir = os.path.join(self._outputdir, 'htmlcov')
            cov.html_report(directory=htmldir, omit=omit)
        if self.options.annotate:
            adir = os.path.join(self._outputdir, 'annotated')
            if not os.path.isdir(adir):
                os.mkdir(adir)
            cov.annotate(directory=adir, omit=omit)
3022 3023
3023 3024 def _findprogram(self, program):
3024 3025 """Search PATH for a executable program"""
3025 3026 dpb = _bytespath(os.defpath)
3026 3027 sepb = _bytespath(os.pathsep)
3027 3028 for p in osenvironb.get(b'PATH', dpb).split(sepb):
3028 3029 name = os.path.join(p, program)
3029 3030 if os.name == 'nt' or os.access(name, os.X_OK):
3030 3031 return name
3031 3032 return None
3032 3033
3033 3034 def _checktools(self):
3034 3035 """Ensure tools required to run tests are present."""
3035 3036 for p in self.REQUIREDTOOLS:
3036 3037 if os.name == 'nt' and not p.endswith('.exe'):
3037 3038 p += '.exe'
3038 3039 found = self._findprogram(p)
3039 3040 if found:
3040 3041 vlog("# Found prerequisite", p, "at", found)
3041 3042 else:
3042 3043 print("WARNING: Did not find prerequisite tool: %s " %
3043 3044 p.decode("utf-8"))
3044 3045
def aggregateexceptions(path):
    """Summarize the NUL-delimited exception report files in ``path``.

    Returns a dict with per-exception counts, the affected tests, and for
    each failure the test exhibiting the fewest distinct failures.
    """
    exceptioncounts = collections.Counter()
    testsbyfailure = collections.defaultdict(set)
    failuresbytest = collections.defaultdict(set)

    for name in os.listdir(path):
        with open(os.path.join(path, name), 'rb') as fh:
            fields = fh.read().split(b'\0')
        # Each report holds exactly 5 NUL-separated fields; skip anything
        # malformed.
        if len(fields) != 5:
            continue

        exc, mainframe, hgframe, hgline, testname = (
            f.decode('utf-8') for f in fields)

        failure = (hgframe, hgline, exc)
        exceptioncounts[failure] += 1
        testsbyfailure[failure].add(testname)
        failuresbytest[testname].add(failure)

    # For each distinct failure, find the test having the fewest failures
    # overall (ties broken by sorted test name order).
    leastfailing = {}
    for failure, tests in testsbyfailure.items():
        best = min(sorted(tests), key=lambda t: len(failuresbytest[t]))
        leastfailing[failure] = (len(failuresbytest[best]), best)

    # Create a combined counter so we can sort by total occurrences and
    # impacted tests.
    combined = {
        failure: (exceptioncounts[failure],
                  len(testsbyfailure[failure]),
                  leastfailing[failure][0],
                  leastfailing[failure][1])
        for failure in exceptioncounts
    }

    return {
        'exceptioncounts': exceptioncounts,
        'total': sum(exceptioncounts.values()),
        'combined': combined,
        'leastfailing': leastfailing,
        'byfailure': testsbyfailure,
        'bytest': failuresbytest,
    }
3097 3098
if __name__ == '__main__':
    runner = TestRunner()

    try:
        import msvcrt
        # On Windows, put the standard streams into binary mode so test
        # output is not perturbed by \r\n translation.
        msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
    except ImportError:
        # msvcrt exists only on Windows; nothing to do elsewhere.
        pass

    sys.exit(runner.run(sys.argv[1:]))
@@ -1,1612 +1,1620 b''
1 1 This file tests the behavior of run-tests.py itself.
2 2
3 3 Avoid interference from actual test env:
4 4
5 5 $ . "$TESTDIR/helper-runtests.sh"
6 6
7 7 Smoke test with install
8 8 ============
9 9
10 10 $ run-tests.py $HGTEST_RUN_TESTS_PURE -l
11 11
12 12 # Ran 0 tests, 0 skipped, 0 failed.
13 13
14 14 Define a helper to avoid the install step
15 15 =============
16 16 $ rt()
17 17 > {
18 18 > run-tests.py --with-hg=`which hg` "$@"
19 19 > }
20 20
21 21 error paths
22 22
23 23 #if symlink
24 24 $ ln -s `which true` hg
25 25 $ run-tests.py --with-hg=./hg
26 26 warning: --with-hg should specify an hg script
27 27
28 28 # Ran 0 tests, 0 skipped, 0 failed.
29 29 $ rm hg
30 30 #endif
31 31
32 32 #if execbit
33 33 $ touch hg
34 34 $ run-tests.py --with-hg=./hg
35 35 usage: run-tests.py [options] [tests]
36 36 run-tests.py: error: --with-hg must specify an executable hg script
37 37 [2]
38 38 $ rm hg
39 39 #endif
40 40
41 41 Features for testing optional lines
42 42 ===================================
43 43
44 44 $ cat > hghaveaddon.py <<EOF
45 45 > import hghave
46 46 > @hghave.check("custom", "custom hghave feature")
47 47 > def has_custom():
48 48 > return True
49 49 > @hghave.check("missing", "missing hghave feature")
50 50 > def has_missing():
51 51 > return False
52 52 > EOF
53 53
54 54 an empty test
55 55 =======================
56 56
57 57 $ touch test-empty.t
58 58 $ rt
59 59 .
60 60 # Ran 1 tests, 0 skipped, 0 failed.
61 61 $ rm test-empty.t
62 62
63 63 a successful test
64 64 =======================
65 65
66 66 $ cat > test-success.t << EOF
67 67 > $ echo babar
68 68 > babar
69 69 > $ echo xyzzy
70 70 > dont_print (?)
71 71 > nothing[42]line (re) (?)
72 72 > never*happens (glob) (?)
73 73 > more_nothing (?)
74 74 > xyzzy
75 75 > nor this (?)
76 76 > $ printf 'abc\ndef\nxyz\n'
77 77 > 123 (?)
78 78 > abc
79 79 > def (?)
80 80 > 456 (?)
81 81 > xyz
82 82 > $ printf 'zyx\nwvu\ntsr\n'
83 83 > abc (?)
84 84 > zyx (custom !)
85 85 > wvu
86 86 > no_print (no-custom !)
87 87 > tsr (no-missing !)
88 88 > missing (missing !)
89 89 > EOF
90 90
91 91 $ rt
92 92 .
93 93 # Ran 1 tests, 0 skipped, 0 failed.
94 94
95 95 failing test
96 96 ==================
97 97
98 98 test churn with globs
99 99 $ cat > test-failure.t <<EOF
100 100 > $ echo "bar-baz"; echo "bar-bad"; echo foo
101 101 > bar*bad (glob)
102 102 > bar*baz (glob)
103 103 > | fo (re)
104 104 > EOF
105 105 $ rt test-failure.t
106 106
107 107 --- $TESTTMP/test-failure.t
108 108 +++ $TESTTMP/test-failure.t.err
109 109 @@ -1,4 +1,4 @@
110 110 $ echo "bar-baz"; echo "bar-bad"; echo foo
111 111 + bar*baz (glob)
112 112 bar*bad (glob)
113 113 - bar*baz (glob)
114 114 - | fo (re)
115 115 + foo
116 116
117 117 ERROR: test-failure.t output changed
118 118 !
119 119 Failed test-failure.t: output changed
120 120 # Ran 1 tests, 0 skipped, 1 failed.
121 121 python hash seed: * (glob)
122 122 [1]
123 123
124 124 test diff colorisation
125 125
126 126 #if no-windows pygments
127 127 $ rt test-failure.t --color always
128 128
129 129 \x1b[38;5;124m--- $TESTTMP/test-failure.t\x1b[39m (esc)
130 130 \x1b[38;5;34m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc)
131 131 \x1b[38;5;90;01m@@ -1,4 +1,4 @@\x1b[39;00m (esc)
132 132 $ echo "bar-baz"; echo "bar-bad"; echo foo
133 133 \x1b[38;5;34m+ bar*baz (glob)\x1b[39m (esc)
134 134 bar*bad (glob)
135 135 \x1b[38;5;124m- bar*baz (glob)\x1b[39m (esc)
136 136 \x1b[38;5;124m- | fo (re)\x1b[39m (esc)
137 137 \x1b[38;5;34m+ foo\x1b[39m (esc)
138 138
139 139 \x1b[38;5;88mERROR: \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m output changed\x1b[39m (esc)
140 140 !
141 141 \x1b[38;5;88mFailed \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m: output changed\x1b[39m (esc)
142 142 # Ran 1 tests, 0 skipped, 1 failed.
143 143 python hash seed: * (glob)
144 144 [1]
145 145
146 146 $ rt test-failure.t 2> tmp.log
147 147 [1]
148 148 $ cat tmp.log
149 149
150 150 --- $TESTTMP/test-failure.t
151 151 +++ $TESTTMP/test-failure.t.err
152 152 @@ -1,4 +1,4 @@
153 153 $ echo "bar-baz"; echo "bar-bad"; echo foo
154 154 + bar*baz (glob)
155 155 bar*bad (glob)
156 156 - bar*baz (glob)
157 157 - | fo (re)
158 158 + foo
159 159
160 160 ERROR: test-failure.t output changed
161 161 !
162 162 Failed test-failure.t: output changed
163 163 # Ran 1 tests, 0 skipped, 1 failed.
164 164 python hash seed: * (glob)
165 165 #endif
166 166
167 167 $ cat > test-failure.t << EOF
168 168 > $ true
169 169 > should go away (true !)
170 170 > $ true
171 171 > should stay (false !)
172 172 >
173 173 > Should remove first line, not second or third
174 174 > $ echo 'testing'
175 175 > baz*foo (glob) (true !)
176 176 > foobar*foo (glob) (false !)
177 177 > te*ting (glob) (true !)
178 178 >
179 179 > Should keep first two lines, remove third and last
180 180 > $ echo 'testing'
181 181 > test.ng (re) (true !)
182 182 > foo.ar (re) (false !)
183 183 > b.r (re) (true !)
184 184 > missing (?)
185 185 > awol (true !)
186 186 >
187 187 > The "missing" line should stay, even though awol is dropped
188 188 > $ echo 'testing'
189 189 > test.ng (re) (true !)
190 190 > foo.ar (?)
191 191 > awol
192 192 > missing (?)
193 193 > EOF
194 194 $ rt test-failure.t
195 195
196 196 --- $TESTTMP/test-failure.t
197 197 +++ $TESTTMP/test-failure.t.err
198 198 @@ -1,11 +1,9 @@
199 199 $ true
200 200 - should go away (true !)
201 201 $ true
202 202 should stay (false !)
203 203
204 204 Should remove first line, not second or third
205 205 $ echo 'testing'
206 206 - baz*foo (glob) (true !)
207 207 foobar*foo (glob) (false !)
208 208 te*ting (glob) (true !)
209 209
210 210 foo.ar (re) (false !)
211 211 missing (?)
212 212 @@ -13,13 +11,10 @@
213 213 $ echo 'testing'
214 214 test.ng (re) (true !)
215 215 foo.ar (re) (false !)
216 216 - b.r (re) (true !)
217 217 missing (?)
218 218 - awol (true !)
219 219
220 220 The "missing" line should stay, even though awol is dropped
221 221 $ echo 'testing'
222 222 test.ng (re) (true !)
223 223 foo.ar (?)
224 224 - awol
225 225 missing (?)
226 226
227 227 ERROR: test-failure.t output changed
228 228 !
229 229 Failed test-failure.t: output changed
230 230 # Ran 1 tests, 0 skipped, 1 failed.
231 231 python hash seed: * (glob)
232 232 [1]
233 233
234 234 basic failing test
235 235 $ cat > test-failure.t << EOF
236 236 > $ echo babar
237 237 > rataxes
238 238 > This is a noop statement so that
239 239 > this test is still more bytes than success.
240 240 > pad pad pad pad............................................................
241 241 > pad pad pad pad............................................................
242 242 > pad pad pad pad............................................................
243 243 > pad pad pad pad............................................................
244 244 > pad pad pad pad............................................................
245 245 > pad pad pad pad............................................................
246 246 > EOF
247 247
248 248 >>> fh = open('test-failure-unicode.t', 'wb')
249 249 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
250 250 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
251 251
252 252 $ rt
253 253
254 254 --- $TESTTMP/test-failure.t
255 255 +++ $TESTTMP/test-failure.t.err
256 256 @@ -1,5 +1,5 @@
257 257 $ echo babar
258 258 - rataxes
259 259 + babar
260 260 This is a noop statement so that
261 261 this test is still more bytes than success.
262 262 pad pad pad pad............................................................
263 263
264 264 ERROR: test-failure.t output changed
265 265 !.
266 266 --- $TESTTMP/test-failure-unicode.t
267 267 +++ $TESTTMP/test-failure-unicode.t.err
268 268 @@ -1,2 +1,2 @@
269 269 $ echo babar\xce\xb1 (esc)
270 270 - l\xce\xb5\xce\xb5t (esc)
271 271 + babar\xce\xb1 (esc)
272 272
273 273 ERROR: test-failure-unicode.t output changed
274 274 !
275 275 Failed test-failure.t: output changed
276 276 Failed test-failure-unicode.t: output changed
277 277 # Ran 3 tests, 0 skipped, 2 failed.
278 278 python hash seed: * (glob)
279 279 [1]
280 280
281 281 test --outputdir
282 282 $ mkdir output
283 283 $ rt --outputdir output
284 284
285 285 --- $TESTTMP/test-failure.t
286 286 +++ $TESTTMP/output/test-failure.t.err
287 287 @@ -1,5 +1,5 @@
288 288 $ echo babar
289 289 - rataxes
290 290 + babar
291 291 This is a noop statement so that
292 292 this test is still more bytes than success.
293 293 pad pad pad pad............................................................
294 294
295 295 ERROR: test-failure.t output changed
296 296 !.
297 297 --- $TESTTMP/test-failure-unicode.t
298 298 +++ $TESTTMP/output/test-failure-unicode.t.err
299 299 @@ -1,2 +1,2 @@
300 300 $ echo babar\xce\xb1 (esc)
301 301 - l\xce\xb5\xce\xb5t (esc)
302 302 + babar\xce\xb1 (esc)
303 303
304 304 ERROR: test-failure-unicode.t output changed
305 305 !
306 306 Failed test-failure.t: output changed
307 307 Failed test-failure-unicode.t: output changed
308 308 # Ran 3 tests, 0 skipped, 2 failed.
309 309 python hash seed: * (glob)
310 310 [1]
311 311 $ ls -a output
312 312 .
313 313 ..
314 314 .testtimes
315 315 test-failure-unicode.t.err
316 316 test-failure.t.err
317 317
318 318 test --xunit support
319 319 $ rt --xunit=xunit.xml
320 320
321 321 --- $TESTTMP/test-failure.t
322 322 +++ $TESTTMP/test-failure.t.err
323 323 @@ -1,5 +1,5 @@
324 324 $ echo babar
325 325 - rataxes
326 326 + babar
327 327 This is a noop statement so that
328 328 this test is still more bytes than success.
329 329 pad pad pad pad............................................................
330 330
331 331 ERROR: test-failure.t output changed
332 332 !.
333 333 --- $TESTTMP/test-failure-unicode.t
334 334 +++ $TESTTMP/test-failure-unicode.t.err
335 335 @@ -1,2 +1,2 @@
336 336 $ echo babar\xce\xb1 (esc)
337 337 - l\xce\xb5\xce\xb5t (esc)
338 338 + babar\xce\xb1 (esc)
339 339
340 340 ERROR: test-failure-unicode.t output changed
341 341 !
342 342 Failed test-failure.t: output changed
343 343 Failed test-failure-unicode.t: output changed
344 344 # Ran 3 tests, 0 skipped, 2 failed.
345 345 python hash seed: * (glob)
346 346 [1]
347 347 $ cat xunit.xml
348 348 <?xml version="1.0" encoding="utf-8"?>
349 349 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
350 350 <testcase name="test-success.t" time="*"/> (glob)
351 351 <testcase name="test-failure-unicode.t" time="*"> (glob)
352 352 <failure message="output changed" type="output-mismatch">
353 353 <![CDATA[--- $TESTTMP/test-failure-unicode.t
354 354 +++ $TESTTMP/test-failure-unicode.t.err
355 355 @@ -1,2 +1,2 @@
356 356 $ echo babar\xce\xb1 (esc)
357 357 - l\xce\xb5\xce\xb5t (esc)
358 358 + babar\xce\xb1 (esc)
359 359 ]]> </failure>
360 360 </testcase>
361 361 <testcase name="test-failure.t" time="*"> (glob)
362 362 <failure message="output changed" type="output-mismatch">
363 363 <![CDATA[--- $TESTTMP/test-failure.t
364 364 +++ $TESTTMP/test-failure.t.err
365 365 @@ -1,5 +1,5 @@
366 366 $ echo babar
367 367 - rataxes
368 368 + babar
369 369 This is a noop statement so that
370 370 this test is still more bytes than success.
371 371 pad pad pad pad............................................................
372 372 ]]> </failure>
373 373 </testcase>
374 374 </testsuite>
375 375
376 376 $ cat .testtimes
377 377 test-empty.t * (glob)
378 378 test-failure-unicode.t * (glob)
379 379 test-failure.t * (glob)
380 380 test-success.t * (glob)
381 381
382 382 $ rt --list-tests
383 383 test-failure-unicode.t
384 384 test-failure.t
385 385 test-success.t
386 386
387 387 $ rt --list-tests --json
388 388 test-failure-unicode.t
389 389 test-failure.t
390 390 test-success.t
391 391 $ cat report.json
392 392 testreport ={
393 393 "test-failure-unicode.t": {
394 394 "result": "success"
395 395 },
396 396 "test-failure.t": {
397 397 "result": "success"
398 398 },
399 399 "test-success.t": {
400 400 "result": "success"
401 401 }
402 402 } (no-eol)
403 403
404 404 $ rt --list-tests --xunit=xunit.xml
405 405 test-failure-unicode.t
406 406 test-failure.t
407 407 test-success.t
408 408 $ cat xunit.xml
409 409 <?xml version="1.0" encoding="utf-8"?>
410 410 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
411 411 <testcase name="test-failure-unicode.t"/>
412 412 <testcase name="test-failure.t"/>
413 413 <testcase name="test-success.t"/>
414 414 </testsuite>
415 415
416 416 $ rt --list-tests test-failure* --json --xunit=xunit.xml --outputdir output
417 417 test-failure-unicode.t
418 418 test-failure.t
419 419 $ cat output/report.json
420 420 testreport ={
421 421 "test-failure-unicode.t": {
422 422 "result": "success"
423 423 },
424 424 "test-failure.t": {
425 425 "result": "success"
426 426 }
427 427 } (no-eol)
428 428 $ cat xunit.xml
429 429 <?xml version="1.0" encoding="utf-8"?>
430 430 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
431 431 <testcase name="test-failure-unicode.t"/>
432 432 <testcase name="test-failure.t"/>
433 433 </testsuite>
434 434
435 435 $ rm test-failure-unicode.t
436 436
437 437 test for --retest
438 438 ====================
439 439
440 440 $ rt --retest
441 441
442 442 --- $TESTTMP/test-failure.t
443 443 +++ $TESTTMP/test-failure.t.err
444 444 @@ -1,5 +1,5 @@
445 445 $ echo babar
446 446 - rataxes
447 447 + babar
448 448 This is a noop statement so that
449 449 this test is still more bytes than success.
450 450 pad pad pad pad............................................................
451 451
452 452 ERROR: test-failure.t output changed
453 453 !
454 454 Failed test-failure.t: output changed
455 455 # Ran 2 tests, 1 skipped, 1 failed.
456 456 python hash seed: * (glob)
457 457 [1]
458 458
459 459 --retest works with --outputdir
460 460 $ rm -r output
461 461 $ mkdir output
462 462 $ mv test-failure.t.err output
463 463 $ rt --retest --outputdir output
464 464
465 465 --- $TESTTMP/test-failure.t
466 466 +++ $TESTTMP/output/test-failure.t.err
467 467 @@ -1,5 +1,5 @@
468 468 $ echo babar
469 469 - rataxes
470 470 + babar
471 471 This is a noop statement so that
472 472 this test is still more bytes than success.
473 473 pad pad pad pad............................................................
474 474
475 475 ERROR: test-failure.t output changed
476 476 !
477 477 Failed test-failure.t: output changed
478 478 # Ran 2 tests, 1 skipped, 1 failed.
479 479 python hash seed: * (glob)
480 480 [1]
481 481
482 482 Selecting Tests To Run
483 483 ======================
484 484
485 485 successful
486 486
487 487 $ rt test-success.t
488 488 .
489 489 # Ran 1 tests, 0 skipped, 0 failed.
490 490
491 491 success w/ keyword
492 492 $ rt -k xyzzy
493 493 .
494 494 # Ran 2 tests, 1 skipped, 0 failed.
495 495
496 496 failed
497 497
498 498 $ rt test-failure.t
499 499
500 500 --- $TESTTMP/test-failure.t
501 501 +++ $TESTTMP/test-failure.t.err
502 502 @@ -1,5 +1,5 @@
503 503 $ echo babar
504 504 - rataxes
505 505 + babar
506 506 This is a noop statement so that
507 507 this test is still more bytes than success.
508 508 pad pad pad pad............................................................
509 509
510 510 ERROR: test-failure.t output changed
511 511 !
512 512 Failed test-failure.t: output changed
513 513 # Ran 1 tests, 0 skipped, 1 failed.
514 514 python hash seed: * (glob)
515 515 [1]
516 516
517 517 failure w/ keyword
518 518 $ rt -k rataxes
519 519
520 520 --- $TESTTMP/test-failure.t
521 521 +++ $TESTTMP/test-failure.t.err
522 522 @@ -1,5 +1,5 @@
523 523 $ echo babar
524 524 - rataxes
525 525 + babar
526 526 This is a noop statement so that
527 527 this test is still more bytes than success.
528 528 pad pad pad pad............................................................
529 529
530 530 ERROR: test-failure.t output changed
531 531 !
532 532 Failed test-failure.t: output changed
533 533 # Ran 2 tests, 1 skipped, 1 failed.
534 534 python hash seed: * (glob)
535 535 [1]
536 536
537 537 Verify that when a process fails to start we show a useful message
538 538 ==================================================================
539 539
540 540 $ cat > test-serve-fail.t <<EOF
541 541 > $ echo 'abort: child process failed to start blah'
542 542 > EOF
543 543 $ rt test-serve-fail.t
544 544
545 545 --- $TESTTMP/test-serve-fail.t
546 546 +++ $TESTTMP/test-serve-fail.t.err
547 547 @@ -1* +1,2 @@ (glob)
548 548 $ echo 'abort: child process failed to start blah'
549 549 + abort: child process failed to start blah
550 550
551 551 ERROR: test-serve-fail.t output changed
552 552 !
553 553 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
554 554 # Ran 1 tests, 0 skipped, 1 failed.
555 555 python hash seed: * (glob)
556 556 [1]
557 557 $ rm test-serve-fail.t
558 558
559 559 Verify that we can try other ports
560 560 ===================================
561 561 $ hg init inuse
562 562 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
563 563 $ cat blocks.pid >> $DAEMON_PIDS
564 564 $ cat > test-serve-inuse.t <<EOF
565 565 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
566 566 > $ cat hg.pid >> \$DAEMON_PIDS
567 567 > EOF
568 568 $ rt test-serve-inuse.t
569 569 .
570 570 # Ran 1 tests, 0 skipped, 0 failed.
571 571 $ rm test-serve-inuse.t
572 572 $ killdaemons.py $DAEMON_PIDS
573 573 $ rm $DAEMON_PIDS
574 574
575 575 Running In Debug Mode
576 576 ======================
577 577
578 578 $ rt --debug 2>&1 | grep -v pwd
579 579 + echo *SALT* 0 0 (glob)
580 580 *SALT* 0 0 (glob)
581 581 + echo babar
582 582 babar
583 583 + echo *SALT* 10 0 (glob)
584 584 *SALT* 10 0 (glob)
585 585 *+ echo *SALT* 0 0 (glob)
586 586 *SALT* 0 0 (glob)
587 587 + echo babar
588 588 babar
589 589 + echo *SALT* 2 0 (glob)
590 590 *SALT* 2 0 (glob)
591 591 + echo xyzzy
592 592 xyzzy
593 593 + echo *SALT* 9 0 (glob)
594 594 *SALT* 9 0 (glob)
595 595 + printf *abc\ndef\nxyz\n* (glob)
596 596 abc
597 597 def
598 598 xyz
599 599 + echo *SALT* 15 0 (glob)
600 600 *SALT* 15 0 (glob)
601 601 + printf *zyx\nwvu\ntsr\n* (glob)
602 602 zyx
603 603 wvu
604 604 tsr
605 605 + echo *SALT* 22 0 (glob)
606 606 *SALT* 22 0 (glob)
607 607 .
608 608 # Ran 2 tests, 0 skipped, 0 failed.
609 609
610 610 Parallel runs
611 611 ==============
612 612
613 613 (duplicate the failing test to get predictable output)
614 614 $ cp test-failure.t test-failure-copy.t
615 615
616 616 $ rt --jobs 2 test-failure*.t -n
617 617 !!
618 618 Failed test-failure*.t: output changed (glob)
619 619 Failed test-failure*.t: output changed (glob)
620 620 # Ran 2 tests, 0 skipped, 2 failed.
621 621 python hash seed: * (glob)
622 622 [1]
623 623
624 624 failures in parallel with --first should only print one failure
625 625 $ rt --jobs 2 --first test-failure*.t
626 626
627 627 --- $TESTTMP/test-failure*.t (glob)
628 628 +++ $TESTTMP/test-failure*.t.err (glob)
629 629 @@ -1,5 +1,5 @@
630 630 $ echo babar
631 631 - rataxes
632 632 + babar
633 633 This is a noop statement so that
634 634 this test is still more bytes than success.
635 635 pad pad pad pad............................................................
636 636
637 637 Failed test-failure*.t: output changed (glob)
638 638 Failed test-failure*.t: output changed (glob)
639 639 # Ran 2 tests, 0 skipped, 2 failed.
640 640 python hash seed: * (glob)
641 641 [1]
642 642
643 643
644 644 (delete the duplicated test file)
645 645 $ rm test-failure-copy.t
646 646
647 647
648 648 Interactive run
649 649 ===============
650 650
651 651 (backup the failing test)
652 652 $ cp test-failure.t backup
653 653
654 654 Refuse the fix
655 655
656 656 $ echo 'n' | rt -i
657 657
658 658 --- $TESTTMP/test-failure.t
659 659 +++ $TESTTMP/test-failure.t.err
660 660 @@ -1,5 +1,5 @@
661 661 $ echo babar
662 662 - rataxes
663 663 + babar
664 664 This is a noop statement so that
665 665 this test is still more bytes than success.
666 666 pad pad pad pad............................................................
667 667 Accept this change? [n]
668 668 ERROR: test-failure.t output changed
669 669 !.
670 670 Failed test-failure.t: output changed
671 671 # Ran 2 tests, 0 skipped, 1 failed.
672 672 python hash seed: * (glob)
673 673 [1]
674 674
675 675 $ cat test-failure.t
676 676 $ echo babar
677 677 rataxes
678 678 This is a noop statement so that
679 679 this test is still more bytes than success.
680 680 pad pad pad pad............................................................
681 681 pad pad pad pad............................................................
682 682 pad pad pad pad............................................................
683 683 pad pad pad pad............................................................
684 684 pad pad pad pad............................................................
685 685 pad pad pad pad............................................................
686 686
687 687 Interactive with custom view
688 688
689 689 $ echo 'n' | rt -i --view echo
690 690 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
691 691 Accept this change? [n]* (glob)
692 692 ERROR: test-failure.t output changed
693 693 !.
694 694 Failed test-failure.t: output changed
695 695 # Ran 2 tests, 0 skipped, 1 failed.
696 696 python hash seed: * (glob)
697 697 [1]
698 698
699 699 View the fix
700 700
701 701 $ echo 'y' | rt --view echo
702 702 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
703 703
704 704 ERROR: test-failure.t output changed
705 705 !.
706 706 Failed test-failure.t: output changed
707 707 # Ran 2 tests, 0 skipped, 1 failed.
708 708 python hash seed: * (glob)
709 709 [1]
710 710
711 711 Accept the fix
712 712
713 713 $ cat >> test-failure.t <<EOF
714 714 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
715 715 > saved backup bundle to \$TESTTMP/foo.hg
716 716 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
717 717 > saved backup bundle to $TESTTMP\\foo.hg
718 718 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
719 719 > saved backup bundle to \$TESTTMP/*.hg (glob)
720 720 > EOF
721 721 $ echo 'y' | rt -i 2>&1
722 722
723 723 --- $TESTTMP/test-failure.t
724 724 +++ $TESTTMP/test-failure.t.err
725 725 @@ -1,5 +1,5 @@
726 726 $ echo babar
727 727 - rataxes
728 728 + babar
729 729 This is a noop statement so that
730 730 this test is still more bytes than success.
731 731 pad pad pad pad............................................................
732 732 @@ -11,6 +11,6 @@
733 733 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
734 734 saved backup bundle to $TESTTMP/foo.hg
735 735 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
736 736 - saved backup bundle to $TESTTMP\foo.hg
737 737 + saved backup bundle to $TESTTMP/foo.hg
738 738 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
739 739 saved backup bundle to $TESTTMP/*.hg (glob)
740 740 Accept this change? [n] ..
741 741 # Ran 2 tests, 0 skipped, 0 failed.
742 742
743 743 $ sed -e 's,(glob)$,&<,g' test-failure.t
744 744 $ echo babar
745 745 babar
746 746 This is a noop statement so that
747 747 this test is still more bytes than success.
748 748 pad pad pad pad............................................................
749 749 pad pad pad pad............................................................
750 750 pad pad pad pad............................................................
751 751 pad pad pad pad............................................................
752 752 pad pad pad pad............................................................
753 753 pad pad pad pad............................................................
754 754 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
755 755 saved backup bundle to $TESTTMP/foo.hg
756 756 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
757 757 saved backup bundle to $TESTTMP/foo.hg
758 758 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
759 759 saved backup bundle to $TESTTMP/*.hg (glob)<
760 760
761 761 Race condition - test file was modified when test is running
762 762
763 763 $ TESTRACEDIR=`pwd`
764 764 $ export TESTRACEDIR
765 765 $ cat > test-race.t <<EOF
766 766 > $ echo 1
767 767 > $ echo "# a new line" >> $TESTRACEDIR/test-race.t
768 768 > EOF
769 769
770 770 $ rt -i test-race.t
771 771
772 772 --- $TESTTMP/test-race.t
773 773 +++ $TESTTMP/test-race.t.err
774 774 @@ -1,2 +1,3 @@
775 775 $ echo 1
776 776 + 1
777 777 $ echo "# a new line" >> $TESTTMP/test-race.t
778 778 Reference output has changed (run again to prompt changes)
779 779 ERROR: test-race.t output changed
780 780 !
781 781 Failed test-race.t: output changed
782 782 # Ran 1 tests, 0 skipped, 1 failed.
783 783 python hash seed: * (glob)
784 784 [1]
785 785
786 786 $ rm test-race.t
787 787
788 788 When "#testcases" is used in .t files
789 789
790 790 $ cat >> test-cases.t <<EOF
791 791 > #testcases a b
792 792 > #if a
793 793 > $ echo 1
794 794 > #endif
795 795 > #if b
796 796 > $ echo 2
797 797 > #endif
798 798 > EOF
799 799
800 800 $ cat <<EOF | rt -i test-cases.t 2>&1
801 801 > y
802 802 > y
803 803 > EOF
804 804
805 805 --- $TESTTMP/test-cases.t
806 806 +++ $TESTTMP/test-cases.t.a.err
807 807 @@ -1,6 +1,7 @@
808 808 #testcases a b
809 809 #if a
810 810 $ echo 1
811 811 + 1
812 812 #endif
813 813 #if b
814 814 $ echo 2
815 815 Accept this change? [n] .
816 816 --- $TESTTMP/test-cases.t
817 817 +++ $TESTTMP/test-cases.t.b.err
818 818 @@ -5,4 +5,5 @@
819 819 #endif
820 820 #if b
821 821 $ echo 2
822 822 + 2
823 823 #endif
824 824 Accept this change? [n] .
825 825 # Ran 2 tests, 0 skipped, 0 failed.
826 826
827 827 $ cat test-cases.t
828 828 #testcases a b
829 829 #if a
830 830 $ echo 1
831 831 1
832 832 #endif
833 833 #if b
834 834 $ echo 2
835 835 2
836 836 #endif
837 837
838 838 $ cat >> test-cases.t <<'EOF'
839 839 > #if a
840 840 > $ NAME=A
841 841 > #else
842 842 > $ NAME=B
843 843 > #endif
844 844 > $ echo $NAME
845 845 > A (a !)
846 846 > B (b !)
847 847 > EOF
848 848 $ rt test-cases.t
849 849 ..
850 850 # Ran 2 tests, 0 skipped, 0 failed.
851 851
852 852 $ rm test-cases.t
853 853
854 854 (reinstall)
855 855 $ mv backup test-failure.t
856 856
857 857 No Diff
858 858 ===============
859 859
860 860 $ rt --nodiff
861 861 !.
862 862 Failed test-failure.t: output changed
863 863 # Ran 2 tests, 0 skipped, 1 failed.
864 864 python hash seed: * (glob)
865 865 [1]
866 866
867 867 test --tmpdir support
868 868 $ rt --tmpdir=$TESTTMP/keep test-success.t
869 869
870 870 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t
871 871 Keeping threadtmp dir: $TESTTMP/keep/child1
872 872 .
873 873 # Ran 1 tests, 0 skipped, 0 failed.
874 874
875 875 timeouts
876 876 ========
877 877 $ cat > test-timeout.t <<EOF
878 878 > $ sleep 2
879 879 > $ echo pass
880 880 > pass
881 881 > EOF
882 882 > echo '#require slow' > test-slow-timeout.t
883 883 > cat test-timeout.t >> test-slow-timeout.t
884 884 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
885 885 st
886 886 Skipped test-slow-timeout.t: missing feature: allow slow tests (use --allow-slow-tests)
887 887 Failed test-timeout.t: timed out
888 888 # Ran 1 tests, 1 skipped, 1 failed.
889 889 python hash seed: * (glob)
890 890 [1]
891 891 $ rt --timeout=1 --slowtimeout=3 \
892 892 > test-timeout.t test-slow-timeout.t --allow-slow-tests
893 893 .t
894 894 Failed test-timeout.t: timed out
895 895 # Ran 2 tests, 0 skipped, 1 failed.
896 896 python hash seed: * (glob)
897 897 [1]
898 898 $ rm test-timeout.t test-slow-timeout.t
899 899
900 900 test for --time
901 901 ==================
902 902
903 903 $ rt test-success.t --time
904 904 .
905 905 # Ran 1 tests, 0 skipped, 0 failed.
906 906 # Producing time report
907 907 start end cuser csys real Test
908 908 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
909 909
910 910 test for --time with --job enabled
911 911 ====================================
912 912
913 913 $ rt test-success.t --time --jobs 2
914 914 .
915 915 # Ran 1 tests, 0 skipped, 0 failed.
916 916 # Producing time report
917 917 start end cuser csys real Test
918 918 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
919 919
920 920 Skips
921 921 ================
922 922 $ cat > test-skip.t <<EOF
923 923 > $ echo xyzzy
924 > #if true
924 925 > #require false
926 > #end
927 > EOF
928 $ cat > test-noskip.t <<EOF
929 > #if false
930 > #require false
931 > #endif
925 932 > EOF
926 933 $ rt --nodiff
927 !.s
934 !.s.
928 935 Skipped test-skip.t: missing feature: nail clipper
929 936 Failed test-failure.t: output changed
930 # Ran 2 tests, 1 skipped, 1 failed.
937 # Ran 3 tests, 1 skipped, 1 failed.
931 938 python hash seed: * (glob)
932 939 [1]
933 940
941 $ rm test-noskip.t
934 942 $ rt --keyword xyzzy
935 943 .s
936 944 Skipped test-skip.t: missing feature: nail clipper
937 945 # Ran 2 tests, 2 skipped, 0 failed.
938 946
939 947 Skips with xml
940 948 $ rt --keyword xyzzy \
941 949 > --xunit=xunit.xml
942 950 .s
943 951 Skipped test-skip.t: missing feature: nail clipper
944 952 # Ran 2 tests, 2 skipped, 0 failed.
945 953 $ cat xunit.xml
946 954 <?xml version="1.0" encoding="utf-8"?>
947 955 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
948 956 <testcase name="test-success.t" time="*"/> (glob)
949 957 <testcase name="test-skip.t">
950 958 <skipped>
951 959 <![CDATA[missing feature: nail clipper]]> </skipped>
952 960 </testcase>
953 961 </testsuite>
954 962
955 963 Missing skips or blacklisted skips don't count as executed:
956 964 $ echo test-failure.t > blacklist
957 965 $ rt --blacklist=blacklist --json\
958 966 > test-failure.t test-bogus.t
959 967 ss
960 968 Skipped test-bogus.t: Doesn't exist
961 969 Skipped test-failure.t: blacklisted
962 970 # Ran 0 tests, 2 skipped, 0 failed.
963 971 $ cat report.json
964 972 testreport ={
965 973 "test-bogus.t": {
966 974 "result": "skip"
967 975 },
968 976 "test-failure.t": {
969 977 "result": "skip"
970 978 }
971 979 } (no-eol)
972 980
973 981 Whitelist trumps blacklist
974 982 $ echo test-failure.t > whitelist
975 983 $ rt --blacklist=blacklist --whitelist=whitelist --json\
976 984 > test-failure.t test-bogus.t
977 985 s
978 986 --- $TESTTMP/test-failure.t
979 987 +++ $TESTTMP/test-failure.t.err
980 988 @@ -1,5 +1,5 @@
981 989 $ echo babar
982 990 - rataxes
983 991 + babar
984 992 This is a noop statement so that
985 993 this test is still more bytes than success.
986 994 pad pad pad pad............................................................
987 995
988 996 ERROR: test-failure.t output changed
989 997 !
990 998 Skipped test-bogus.t: Doesn't exist
991 999 Failed test-failure.t: output changed
992 1000 # Ran 1 tests, 1 skipped, 1 failed.
993 1001 python hash seed: * (glob)
994 1002 [1]
995 1003
996 1004 Ensure that --test-list causes only the tests listed in that file to
997 1005 be executed.
998 1006 $ echo test-success.t >> onlytest
999 1007 $ rt --test-list=onlytest
1000 1008 .
1001 1009 # Ran 1 tests, 0 skipped, 0 failed.
1002 1010 $ echo test-bogus.t >> anothertest
1003 1011 $ rt --test-list=onlytest --test-list=anothertest
1004 1012 s.
1005 1013 Skipped test-bogus.t: Doesn't exist
1006 1014 # Ran 1 tests, 1 skipped, 0 failed.
1007 1015 $ rm onlytest anothertest
1008 1016
1009 1017 test for --json
1010 1018 ==================
1011 1019
1012 1020 $ rt --json
1013 1021
1014 1022 --- $TESTTMP/test-failure.t
1015 1023 +++ $TESTTMP/test-failure.t.err
1016 1024 @@ -1,5 +1,5 @@
1017 1025 $ echo babar
1018 1026 - rataxes
1019 1027 + babar
1020 1028 This is a noop statement so that
1021 1029 this test is still more bytes than success.
1022 1030 pad pad pad pad............................................................
1023 1031
1024 1032 ERROR: test-failure.t output changed
1025 1033 !.s
1026 1034 Skipped test-skip.t: missing feature: nail clipper
1027 1035 Failed test-failure.t: output changed
1028 1036 # Ran 2 tests, 1 skipped, 1 failed.
1029 1037 python hash seed: * (glob)
1030 1038 [1]
1031 1039
1032 1040 $ cat report.json
1033 1041 testreport ={
1034 1042 "test-failure.t": [\{] (re)
1035 1043 "csys": "\s*[\d\.]{4,5}", ? (re)
1036 1044 "cuser": "\s*[\d\.]{4,5}", ? (re)
1037 1045 "diff": "---.+\+\+\+.+", ? (re)
1038 1046 "end": "\s*[\d\.]{4,5}", ? (re)
1039 1047 "result": "failure", ? (re)
1040 1048 "start": "\s*[\d\.]{4,5}", ? (re)
1041 1049 "time": "\s*[\d\.]{4,5}" (re)
1042 1050 }, ? (re)
1043 1051 "test-skip.t": {
1044 1052 "csys": "\s*[\d\.]{4,5}", ? (re)
1045 1053 "cuser": "\s*[\d\.]{4,5}", ? (re)
1046 1054 "diff": "", ? (re)
1047 1055 "end": "\s*[\d\.]{4,5}", ? (re)
1048 1056 "result": "skip", ? (re)
1049 1057 "start": "\s*[\d\.]{4,5}", ? (re)
1050 1058 "time": "\s*[\d\.]{4,5}" (re)
1051 1059 }, ? (re)
1052 1060 "test-success.t": [\{] (re)
1053 1061 "csys": "\s*[\d\.]{4,5}", ? (re)
1054 1062 "cuser": "\s*[\d\.]{4,5}", ? (re)
1055 1063 "diff": "", ? (re)
1056 1064 "end": "\s*[\d\.]{4,5}", ? (re)
1057 1065 "result": "success", ? (re)
1058 1066 "start": "\s*[\d\.]{4,5}", ? (re)
1059 1067 "time": "\s*[\d\.]{4,5}" (re)
1060 1068 }
1061 1069 } (no-eol)
1062 1070 --json with --outputdir
1063 1071
1064 1072 $ rm report.json
1065 1073 $ rm -r output
1066 1074 $ mkdir output
1067 1075 $ rt --json --outputdir output
1068 1076
1069 1077 --- $TESTTMP/test-failure.t
1070 1078 +++ $TESTTMP/output/test-failure.t.err
1071 1079 @@ -1,5 +1,5 @@
1072 1080 $ echo babar
1073 1081 - rataxes
1074 1082 + babar
1075 1083 This is a noop statement so that
1076 1084 this test is still more bytes than success.
1077 1085 pad pad pad pad............................................................
1078 1086
1079 1087 ERROR: test-failure.t output changed
1080 1088 !.s
1081 1089 Skipped test-skip.t: missing feature: nail clipper
1082 1090 Failed test-failure.t: output changed
1083 1091 # Ran 2 tests, 1 skipped, 1 failed.
1084 1092 python hash seed: * (glob)
1085 1093 [1]
1086 1094 $ f report.json
1087 1095 report.json: file not found
1088 1096 $ cat output/report.json
1089 1097 testreport ={
1090 1098 "test-failure.t": [\{] (re)
1091 1099 "csys": "\s*[\d\.]{4,5}", ? (re)
1092 1100 "cuser": "\s*[\d\.]{4,5}", ? (re)
1093 1101 "diff": "---.+\+\+\+.+", ? (re)
1094 1102 "end": "\s*[\d\.]{4,5}", ? (re)
1095 1103 "result": "failure", ? (re)
1096 1104 "start": "\s*[\d\.]{4,5}", ? (re)
1097 1105 "time": "\s*[\d\.]{4,5}" (re)
1098 1106 }, ? (re)
1099 1107 "test-skip.t": {
1100 1108 "csys": "\s*[\d\.]{4,5}", ? (re)
1101 1109 "cuser": "\s*[\d\.]{4,5}", ? (re)
1102 1110 "diff": "", ? (re)
1103 1111 "end": "\s*[\d\.]{4,5}", ? (re)
1104 1112 "result": "skip", ? (re)
1105 1113 "start": "\s*[\d\.]{4,5}", ? (re)
1106 1114 "time": "\s*[\d\.]{4,5}" (re)
1107 1115 }, ? (re)
1108 1116 "test-success.t": [\{] (re)
1109 1117 "csys": "\s*[\d\.]{4,5}", ? (re)
1110 1118 "cuser": "\s*[\d\.]{4,5}", ? (re)
1111 1119 "diff": "", ? (re)
1112 1120 "end": "\s*[\d\.]{4,5}", ? (re)
1113 1121 "result": "success", ? (re)
1114 1122 "start": "\s*[\d\.]{4,5}", ? (re)
1115 1123 "time": "\s*[\d\.]{4,5}" (re)
1116 1124 }
1117 1125 } (no-eol)
1118 1126 $ ls -a output
1119 1127 .
1120 1128 ..
1121 1129 .testtimes
1122 1130 report.json
1123 1131 test-failure.t.err
1124 1132
1125 1133 Test that failed test accepted through interactive are properly reported:
1126 1134
1127 1135 $ cp test-failure.t backup
1128 1136 $ echo y | rt --json -i
1129 1137
1130 1138 --- $TESTTMP/test-failure.t
1131 1139 +++ $TESTTMP/test-failure.t.err
1132 1140 @@ -1,5 +1,5 @@
1133 1141 $ echo babar
1134 1142 - rataxes
1135 1143 + babar
1136 1144 This is a noop statement so that
1137 1145 this test is still more bytes than success.
1138 1146 pad pad pad pad............................................................
1139 1147 Accept this change? [n] ..s
1140 1148 Skipped test-skip.t: missing feature: nail clipper
1141 1149 # Ran 2 tests, 1 skipped, 0 failed.
1142 1150
1143 1151 $ cat report.json
1144 1152 testreport ={
1145 1153 "test-failure.t": [\{] (re)
1146 1154 "csys": "\s*[\d\.]{4,5}", ? (re)
1147 1155 "cuser": "\s*[\d\.]{4,5}", ? (re)
1148 1156 "diff": "", ? (re)
1149 1157 "end": "\s*[\d\.]{4,5}", ? (re)
1150 1158 "result": "success", ? (re)
1151 1159 "start": "\s*[\d\.]{4,5}", ? (re)
1152 1160 "time": "\s*[\d\.]{4,5}" (re)
1153 1161 }, ? (re)
1154 1162 "test-skip.t": {
1155 1163 "csys": "\s*[\d\.]{4,5}", ? (re)
1156 1164 "cuser": "\s*[\d\.]{4,5}", ? (re)
1157 1165 "diff": "", ? (re)
1158 1166 "end": "\s*[\d\.]{4,5}", ? (re)
1159 1167 "result": "skip", ? (re)
1160 1168 "start": "\s*[\d\.]{4,5}", ? (re)
1161 1169 "time": "\s*[\d\.]{4,5}" (re)
1162 1170 }, ? (re)
1163 1171 "test-success.t": [\{] (re)
1164 1172 "csys": "\s*[\d\.]{4,5}", ? (re)
1165 1173 "cuser": "\s*[\d\.]{4,5}", ? (re)
1166 1174 "diff": "", ? (re)
1167 1175 "end": "\s*[\d\.]{4,5}", ? (re)
1168 1176 "result": "success", ? (re)
1169 1177 "start": "\s*[\d\.]{4,5}", ? (re)
1170 1178 "time": "\s*[\d\.]{4,5}" (re)
1171 1179 }
1172 1180 } (no-eol)
1173 1181 $ mv backup test-failure.t
1174 1182
1175 1183 backslash on end of line with glob matching is handled properly
1176 1184
1177 1185 $ cat > test-glob-backslash.t << EOF
1178 1186 > $ echo 'foo bar \\'
1179 1187 > foo * \ (glob)
1180 1188 > EOF
1181 1189
1182 1190 $ rt test-glob-backslash.t
1183 1191 .
1184 1192 # Ran 1 tests, 0 skipped, 0 failed.
1185 1193
1186 1194 $ rm -f test-glob-backslash.t
1187 1195
1188 1196 Test globbing of local IP addresses
1189 1197 $ echo 172.16.18.1
1190 1198 $LOCALIP (glob)
1191 1199 $ echo dead:beef::1
1192 1200 $LOCALIP (glob)
1193 1201
1194 1202 Test reusability for third party tools
1195 1203 ======================================
1196 1204
1197 1205 $ mkdir "$TESTTMP"/anothertests
1198 1206 $ cd "$TESTTMP"/anothertests
1199 1207
1200 1208 test that `run-tests.py` can execute hghave, even if it runs not in
1201 1209 Mercurial source tree.
1202 1210
1203 1211 $ cat > test-hghave.t <<EOF
1204 1212 > #require true
1205 1213 > $ echo foo
1206 1214 > foo
1207 1215 > EOF
1208 1216 $ rt test-hghave.t
1209 1217 .
1210 1218 # Ran 1 tests, 0 skipped, 0 failed.
1211 1219
1212 1220 test that RUNTESTDIR refers the directory, in which `run-tests.py` now
1213 1221 running is placed.
1214 1222
1215 1223 $ cat > test-runtestdir.t <<EOF
1216 1224 > - $TESTDIR, in which test-run-tests.t is placed
1217 1225 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
1218 1226 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
1219 1227 >
1220 1228 > #if windows
1221 1229 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
1222 1230 > #else
1223 1231 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
1224 1232 > #endif
1225 1233 > If this prints a path, that means RUNTESTDIR didn't equal
1226 1234 > TESTDIR as it should have.
1227 1235 > $ test "\$RUNTESTDIR" = "$TESTDIR" || echo "\$RUNTESTDIR"
1228 1236 > This should print the start of check-code. If this passes but the
1229 1237 > previous check failed, that means we found a copy of check-code at whatever
1230 1238 > RUNTESTSDIR ended up containing, even though it doesn't match TESTDIR.
1231 1239 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py | sed 's@.!.*python@#!USRBINENVPY@'
1232 1240 > #!USRBINENVPY
1233 1241 > #
1234 1242 > # check-code - a style and portability checker for Mercurial
1235 1243 > EOF
1236 1244 $ rt test-runtestdir.t
1237 1245 .
1238 1246 # Ran 1 tests, 0 skipped, 0 failed.
1239 1247
1240 1248 #if execbit
1241 1249
1242 1250 test that TESTDIR is referred in PATH
1243 1251
1244 1252 $ cat > custom-command.sh <<EOF
1245 1253 > #!/bin/sh
1246 1254 > echo "hello world"
1247 1255 > EOF
1248 1256 $ chmod +x custom-command.sh
1249 1257 $ cat > test-testdir-path.t <<EOF
1250 1258 > $ custom-command.sh
1251 1259 > hello world
1252 1260 > EOF
1253 1261 $ rt test-testdir-path.t
1254 1262 .
1255 1263 # Ran 1 tests, 0 skipped, 0 failed.
1256 1264
1257 1265 #endif
1258 1266
1259 1267 test support for --allow-slow-tests
1260 1268 $ cat > test-very-slow-test.t <<EOF
1261 1269 > #require slow
1262 1270 > $ echo pass
1263 1271 > pass
1264 1272 > EOF
1265 1273 $ rt test-very-slow-test.t
1266 1274 s
1267 1275 Skipped test-very-slow-test.t: missing feature: allow slow tests (use --allow-slow-tests)
1268 1276 # Ran 0 tests, 1 skipped, 0 failed.
1269 1277 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
1270 1278 .
1271 1279 # Ran 1 tests, 0 skipped, 0 failed.
1272 1280
1273 1281 support for running a test outside the current directory
1274 1282 $ mkdir nonlocal
1275 1283 $ cat > nonlocal/test-is-not-here.t << EOF
1276 1284 > $ echo pass
1277 1285 > pass
1278 1286 > EOF
1279 1287 $ rt nonlocal/test-is-not-here.t
1280 1288 .
1281 1289 # Ran 1 tests, 0 skipped, 0 failed.
1282 1290
1283 1291 support for automatically discovering test if arg is a folder
1284 1292 $ mkdir tmp && cd tmp
1285 1293
1286 1294 $ cat > test-uno.t << EOF
1287 1295 > $ echo line
1288 1296 > line
1289 1297 > EOF
1290 1298
1291 1299 $ cp test-uno.t test-dos.t
1292 1300 $ cd ..
1293 1301 $ cp -R tmp tmpp
1294 1302 $ cp tmp/test-uno.t test-solo.t
1295 1303
1296 1304 $ rt tmp/ test-solo.t tmpp
1297 1305 .....
1298 1306 # Ran 5 tests, 0 skipped, 0 failed.
1299 1307 $ rm -rf tmp tmpp
1300 1308
1301 1309 support for running run-tests.py from another directory
1302 1310 $ mkdir tmp && cd tmp
1303 1311
1304 1312 $ cat > useful-file.sh << EOF
1305 1313 > important command
1306 1314 > EOF
1307 1315
1308 1316 $ cat > test-folder.t << EOF
1309 1317 > $ cat \$TESTDIR/useful-file.sh
1310 1318 > important command
1311 1319 > EOF
1312 1320
1313 1321 $ cat > test-folder-fail.t << EOF
1314 1322 > $ cat \$TESTDIR/useful-file.sh
1315 1323 > important commando
1316 1324 > EOF
1317 1325
1318 1326 $ cd ..
1319 1327 $ rt tmp/test-*.t
1320 1328
1321 1329 --- $TESTTMP/anothertests/tmp/test-folder-fail.t
1322 1330 +++ $TESTTMP/anothertests/tmp/test-folder-fail.t.err
1323 1331 @@ -1,2 +1,2 @@
1324 1332 $ cat $TESTDIR/useful-file.sh
1325 1333 - important commando
1326 1334 + important command
1327 1335
1328 1336 ERROR: test-folder-fail.t output changed
1329 1337 !.
1330 1338 Failed test-folder-fail.t: output changed
1331 1339 # Ran 2 tests, 0 skipped, 1 failed.
1332 1340 python hash seed: * (glob)
1333 1341 [1]
1334 1342
1335 1343 support for bisecting failed tests automatically
1336 1344 $ hg init bisect
1337 1345 $ cd bisect
1338 1346 $ cat >> test-bisect.t <<EOF
1339 1347 > $ echo pass
1340 1348 > pass
1341 1349 > EOF
1342 1350 $ hg add test-bisect.t
1343 1351 $ hg ci -m 'good'
1344 1352 $ cat >> test-bisect.t <<EOF
1345 1353 > $ echo pass
1346 1354 > fail
1347 1355 > EOF
1348 1356 $ hg ci -m 'bad'
1349 1357 $ rt --known-good-rev=0 test-bisect.t
1350 1358
1351 1359 --- $TESTTMP/anothertests/bisect/test-bisect.t
1352 1360 +++ $TESTTMP/anothertests/bisect/test-bisect.t.err
1353 1361 @@ -1,4 +1,4 @@
1354 1362 $ echo pass
1355 1363 pass
1356 1364 $ echo pass
1357 1365 - fail
1358 1366 + pass
1359 1367
1360 1368 ERROR: test-bisect.t output changed
1361 1369 !
1362 1370 Failed test-bisect.t: output changed
1363 1371 test-bisect.t broken by 72cbf122d116 (bad)
1364 1372 # Ran 1 tests, 0 skipped, 1 failed.
1365 1373 python hash seed: * (glob)
1366 1374 [1]
1367 1375
1368 1376 $ cd ..
1369 1377
1370 1378 support bisecting a separate repo
1371 1379
1372 1380 $ hg init bisect-dependent
1373 1381 $ cd bisect-dependent
1374 1382 $ cat > test-bisect-dependent.t <<EOF
1375 1383 > $ tail -1 \$TESTDIR/../bisect/test-bisect.t
1376 1384 > pass
1377 1385 > EOF
1378 1386 $ hg commit -Am dependent test-bisect-dependent.t
1379 1387
1380 1388 $ rt --known-good-rev=0 test-bisect-dependent.t
1381 1389
1382 1390 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1383 1391 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1384 1392 @@ -1,2 +1,2 @@
1385 1393 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1386 1394 - pass
1387 1395 + fail
1388 1396
1389 1397 ERROR: test-bisect-dependent.t output changed
1390 1398 !
1391 1399 Failed test-bisect-dependent.t: output changed
1392 1400 Failed to identify failure point for test-bisect-dependent.t
1393 1401 # Ran 1 tests, 0 skipped, 1 failed.
1394 1402 python hash seed: * (glob)
1395 1403 [1]
1396 1404
1397 1405 $ rt --bisect-repo=../test-bisect test-bisect-dependent.t
1398 1406 usage: run-tests.py [options] [tests]
1399 1407 run-tests.py: error: --bisect-repo cannot be used without --known-good-rev
1400 1408 [2]
1401 1409
1402 1410 $ rt --known-good-rev=0 --bisect-repo=../bisect test-bisect-dependent.t
1403 1411
1404 1412 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1405 1413 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1406 1414 @@ -1,2 +1,2 @@
1407 1415 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1408 1416 - pass
1409 1417 + fail
1410 1418
1411 1419 ERROR: test-bisect-dependent.t output changed
1412 1420 !
1413 1421 Failed test-bisect-dependent.t: output changed
1414 1422 test-bisect-dependent.t broken by 72cbf122d116 (bad)
1415 1423 # Ran 1 tests, 0 skipped, 1 failed.
1416 1424 python hash seed: * (glob)
1417 1425 [1]
1418 1426
1419 1427 $ cd ..
1420 1428
1421 1429 Test a broken #if statement doesn't break run-tests threading.
1422 1430 ==============================================================
1423 1431 $ mkdir broken
1424 1432 $ cd broken
1425 1433 $ cat > test-broken.t <<EOF
1426 1434 > true
1427 1435 > #if notarealhghavefeature
1428 1436 > $ false
1429 1437 > #endif
1430 1438 > EOF
1431 1439 $ for f in 1 2 3 4 ; do
1432 1440 > cat > test-works-$f.t <<EOF
1433 1441 > This is test case $f
1434 1442 > $ sleep 1
1435 1443 > EOF
1436 1444 > done
1437 1445 $ rt -j 2
1438 1446 ....
1439 1447 # Ran 5 tests, 0 skipped, 0 failed.
1440 1448 skipped: unknown feature: notarealhghavefeature
1441 1449
1442 1450 $ cd ..
1443 1451 $ rm -rf broken
1444 1452
1445 1453 Test cases in .t files
1446 1454 ======================
1447 1455 $ mkdir cases
1448 1456 $ cd cases
1449 1457 $ cat > test-cases-abc.t <<'EOF'
1450 1458 > #testcases A B C
1451 1459 > $ V=B
1452 1460 > #if A
1453 1461 > $ V=A
1454 1462 > #endif
1455 1463 > #if C
1456 1464 > $ V=C
1457 1465 > #endif
1458 1466 > $ echo $V | sed 's/A/C/'
1459 1467 > C
1460 1468 > #if C
1461 1469 > $ [ $V = C ]
1462 1470 > #endif
1463 1471 > #if A
1464 1472 > $ [ $V = C ]
1465 1473 > [1]
1466 1474 > #endif
1467 1475 > #if no-C
1468 1476 > $ [ $V = C ]
1469 1477 > [1]
1470 1478 > #endif
1471 1479 > $ [ $V = D ]
1472 1480 > [1]
1473 1481 > EOF
1474 1482 $ rt
1475 1483 .
1476 1484 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1477 1485 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1478 1486 @@ -7,7 +7,7 @@
1479 1487 $ V=C
1480 1488 #endif
1481 1489 $ echo $V | sed 's/A/C/'
1482 1490 - C
1483 1491 + B
1484 1492 #if C
1485 1493 $ [ $V = C ]
1486 1494 #endif
1487 1495
1488 1496 ERROR: test-cases-abc.t (case B) output changed
1489 1497 !.
1490 1498 Failed test-cases-abc.t (case B): output changed
1491 1499 # Ran 3 tests, 0 skipped, 1 failed.
1492 1500 python hash seed: * (glob)
1493 1501 [1]
1494 1502
1495 1503 --restart works
1496 1504
1497 1505 $ rt --restart
1498 1506
1499 1507 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1500 1508 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1501 1509 @@ -7,7 +7,7 @@
1502 1510 $ V=C
1503 1511 #endif
1504 1512 $ echo $V | sed 's/A/C/'
1505 1513 - C
1506 1514 + B
1507 1515 #if C
1508 1516 $ [ $V = C ]
1509 1517 #endif
1510 1518
1511 1519 ERROR: test-cases-abc.t (case B) output changed
1512 1520 !.
1513 1521 Failed test-cases-abc.t (case B): output changed
1514 1522 # Ran 2 tests, 0 skipped, 1 failed.
1515 1523 python hash seed: * (glob)
1516 1524 [1]
1517 1525
1518 1526 --restart works with outputdir
1519 1527
1520 1528 $ mkdir output
1521 1529 $ mv test-cases-abc.t.B.err output
1522 1530 $ rt --restart --outputdir output
1523 1531
1524 1532 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1525 1533 +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t.B.err
1526 1534 @@ -7,7 +7,7 @@
1527 1535 $ V=C
1528 1536 #endif
1529 1537 $ echo $V | sed 's/A/C/'
1530 1538 - C
1531 1539 + B
1532 1540 #if C
1533 1541 $ [ $V = C ]
1534 1542 #endif
1535 1543
1536 1544 ERROR: test-cases-abc.t (case B) output changed
1537 1545 !.
1538 1546 Failed test-cases-abc.t (case B): output changed
1539 1547 # Ran 2 tests, 0 skipped, 1 failed.
1540 1548 python hash seed: * (glob)
1541 1549 [1]
1542 1550
1543 1551 Test TESTCASE variable
1544 1552
1545 1553 $ cat > test-cases-ab.t <<'EOF'
1546 1554 > $ dostuff() {
1547 1555 > > echo "In case $TESTCASE"
1548 1556 > > }
1549 1557 > #testcases A B
1550 1558 > #if A
1551 1559 > $ dostuff
1552 1560 > In case A
1553 1561 > #endif
1554 1562 > #if B
1555 1563 > $ dostuff
1556 1564 > In case B
1557 1565 > #endif
1558 1566 > EOF
1559 1567 $ rt test-cases-ab.t
1560 1568 ..
1561 1569 # Ran 2 tests, 0 skipped, 0 failed.
1562 1570
1563 1571 Test automatic pattern replacement
1564 1572
1565 1573 $ cat << EOF >> common-pattern.py
1566 1574 > substitutions = [
1567 1575 > (br'foo-(.*)\\b',
1568 1576 > br'\$XXX=\\1\$'),
1569 1577 > (br'bar\\n',
1570 1578 > br'\$YYY$\\n'),
1571 1579 > ]
1572 1580 > EOF
1573 1581
1574 1582 $ cat << EOF >> test-substitution.t
1575 1583 > $ echo foo-12
1576 1584 > \$XXX=12$
1577 1585 > $ echo foo-42
1578 1586 > \$XXX=42$
1579 1587 > $ echo bar prior
1580 1588 > bar prior
1581 1589 > $ echo lastbar
1582 1590 > last\$YYY$
1583 1591 > $ echo foo-bar foo-baz
1584 1592 > EOF
1585 1593
1586 1594 $ rt test-substitution.t
1587 1595
1588 1596 --- $TESTTMP/anothertests/cases/test-substitution.t
1589 1597 +++ $TESTTMP/anothertests/cases/test-substitution.t.err
1590 1598 @@ -7,3 +7,4 @@
1591 1599 $ echo lastbar
1592 1600 last$YYY$
1593 1601 $ echo foo-bar foo-baz
1594 1602 + $XXX=bar foo-baz$
1595 1603
1596 1604 ERROR: test-substitution.t output changed
1597 1605 !
1598 1606 Failed test-substitution.t: output changed
1599 1607 # Ran 1 tests, 0 skipped, 1 failed.
1600 1608 python hash seed: * (glob)
1601 1609 [1]
1602 1610
1603 1611 --extra-config-opt works
1604 1612
1605 1613 $ cat << EOF >> test-config-opt.t
1606 1614 > $ hg init test-config-opt
1607 1615 > $ hg -R test-config-opt purge
1608 1616 > EOF
1609 1617
1610 1618 $ rt --extra-config-opt extensions.purge= test-config-opt.t
1611 1619 .
1612 1620 # Ran 1 tests, 0 skipped, 0 failed.
General Comments 0
You need to be logged in to leave comments. Login now