##// END OF EJS Templates
run-tests: print number of tests and parallel process count...
Gregory Szorc -
r40280:1039404c default
parent child Browse files
Show More
@@ -1,3234 +1,3239 b''
1 1 #!/usr/bin/env python
2 2 #
3 3 # run-tests.py - Run a set of tests on Mercurial
4 4 #
5 5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 # Modifying this script is tricky because it has many modes:
11 11 # - serial (default) vs parallel (-jN, N > 1)
12 12 # - no coverage (default) vs coverage (-c, -C, -s)
13 13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 14 # - tests are a mix of shell scripts and Python scripts
15 15 #
16 16 # If you change this script, it is recommended that you ensure you
17 17 # haven't broken it by running it in various modes with a representative
18 18 # sample of test scripts. For example:
19 19 #
20 20 # 1) serial, no coverage, temp install:
21 21 # ./run-tests.py test-s*
22 22 # 2) serial, no coverage, local hg:
23 23 # ./run-tests.py --local test-s*
24 24 # 3) serial, coverage, temp install:
25 25 # ./run-tests.py -c test-s*
26 26 # 4) serial, coverage, local hg:
27 27 # ./run-tests.py -c --local test-s* # unsupported
28 28 # 5) parallel, no coverage, temp install:
29 29 # ./run-tests.py -j2 test-s*
30 30 # 6) parallel, no coverage, local hg:
31 31 # ./run-tests.py -j2 --local test-s*
32 32 # 7) parallel, coverage, temp install:
33 33 # ./run-tests.py -j2 -c test-s* # currently broken
34 34 # 8) parallel, coverage, local install:
35 35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 36 # 9) parallel, custom tmp dir:
37 37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 38 # 10) parallel, pure, tests that call run-tests:
39 39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 40 #
41 41 # (You could use any subset of the tests: test-s* happens to match
42 42 # enough that it's worth doing parallel runs, few enough that it
43 43 # completes fairly quickly, includes both shell and Python scripts, and
44 44 # includes some scripts that run daemon processes.)
45 45
46 46 from __future__ import absolute_import, print_function
47 47
48 48 import argparse
49 49 import collections
50 50 import difflib
51 51 import distutils.version as version
52 52 import errno
53 53 import json
54 54 import os
55 55 import random
56 56 import re
57 57 import shutil
58 58 import signal
59 59 import socket
60 60 import subprocess
61 61 import sys
62 62 import sysconfig
63 63 import tempfile
64 64 import threading
65 65 import time
66 66 import unittest
67 67 import uuid
68 68 import xml.dom.minidom as minidom
69 69
# Python 2's Queue module was renamed to queue in Python 3.
try:
    import Queue as queue
except ImportError:
    import queue

# shlex.quote is the Python 3 spelling; pipes.quote the Python 2 one.
try:
    import shlex
    shellquote = shlex.quote
except (ImportError, AttributeError):
    import pipes
    shellquote = pipes.quote

# Python 2-only debugging knob: make implicit bytes/unicode coercion fail
# loudly.  reload()/setdefaultencoding do not exist as builtins on Python 3,
# hence the NameError guard.
if os.environ.get('RTUNICODEPEDANTRY', False):
    try:
        reload(sys)
        sys.setdefaultencoding("undefined")
    except NameError:
        pass

# Serializes subprocess creation (acquired/released in Popen4 below).
processlock = threading.Lock()
# Whether pygments is importable; gates all colorized output below.
pygmentspresent = False
# ANSI color is unsupported prior to Windows 10
if os.name != 'nt':
    try: # is pygments installed
        import pygments
        import pygments.lexers as lexers
        import pygments.lexer as lexer
        import pygments.formatters as formatters
        import pygments.token as token
        import pygments.style as style
        pygmentspresent = True
        # Shared highlighting objects used by highlightdiff().
        difflexer = lexers.DiffLexer()
        terminal256formatter = formatters.Terminal256Formatter()
    except ImportError:
        pass
if pygmentspresent:
    class TestRunnerStyle(style.Style):
        """Pygments style mapping our custom result tokens to colors."""
        default_style = ""
        # Custom token types for test-runner status lines; the names
        # correspond to the tokens emitted by TestRunnerLexer below.
        skipped = token.string_to_tokentype("Token.Generic.Skipped")
        failed = token.string_to_tokentype("Token.Generic.Failed")
        skippedname = token.string_to_tokentype("Token.Generic.SName")
        failedname = token.string_to_tokentype("Token.Generic.FName")
        styles = {
            skipped: '#e5e5e5',
            skippedname: '#00ffff',
            failed: '#7f0000',
            failedname: '#ff0000',
        }

    class TestRunnerLexer(lexer.RegexLexer):
        """Tokenize "Skipped"/"Failed"/"ERROR:" summary lines for coloring."""
        # A test file name (.t or .py), optionally with a #case suffix.
        testpattern = r'[\w-]+\.(t|py)(#[a-zA-Z0-9_\-\.]+)?'
        tokens = {
            'root': [
                (r'^Skipped', token.Generic.Skipped, 'skipped'),
                (r'^Failed ', token.Generic.Failed, 'failed'),
                (r'^ERROR: ', token.Generic.Failed, 'failed'),
            ],
            'skipped': [
                (testpattern, token.Generic.SName),
                (r':.*', token.Generic.Skipped),
            ],
            'failed': [
                (testpattern, token.Generic.FName),
                (r'(:| ).*', token.Generic.Failed),
            ]
        }

    # Module-level highlighting helpers used by highlightmsg().
    runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
    runnerlexer = TestRunnerLexer()
# Snapshot of the environment at startup, for later restoration/comparison.
origenviron = os.environ.copy()

if sys.version_info > (3, 5, 0):
    PYTHON3 = True
    xrange = range # we use xrange in one place, and we'd rather not use range
    def _bytespath(p):
        # UTF-8-encode a str path to bytes; passes None through unchanged.
        if p is None:
            return p
        return p.encode('utf-8')

    def _strpath(p):
        # Inverse of _bytespath: decode a bytes path to str; None-safe.
        if p is None:
            return p
        return p.decode('utf-8')

    osenvironb = getattr(os, 'environb', None)
    if osenvironb is None:
        # Windows lacks os.environb, for instance. A proxy over the real thing
        # instead of a copy allows the environment to be updated via bytes on
        # all platforms.
        class environbytes(object):
            """A bytes-keyed, dict-like proxy over a str-keyed environment."""
            def __init__(self, strenv):
                self.__len__ = strenv.__len__
                self.clear = strenv.clear
                self._strenv = strenv
            def __getitem__(self, k):
                v = self._strenv.__getitem__(_strpath(k))
                return _bytespath(v)
            def __setitem__(self, k, v):
                self._strenv.__setitem__(_strpath(k), _strpath(v))
            def __delitem__(self, k):
                self._strenv.__delitem__(_strpath(k))
            def __contains__(self, k):
                return self._strenv.__contains__(_strpath(k))
            def __iter__(self):
                return iter([_bytespath(k) for k in iter(self._strenv)])
            def get(self, k, default=None):
                v = self._strenv.get(_strpath(k), _strpath(default))
                return _bytespath(v)
            def pop(self, k, default=None):
                v = self._strenv.pop(_strpath(k), _strpath(default))
                return _bytespath(v)

        osenvironb = environbytes(os.environ)

    # os.getcwdb is unreliable on Windows (NT), so fall back to encoding
    # the str cwd there.
    getcwdb = getattr(os, 'getcwdb')
    if not getcwdb or os.name == 'nt':
        getcwdb = lambda: _bytespath(os.getcwd())

elif sys.version_info >= (3, 0, 0):
    # Python 3.0-3.4 are explicitly unsupported: bail out early.
    print('%s is only supported on Python 3.5+ and 2.7, not %s' %
          (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
    sys.exit(70) # EX_SOFTWARE from `man 3 sysexit`
else:
    PYTHON3 = False

    # In python 2.x, path operations are generally done using
    # bytestrings by default, so we don't have to do any extra
    # fiddling there. We define the wrapper functions anyway just to
    # help keep code consistent between platforms.
    def _bytespath(p):
        return p

    _strpath = _bytespath
    osenvironb = os.environ
    getcwdb = os.getcwd
# For Windows support
wifexited = getattr(os, "WIFEXITED", lambda x: False)

# Whether to use IPv6
def checksocketfamily(name, port=20058):
    """Return True if we can listen on localhost using family=name.

    name should be either 'AF_INET' or 'AF_INET6'.  The port already
    being in use (EADDRINUSE) still counts as success, since the
    address family itself evidently works.
    """
    family = getattr(socket, name, None)
    if family is None:
        return False
    try:
        sock = socket.socket(family, socket.SOCK_STREAM)
        sock.bind(('localhost', port))
        sock.close()
    except socket.error as exc:
        if exc.errno == errno.EADDRINUSE:
            return True
        if exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
            return False
        raise
    return True
236 236
# useipv6 will be set by parseargs
useipv6 = None

def checkportisavailable(port):
    """Return True if `port` seems free to bind on localhost."""
    family = socket.AF_INET6 if useipv6 else socket.AF_INET
    try:
        sock = socket.socket(family, socket.SOCK_STREAM)
        sock.bind(('localhost', port))
        sock.close()
    except socket.error as exc:
        # A port that is taken (or an unusable family) is simply "not
        # available"; anything else is a real error worth propagating.
        if exc.errno not in (errno.EADDRINUSE, errno.EADDRNOTAVAIL,
                             errno.EPROTONOSUPPORT):
            raise
        return False
    return True
256 256
closefds = os.name == 'posix'
def Popen4(cmd, wd, timeout, env=None):
    """Spawn `cmd` through the shell in directory `wd`; return the Popen.

    stdout and stderr are merged into a single pipe.  If `timeout` is
    truthy, a watchdog thread sets p.timeout and terminates the process
    if it is still running when the deadline passes.  popen2-style
    aliases (fromchild/tochild/childerr) are attached for callers
    written against that older API.
    """
    # Process creation is serialized under processlock -- presumably to
    # avoid fd-inheritance races between concurrent spawns; TODO confirm.
    processlock.acquire()
    p = subprocess.Popen(_strpath(cmd), shell=True, bufsize=-1,
                         cwd=_strpath(wd), env=env,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    processlock.release()

    p.fromchild = p.stdout
    p.tochild = p.stdin
    p.childerr = p.stderr

    p.timeout = False
    if timeout:
        def t():
            # Poll until the deadline passes or p.returncode gets set
            # (by a caller's wait()/poll()).
            start = time.time()
            while time.time() - start < timeout and p.returncode is None:
                time.sleep(.1)
            p.timeout = True
            if p.returncode is None:
                terminate(p)
        threading.Thread(target=t).start()

    return p
283 283
# Path of the running Python interpreter, as bytes with forward slashes.
PYTHON = _bytespath(sys.executable.replace('\\', '/'))
# Environment variable naming the module search path for the tested
# implementation (Jython uses its own).
IMPL_PATH = b'PYTHONPATH'
if 'java' in sys.platform:
    IMPL_PATH = b'JYTHONPATH'

# Option defaults: option name -> (environment override variable, default).
# getparser() resolves each entry against the environment.
defaults = {
    'jobs': ('HGTEST_JOBS', 1),
    'timeout': ('HGTEST_TIMEOUT', 180),
    'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500),
    'port': ('HGTEST_PORT', 20059),
    'shell': ('HGTEST_SHELL', 'sh'),
}
296 296
def canonpath(path):
    """Return `path` with '~' expanded and symlinks/'..' fully resolved."""
    expanded = os.path.expanduser(path)
    return os.path.realpath(expanded)
299 299
def parselistfiles(files, listtype, warn=True):
    """Parse blacklist/whitelist files into a {test name: source file} dict.

    Each non-blank line of each file names one test; b'#' starts a
    comment.  A missing file is skipped, with a warning unless `warn`
    is False; other I/O errors propagate.
    """
    entries = {}
    for filename in files:
        path = os.path.expanduser(os.path.expandvars(filename))
        try:
            fp = open(path, "rb")
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            if warn:
                print("warning: no such %s file: %s" % (listtype, filename))
            continue
        with fp:
            for rawline in fp:
                name = rawline.split(b'#', 1)[0].strip()
                if name:
                    entries[name] = filename
    return entries
320 320
def parsettestcases(path):
    """Read a .t test file and return its "#testcases" declarations.

    The result is a list with one entry per "#testcases" line, each
    entry being the sorted list of case names (bytes) on that line.
    A missing file yields an empty list.
    """
    cases = []
    try:
        fp = open(path, 'rb')
    except IOError as ex:
        if ex.errno != errno.ENOENT:
            raise
        return cases
    with fp:
        for line in fp:
            if line.startswith(b'#testcases '):
                cases.append(sorted(line[11:].split()))
    return cases
336 336
def getparser():
    """Obtain the OptionParser used by the CLI.

    Side effect: resolves the module-level ``defaults`` dict against the
    corresponding HGTEST_* environment variables and installs the results
    as parser defaults.
    """
    parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]')

    # Options selecting which tests run.
    selection = parser.add_argument_group('Test Selection')
    selection.add_argument('--allow-slow-tests', action='store_true',
        help='allow extremely slow tests')
    selection.add_argument("--blacklist", action="append",
        help="skip tests listed in the specified blacklist file")
    selection.add_argument("--changed",
        help="run tests that are changed in parent rev or working directory")
    selection.add_argument("-k", "--keywords",
        help="run tests matching keywords")
    selection.add_argument("-r", "--retest", action="store_true",
        help = "retest failed tests")
    selection.add_argument("--test-list", action="append",
        help="read tests to run from the specified file")
    selection.add_argument("--whitelist", action="append",
        help="always run tests listed in the specified whitelist file")
    selection.add_argument('tests', metavar='TESTS', nargs='*',
        help='Tests to run')

    # Options controlling how the harness itself behaves.
    harness = parser.add_argument_group('Test Harness Behavior')
    harness.add_argument('--bisect-repo',
        metavar='bisect_repo',
        help=("Path of a repo to bisect. Use together with "
              "--known-good-rev"))
    harness.add_argument("-d", "--debug", action="store_true",
        help="debug mode: write output of test scripts to console"
             " rather than capturing and diffing it (disables timeout)")
    harness.add_argument("-f", "--first", action="store_true",
        help="exit on the first test failure")
    harness.add_argument("-i", "--interactive", action="store_true",
        help="prompt to accept changed output")
    harness.add_argument("-j", "--jobs", type=int,
        help="number of jobs to run in parallel"
             " (default: $%s or %d)" % defaults['jobs'])
    harness.add_argument("--keep-tmpdir", action="store_true",
        help="keep temporary directory after running tests")
    harness.add_argument('--known-good-rev',
        metavar="known_good_rev",
        help=("Automatically bisect any failures using this "
              "revision as a known-good revision."))
    harness.add_argument("--list-tests", action="store_true",
        help="list tests instead of running them")
    harness.add_argument("--loop", action="store_true",
        help="loop tests repeatedly")
    harness.add_argument('--random', action="store_true",
        help='run tests in random order')
    harness.add_argument('--order-by-runtime', action="store_true",
        help='run slowest tests first, according to .testtimes')
    harness.add_argument("-p", "--port", type=int,
        help="port on which servers should listen"
             " (default: $%s or %d)" % defaults['port'])
    harness.add_argument('--profile-runner', action='store_true',
        help='run statprof on run-tests')
    harness.add_argument("-R", "--restart", action="store_true",
        help="restart at last error")
    harness.add_argument("--runs-per-test", type=int, dest="runs_per_test",
        help="run each test N times (default=1)", default=1)
    harness.add_argument("--shell",
        help="shell to use (default: $%s or %s)" % defaults['shell'])
    harness.add_argument('--showchannels', action='store_true',
        help='show scheduling channels')
    harness.add_argument("--slowtimeout", type=int,
        help="kill errant slow tests after SLOWTIMEOUT seconds"
             " (default: $%s or %d)" % defaults['slowtimeout'])
    harness.add_argument("-t", "--timeout", type=int,
        help="kill errant tests after TIMEOUT seconds"
             " (default: $%s or %d)" % defaults['timeout'])
    harness.add_argument("--tmpdir",
        help="run tests in the given temporary directory"
             " (implies --keep-tmpdir)")
    harness.add_argument("-v", "--verbose", action="store_true",
        help="output verbose messages")

    # Options selecting which Mercurial gets tested and how it's configured.
    hgconf = parser.add_argument_group('Mercurial Configuration')
    hgconf.add_argument("--chg", action="store_true",
        help="install and use chg wrapper in place of hg")
    hgconf.add_argument("--compiler",
        help="compiler to build with")
    hgconf.add_argument('--extra-config-opt', action="append", default=[],
        help='set the given config opt in the test hgrc')
    hgconf.add_argument("-l", "--local", action="store_true",
        help="shortcut for --with-hg=<testdir>/../hg, "
             "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set")
    hgconf.add_argument("--ipv6", action="store_true",
        help="prefer IPv6 to IPv4 for network related tests")
    hgconf.add_argument("--pure", action="store_true",
        help="use pure Python code instead of C extensions")
    hgconf.add_argument("-3", "--py3k-warnings", action="store_true",
        help="enable Py3k warnings on Python 2.7+")
    hgconf.add_argument("--with-chg", metavar="CHG",
        help="use specified chg wrapper in place of hg")
    hgconf.add_argument("--with-hg",
        metavar="HG",
        help="test using specified hg script rather than a "
             "temporary installation")

    # Options controlling result reporting and coverage.
    reporting = parser.add_argument_group('Results Reporting')
    reporting.add_argument("-C", "--annotate", action="store_true",
        help="output files annotated with coverage")
    reporting.add_argument("--color", choices=["always", "auto", "never"],
        default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
        help="colorisation: always|auto|never (default: auto)")
    reporting.add_argument("-c", "--cover", action="store_true",
        help="print a test coverage report")
    reporting.add_argument('--exceptions', action='store_true',
        help='log all exceptions and generate an exception report')
    reporting.add_argument("-H", "--htmlcov", action="store_true",
        help="create an HTML report of the coverage of the files")
    reporting.add_argument("--json", action="store_true",
        help="store test result data in 'report.json' file")
    reporting.add_argument("--outputdir",
        help="directory to write error logs to (default=test directory)")
    reporting.add_argument("-n", "--nodiff", action="store_true",
        help="skip showing test changes")
    reporting.add_argument("-S", "--noskips", action="store_true",
        help="don't report skip tests verbosely")
    reporting.add_argument("--time", action="store_true",
        help="time how long each test takes")
    reporting.add_argument("--view",
        help="external diff viewer")
    reporting.add_argument("--xunit",
        help="record xunit results at specified path")

    # Resolve each default from its environment variable (if set), coercing
    # to the type of the built-in default value.
    for option, (envvar, default) in defaults.items():
        defaults[option] = type(default)(os.environ.get(envvar, default))
    parser.set_defaults(**defaults)

    return parser
468 468
def parseargs(args, parser):
    """Parse arguments with our OptionParser and validate results.

    Returns the parsed options namespace.  Also sets the module-level
    ``useipv6`` and ``verbose`` globals as side effects, and resolves
    --local / --with-hg / --with-chg into concrete binary paths.
    Invalid combinations abort via parser.error().
    """
    options = parser.parse_args(args)

    # jython is always pure
    if 'java' in sys.platform or '__pypy__' in sys.modules:
        options.pure = True

    if options.with_hg:
        options.with_hg = canonpath(_bytespath(options.with_hg))
        if not (os.path.isfile(options.with_hg) and
                os.access(options.with_hg, os.X_OK)):
            parser.error('--with-hg must specify an executable hg script')
        if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
            sys.stderr.write('warning: --with-hg should specify an hg script\n')
    if options.local:
        # Resolve hg (and optionally chg) relative to the repository root.
        testdir = os.path.dirname(_bytespath(canonpath(sys.argv[0])))
        reporootdir = os.path.dirname(testdir)
        pathandattrs = [(b'hg', 'with_hg')]
        if options.chg:
            pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
        for relpath, attr in pathandattrs:
            binpath = os.path.join(reporootdir, relpath)
            if os.name != 'nt' and not os.access(binpath, os.X_OK):
                parser.error('--local specified, but %r not found or '
                             'not executable' % binpath)
            setattr(options, attr, binpath)

    if (options.chg or options.with_chg) and os.name == 'nt':
        parser.error('chg does not work on %s' % os.name)
    if options.with_chg:
        options.chg = False # no installation to temporary location
        options.with_chg = canonpath(_bytespath(options.with_chg))
        if not (os.path.isfile(options.with_chg) and
                os.access(options.with_chg, os.X_OK)):
            parser.error('--with-chg must specify a chg executable')
    if options.chg and options.with_hg:
        # chg shares installation location with hg
        parser.error('--chg does not work when --with-hg is specified '
                     '(use --with-chg instead)')

    if options.color == 'always' and not pygmentspresent:
        sys.stderr.write('warning: --color=always ignored because '
                         'pygments is not installed\n')

    if options.bisect_repo and not options.known_good_rev:
        parser.error("--bisect-repo cannot be used without --known-good-rev")

    global useipv6
    if options.ipv6:
        useipv6 = checksocketfamily('AF_INET6')
    else:
        # only use IPv6 if IPv4 is unavailable and IPv6 is available
        useipv6 = ((not checksocketfamily('AF_INET'))
                   and checksocketfamily('AF_INET6'))

    options.anycoverage = options.cover or options.annotate or options.htmlcov
    if options.anycoverage:
        try:
            import coverage
            covver = version.StrictVersion(coverage.__version__).version
            if covver < (3, 3):
                parser.error('coverage options require coverage 3.3 or later')
        except ImportError:
            parser.error('coverage options now require the coverage package')

    if options.anycoverage and options.local:
        # this needs some path mangling somewhere, I guess
        parser.error("sorry, coverage options do not work when --local "
                     "is specified")

    if options.anycoverage and options.with_hg:
        parser.error("sorry, coverage options do not work when --with-hg "
                     "is specified")

    global verbose
    if options.verbose:
        # Empty string (not False) flags verbosity; log() prints it.
        verbose = ''

    if options.tmpdir:
        options.tmpdir = canonpath(options.tmpdir)

    if options.jobs < 1:
        parser.error('--jobs must be positive')
    if options.interactive and options.debug:
        parser.error("-i/--interactive and -d/--debug are incompatible")
    if options.debug:
        # --debug disables timeouts entirely; warn if the user also set them.
        if options.timeout != defaults['timeout']:
            sys.stderr.write(
                'warning: --timeout option ignored with --debug\n')
        if options.slowtimeout != defaults['slowtimeout']:
            sys.stderr.write(
                'warning: --slowtimeout option ignored with --debug\n')
        options.timeout = 0
        options.slowtimeout = 0
    if options.py3k_warnings:
        if PYTHON3:
            parser.error(
                '--py3k-warnings can only be used on Python 2.7')

    if options.blacklist:
        options.blacklist = parselistfiles(options.blacklist, 'blacklist')
    if options.whitelist:
        options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
    else:
        options.whitelisted = {}

    if options.showchannels:
        options.nodiff = True

    return options
580 580
def rename(src, dst):
    """Move `src` to `dst` via copy-then-delete.

    Unlike os.rename(), this tolerates a pre-existing destination and
    open file handles on it, at the cost of atomicity.
    """
    shutil.copy(src, dst)
    os.remove(src)
587 587
# On Python 3, difflib.unified_diff can't diff bytes directly; diff_bytes
# adapts it to bytes inputs.
_unified_diff = difflib.unified_diff
if PYTHON3:
    import functools
    _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)

def getdiff(expected, output, ref, err):
    """Unified-diff `expected` against `output` (lists of bytes lines).

    `ref` and `err` label the from/to files in the diff header.  Returns
    (servefail, lines): `lines` is the normalized diff, and `servefail`
    is True if the diff shows a daemon child process that failed to
    start.  Header lines get backslashes flipped to forward slashes and
    a trailing space stripped, so diffs are stable across platforms.
    """
    servefail = False
    lines = []
    for line in _unified_diff(expected, output, ref, err):
        if line.startswith(b'+++') or line.startswith(b'---'):
            line = line.replace(b'\\', b'/')
            if line.endswith(b' \n'):
                line = line[:-2] + b'\n'
        lines.append(line)
        if not servefail and line.startswith(
            b'+ abort: child process failed to start'):
            servefail = True

    return servefail, lines
607 607
# Verbosity flag: False means quiet; parseargs() replaces it with ''
# (or another prefix string) when --verbose is given.
verbose = False
def vlog(*msg):
    """Log only when in verbose mode."""
    if verbose is False:
        return

    return log(*msg)
615 615
# Bytes that break XML even in a CDATA block: control characters 0-31
# sans \t, \n and \r
CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")

# Match feature conditionalized output lines in the form, capturing the feature
# list in group 2, and the preceding line output in group 1:
#
#   output..output (feature !)\n
#
# NOTE: this must be a raw bytes literal.  In a non-raw literal '\(' is an
# invalid string escape (DeprecationWarning, and a SyntaxWarning on newer
# Pythons); '\n' still means newline here because the regex engine itself
# interprets the \n escape.
optline = re.compile(br'(.*) \((.+?) !\)\n$')

def cdatasafe(data):
    """Make a string safe to include in a CDATA block.

    Certain control characters are illegal in a CDATA block, and
    there's no way to include a ]]> in a CDATA either. This function
    replaces illegal bytes with ? and adds a space between the ]] so
    that it won't break the CDATA block.
    """
    return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>')
635 635
def log(*msg):
    """Log something to stdout.

    Arguments are strings to print.
    """
    # iolock (defined elsewhere in this file) serializes output across
    # concurrent test threads.
    with iolock:
        if verbose:
            # verbose doubles as an optional line prefix (see vlog).
            print(verbose, end=' ')
        for m in msg:
            print(m, end=' ')
        print()
        sys.stdout.flush()
648 648
def highlightdiff(line, color):
    """Return diff `line` (bytes) ANSI-colorized via pygments if `color`."""
    if not color:
        return line
    assert pygmentspresent
    # latin1 round-trips arbitrary bytes, so highlighting never fails on
    # non-UTF-8 test output.
    return pygments.highlight(line.decode('latin1'), difflexer,
                              terminal256formatter).encode('latin1')

def highlightmsg(msg, color):
    """Return runner status message `msg` (str) colorized if `color`."""
    if not color:
        return msg
    assert pygmentspresent
    return pygments.highlight(msg, runnerlexer, runnerformatter)
661 661
def terminate(proc):
    """Terminate subprocess (ignoring it having already exited)."""
    vlog('# Terminating process %d' % proc.pid)
    try:
        proc.terminate()
    except OSError:
        pass

def killdaemons(pidfile):
    """Kill the daemons whose pids are listed in `pidfile`.

    Delegates to the killdaemons helper module that ships alongside the
    test suite; the pid file is removed afterwards (remove=True).
    """
    import killdaemons as killmod
    return killmod.killdaemons(pidfile, tryhard=False, remove=True,
                               logfn=vlog)
674 674
675 675 class Test(unittest.TestCase):
676 676 """Encapsulates a single, runnable test.
677 677
678 678 While this class conforms to the unittest.TestCase API, it differs in that
679 679 instances need to be instantiated manually. (Typically, unittest.TestCase
680 680 classes are instantiated automatically by scanning modules.)
681 681 """
682 682
683 683 # Status code reserved for skipped tests (used by hghave).
684 684 SKIPPED_STATUS = 80
685 685
    def __init__(self, path, outputdir, tmpdir, keeptmpdir=False,
                 debug=False,
                 first=False,
                 timeout=None,
                 startport=None, extraconfigopts=None,
                 py3kwarnings=False, shell=None, hgcommand=None,
                 slowtimeout=None, usechg=False,
                 useipv6=False):
        """Create a test from parameters.

        path is the full path to the file defining the test.

        outputdir is where .err output files are written.

        tmpdir is the main temporary directory to use for this test.

        keeptmpdir determines whether to keep the test's temporary directory
        after execution. It defaults to removal (False).

        debug mode will make the test execute verbosely, with unfiltered
        output.

        first, if True, is presumably used to stop at the first failure --
        only stored here; TODO confirm against the code using self._first.

        timeout controls the maximum run time of the test. It is ignored when
        debug is True. See slowtimeout for tests with #require slow.

        slowtimeout overrides timeout if the test has #require slow.

        startport controls the starting port number to use for this test. Each
        test will reserve 3 port numbers for execution. It is the caller's
        responsibility to allocate a non-overlapping port range to Test
        instances.

        extraconfigopts is an iterable of extra hgrc config options. Values
        must have the form "key=value" (something understood by hgrc). Values
        of the form "foo.key=value" will result in "[foo] key=value".

        py3kwarnings enables Py3k warnings.

        shell is the shell to execute tests in.

        hgcommand is the hg binary name to run (defaults to b'hg');
        usechg/useipv6 toggle chg and IPv6 usage for this test.
        """
        # Fall back to the environment-derived defaults for unset limits.
        if timeout is None:
            timeout = defaults['timeout']
        if startport is None:
            startport = defaults['port']
        if slowtimeout is None:
            slowtimeout = defaults['slowtimeout']
        self.path = path
        self.bname = os.path.basename(path)
        self.name = _strpath(self.bname)
        self._testdir = os.path.dirname(path)
        self._outputdir = outputdir
        self._tmpname = os.path.basename(path)
        # Where this test's failure output (.err file) gets written.
        self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)

        self._threadtmp = tmpdir
        self._keeptmpdir = keeptmpdir
        self._debug = debug
        self._first = first
        self._timeout = timeout
        self._slowtimeout = slowtimeout
        self._startport = startport
        self._extraconfigopts = extraconfigopts or []
        self._py3kwarnings = py3kwarnings
        self._shell = _bytespath(shell)
        self._hgcommand = hgcommand or b'hg'
        self._usechg = usechg
        self._useipv6 = useipv6

        # Per-run state, (re)initialized in setUp().
        self._aborted = False
        self._daemonpids = []
        self._finished = None
        self._ret = None
        self._out = None
        self._skipped = None
        self._testtmp = None
        self._chgsockdir = None

        self._refout = self.readrefout()
762 762
    def readrefout(self):
        """read reference output

        Returns None in --debug mode (to match "out is None"), the
        reference file's lines (keeping line endings) if it exists, or
        an empty list otherwise.
        """
        # If we're not in --debug mode and reference output file exists,
        # check test output against it.
        if self._debug:
            return None # to match "out is None"
        elif os.path.exists(self.refpath):
            # NOTE(review): self.refpath is not set in this base class --
            # presumably provided by subclasses; confirm.
            with open(self.refpath, 'rb') as f:
                return f.read().splitlines(True)
        else:
            return []
774 774
    # needed to get base class __repr__ running
    @property
    def _testMethodName(self):
        return self.name

    def __str__(self):
        return self.name

    def shortDescription(self):
        # unittest shows this one-liner next to results.
        return self.name
785 785
    def setUp(self):
        """Tasks to perform before run()."""
        # Reset per-run state so a Test instance can be run repeatedly.
        self._finished = False
        self._ret = None
        self._out = None
        self._skipped = None

        # The per-thread temp dir may already exist when tests share it.
        try:
            os.mkdir(self._threadtmp)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
        
        name = self._tmpname
        self._testtmp = os.path.join(self._threadtmp, name)
        os.mkdir(self._testtmp)

        # Remove any previous output files.
        if os.path.exists(self.errpath):
            try:
                os.remove(self.errpath)
            except OSError as e:
                # We might have raced another test to clean up a .err
                # file, so ignore ENOENT when removing a previous .err
                # file.
                if e.errno != errno.ENOENT:
                    raise

        if self._usechg:
            # chg needs its own socket directory per test.
            self._chgsockdir = os.path.join(self._threadtmp,
                                            b'%s.chgsock' % name)
            os.mkdir(self._chgsockdir)
818 818
    def run(self, result):
        """Run this test and report results against a TestResult instance."""
        # This function is extremely similar to unittest.TestCase.run(). Once
        # we require Python 2.7 (or at least its version of unittest), this
        # function can largely go away.
        self._result = result
        result.startTest(self)
        try:
            try:
                self.setUp()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                return

            success = False
            try:
                self.runTest()
            except KeyboardInterrupt:
                self._aborted = True
                raise
            except unittest.SkipTest as e:
                result.addSkip(self, str(e))
                # The base class will have already counted this as a
                # test we "ran", but we want to exclude skipped tests
                # from those we count towards those run.
                result.testsRun -= 1
            except self.failureException as e:
                # This differs from unittest in that we don't capture
                # the stack trace. This is for historical reasons and
                # this decision could be revisited in the future,
                # especially for PythonTest instances.
                if result.addFailure(self, str(e)):
                    # addFailure returning True means the failure was
                    # accepted (e.g. interactively), so count it a success.
                    success = True
            except Exception:
                result.addError(self, sys.exc_info())
            else:
                success = True

            try:
                self.tearDown()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                success = False

            if success:
                result.addSuccess(self)
        finally:
            result.stopTest(self, interrupted=self._aborted)
873 873
    def runTest(self):
        """Run this test instance.

        This will return a tuple describing the result of the test.
        """
        env = self._getenv()
        self._genrestoreenv(env)
        # Track the daemon pid file so tearDown() can kill leftovers.
        self._daemonpids.append(env['DAEMON_PIDS'])
        self._createhgrc(env['HGRCPATH'])

        vlog('# Test', self.name)

        ret, out = self._run(env)
        self._finished = True
        self._ret = ret
        self._out = out

        def describe(ret):
            # Human-readable description of a child exit status.
            if ret < 0:
                return 'killed by signal: %d' % -ret
            return 'returned error code %d' % ret

        self._skipped = False

        if ret == self.SKIPPED_STATUS:
            if out is None: # Debug mode, nothing to parse.
                missing = ['unknown']
                failed = None
            else:
                missing, failed = TTest.parsehghaveoutput(out)

            if not missing:
                missing = ['skipped']

            if failed:
                self.fail('hg have failed checking for %s' % failed[-1])
            else:
                self._skipped = True
                raise unittest.SkipTest(missing[-1])
        elif ret == 'timeout':
            self.fail('timed out')
        elif ret is False:
            self.fail('no result code from test')
        elif out != self._refout:
            # Diff generation may rely on written .err file.
            if (ret != 0 or out != self._refout) and not self._skipped \
                and not self._debug:
                with open(self.errpath, 'wb') as f:
                    for line in out:
                        f.write(line)

            # The result object handles diff calculation for us.
            # firstlock serializes "first failure" bookkeeping across
            # test threads.
            with firstlock:
                if self._result.addOutputMismatch(self, ret, out, self._refout):
                    # change was accepted, skip failing
                    return
                if self._first:
                    global firsterror
                    firsterror = True

            if ret:
                msg = 'output changed and ' + describe(ret)
            else:
                msg = 'output changed'

            self.fail(msg)
        elif ret:
            self.fail(describe(ret))
942 942
    def tearDown(self):
        """Tasks to perform after run()."""
        # Make sure daemons spawned by the test do not outlive it.
        for entry in self._daemonpids:
            killdaemons(entry)
        self._daemonpids = []

        if self._keeptmpdir:
            log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
                (self._testtmp.decode('utf-8'),
                 self._threadtmp.decode('utf-8')))
        else:
            shutil.rmtree(self._testtmp, True)
            shutil.rmtree(self._threadtmp, True)

        if self._usechg:
            # chgservers will stop automatically after they find the socket
            # files are deleted
            shutil.rmtree(self._chgsockdir, True)

        # Persist the output of failed, non-skipped runs so the .err file
        # survives for later inspection/diffing.
        if (self._ret != 0 or self._out != self._refout) and not self._skipped \
            and not self._debug and self._out:
            with open(self.errpath, 'wb') as f:
                for line in self._out:
                    f.write(line)

        vlog("# Ret was:", self._ret, '(%s)' % self.name)
969 969
    def _run(self, env):
        """Run the actual test body; subclasses must override."""
        # This should be implemented in child classes to run tests.
        raise unittest.SkipTest('unknown test type')
973 973
    def abort(self):
        """Terminate execution of this test."""
        # _run() implementations check this flag after their command
        # finishes and convert it into a KeyboardInterrupt.
        self._aborted = True
977 977
978 978 def _portmap(self, i):
979 979 offset = b'' if i == 0 else b'%d' % i
980 980 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
981 981
    def _getreplacements(self):
        """Obtain a mapping of text replacements to apply to test output.

        Test output needs to be normalized so it can be compared to expected
        output. This function defines how some of that normalization will
        occur.
        """
        r = [
            # This list should be parallel to defineport in _getenv
            self._portmap(0),
            self._portmap(1),
            self._portmap(2),
            # Replace the loopback address with $LOCALIP (the leading
            # non-digit is preserved via the backreference).
            (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
            (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
        ]
        r.append((self._escapepath(self._testtmp), b'$TESTTMP'))

        # Optional per-suite substitutions shipped alongside the tests.
        # NOTE(review): this executes common-pattern.py from the test
        # directory; it is assumed to be trusted suite content.
        replacementfile = os.path.join(self._testdir, b'common-pattern.py')

        if os.path.exists(replacementfile):
            data = {}
            with open(replacementfile, mode='rb') as source:
                # the intermediate 'compile' step help with debugging
                code = compile(source.read(), replacementfile, 'exec')
                exec(code, data)
            for value in data.get('substitutions', ()):
                # Each substitution must be a (pattern, replacement) pair.
                if len(value) != 2:
                    msg = 'malformatted substitution in %s: %r'
                    msg %= (replacementfile, value)
                    raise ValueError(msg)
                r.append(value)
        return r
1014 1014
1015 1015 def _escapepath(self, p):
1016 1016 if os.name == 'nt':
1017 1017 return (
1018 1018 (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
1019 1019 c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
1020 1020 for c in [p[i:i + 1] for i in range(len(p))]))
1021 1021 )
1022 1022 else:
1023 1023 return re.escape(p)
1024 1024
1025 1025 def _localip(self):
1026 1026 if self._useipv6:
1027 1027 return b'::1'
1028 1028 else:
1029 1029 return b'127.0.0.1'
1030 1030
    def _genrestoreenv(self, testenv):
        """Generate a script that can be used by tests to restore the original
        environment."""
        # Put the restoreenv script inside self._threadtmp
        scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
        testenv['HGTEST_RESTOREENV'] = _strpath(scriptpath)

        # Only restore environment variable names that the shell allows
        # us to export.
        name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')

        # Do not restore these variables; otherwise tests would fail.
        reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}

        with open(scriptpath, 'w') as envf:
            # Re-export every original variable...
            for name, value in origenviron.items():
                if not name_regex.match(name):
                    # Skip environment variables with unusual names not
                    # allowed by most shells.
                    continue
                if name in reqnames:
                    continue
                envf.write('%s=%s\n' % (name, shellquote(value)))

            # ...and unset anything the test runner added on top.
            for name in testenv:
                if name in origenviron or name in reqnames:
                    continue
                envf.write('unset %s\n' % (name,))
1059 1059
1060 1060 def _getenv(self):
1061 1061 """Obtain environment variables to use during test execution."""
1062 1062 def defineport(i):
1063 1063 offset = '' if i == 0 else '%s' % i
1064 1064 env["HGPORT%s" % offset] = '%s' % (self._startport + i)
1065 1065 env = os.environ.copy()
1066 1066 env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
1067 1067 env['HGEMITWARNINGS'] = '1'
1068 1068 env['TESTTMP'] = _strpath(self._testtmp)
1069 1069 env['TESTNAME'] = self.name
1070 1070 env['HOME'] = _strpath(self._testtmp)
1071 1071 # This number should match portneeded in _getport
1072 1072 for port in xrange(3):
1073 1073 # This list should be parallel to _portmap in _getreplacements
1074 1074 defineport(port)
1075 1075 env["HGRCPATH"] = _strpath(os.path.join(self._threadtmp, b'.hgrc'))
1076 1076 env["DAEMON_PIDS"] = _strpath(os.path.join(self._threadtmp,
1077 1077 b'daemon.pids'))
1078 1078 env["HGEDITOR"] = ('"' + sys.executable + '"'
1079 1079 + ' -c "import sys; sys.exit(0)"')
1080 1080 env["HGMERGE"] = "internal:merge"
1081 1081 env["HGUSER"] = "test"
1082 1082 env["HGENCODING"] = "ascii"
1083 1083 env["HGENCODINGMODE"] = "strict"
1084 1084 env["HGHOSTNAME"] = "test-hostname"
1085 1085 env['HGIPV6'] = str(int(self._useipv6))
1086 1086 if 'HGCATAPULTSERVERPIPE' not in env:
1087 1087 env['HGCATAPULTSERVERPIPE'] = os.devnull
1088 1088
1089 1089 extraextensions = []
1090 1090 for opt in self._extraconfigopts:
1091 1091 section, key = opt.encode('utf-8').split(b'.', 1)
1092 1092 if section != 'extensions':
1093 1093 continue
1094 1094 name = key.split(b'=', 1)[0]
1095 1095 extraextensions.append(name)
1096 1096
1097 1097 if extraextensions:
1098 1098 env['HGTESTEXTRAEXTENSIONS'] = b' '.join(extraextensions)
1099 1099
1100 1100 # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
1101 1101 # IP addresses.
1102 1102 env['LOCALIP'] = _strpath(self._localip())
1103 1103
1104 1104 # Reset some environment variables to well-known values so that
1105 1105 # the tests produce repeatable output.
1106 1106 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
1107 1107 env['TZ'] = 'GMT'
1108 1108 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
1109 1109 env['COLUMNS'] = '80'
1110 1110 env['TERM'] = 'xterm'
1111 1111
1112 1112 for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
1113 1113 'HGPLAIN HGPLAINEXCEPT EDITOR VISUAL PAGER ' +
1114 1114 'NO_PROXY CHGDEBUG').split():
1115 1115 if k in env:
1116 1116 del env[k]
1117 1117
1118 1118 # unset env related to hooks
1119 1119 for k in list(env):
1120 1120 if k.startswith('HG_'):
1121 1121 del env[k]
1122 1122
1123 1123 if self._usechg:
1124 1124 env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
1125 1125
1126 1126 return env
1127 1127
    def _createhgrc(self, path):
        """Create an hgrc file for this test."""
        with open(path, 'wb') as hgrc:
            hgrc.write(b'[ui]\n')
            hgrc.write(b'slash = True\n')
            hgrc.write(b'interactive = False\n')
            hgrc.write(b'mergemarkers = detailed\n')
            hgrc.write(b'promptecho = True\n')
            hgrc.write(b'[defaults]\n')
            hgrc.write(b'[devel]\n')
            hgrc.write(b'all-warnings = true\n')
            hgrc.write(b'default-date = 0 0\n')
            hgrc.write(b'[largefiles]\n')
            hgrc.write(b'usercache = %s\n' %
                       (os.path.join(self._testtmp, b'.cache/largefiles')))
            hgrc.write(b'[lfs]\n')
            hgrc.write(b'usercache = %s\n' %
                       (os.path.join(self._testtmp, b'.cache/lfs')))
            hgrc.write(b'[web]\n')
            hgrc.write(b'address = localhost\n')
            hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii'))
            hgrc.write(b'server-header = testing stub value\n')

            # User-supplied extra config options go last so they can
            # override the defaults written above.
            for opt in self._extraconfigopts:
                section, key = opt.encode('utf-8').split(b'.', 1)
                assert b'=' in key, ('extra config opt %s must '
                                     'have an = for assignment' % opt)
                hgrc.write(b'[%s]\n%s\n' % (section, key))
1156 1156
1157 1157 def fail(self, msg):
1158 1158 # unittest differentiates between errored and failed.
1159 1159 # Failed is denoted by AssertionError (by default at least).
1160 1160 raise AssertionError(msg)
1161 1161
    def _runcommand(self, cmd, env, normalizenewlines=False):
        """Run command in a sub-process, capturing the output (stdout and
        stderr).

        Return a tuple (exitcode, output). output is None in debug mode.
        """
        if self._debug:
            # Debug mode: share our stdio with the child; nothing is
            # captured, so there is no output to post-process.
            proc = subprocess.Popen(_strpath(cmd), shell=True,
                                    cwd=_strpath(self._testtmp),
                                    env=env)
            ret = proc.wait()
            return (ret, None)

        proc = Popen4(cmd, self._testtmp, self._timeout, env)
        def cleanup():
            # Kill the child (and any daemons it spawned) and synthesize a
            # non-zero status if it claimed success anyway.
            terminate(proc)
            ret = proc.wait()
            if ret == 0:
                ret = signal.SIGTERM << 8
            killdaemons(env['DAEMON_PIDS'])
            return ret

        output = b''
        proc.tochild.close()

        try:
            output = proc.fromchild.read()
        except KeyboardInterrupt:
            vlog('# Handling keyboard interrupt')
            cleanup()
            raise

        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)

        if proc.timeout:
            ret = 'timeout'

        if ret:
            killdaemons(env['DAEMON_PIDS'])

        # Normalize the raw output with the configured replacements so it
        # can be compared against the reference output.
        for s, r in self._getreplacements():
            output = re.sub(s, r, output)

        if normalizenewlines:
            output = output.replace(b'\r\n', b'\n')

        return ret, output.splitlines(True)
1211 1211
class PythonTest(Test):
    """A test implemented as a Python script, with reference output kept
    in a sibling .out file."""

    @property
    def refpath(self):
        """Path of the .out file holding this test's reference output."""
        return os.path.join(self._testdir, b'%s.out' % self.bname)

    def _run(self, env):
        """Execute the script with the test Python and collect its output."""
        switches = b' -3' if self._py3kwarnings else b''
        cmd = b'"%s"%s "%s"' % (PYTHON, switches, self.path)
        vlog("# Running", cmd)
        # Python emits \r\n on Windows; normalize so comparisons against
        # the .out file work.
        result = self._runcommand(cmd, env,
                                  normalizenewlines=(os.name == 'nt'))
        if self._aborted:
            raise KeyboardInterrupt()

        return result
1230 1230
# Some glob patterns apply only in some circumstances, so the script
# might want to remove (glob) annotations that otherwise should be
# retained. Consulted by TTest.globmatch to decide whether a literal
# match still legitimately needs its (glob) marker.
checkcodeglobpats = [
    # On Windows it looks like \ doesn't require a (glob), but we know
    # better.
    re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
    re.compile(br'^moving \S+/.*[^)]$'),
    re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
    # Not all platforms have 127.0.0.1 as loopback (though most do),
    # so we always glob that too.
    re.compile(br'.*\$LOCALIP.*$'),
]
1244 1244
# bchr(i) -> single-byte bytes object for integer i. On Python 2, chr()
# already returns a byte string; Python 3 needs explicit construction.
bchr = chr
if PYTHON3:
    bchr = lambda x: bytes([x])
1248 1248
class TTest(Test):
    """A "t test" is a test backed by a .t file."""

    # Prefix hghave prints before the reason a feature is unavailable.
    SKIPPED_PREFIX = b'skipped: '
    # Prefix hghave prints when a feature check itself errored.
    FAILED_PREFIX = b'hghave check failed: '
    # Detects bytes that must be rendered as " (esc)" lines.
    NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search

    # Substitution machinery for escaping those bytes.
    ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
    ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
    ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})

    def __init__(self, path, *args, **kwds):
        # accept an extra "case" parameter
        case = kwds.pop('case', [])
        self._case = case
        # All case names declared in the .t file (flattened).
        self._allcases = {x for y in parsettestcases(path) for x in y}
        super(TTest, self).__init__(path, *args, **kwds)
        if case:
            # Suffix name/err-path/tmp-name with the case so parallel
            # cases of the same .t file do not collide.
            casepath = b'#'.join(case)
            self.name = '%s#%s' % (self.name, _strpath(casepath))
            self.errpath = b'%s#%s.err' % (self.errpath[:-4], casepath)
            self._tmpname += b'-%s' % casepath
        # Cache of hghave results, keyed by joined requirement string.
        self._have = {}
1272 1272
    @property
    def refpath(self):
        # A .t file serves as its own reference output.
        return os.path.join(self._testdir, self.bname)
1276 1276
    def _run(self, env):
        """Compile the .t file into a shell script, run it, and merge the
        script's output back into unified test form."""
        with open(self.path, 'rb') as f:
            lines = f.readlines()

        # .t file is both reference output and the test input, keep reference
        # output updated with the test input. This avoids some race
        # conditions where the reference output does not match the actual test.
        if self._refout is not None:
            self._refout = lines

        salt, script, after, expected = self._parsetest(lines)

        # Write out the generated script.
        fname = b'%s.sh' % self._testtmp
        with open(fname, 'wb') as f:
            for l in script:
                f.write(l)

        cmd = b'%s "%s"' % (self._shell, fname)
        vlog("# Running", cmd)

        exitcode, output = self._runcommand(cmd, env)

        if self._aborted:
            raise KeyboardInterrupt()

        # Do not merge output if skipped. Return hghave message instead.
        # Similarly, with --debug, output is None.
        if exitcode == self.SKIPPED_STATUS or output is None:
            return exitcode, output

        return self._processoutput(exitcode, output, salt, after, expected)
1309 1309
    def _hghave(self, reqs):
        """Check feature requirements by running the hghave script.

        Returns (available, message-or-None); message is hghave's output
        when a requirement is unmet. Results are memoized in self._have,
        keyed by the space-joined requirement list.
        """
        allreqs = b' '.join(reqs)
        if allreqs in self._have:
            return self._have.get(allreqs)

        # TODO do something smarter when all other uses of hghave are gone.
        runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
        tdir = runtestdir.replace(b'\\', b'/')
        proc = Popen4(b'%s -c "%s/hghave %s"' %
                      (self._shell, tdir, allreqs),
                      self._testtmp, 0, self._getenv())
        stdout, stderr = proc.communicate()
        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)
        if ret == 2:
            # hghave failed outright (not just "feature missing"):
            # surface its output and abort the whole runner.
            print(stdout.decode('utf-8'))
            sys.exit(1)

        if ret != 0:
            self._have[allreqs] = (False, stdout)
            return False, stdout

        if b'slow' in reqs:
            # Slow tests get the extended timeout.
            self._timeout = self._slowtimeout

        self._have[allreqs] = (True, None)
        return True, None
1338 1338
1339 1339 def _iftest(self, args):
1340 1340 # implements "#if"
1341 1341 reqs = []
1342 1342 for arg in args:
1343 1343 if arg.startswith(b'no-') and arg[3:] in self._allcases:
1344 1344 if arg[3:] in self._case:
1345 1345 return False
1346 1346 elif arg in self._allcases:
1347 1347 if arg not in self._case:
1348 1348 return False
1349 1349 else:
1350 1350 reqs.append(arg)
1351 1351 return self._hghave(reqs)[0]
1352 1352
1353 1353 def _parsetest(self, lines):
1354 1354 # We generate a shell script which outputs unique markers to line
1355 1355 # up script results with our source. These markers include input
1356 1356 # line number and the last return code.
1357 1357 salt = b"SALT%d" % time.time()
1358 1358 def addsalt(line, inpython):
1359 1359 if inpython:
1360 1360 script.append(b'%s %d 0\n' % (salt, line))
1361 1361 else:
1362 1362 script.append(b'echo %s %d $?\n' % (salt, line))
1363 1363 active = []
1364 1364 session = str(uuid.uuid4())
1365 1365 if PYTHON3:
1366 1366 session = session.encode('ascii')
1367 1367 def toggletrace(cmd):
1368 1368 if isinstance(cmd, str):
1369 1369 quoted = shellquote(cmd.strip())
1370 1370 else:
1371 1371 quoted = shellquote(cmd.strip().decode('utf8')).encode('utf8')
1372 1372 quoted = quoted.replace(b'\\', b'\\\\')
1373 1373 if active:
1374 1374 script.append(
1375 1375 b'echo END %s %s >> "$HGCATAPULTSERVERPIPE"\n' % (
1376 1376 session, active[0]))
1377 1377 script.append(
1378 1378 b'echo START %s %s >> "$HGCATAPULTSERVERPIPE"\n' % (
1379 1379 session, quoted))
1380 1380 active[0:] = [quoted]
1381 1381
1382 1382 script = []
1383 1383
1384 1384 # After we run the shell script, we re-unify the script output
1385 1385 # with non-active parts of the source, with synchronization by our
1386 1386 # SALT line number markers. The after table contains the non-active
1387 1387 # components, ordered by line number.
1388 1388 after = {}
1389 1389
1390 1390 # Expected shell script output.
1391 1391 expected = {}
1392 1392
1393 1393 pos = prepos = -1
1394 1394
1395 1395 # True or False when in a true or false conditional section
1396 1396 skipping = None
1397 1397
1398 1398 # We keep track of whether or not we're in a Python block so we
1399 1399 # can generate the surrounding doctest magic.
1400 1400 inpython = False
1401 1401
1402 1402 if self._debug:
1403 1403 script.append(b'set -x\n')
1404 1404 if self._hgcommand != b'hg':
1405 1405 script.append(b'alias hg="%s"\n' % self._hgcommand)
1406 1406 if os.getenv('MSYSTEM'):
1407 1407 script.append(b'alias pwd="pwd -W"\n')
1408 1408
1409 1409 hgcatapult = os.getenv('HGCATAPULTSERVERPIPE')
1410 1410 if hgcatapult and hgcatapult != os.devnull:
1411 1411 # Kludge: use a while loop to keep the pipe from getting
1412 1412 # closed by our echo commands. The still-running file gets
1413 1413 # reaped at the end of the script, which causes the while
1414 1414 # loop to exit and closes the pipe. Sigh.
1415 1415 script.append(
1416 1416 b'rtendtracing() {\n'
1417 1417 b' echo END %(session)s %(name)s >> $HGCATAPULTSERVERPIPE\n'
1418 1418 b' rm -f "$TESTTMP/.still-running"\n'
1419 1419 b'}\n'
1420 1420 b'trap "rtendtracing" 0\n'
1421 1421 b'touch "$TESTTMP/.still-running"\n'
1422 1422 b'while [ -f "$TESTTMP/.still-running" ]; do sleep 1; done '
1423 1423 b'> $HGCATAPULTSERVERPIPE &\n'
1424 1424 b'HGCATAPULTSESSION=%(session)s ; export HGCATAPULTSESSION\n'
1425 1425 b'echo START %(session)s %(name)s >> $HGCATAPULTSERVERPIPE\n'
1426 1426 % {
1427 1427 'name': self.name,
1428 1428 'session': session,
1429 1429 }
1430 1430 )
1431 1431
1432 1432 if self._case:
1433 1433 casestr = b'#'.join(self._case)
1434 1434 if isinstance(self._case, str):
1435 1435 quoted = shellquote(casestr)
1436 1436 else:
1437 1437 quoted = shellquote(casestr.decode('utf8')).encode('utf8')
1438 1438 script.append(b'TESTCASE=%s\n' % quoted)
1439 1439 script.append(b'export TESTCASE\n')
1440 1440
1441 1441 n = 0
1442 1442 for n, l in enumerate(lines):
1443 1443 if not l.endswith(b'\n'):
1444 1444 l += b'\n'
1445 1445 if l.startswith(b'#require'):
1446 1446 lsplit = l.split()
1447 1447 if len(lsplit) < 2 or lsplit[0] != b'#require':
1448 1448 after.setdefault(pos, []).append(' !!! invalid #require\n')
1449 1449 if not skipping:
1450 1450 haveresult, message = self._hghave(lsplit[1:])
1451 1451 if not haveresult:
1452 1452 script = [b'echo "%s"\nexit 80\n' % message]
1453 1453 break
1454 1454 after.setdefault(pos, []).append(l)
1455 1455 elif l.startswith(b'#if'):
1456 1456 lsplit = l.split()
1457 1457 if len(lsplit) < 2 or lsplit[0] != b'#if':
1458 1458 after.setdefault(pos, []).append(' !!! invalid #if\n')
1459 1459 if skipping is not None:
1460 1460 after.setdefault(pos, []).append(' !!! nested #if\n')
1461 1461 skipping = not self._iftest(lsplit[1:])
1462 1462 after.setdefault(pos, []).append(l)
1463 1463 elif l.startswith(b'#else'):
1464 1464 if skipping is None:
1465 1465 after.setdefault(pos, []).append(' !!! missing #if\n')
1466 1466 skipping = not skipping
1467 1467 after.setdefault(pos, []).append(l)
1468 1468 elif l.startswith(b'#endif'):
1469 1469 if skipping is None:
1470 1470 after.setdefault(pos, []).append(' !!! missing #if\n')
1471 1471 skipping = None
1472 1472 after.setdefault(pos, []).append(l)
1473 1473 elif skipping:
1474 1474 after.setdefault(pos, []).append(l)
1475 1475 elif l.startswith(b' >>> '): # python inlines
1476 1476 after.setdefault(pos, []).append(l)
1477 1477 prepos = pos
1478 1478 pos = n
1479 1479 if not inpython:
1480 1480 # We've just entered a Python block. Add the header.
1481 1481 inpython = True
1482 1482 addsalt(prepos, False) # Make sure we report the exit code.
1483 1483 script.append(b'"%s" -m heredoctest <<EOF\n' % PYTHON)
1484 1484 addsalt(n, True)
1485 1485 script.append(l[2:])
1486 1486 elif l.startswith(b' ... '): # python inlines
1487 1487 after.setdefault(prepos, []).append(l)
1488 1488 script.append(l[2:])
1489 1489 elif l.startswith(b' $ '): # commands
1490 1490 if inpython:
1491 1491 script.append(b'EOF\n')
1492 1492 inpython = False
1493 1493 after.setdefault(pos, []).append(l)
1494 1494 prepos = pos
1495 1495 pos = n
1496 1496 addsalt(n, False)
1497 1497 rawcmd = l[4:]
1498 1498 cmd = rawcmd.split()
1499 1499 toggletrace(rawcmd)
1500 1500 if len(cmd) == 2 and cmd[0] == b'cd':
1501 1501 l = b' $ cd %s || exit 1\n' % cmd[1]
1502 1502 script.append(rawcmd)
1503 1503 elif l.startswith(b' > '): # continuations
1504 1504 after.setdefault(prepos, []).append(l)
1505 1505 script.append(l[4:])
1506 1506 elif l.startswith(b' '): # results
1507 1507 # Queue up a list of expected results.
1508 1508 expected.setdefault(pos, []).append(l[2:])
1509 1509 else:
1510 1510 if inpython:
1511 1511 script.append(b'EOF\n')
1512 1512 inpython = False
1513 1513 # Non-command/result. Queue up for merged output.
1514 1514 after.setdefault(pos, []).append(l)
1515 1515
1516 1516 if inpython:
1517 1517 script.append(b'EOF\n')
1518 1518 if skipping is not None:
1519 1519 after.setdefault(pos, []).append(' !!! missing #endif\n')
1520 1520 addsalt(n + 1, False)
1521 1521 return salt, script, after, expected
1522 1522
    def _processoutput(self, exitcode, output, salt, after, expected):
        """Merge the raw script output with the source to form the unified
        test body, matching actual lines against expected ones."""
        # Merge the script output back into a unified test.
        warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
        if exitcode != 0:
            warnonly = 3

        pos = -1
        postout = []
        for l in output:
            lout, lcmd = l, None
            if salt in l:
                # Marker line: split into trailing output and the marker
                # payload ("<lineno> <returncode>").
                lout, lcmd = l.split(salt, 1)

            while lout:
                if not lout.endswith(b'\n'):
                    lout += b' (no-eol)\n'

                # Find the expected output at the current position.
                els = [None]
                if expected.get(pos, None):
                    els = expected[pos]

                optional = []
                for i, el in enumerate(els):
                    r = False
                    if el:
                        r, exact = self.linematch(el, lout)
                    # NOTE(review): globmatch returns b'-glob' (bytes),
                    # which fails this isinstance(str) guard on Python 3,
                    # so the warn-only '-glob' path may be Python 2 only
                    # — confirm intended.
                    if isinstance(r, str):
                        if r == '-glob':
                            lout = ''.join(el.rsplit(' (glob)', 1))
                            r = '' # Warn only this line.
                        elif r == "retry":
                            postout.append(b' ' + el)
                        else:
                            log('\ninfo, unknown linematch result: %r\n' % r)
                            r = False
                    if r:
                        els.pop(i)
                        break
                    if el:
                        if el.endswith(b" (?)\n"):
                            optional.append(i)
                        else:
                            m = optline.match(el)
                            if m:
                                conditions = [
                                    c for c in m.group(2).split(b' ')]

                                if not self._iftest(conditions):
                                    optional.append(i)
                        if exact:
                            # Don't allow line to be matches against a later
                            # line in the output
                            els.pop(i)
                            break

                if r:
                    if r == "retry":
                        continue
                    # clean up any optional leftovers
                    for i in optional:
                        postout.append(b' ' + els[i])
                    for i in reversed(optional):
                        del els[i]
                    postout.append(b' ' + el)
                else:
                    if self.NEEDESCAPE(lout):
                        # Render unprintable bytes as an " (esc)" line.
                        lout = TTest._stringescape(b'%s (esc)\n' %
                                                   lout.rstrip(b'\n'))
                    postout.append(b' ' + lout) # Let diff deal with it.
                    if r != '': # If line failed.
                        warnonly = 3 # for sure not
                    elif warnonly == 1: # Is "not yet" and line is warn only.
                        warnonly = 2 # Yes do warn.
                break
            else:
                # clean up any optional leftovers
                while expected.get(pos, None):
                    el = expected[pos].pop(0)
                    if el:
                        if not el.endswith(b" (?)\n"):
                            m = optline.match(el)
                            if m:
                                conditions = [c for c in m.group(2).split(b' ')]

                                if self._iftest(conditions):
                                    # Don't append as optional line
                                    continue
                            else:
                                continue
                    postout.append(b' ' + el)

            if lcmd:
                # Add on last return code.
                ret = int(lcmd.split()[1])
                if ret != 0:
                    postout.append(b' [%d]\n' % ret)
                if pos in after:
                    # Merge in non-active test bits.
                    postout += after.pop(pos)
                pos = int(lcmd.split()[0])

        if pos in after:
            postout += after.pop(pos)

        if warnonly == 2:
            exitcode = False # Set exitcode to warned.

        return exitcode, postout
1632 1632
    @staticmethod
    def rematch(el, l):
        """Match output line l against regex pattern el (an " (re)" line).

        Returns a match object on success, None on mismatch, or False when
        el is not a valid regex.
        """
        try:
            el = b'(?:' + el + b')'
            # use \Z to ensure that the regex matches to the end of the string
            if os.name == 'nt':
                return re.match(el + br'\r?\n\Z', l)
            return re.match(el + br'\n\Z', l)
        except re.error:
            # el is an invalid regex
            return False
1644 1644
    @staticmethod
    def globmatch(el, l):
        """Match output line l against glob pattern el (a " (glob)" line).

        Returns a regex match object on success, True for a literal match,
        b'-glob' when the line matched literally and the (glob) annotation
        appears unnecessary, or a falsy value on mismatch.
        """
        # The only supported special characters are * and ? plus / which also
        # matches \ on windows. Escaping of these characters is supported.
        if el + b'\n' == l:
            if os.altsep:
                # matching on "/" is not needed for this line
                for pat in checkcodeglobpats:
                    if pat.match(el):
                        return True
                return b'-glob'
            return True
        el = el.replace(b'$LOCALIP', b'*')
        # Translate the glob into a regex, one byte at a time.
        i, n = 0, len(el)
        res = b''
        while i < n:
            c = el[i:i + 1]
            i += 1
            if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
                # Escaped special character: copy it through verbatim.
                res += el[i - 1:i + 1]
                i += 1
            elif c == b'*':
                res += b'.*'
            elif c == b'?':
                res += b'.'
            elif c == b'/' and os.altsep:
                # "/" also matches "\" where an alternate separator exists.
                res += b'[/\\\\]'
            else:
                res += re.escape(c)
        return TTest.rematch(res, l)
1675 1675
    def linematch(self, el, l):
        """Compare expected line el against output line l.

        Returns (match, exact): match is True, a regex match, the string
        "retry" for optional/conditional lines that need not match here,
        or a falsy value; exact is True when only a literal comparison is
        acceptable for this expected line.
        """
        if el == l: # perfect match (fast)
            return True, True
        retry = False
        if el.endswith(b" (?)\n"):
            # Optional line marker: strip it, allow a later retry.
            retry = "retry"
            el = el[:-5] + b"\n"
        else:
            # optline (module-level regex) splits conditional expected
            # lines into the payload (group 1) and a space-separated
            # condition list (group 2).
            m = optline.match(el)
            if m:
                conditions = [c for c in m.group(2).split(b' ')]

                el = m.group(1) + b"\n"
                if not self._iftest(conditions):
                    retry = "retry" # Not required by listed features

        if el.endswith(b" (esc)\n"):
            # Undo the escaping applied when the line was recorded.
            if PYTHON3:
                el = el[:-7].decode('unicode_escape') + '\n'
                el = el.encode('utf-8')
            else:
                el = el[:-7].decode('string-escape') + '\n'
        if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
            return True, True
        if el.endswith(b" (re)\n"):
            return (TTest.rematch(el[:-6], l) or retry), False
        if el.endswith(b" (glob)\n"):
            # ignore '(glob)' added to l by 'replacements'
            if l.endswith(b" (glob)\n"):
                l = l[:-8] + b"\n"
            return (TTest.globmatch(el[:-8], l) or retry), False
        if os.altsep:
            # Retry the literal comparison with separators normalized.
            _l = l.replace(b'\\', b'/')
            if el == _l or os.name == 'nt' and el[:-1] + b'\r\n' == _l:
                return True, True
        return retry, True
1712 1712
1713 1713 @staticmethod
1714 1714 def parsehghaveoutput(lines):
1715 1715 '''Parse hghave log lines.
1716 1716
1717 1717 Return tuple of lists (missing, failed):
1718 1718 * the missing/unknown features
1719 1719 * the features for which existence check failed'''
1720 1720 missing = []
1721 1721 failed = []
1722 1722 for line in lines:
1723 1723 if line.startswith(TTest.SKIPPED_PREFIX):
1724 1724 line = line.splitlines()[0]
1725 1725 missing.append(line[len(TTest.SKIPPED_PREFIX):].decode('utf-8'))
1726 1726 elif line.startswith(TTest.FAILED_PREFIX):
1727 1727 line = line.splitlines()[0]
1728 1728 failed.append(line[len(TTest.FAILED_PREFIX):].decode('utf-8'))
1729 1729
1730 1730 return missing, failed
1731 1731
    @staticmethod
    def _escapef(m):
        # re.sub callback: map one matched byte to its escaped form.
        return TTest.ESCAPEMAP[m.group(0)]
1735 1735
    @staticmethod
    def _stringescape(s):
        # Escape control/non-ASCII bytes so " (esc)" lines stay printable.
        return TTest.ESCAPESUB(TTest._escapef, s)
1739 1739
# Serializes console/stream output across concurrently running test threads.
iolock = threading.RLock()
# NOTE(review): not referenced in this part of the file; presumably guards
# first-failure bookkeeping elsewhere — confirm against the rest of the file.
firstlock = threading.RLock()
# Module-level flag: set once a first error has been reported; consulted by
# TestResult.addOutputMismatch to suppress stale follow-on diffs.
firsterror = False
1743 1743
class TestResult(unittest._TextTestResult):
    """Holds results when executing via unittest."""
    # Don't worry too much about accessing the non-public _TextTestResult.
    # It is relatively common in Python testing tools.
    def __init__(self, options, *args, **kwargs):
        super(TestResult, self).__init__(*args, **kwargs)

        # Parsed command line options; controls diffing, color, --first, etc.
        self._options = options

        # unittest.TestResult didn't have skipped until 2.7. We need to
        # polyfill it.
        self.skipped = []

        # We have a custom "ignored" result that isn't present in any Python
        # unittest implementation. It is very similar to skipped. It may make
        # sense to map it into skip some day.
        self.ignored = []

        # Per-test timing tuples; see stopTest() for the tuple layout.
        self.times = []
        self._firststarttime = None
        # Data stored for the benefit of generating xunit reports.
        self.successes = []
        self.faildata = {}

        if options.color == 'auto':
            self.color = pygmentspresent and self.stream.isatty()
        elif options.color == 'never':
            self.color = False
        else: # 'always', for testing purposes
            self.color = pygmentspresent

    def onStart(self, test):
        """ Can be overriden by custom TestResult
        """

    def onEnd(self):
        """ Can be overriden by custom TestResult
        """

    def addFailure(self, test, reason):
        """Record a failed test; stop the whole run under --first."""
        self.failures.append((test, reason))

        if self._options.first:
            self.stop()
        else:
            with iolock:
                if reason == "timed out":
                    # Progress marker for a timeout.
                    self.stream.write('t')
                else:
                    if not self._options.nodiff:
                        self.stream.write('\n')
                        # Exclude the '\n' from highlighting to lex correctly
                        formatted = 'ERROR: %s output changed\n' % test
                        self.stream.write(highlightmsg(formatted, self.color))
                    # Progress marker for an output mismatch.
                    self.stream.write('!')

                self.stream.flush()

    def addSuccess(self, test):
        """Record a passing test (stream access serialized via iolock)."""
        with iolock:
            super(TestResult, self).addSuccess(test)
        self.successes.append(test)

    def addError(self, test, err):
        """Record a test that errored; stop the whole run under --first."""
        super(TestResult, self).addError(test, err)
        if self._options.first:
            self.stop()

    # Polyfill.
    def addSkip(self, test, reason):
        """Record a skipped test and emit a progress marker."""
        self.skipped.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('skipped %s' % reason)
            else:
                self.stream.write('s')
            self.stream.flush()

    def addIgnore(self, test, reason):
        """Record an ignored test (a Mercurial-specific result kind)."""
        self.ignored.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('ignored %s' % reason)
            else:
                if reason not in ('not retesting', "doesn't match keyword"):
                    self.stream.write('i')
                else:
                    # NOTE(review): silently-ignored tests still bump the run
                    # counter, presumably to keep reported totals stable —
                    # confirm against the summary output.
                    self.testsRun += 1
                self.stream.flush()

    def addOutputMismatch(self, test, ret, got, expected):
        """Record a mismatch in test output for a particular test.

        Prints the diff (or spawns --view), optionally prompts to accept the
        new output under --interactive, and returns whether the change was
        accepted.
        """
        if self.shouldStop or firsterror:
            # don't print, some other test case already failed and
            # printed, we're just stale and probably failed due to our
            # temp dir getting cleaned up.
            return

        accepted = False
        lines = []

        with iolock:
            if self._options.nodiff:
                pass
            elif self._options.view:
                # Hand both files to the user-specified diff viewer.
                v = self._options.view
                os.system(r"%s %s %s" %
                          (v, _strpath(test.refpath), _strpath(test.errpath)))
            else:
                servefail, lines = getdiff(expected, got,
                                           test.refpath, test.errpath)
                self.stream.write('\n')
                for line in lines:
                    line = highlightdiff(line, self.color)
                    if PYTHON3:
                        # Diff lines are bytes; write via the binary buffer.
                        self.stream.flush()
                        self.stream.buffer.write(line)
                        self.stream.buffer.flush()
                    else:
                        self.stream.write(line)
                        self.stream.flush()

                if servefail:
                    raise test.failureException(
                        'server failed to start (HGPORT=%s)' % test._startport)

            # handle interactive prompt without releasing iolock
            if self._options.interactive:
                if test.readrefout() != expected:
                    self.stream.write(
                        'Reference output has changed (run again to prompt '
                        'changes)')
                else:
                    self.stream.write('Accept this change? [n] ')
                    self.stream.flush()
                    answer = sys.stdin.readline().strip()
                    if answer.lower() in ('y', 'yes'):
                        if test.path.endswith(b'.t'):
                            rename(test.errpath, test.path)
                        else:
                            rename(test.errpath, '%s.out' % test.path)
                        accepted = True
            if not accepted:
                # Keep the diff for the xunit/json reports.
                self.faildata[test.name] = b''.join(lines)

        return accepted

    def startTest(self, test):
        super(TestResult, self).startTest(test)

        # os.times module computes the user time and system time spent by
        # child's processes along with real elapsed time taken by a process.
        # This module has one limitation. It can only work for Linux user
        # and not for Windows.
        test.started = os.times()
        if self._firststarttime is None: # thread racy but irrelevant
            self._firststarttime = test.started[4]

    def stopTest(self, test, interrupted=False):
        """Record timing for a finished test; report if it was interrupted."""
        super(TestResult, self).stopTest(test)

        test.stopped = os.times()

        starttime = test.started
        endtime = test.stopped
        origin = self._firststarttime
        self.times.append((test.name,
                           endtime[2] - starttime[2], # user space CPU time
                           endtime[3] - starttime[3], # sys  space CPU time
                           endtime[4] - starttime[4], # real time
                           starttime[4] - origin, # start date in run context
                           endtime[4] - origin, # end date in run context
                           ))

        if interrupted:
            with iolock:
                self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
                    test.name, self.times[-1][3]))
1922 1922
def getTestResult():
    """
    Returns the relevant test result
    """
    # The CUSTOM_TEST_RESULT environment variable names a module providing a
    # TestResult replacement; otherwise fall back to the built-in class.
    custom = os.environ.get("CUSTOM_TEST_RESULT")
    if custom is not None:
        return __import__(custom).TestResult
    return TestResult
1932 1932
class TestSuite(unittest.TestSuite):
    """Custom unittest TestSuite that knows how to execute Mercurial tests."""

    def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
                 retest=False, keywords=None, loop=False, runs_per_test=1,
                 loadtest=None, showchannels=False,
                 *args, **kwargs):
        """Create a new instance that can run tests with a configuration.

        testdir specifies the directory where tests are executed from. This
        is typically the ``tests`` directory from Mercurial's source
        repository.

        jobs specifies the number of jobs to run concurrently. Each test
        executes on its own thread. Tests actually spawn new processes, so
        state mutation should not be an issue.

        If there is only one job, it will use the main thread.

        whitelist and blacklist denote tests that have been whitelisted and
        blacklisted, respectively. These arguments don't belong in TestSuite.
        Instead, whitelist and blacklist should be handled by the thing that
        populates the TestSuite with tests. They are present to preserve
        backwards compatible behavior which reports skipped tests as part
        of the results.

        retest denotes whether to retest failed tests. This arguably belongs
        outside of TestSuite.

        keywords denotes key words that will be used to filter which tests
        to execute. This arguably belongs outside of TestSuite.

        loop denotes whether to loop over tests forever.

        runs_per_test denotes how many times each test is scheduled.

        loadtest, when provided, is called as loadtest(test, count) to build
        a fresh Test instance for tests that set ``should_reload``.

        showchannels denotes whether to print the periodic per-job activity
        display on stdout.
        """
        super(TestSuite, self).__init__(*args, **kwargs)

        self._jobs = jobs
        self._whitelist = whitelist
        self._blacklist = blacklist
        self._retest = retest
        self._keywords = keywords
        self._loop = loop
        self._runs_per_test = runs_per_test
        self._loadtest = loadtest
        self._showchannels = showchannels

    def run(self, result):
        # We have a number of filters that need to be applied. We do this
        # here instead of inside Test because it makes the running logic for
        # Test simpler.
        tests = []
        # Mutable box so the nested get() closure can bump the shared counter.
        num_tests = [0]
        for test in self._tests:
            # Return the test to schedule, reloading it if it asks for that.
            def get():
                num_tests[0] += 1
                if getattr(test, 'should_reload', False):
                    return self._loadtest(test, num_tests[0])
                return test
            if not os.path.exists(test.path):
                result.addSkip(test, "Doesn't exist")
                continue

            # Whitelisted tests bypass the blacklist/retest/keyword filters.
            if not (self._whitelist and test.bname in self._whitelist):
                if self._blacklist and test.bname in self._blacklist:
                    result.addSkip(test, 'blacklisted')
                    continue

                if self._retest and not os.path.exists(test.errpath):
                    result.addIgnore(test, 'not retesting')
                    continue

                if self._keywords:
                    # Match keywords against test content plus its name.
                    with open(test.path, 'rb') as f:
                        t = f.read().lower() + test.bname.lower()
                    ignored = False
                    for k in self._keywords.lower().split():
                        if k not in t:
                            result.addIgnore(test, "doesn't match keyword")
                            ignored = True
                            break

                    if ignored:
                        continue
            for _ in xrange(self._runs_per_test):
                tests.append(get())

        runtests = list(tests)
        done = queue.Queue()
        running = 0

        # One slot per job; each holds an activity string for --showchannels.
        channels = [""] * self._jobs

        def job(test, result):
            # Thread body: claim a free channel, run one test, signal 'done'.
            for n, v in enumerate(channels):
                if not v:
                    channel = n
                    break
            else:
                raise ValueError('Could not find output channel')
            channels[channel] = "=" + test.name[5:].split(".")[0]
            try:
                test(result)
                done.put(None)
            except KeyboardInterrupt:
                pass
            except: # re-raises
                done.put(('!', test, 'run-test raised an error, see traceback'))
                raise
            finally:
                try:
                    channels[channel] = ''
                except IndexError:
                    pass

        def stat():
            # Background thread: render per-channel activity about once a
            # second until the channel list is cleared at the end of run().
            count = 0
            while channels:
                d = '\n%03s ' % count
                for n, v in enumerate(channels):
                    if v:
                        d += v[0]
                        channels[n] = v[1:] or '.'
                    else:
                        d += ' '
                    d += ' '
                with iolock:
                    sys.stdout.write(d + ' ')
                    sys.stdout.flush()
                for x in xrange(10):
                    if channels:
                        time.sleep(.1)
                count += 1

        stoppedearly = False

        if self._showchannels:
            statthread = threading.Thread(target=stat, name="stat")
            statthread.start()

        try:
            while tests or running:
                # Drain one completion when the pool is saturated or when
                # there is nothing left to schedule.
                if not done.empty() or running == self._jobs or not tests:
                    try:
                        done.get(True, 1)
                        running -= 1
                        if result and result.shouldStop:
                            stoppedearly = True
                            break
                    except queue.Empty:
                        continue
                if tests and not running == self._jobs:
                    test = tests.pop(0)
                    if self._loop:
                        # --loop: reschedule the test at the back of the queue.
                        if getattr(test, 'should_reload', False):
                            num_tests[0] += 1
                            tests.append(
                                self._loadtest(test, num_tests[0]))
                        else:
                            tests.append(test)
                    if self._jobs == 1:
                        job(test, result)
                    else:
                        t = threading.Thread(target=job, name=test.name,
                                             args=(test, result))
                        t.start()
                    running += 1

            # If we stop early we still need to wait on started tests to
            # finish. Otherwise, there is a race between the test completing
            # and the test's cleanup code running. This could result in the
            # test reporting incorrect.
            if stoppedearly:
                while running:
                    try:
                        done.get(True, 1)
                        running -= 1
                    except queue.Empty:
                        continue
        except KeyboardInterrupt:
            for test in runtests:
                test.abort()

        # Clearing the list terminates the stat() thread's loop.
        channels = []

        return result
2118 2118
2119 2119 # Save the most recent 5 wall-clock runtimes of each test to a
2120 2120 # human-readable text file named .testtimes. Tests are sorted
2121 2121 # alphabetically, while times for each test are listed from oldest to
2122 2122 # newest.
2123 2123
def loadtimes(outputdir):
    """Load per-test historical runtimes from ``.testtimes`` in outputdir.

    Returns a list of ``(test name, [runtimes])`` tuples, runtimes ordered
    oldest to newest. Returns an empty list when the file does not exist;
    any other I/O error is re-raised.
    """
    times = []
    try:
        with open(os.path.join(outputdir, b'.testtimes')) as fp:
            for line in fp:
                m = re.match('(.*?) ([0-9. ]+)', line)
                if not m:
                    # Tolerate blank/corrupt lines instead of crashing with
                    # AttributeError on a None match.
                    continue
                times.append((m.group(1),
                              [float(t) for t in m.group(2).split()]))
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
    return times
2136 2136
def savetimes(outputdir, result):
    """Persist the most recent wall-clock runtimes of each test.

    Merges this run's times into .testtimes in outputdir, keeping at most
    the five newest samples per test; skipped tests are excluded. The file
    is replaced atomically via a temp file plus rename, and rename errors
    are silently ignored (best effort).
    """
    maxruns = 5
    saved = dict(loadtimes(outputdir))
    skipped = set(str(t[0]) for t in result.skipped)
    for tdata in result.times:
        name, real = tdata[0], tdata[3]
        if name in skipped:
            continue
        history = saved.setdefault(name, [])
        history.append(real)
        # Trim in place to the newest maxruns entries.
        del history[:-maxruns]

    fd, tmpname = tempfile.mkstemp(prefix=b'.testtimes',
                                   dir=outputdir, text=True)
    with os.fdopen(fd, 'w') as fp:
        for name, history in sorted(saved.items()):
            formatted = ' '.join('%.3f' % (t,) for t in history)
            fp.write('%s %s\n' % (name, formatted))
    timepath = os.path.join(outputdir, b'.testtimes')
    try:
        os.unlink(timepath)
    except OSError:
        pass
    try:
        os.rename(tmpname, timepath)
    except OSError:
        pass
2162 2162
class TextTestRunner(unittest.TextTestRunner):
    """Custom unittest test runner that uses appropriate settings."""

    def __init__(self, runner, *args, **kwargs):
        super(TextTestRunner, self).__init__(*args, **kwargs)

        # The TestRunner coordinating this invocation; supplies options and
        # the output directory.
        self._runner = runner

        self._result = getTestResult()(self._runner.options, self.stream,
                                       self.descriptions, self.verbosity)

    def listtests(self, test):
        """Print each test name instead of running it.

        Records every test as a success and still honors the --xunit and
        --json report options. Returns the TestResult.
        """
        test = sorted(test, key=lambda t: t.name)

        self._result.onStart(test)

        for t in test:
            print(t.name)
            self._result.addSuccess(t)

        if self._runner.options.xunit:
            with open(self._runner.options.xunit, "wb") as xuf:
                self._writexunit(self._result, xuf)

        if self._runner.options.json:
            jsonpath = os.path.join(self._runner._outputdir, b'report.json')
            with open(jsonpath, 'w') as fp:
                self._writejson(self._result, fp)

        return self._result

    def run(self, test):
        """Execute the suite, then print the summary and any reports."""
        self._result.onStart(test)
        test(self._result)

        failed = len(self._result.failures)
        skipped = len(self._result.skipped)
        ignored = len(self._result.ignored)

        # All summary output is emitted under iolock so late test threads
        # cannot interleave with it. Note: 'test' is rebound by the report
        # loops below; the suite has already finished running.
        with iolock:
            self.stream.writeln('')

            if not self._runner.options.noskips:
                for test, msg in self._result.skipped:
                    formatted = 'Skipped %s: %s\n' % (test.name, msg)
                    msg = highlightmsg(formatted, self._result.color)
                    self.stream.write(msg)
            for test, msg in self._result.failures:
                formatted = 'Failed %s: %s\n' % (test.name, msg)
                self.stream.write(highlightmsg(formatted, self._result.color))
            for test, msg in self._result.errors:
                self.stream.writeln('Errored %s: %s' % (test.name, msg))

            if self._runner.options.xunit:
                with open(self._runner.options.xunit, "wb") as xuf:
                    self._writexunit(self._result, xuf)

            if self._runner.options.json:
                jsonpath = os.path.join(self._runner._outputdir, b'report.json')
                with open(jsonpath, 'w') as fp:
                    self._writejson(self._result, fp)

            self._runner._checkhglib('Tested')

            savetimes(self._runner._outputdir, self._result)

            if failed and self._runner.options.known_good_rev:
                self._bisecttests(t for t, m in self._result.failures)
            self.stream.writeln(
                '# Ran %d tests, %d skipped, %d failed.'
                % (self._result.testsRun, skipped + ignored, failed))
            if failed:
                self.stream.writeln('python hash seed: %s' %
                                    os.environ['PYTHONHASHSEED'])
            if self._runner.options.time:
                self.printtimes(self._result.times)

            if self._runner.options.exceptions:
                exceptions = aggregateexceptions(
                    os.path.join(self._runner._outputdir, b'exceptions'))

                self.stream.writeln('Exceptions Report:')
                self.stream.writeln('%d total from %d frames' %
                                    (exceptions['total'],
                                     len(exceptions['exceptioncounts'])))
                combined = exceptions['combined']
                for key in sorted(combined, key=combined.get, reverse=True):
                    frame, line, exc = key
                    totalcount, testcount, leastcount, leasttest = combined[key]

                    self.stream.writeln('%d (%d tests)\t%s: %s (%s - %d total)'
                                        % (totalcount,
                                           testcount,
                                           frame, exc,
                                           leasttest, leastcount))

            self.stream.flush()

        return self._result

    def _bisecttests(self, tests):
        """Run 'hg bisect' to locate the changeset breaking each failed test."""
        bisectcmd = ['hg', 'bisect']
        bisectrepo = self._runner.options.bisect_repo
        if bisectrepo:
            bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
        def pread(args):
            # Run args with HGPLAIN set and return combined stdout/stderr.
            env = os.environ.copy()
            env['HGPLAIN'] = '1'
            p = subprocess.Popen(args, stderr=subprocess.STDOUT,
                                 stdout=subprocess.PIPE, env=env)
            data = p.stdout.read()
            p.wait()
            return data
        for test in tests:
            # (the trailing comma below is a harmless stray tuple expression)
            pread(bisectcmd + ['--reset']),
            pread(bisectcmd + ['--bad', '.'])
            pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
            # TODO: we probably need to forward more options
            # that alter hg's behavior inside the tests.
            opts = ''
            withhg = self._runner.options.with_hg
            if withhg:
                opts += ' --with-hg=%s ' % shellquote(_strpath(withhg))
            rtc = '%s %s %s %s' % (sys.executable, sys.argv[0], opts,
                                   test)
            data = pread(bisectcmd + ['--command', rtc])
            m = re.search(
                (br'\nThe first (?P<goodbad>bad|good) revision '
                 br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
                 br'summary: +(?P<summary>[^\n]+)\n'),
                data, (re.MULTILINE | re.DOTALL))
            if m is None:
                self.stream.writeln(
                    'Failed to identify failure point for %s' % test)
                continue
            dat = m.groupdict()
            verb = 'broken' if dat['goodbad'] == b'bad' else 'fixed'
            self.stream.writeln(
                '%s %s by %s (%s)' % (
                    test, verb, dat['node'].decode('ascii'),
                    dat['summary'].decode('utf8', 'ignore')))

    def printtimes(self, times):
        """Print the per-test timing report, slowest (by real time) last."""
        # iolock held by run
        self.stream.writeln('# Producing time report')
        times.sort(key=lambda t: (t[3]))
        cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
        self.stream.writeln('%-7s %-7s %-7s %-7s %-7s %s' %
                            ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
        for tdata in times:
            test = tdata[0]
            cuser, csys, real, start, end = tdata[1:6]
            self.stream.writeln(cols % (start, end, cuser, csys, real, test))

    @staticmethod
    def _writexunit(result, outf):
        """Write an xunit-style XML report for result to the binary file outf."""
        # See http://llg.cubic.org/docs/junit/ for a reference.
        timesd = dict((t[0], t[3]) for t in result.times)
        doc = minidom.Document()
        s = doc.createElement('testsuite')
        s.setAttribute('name', 'run-tests')
        s.setAttribute('tests', str(result.testsRun))
        s.setAttribute('errors', "0") # TODO
        s.setAttribute('failures', str(len(result.failures)))
        s.setAttribute('skipped', str(len(result.skipped) +
                                      len(result.ignored)))
        doc.appendChild(s)
        for tc in result.successes:
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            tctime = timesd.get(tc.name)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            s.appendChild(t)
        for tc, err in sorted(result.faildata.items()):
            t = doc.createElement('testcase')
            t.setAttribute('name', tc)
            tctime = timesd.get(tc)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            # createCDATASection expects a unicode or it will
            # convert using default conversion rules, which will
            # fail if string isn't ASCII.
            err = cdatasafe(err).decode('utf-8', 'replace')
            cd = doc.createCDATASection(err)
            # Use 'failure' here instead of 'error' to match errors = 0,
            # failures = len(result.failures) in the testsuite element.
            failelem = doc.createElement('failure')
            failelem.setAttribute('message', 'output changed')
            failelem.setAttribute('type', 'output-mismatch')
            failelem.appendChild(cd)
            t.appendChild(failelem)
            s.appendChild(t)
        for tc, message in result.skipped:
            # According to the schema, 'skipped' has no attributes. So store
            # the skip message as a text node instead.
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            binmessage = message.encode('utf-8')
            message = cdatasafe(binmessage).decode('utf-8', 'replace')
            cd = doc.createCDATASection(message)
            skipelem = doc.createElement('skipped')
            skipelem.appendChild(cd)
            t.appendChild(skipelem)
            s.appendChild(t)
        outf.write(doc.toprettyxml(indent='  ', encoding='utf-8'))

    @staticmethod
    def _writejson(result, outf):
        """Write a 'testreport = {...}' JSON report for result to outf."""
        timesd = {}
        for tdata in result.times:
            test = tdata[0]
            timesd[test] = tdata[1:]

        outcome = {}
        groups = [('success', ((tc, None)
                               for tc in result.successes)),
                  ('failure', result.failures),
                  ('skip', result.skipped)]
        for res, testcases in groups:
            for tc, __ in testcases:
                if tc.name in timesd:
                    diff = result.faildata.get(tc.name, b'')
                    try:
                        diff = diff.decode('unicode_escape')
                    except UnicodeDecodeError as e:
                        diff = '%r decoding diff, sorry' % e
                    tres = {'result': res,
                            'time': ('%0.3f' % timesd[tc.name][2]),
                            'cuser': ('%0.3f' % timesd[tc.name][0]),
                            'csys': ('%0.3f' % timesd[tc.name][1]),
                            'start': ('%0.3f' % timesd[tc.name][3]),
                            'end': ('%0.3f' % timesd[tc.name][4]),
                            'diff': diff,
                            }
                else:
                    # blacklisted test
                    tres = {'result': res}

                outcome[tc.name] = tres
        jsonout = json.dumps(outcome, sort_keys=True, indent=4,
                             separators=(',', ': '))
        outf.writelines(("testreport =", jsonout))
2406 2406
def sorttests(testdescs, previoustimes, shuffle=False):
    """Do an in-place sort of tests."""
    if shuffle:
        random.shuffle(testdescs)
        return

    if previoustimes:
        def sortkey(desc):
            # The most recent recorded runtime is the best estimate; brand
            # new tests default to a rather arbitrary 1 second.
            path = desc['path']
            if path in previoustimes:
                return -previoustimes[path][-1]
            return -1.0
    else:
        # keywords for slow tests
        slow = {b'svn': 10,
                b'cvs': 10,
                b'hghave': 10,
                b'largefiles-update': 10,
                b'run-tests': 10,
                b'corruption': 10,
                b'race': 10,
                b'i18n': 10,
                b'check': 100,
                b'gendoc': 100,
                b'contrib-perf': 200,
                }
        perf = {}

        def sortkey(desc):
            # Run largest tests first, as they tend to take the longest.
            path = desc['path']
            if path in perf:
                return perf[path]
            try:
                val = -os.stat(path).st_size
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
                perf[path] = -1e9 # file does not exist, tell early
                return -1e9
            for kw, mul in slow.items():
                if kw in path:
                    val *= mul
            if path.endswith(b'.py'):
                val /= 10.0
            perf[path] = val / 1000.0
            return perf[path]

    testdescs.sort(key=sortkey)
2460 2460
2461 2461 class TestRunner(object):
2462 2462 """Holds context for executing tests.
2463 2463
2464 2464 Tests rely on a lot of state. This object holds it for them.
2465 2465 """
2466 2466
2467 2467 # Programs required to run tests.
2468 2468 REQUIREDTOOLS = [
2469 2469 b'diff',
2470 2470 b'grep',
2471 2471 b'unzip',
2472 2472 b'gunzip',
2473 2473 b'bunzip2',
2474 2474 b'sed',
2475 2475 ]
2476 2476
2477 2477 # Maps file extensions to test class.
2478 2478 TESTTYPES = [
2479 2479 (b'.py', PythonTest),
2480 2480 (b'.t', TTest),
2481 2481 ]
2482 2482
2483 2483 def __init__(self):
2484 2484 self.options = None
2485 2485 self._hgroot = None
2486 2486 self._testdir = None
2487 2487 self._outputdir = None
2488 2488 self._hgtmp = None
2489 2489 self._installdir = None
2490 2490 self._bindir = None
2491 2491 self._tmpbinddir = None
2492 2492 self._pythondir = None
2493 2493 self._coveragefile = None
2494 2494 self._createdfiles = []
2495 2495 self._hgcommand = None
2496 2496 self._hgpath = None
2497 2497 self._portoffset = 0
2498 2498 self._ports = {}
2499 2499
    def run(self, args, parser=None):
        """Run the test suite.

        args is the command line argument list (excluding the program name);
        an option parser may be supplied to override the default one.
        Returns the value produced by _run().
        """
        # Deny group/other write permission on files created during the run;
        # the caller's umask is restored in the finally block.
        oldmask = os.umask(0o22)
        try:
            parser = parser or getparser()
            options = parseargs(args, parser)
            tests = [_bytespath(a) for a in options.tests]
            if options.test_list is not None:
                # Each --test-list file contributes one test name per line;
                # empty lines are ignored.
                for listfile in options.test_list:
                    with open(listfile, 'rb') as f:
                        tests.extend(t for t in f.read().splitlines() if t)
            self.options = options

            self._checktools()
            testdescs = self.findtests(tests)
            if options.profile_runner:
                # Profile the runner itself (not the tests) with statprof.
                import statprof
                statprof.start()
            result = self._run(testdescs)
            if options.profile_runner:
                statprof.stop()
                statprof.display()
            return result

        finally:
            os.umask(oldmask)
2526 2526
2527 2527 def _run(self, testdescs):
2528 2528 self._testdir = osenvironb[b'TESTDIR'] = getcwdb()
2529 2529 # assume all tests in same folder for now
2530 2530 if testdescs:
2531 2531 pathname = os.path.dirname(testdescs[0]['path'])
2532 2532 if pathname:
2533 2533 osenvironb[b'TESTDIR'] = os.path.join(osenvironb[b'TESTDIR'],
2534 2534 pathname)
2535 2535 if self.options.outputdir:
2536 2536 self._outputdir = canonpath(_bytespath(self.options.outputdir))
2537 2537 else:
2538 2538 self._outputdir = self._testdir
2539 2539 if testdescs and pathname:
2540 2540 self._outputdir = os.path.join(self._outputdir, pathname)
2541 2541 previoustimes = {}
2542 2542 if self.options.order_by_runtime:
2543 2543 previoustimes = dict(loadtimes(self._outputdir))
2544 2544 sorttests(testdescs, previoustimes, shuffle=self.options.random)
2545 2545
2546 2546 if 'PYTHONHASHSEED' not in os.environ:
2547 2547 # use a random python hash seed all the time
2548 2548 # we do the randomness ourself to know what seed is used
2549 2549 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
2550 2550
2551 2551 if self.options.tmpdir:
2552 2552 self.options.keep_tmpdir = True
2553 2553 tmpdir = _bytespath(self.options.tmpdir)
2554 2554 if os.path.exists(tmpdir):
2555 2555 # Meaning of tmpdir has changed since 1.3: we used to create
2556 2556 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
2557 2557 # tmpdir already exists.
2558 2558 print("error: temp dir %r already exists" % tmpdir)
2559 2559 return 1
2560 2560
2561 2561 os.makedirs(tmpdir)
2562 2562 else:
2563 2563 d = None
2564 2564 if os.name == 'nt':
2565 2565 # without this, we get the default temp dir location, but
2566 2566 # in all lowercase, which causes troubles with paths (issue3490)
2567 2567 d = osenvironb.get(b'TMP', None)
2568 2568 tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)
2569 2569
2570 2570 self._hgtmp = osenvironb[b'HGTMP'] = (
2571 2571 os.path.realpath(tmpdir))
2572 2572
2573 2573 if self.options.with_hg:
2574 2574 self._installdir = None
2575 2575 whg = self.options.with_hg
2576 2576 self._bindir = os.path.dirname(os.path.realpath(whg))
2577 2577 assert isinstance(self._bindir, bytes)
2578 2578 self._hgcommand = os.path.basename(whg)
2579 2579 self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
2580 2580 os.makedirs(self._tmpbindir)
2581 2581
2582 2582 normbin = os.path.normpath(os.path.abspath(whg))
2583 2583 normbin = normbin.replace(os.sep.encode('ascii'), b'/')
2584 2584
2585 2585 # Other Python scripts in the test harness need to
2586 2586 # `import mercurial`. If `hg` is a Python script, we assume
2587 2587 # the Mercurial modules are relative to its path and tell the tests
2588 2588 # to load Python modules from its directory.
2589 2589 with open(whg, 'rb') as fh:
2590 2590 initial = fh.read(1024)
2591 2591
2592 2592 if re.match(b'#!.*python', initial):
2593 2593 self._pythondir = self._bindir
2594 2594 # If it looks like our in-repo Rust binary, use the source root.
2595 2595 # This is a bit hacky. But rhg is still not supported outside the
2596 2596 # source directory. So until it is, do the simple thing.
2597 2597 elif re.search(b'/rust/target/[^/]+/hg', normbin):
2598 2598 self._pythondir = os.path.dirname(self._testdir)
2599 2599 # Fall back to the legacy behavior.
2600 2600 else:
2601 2601 self._pythondir = self._bindir
2602 2602
2603 2603 else:
2604 2604 self._installdir = os.path.join(self._hgtmp, b"install")
2605 2605 self._bindir = os.path.join(self._installdir, b"bin")
2606 2606 self._hgcommand = b'hg'
2607 2607 self._tmpbindir = self._bindir
2608 2608 self._pythondir = os.path.join(self._installdir, b"lib", b"python")
2609 2609
2610 2610 # set CHGHG, then replace "hg" command by "chg"
2611 2611 chgbindir = self._bindir
2612 2612 if self.options.chg or self.options.with_chg:
2613 2613 osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
2614 2614 else:
2615 2615 osenvironb.pop(b'CHGHG', None) # drop flag for hghave
2616 2616 if self.options.chg:
2617 2617 self._hgcommand = b'chg'
2618 2618 elif self.options.with_chg:
2619 2619 chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
2620 2620 self._hgcommand = os.path.basename(self.options.with_chg)
2621 2621
2622 2622 osenvironb[b"BINDIR"] = self._bindir
2623 2623 osenvironb[b"PYTHON"] = PYTHON
2624 2624
2625 2625 fileb = _bytespath(__file__)
2626 2626 runtestdir = os.path.abspath(os.path.dirname(fileb))
2627 2627 osenvironb[b'RUNTESTDIR'] = runtestdir
2628 2628 if PYTHON3:
2629 2629 sepb = _bytespath(os.pathsep)
2630 2630 else:
2631 2631 sepb = os.pathsep
2632 2632 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
2633 2633 if os.path.islink(__file__):
2634 2634 # test helper will likely be at the end of the symlink
2635 2635 realfile = os.path.realpath(fileb)
2636 2636 realdir = os.path.abspath(os.path.dirname(realfile))
2637 2637 path.insert(2, realdir)
2638 2638 if chgbindir != self._bindir:
2639 2639 path.insert(1, chgbindir)
2640 2640 if self._testdir != runtestdir:
2641 2641 path = [self._testdir] + path
2642 2642 if self._tmpbindir != self._bindir:
2643 2643 path = [self._tmpbindir] + path
2644 2644 osenvironb[b"PATH"] = sepb.join(path)
2645 2645
2646 2646 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
2647 2647 # can run .../tests/run-tests.py test-foo where test-foo
2648 2648 # adds an extension to HGRC. Also include run-test.py directory to
2649 2649 # import modules like heredoctest.
2650 2650 pypath = [self._pythondir, self._testdir, runtestdir]
2651 2651 # We have to augment PYTHONPATH, rather than simply replacing
2652 2652 # it, in case external libraries are only available via current
2653 2653 # PYTHONPATH. (In particular, the Subversion bindings on OS X
2654 2654 # are in /opt/subversion.)
2655 2655 oldpypath = osenvironb.get(IMPL_PATH)
2656 2656 if oldpypath:
2657 2657 pypath.append(oldpypath)
2658 2658 osenvironb[IMPL_PATH] = sepb.join(pypath)
2659 2659
2660 2660 if self.options.pure:
2661 2661 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
2662 2662 os.environ["HGMODULEPOLICY"] = "py"
2663 2663
2664 2664 if self.options.allow_slow_tests:
2665 2665 os.environ["HGTEST_SLOW"] = "slow"
2666 2666 elif 'HGTEST_SLOW' in os.environ:
2667 2667 del os.environ['HGTEST_SLOW']
2668 2668
2669 2669 self._coveragefile = os.path.join(self._testdir, b'.coverage')
2670 2670
2671 2671 if self.options.exceptions:
2672 2672 exceptionsdir = os.path.join(self._outputdir, b'exceptions')
2673 2673 try:
2674 2674 os.makedirs(exceptionsdir)
2675 2675 except OSError as e:
2676 2676 if e.errno != errno.EEXIST:
2677 2677 raise
2678 2678
2679 2679 # Remove all existing exception reports.
2680 2680 for f in os.listdir(exceptionsdir):
2681 2681 os.unlink(os.path.join(exceptionsdir, f))
2682 2682
2683 2683 osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir
2684 2684 logexceptions = os.path.join(self._testdir, b'logexceptions.py')
2685 2685 self.options.extra_config_opt.append(
2686 2686 'extensions.logexceptions=%s' % logexceptions.decode('utf-8'))
2687 2687
2688 2688 vlog("# Using TESTDIR", self._testdir)
2689 2689 vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
2690 2690 vlog("# Using HGTMP", self._hgtmp)
2691 2691 vlog("# Using PATH", os.environ["PATH"])
2692 2692 vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
2693 2693 vlog("# Writing to directory", self._outputdir)
2694 2694
2695 2695 try:
2696 2696 return self._runtests(testdescs) or 0
2697 2697 finally:
2698 2698 time.sleep(.1)
2699 2699 self._cleanup()
2700 2700
2701 2701 def findtests(self, args):
2702 2702 """Finds possible test files from arguments.
2703 2703
2704 2704 If you wish to inject custom tests into the test harness, this would
2705 2705 be a good function to monkeypatch or override in a derived class.
2706 2706 """
2707 2707 if not args:
2708 2708 if self.options.changed:
2709 2709 proc = Popen4('hg st --rev "%s" -man0 .' %
2710 2710 self.options.changed, None, 0)
2711 2711 stdout, stderr = proc.communicate()
2712 2712 args = stdout.strip(b'\0').split(b'\0')
2713 2713 else:
2714 2714 args = os.listdir(b'.')
2715 2715
2716 2716 expanded_args = []
2717 2717 for arg in args:
2718 2718 if os.path.isdir(arg):
2719 2719 if not arg.endswith(b'/'):
2720 2720 arg += b'/'
2721 2721 expanded_args.extend([arg + a for a in os.listdir(arg)])
2722 2722 else:
2723 2723 expanded_args.append(arg)
2724 2724 args = expanded_args
2725 2725
2726 2726 testcasepattern = re.compile(br'([\w-]+\.t|py)(#([a-zA-Z0-9_\-\.#]+))')
2727 2727 tests = []
2728 2728 for t in args:
2729 2729 case = []
2730 2730
2731 2731 if not (os.path.basename(t).startswith(b'test-')
2732 2732 and (t.endswith(b'.py') or t.endswith(b'.t'))):
2733 2733
2734 2734 m = testcasepattern.match(t)
2735 2735 if m is not None:
2736 2736 t, _, casestr = m.groups()
2737 2737 if casestr:
2738 2738 case = casestr.split(b'#')
2739 2739 else:
2740 2740 continue
2741 2741
2742 2742 if t.endswith(b'.t'):
2743 2743 # .t file may contain multiple test cases
2744 2744 casedimensions = parsettestcases(t)
2745 2745 if casedimensions:
2746 2746 cases = []
2747 2747 def addcases(case, casedimensions):
2748 2748 if not casedimensions:
2749 2749 cases.append(case)
2750 2750 else:
2751 2751 for c in casedimensions[0]:
2752 2752 addcases(case + [c], casedimensions[1:])
2753 2753 addcases([], casedimensions)
2754 2754 if case and case in cases:
2755 2755 cases = [case]
2756 2756 elif case:
2757 2757 # Ignore invalid cases
2758 2758 cases = []
2759 2759 else:
2760 2760 pass
2761 2761 tests += [{'path': t, 'case': c} for c in sorted(cases)]
2762 2762 else:
2763 2763 tests.append({'path': t})
2764 2764 else:
2765 2765 tests.append({'path': t})
2766 2766 return tests
2767 2767
    def _runtests(self, testdescs):
        """Run the tests described by ``testdescs``.

        Builds Test instances, wraps them in a TestSuite sized to the
        requested job count, and drives a TextTestRunner over them.
        Returns 1 on failure (including KeyboardInterrupt), None otherwise.
        """
        def _reloadtest(test, i):
            # convert a test back to its description dict
            desc = {'path': test.path}
            case = getattr(test, '_case', [])
            if case:
                desc['case'] = case
            return self._gettest(desc, i)

        try:
            if self.options.restart:
                # --restart: skip every test that completed cleanly last
                # time (no .err file) and resume at the first failure. If
                # nothing failed previously, fall back to the full list.
                orig = list(testdescs)
                while testdescs:
                    desc = testdescs[0]
                    # desc['path'] is a relative path
                    if 'case' in desc:
                        casestr = b'#'.join(desc['case'])
                        errpath = b'%s#%s.err' % (desc['path'], casestr)
                    else:
                        errpath = b'%s.err' % desc['path']
                    errpath = os.path.join(self._outputdir, errpath)
                    if os.path.exists(errpath):
                        break
                    testdescs.pop(0)
                if not testdescs:
                    print("running all tests")
                    testdescs = orig

            tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]

            # Never spawn more worker processes than there are tests.
            jobs = min(len(tests), self.options.jobs)

            failed = False
            kws = self.options.keywords
            if kws is not None and PYTHON3:
                kws = kws.encode('utf-8')

            suite = TestSuite(self._testdir,
                              jobs=jobs,
                              whitelist=self.options.whitelisted,
                              blacklist=self.options.blacklist,
                              retest=self.options.retest,
                              keywords=kws,
                              loop=self.options.loop,
                              runs_per_test=self.options.runs_per_test,
                              showchannels=self.options.showchannels,
                              tests=tests, loadtest=_reloadtest)
            verbosity = 1
            if self.options.list_tests:
                verbosity = 0
            elif self.options.verbose:
                verbosity = 2
            runner = TextTestRunner(self, verbosity=verbosity)

            if self.options.list_tests:
                result = runner.listtests(suite)
            else:
                # A temporary install (the default) builds hg first;
                # --with-hg/--local only fixes up the python symlink.
                if self._installdir:
                    self._installhg()
                    self._checkhglib("Testing")
                else:
                    self._usecorrectpython()
                if self.options.chg:
                    assert self._installdir
                    self._installchg()

                log('running %d tests using %d parallel processes' % (
                    len(tests), jobs))

                result = runner.run(suite)

            if result.failures:
                failed = True

            result.onEnd()

            if self.options.anycoverage:
                self._outputcoverage()
        except KeyboardInterrupt:
            failed = True
            print("\ninterrupted!")

        if failed:
            return 1
2847 2852
2848 2853 def _getport(self, count):
2849 2854 port = self._ports.get(count) # do we have a cached entry?
2850 2855 if port is None:
2851 2856 portneeded = 3
2852 2857 # above 100 tries we just give up and let test reports failure
2853 2858 for tries in xrange(100):
2854 2859 allfree = True
2855 2860 port = self.options.port + self._portoffset
2856 2861 for idx in xrange(portneeded):
2857 2862 if not checkportisavailable(port + idx):
2858 2863 allfree = False
2859 2864 break
2860 2865 self._portoffset += portneeded
2861 2866 if allfree:
2862 2867 break
2863 2868 self._ports[count] = port
2864 2869 return port
2865 2870
2866 2871 def _gettest(self, testdesc, count):
2867 2872 """Obtain a Test by looking at its filename.
2868 2873
2869 2874 Returns a Test instance. The Test may not be runnable if it doesn't
2870 2875 map to a known type.
2871 2876 """
2872 2877 path = testdesc['path']
2873 2878 lctest = path.lower()
2874 2879 testcls = Test
2875 2880
2876 2881 for ext, cls in self.TESTTYPES:
2877 2882 if lctest.endswith(ext):
2878 2883 testcls = cls
2879 2884 break
2880 2885
2881 2886 refpath = os.path.join(self._testdir, path)
2882 2887 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
2883 2888
2884 2889 # extra keyword parameters. 'case' is used by .t tests
2885 2890 kwds = dict((k, testdesc[k]) for k in ['case'] if k in testdesc)
2886 2891
2887 2892 t = testcls(refpath, self._outputdir, tmpdir,
2888 2893 keeptmpdir=self.options.keep_tmpdir,
2889 2894 debug=self.options.debug,
2890 2895 first=self.options.first,
2891 2896 timeout=self.options.timeout,
2892 2897 startport=self._getport(count),
2893 2898 extraconfigopts=self.options.extra_config_opt,
2894 2899 py3kwarnings=self.options.py3k_warnings,
2895 2900 shell=self.options.shell,
2896 2901 hgcommand=self._hgcommand,
2897 2902 usechg=bool(self.options.with_chg or self.options.chg),
2898 2903 useipv6=useipv6, **kwds)
2899 2904 t.should_reload = True
2900 2905 return t
2901 2906
2902 2907 def _cleanup(self):
2903 2908 """Clean up state from this test invocation."""
2904 2909 if self.options.keep_tmpdir:
2905 2910 return
2906 2911
2907 2912 vlog("# Cleaning up HGTMP", self._hgtmp)
2908 2913 shutil.rmtree(self._hgtmp, True)
2909 2914 for f in self._createdfiles:
2910 2915 try:
2911 2916 os.remove(f)
2912 2917 except OSError:
2913 2918 pass
2914 2919
    def _usecorrectpython(self):
        """Configure the environment to use the appropriate Python in tests.

        Either symlinks ``python``/``python.exe`` in the temporary bin
        directory to the running interpreter, or (where symlinks are not
        usable) rewrites PATH so that the running interpreter's directory
        is searched first.
        """
        # Tests must use the same interpreter as us or bad things will happen.
        pyexename = sys.platform == 'win32' and b'python.exe' or b'python'

        # os.symlink() is a thing with py3 on Windows, but it requires
        # Administrator rights.
        if getattr(os, 'symlink', None) and os.name != 'nt':
            vlog("# Making python executable in test path a symlink to '%s'" %
                 sys.executable)
            mypython = os.path.join(self._tmpbindir, pyexename)
            try:
                # Already pointing at the right interpreter? Nothing to do.
                if os.readlink(mypython) == sys.executable:
                    return
                os.unlink(mypython)
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
            if self._findprogram(pyexename) != sys.executable:
                try:
                    os.symlink(sys.executable, mypython)
                    self._createdfiles.append(mypython)
                except OSError as err:
                    # child processes may race, which is harmless
                    if err.errno != errno.EEXIST:
                        raise
        else:
            # No symlink support: make PATH find the interpreter's directory
            # first by removing it and re-prepending it.
            exedir, exename = os.path.split(sys.executable)
            vlog("# Modifying search path to find %s as %s in '%s'" %
                 (exename, pyexename, exedir))
            path = os.environ['PATH'].split(os.pathsep)
            while exedir in path:
                path.remove(exedir)
            os.environ['PATH'] = os.pathsep.join([exedir] + path)
            if not self._findprogram(pyexename):
                print("WARNING: Cannot find %s in search path" % pyexename)
2951 2956
    def _installhg(self):
        """Install hg into the test environment.

        This will also configure hg with the appropriate testing settings.
        """
        vlog("# Performing temporary installation of HG")
        installerrs = os.path.join(self._hgtmp, b"install.err")
        compiler = ''
        if self.options.compiler:
            compiler = '--compiler ' + self.options.compiler
        if self.options.pure:
            pure = b"--pure"
        else:
            pure = b""

        # Run installer in hg root
        script = os.path.realpath(sys.argv[0])
        exe = sys.executable
        if PYTHON3:
            compiler = _bytespath(compiler)
            script = _bytespath(script)
            exe = _bytespath(exe)
        hgroot = os.path.dirname(os.path.dirname(script))
        self._hgroot = hgroot
        os.chdir(hgroot)
        nohome = b'--home=""'
        if os.name == 'nt':
            # The --home="" trick works only on OS where os.sep == '/'
            # because of a distutils convert_path() fast-path. Avoid it at
            # least on Windows for now, deal with .pydistutils.cfg bugs
            # when they happen.
            nohome = b''
        # Build + install into the per-run prefix; all output (stdout and
        # stderr) is captured into install.err for later inspection.
        cmd = (b'%(exe)s setup.py %(pure)s clean --all'
               b' build %(compiler)s --build-base="%(base)s"'
               b' install --force --prefix="%(prefix)s"'
               b' --install-lib="%(libdir)s"'
               b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
               % {b'exe': exe, b'pure': pure,
                  b'compiler': compiler,
                  b'base': os.path.join(self._hgtmp, b"build"),
                  b'prefix': self._installdir, b'libdir': self._pythondir,
                  b'bindir': self._bindir,
                  b'nohome': nohome, b'logfile': installerrs})

        # setuptools requires install directories to exist.
        def makedirs(p):
            try:
                os.makedirs(p)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise
        makedirs(self._pythondir)
        makedirs(self._bindir)

        vlog("# Running", cmd)
        if os.system(_strpath(cmd)) == 0:
            # Success: the captured log is only noise unless --verbose.
            if not self.options.verbose:
                try:
                    os.remove(installerrs)
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise
        else:
            # Failure: replay the install log to stdout and abort.
            with open(installerrs, 'rb') as f:
                for line in f:
                    if PYTHON3:
                        sys.stdout.buffer.write(line)
                    else:
                        sys.stdout.write(line)
            sys.exit(1)
        os.chdir(self._testdir)

        self._usecorrectpython()

        if self.options.py3k_warnings and not self.options.anycoverage:
            # Append -3 to the shebang line of the installed hg script.
            vlog("# Updating hg command to enable Py3k Warnings switch")
            with open(os.path.join(self._bindir, 'hg'), 'rb') as f:
                lines = [line.rstrip() for line in f]
                lines[0] += ' -3'
            with open(os.path.join(self._bindir, 'hg'), 'wb') as f:
                for line in lines:
                    f.write(line + '\n')

        hgbat = os.path.join(self._bindir, b'hg.bat')
        if os.path.isfile(hgbat):
            # hg.bat expects to be put in bin/scripts while run-tests.py
            # installation layout put it in bin/ directly. Fix it
            with open(hgbat, 'rb') as f:
                data = f.read()
            if b'"%~dp0..\python" "%~dp0hg" %*' in data:
                data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
                                    b'"%~dp0python" "%~dp0hg" %*')
                with open(hgbat, 'wb') as f:
                    f.write(data)
            else:
                print('WARNING: cannot fix hg.bat reference to python.exe')

        if self.options.anycoverage:
            # Install the coverage trigger and point the coverage machinery
            # at a dedicated data directory next to the install prefix.
            custom = os.path.join(self._testdir, 'sitecustomize.py')
            target = os.path.join(self._pythondir, 'sitecustomize.py')
            vlog('# Installing coverage trigger to %s' % target)
            shutil.copyfile(custom, target)
            rc = os.path.join(self._testdir, '.coveragerc')
            vlog('# Installing coverage rc to %s' % rc)
            os.environ['COVERAGE_PROCESS_START'] = rc
            covdir = os.path.join(self._installdir, '..', 'coverage')
            try:
                os.mkdir(covdir)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise

            os.environ['COVERAGE_DIR'] = covdir
3065 3070
3066 3071 def _checkhglib(self, verb):
3067 3072 """Ensure that the 'mercurial' package imported by python is
3068 3073 the one we expect it to be. If not, print a warning to stderr."""
3069 3074 if ((self._bindir == self._pythondir) and
3070 3075 (self._bindir != self._tmpbindir)):
3071 3076 # The pythondir has been inferred from --with-hg flag.
3072 3077 # We cannot expect anything sensible here.
3073 3078 return
3074 3079 expecthg = os.path.join(self._pythondir, b'mercurial')
3075 3080 actualhg = self._gethgpath()
3076 3081 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
3077 3082 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
3078 3083 ' (expected %s)\n'
3079 3084 % (verb, actualhg, expecthg))
3080 3085 def _gethgpath(self):
3081 3086 """Return the path to the mercurial package that is actually found by
3082 3087 the current Python interpreter."""
3083 3088 if self._hgpath is not None:
3084 3089 return self._hgpath
3085 3090
3086 3091 cmd = b'%s -c "import mercurial; print (mercurial.__path__[0])"'
3087 3092 cmd = cmd % PYTHON
3088 3093 if PYTHON3:
3089 3094 cmd = _strpath(cmd)
3090 3095 pipe = os.popen(cmd)
3091 3096 try:
3092 3097 self._hgpath = _bytespath(pipe.read().strip())
3093 3098 finally:
3094 3099 pipe.close()
3095 3100
3096 3101 return self._hgpath
3097 3102
3098 3103 def _installchg(self):
3099 3104 """Install chg into the test environment"""
3100 3105 vlog('# Performing temporary installation of CHG')
3101 3106 assert os.path.dirname(self._bindir) == self._installdir
3102 3107 assert self._hgroot, 'must be called after _installhg()'
3103 3108 cmd = (b'"%(make)s" clean install PREFIX="%(prefix)s"'
3104 3109 % {b'make': 'make', # TODO: switch by option or environment?
3105 3110 b'prefix': self._installdir})
3106 3111 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
3107 3112 vlog("# Running", cmd)
3108 3113 proc = subprocess.Popen(cmd, shell=True, cwd=cwd,
3109 3114 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
3110 3115 stderr=subprocess.STDOUT)
3111 3116 out, _err = proc.communicate()
3112 3117 if proc.returncode != 0:
3113 3118 if PYTHON3:
3114 3119 sys.stdout.buffer.write(out)
3115 3120 else:
3116 3121 sys.stdout.write(out)
3117 3122 sys.exit(1)
3118 3123
    def _outputcoverage(self):
        """Produce code coverage output.

        Combines the per-process coverage data collected under the install
        prefix and emits a terminal report, plus optional HTML
        (--htmlcov) and annotated-source (--annotate) reports.
        """
        import coverage
        coverage = coverage.coverage

        vlog('# Producing coverage report')
        # chdir is the easiest way to get short, relative paths in the
        # output.
        os.chdir(self._hgroot)
        covdir = os.path.join(self._installdir, '..', 'coverage')
        cov = coverage(data_file=os.path.join(covdir, 'cov'))

        # Map install directory paths back to source directory.
        cov.config.paths['srcdir'] = ['.', self._pythondir]

        cov.combine()

        # Exclude the install bin dir and the tests themselves from reports.
        omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
        cov.report(ignore_errors=True, omit=omit)

        if self.options.htmlcov:
            htmldir = os.path.join(self._outputdir, 'htmlcov')
            cov.html_report(directory=htmldir, omit=omit)
        if self.options.annotate:
            adir = os.path.join(self._outputdir, 'annotated')
            if not os.path.isdir(adir):
                os.mkdir(adir)
            cov.annotate(directory=adir, omit=omit)
3147 3152
3148 3153 def _findprogram(self, program):
3149 3154 """Search PATH for a executable program"""
3150 3155 dpb = _bytespath(os.defpath)
3151 3156 sepb = _bytespath(os.pathsep)
3152 3157 for p in osenvironb.get(b'PATH', dpb).split(sepb):
3153 3158 name = os.path.join(p, program)
3154 3159 if os.name == 'nt' or os.access(name, os.X_OK):
3155 3160 return name
3156 3161 return None
3157 3162
3158 3163 def _checktools(self):
3159 3164 """Ensure tools required to run tests are present."""
3160 3165 for p in self.REQUIREDTOOLS:
3161 3166 if os.name == 'nt' and not p.endswith(b'.exe'):
3162 3167 p += b'.exe'
3163 3168 found = self._findprogram(p)
3164 3169 if found:
3165 3170 vlog("# Found prerequisite", p, "at", found)
3166 3171 else:
3167 3172 print("WARNING: Did not find prerequisite tool: %s " %
3168 3173 p.decode("utf-8"))
3169 3174
def aggregateexceptions(path):
    """Summarize the exception report files found in ``path``.

    Each report file holds 5 NUL-separated fields: exception, main frame,
    hg frame, hg line, test name; files with a different field count are
    skipped. Returns a dict of aggregates keyed by the failure triple
    (hgframe, hgline, exception).
    """
    exceptioncounts = collections.Counter()
    testsbyfailure = collections.defaultdict(set)
    failuresbytest = collections.defaultdict(set)

    for entry in os.listdir(path):
        with open(os.path.join(path, entry), 'rb') as fh:
            fields = fh.read().split(b'\0')

        # Ignore malformed reports.
        if len(fields) != 5:
            continue

        exc, mainframe, hgframe, hgline, testname = (
            f.decode('utf-8') for f in fields)

        failure = (hgframe, hgline, exc)
        exceptioncounts[failure] += 1
        testsbyfailure[failure].add(testname)
        failuresbytest[testname].add(failure)

    # For each failure, pick the test exhibiting the fewest distinct
    # failures (ties broken by test name) -- the "purest" reproducer.
    leastfailing = {}
    for failure, tests in testsbyfailure.items():
        best = min(sorted(tests), key=lambda t: len(failuresbytest[t]))
        leastfailing[failure] = (len(failuresbytest[best]), best)

    # Combined record so callers can sort by total occurrences and
    # impacted tests.
    combined = dict(
        (failure, (exceptioncounts[failure],
                   len(testsbyfailure[failure]),
                   leastfailing[failure][0],
                   leastfailing[failure][1]))
        for failure in exceptioncounts)

    return {
        'exceptioncounts': exceptioncounts,
        'total': sum(exceptioncounts.values()),
        'combined': combined,
        'leastfailing': leastfailing,
        'byfailure': testsbyfailure,
        'bytest': failuresbytest,
    }
3222 3227
if __name__ == '__main__':
    runner = TestRunner()

    try:
        # On Windows, put the standard streams into binary mode so test
        # output bytes are not mangled by \n -> \r\n translation.
        import msvcrt
        msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
    except ImportError:
        # msvcrt only exists on Windows; nothing to do elsewhere.
        pass

    sys.exit(runner.run(sys.argv[1:]))
@@ -1,1857 +1,1928 b''
1 1 This file tests the behavior of run-tests.py itself.
2 2
3 3 Avoid interference from actual test env:
4 4
5 5 $ . "$TESTDIR/helper-runtests.sh"
6 6
7 7 Smoke test with install
8 8 ============
9 9 $ "$PYTHON" $TESTDIR/run-tests.py $HGTEST_RUN_TESTS_PURE -l
10 running 0 tests using 0 parallel processes
10 11
11 12 # Ran 0 tests, 0 skipped, 0 failed.
12 13
13 14 Define a helper to avoid the install step
14 15 =============
15 16 $ rt()
16 17 > {
17 18 > "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` "$@"
18 19 > }
19 20
20 21 error paths
21 22
22 23 #if symlink
23 24 $ ln -s `which true` hg
24 25 $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
25 26 warning: --with-hg should specify an hg script
27 running 0 tests using 0 parallel processes
26 28
27 29 # Ran 0 tests, 0 skipped, 0 failed.
28 30 $ rm hg
29 31 #endif
30 32
31 33 #if execbit
32 34 $ touch hg
33 35 $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
34 36 usage: run-tests.py [options] [tests]
35 37 run-tests.py: error: --with-hg must specify an executable hg script
36 38 [2]
37 39 $ rm hg
38 40 #endif
39 41
40 42 Features for testing optional lines
41 43 ===================================
42 44
43 45 $ cat > hghaveaddon.py <<EOF
44 46 > import hghave
45 47 > @hghave.check("custom", "custom hghave feature")
46 48 > def has_custom():
47 49 > return True
48 50 > @hghave.check("missing", "missing hghave feature")
49 51 > def has_missing():
50 52 > return False
51 53 > EOF
52 54
53 55 an empty test
54 56 =======================
55 57
56 58 $ touch test-empty.t
57 59 $ rt
60 running 1 tests using 1 parallel processes
58 61 .
59 62 # Ran 1 tests, 0 skipped, 0 failed.
60 63 $ rm test-empty.t
61 64
62 65 a successful test
63 66 =======================
64 67
65 68 $ cat > test-success.t << EOF
66 69 > $ echo babar
67 70 > babar
68 71 > $ echo xyzzy
69 72 > dont_print (?)
70 73 > nothing[42]line (re) (?)
71 74 > never*happens (glob) (?)
72 75 > more_nothing (?)
73 76 > xyzzy
74 77 > nor this (?)
75 78 > $ printf 'abc\ndef\nxyz\n'
76 79 > 123 (?)
77 80 > abc
78 81 > def (?)
79 82 > 456 (?)
80 83 > xyz
81 84 > $ printf 'zyx\nwvu\ntsr\n'
82 85 > abc (?)
83 86 > zyx (custom !)
84 87 > wvu
85 88 > no_print (no-custom !)
86 89 > tsr (no-missing !)
87 90 > missing (missing !)
88 91 > EOF
89 92
90 93 $ rt
94 running 1 tests using 1 parallel processes
91 95 .
92 96 # Ran 1 tests, 0 skipped, 0 failed.
93 97
94 98 failing test
95 99 ==================
96 100
97 101 test churn with globs
98 102 $ cat > test-failure.t <<EOF
99 103 > $ echo "bar-baz"; echo "bar-bad"; echo foo
100 104 > bar*bad (glob)
101 105 > bar*baz (glob)
102 106 > | fo (re)
103 107 > EOF
104 108 $ rt test-failure.t
109 running 1 tests using 1 parallel processes
105 110
106 111 --- $TESTTMP/test-failure.t
107 112 +++ $TESTTMP/test-failure.t.err
108 113 @@ -1,4 +1,4 @@
109 114 $ echo "bar-baz"; echo "bar-bad"; echo foo
110 115 + bar*baz (glob)
111 116 bar*bad (glob)
112 117 - bar*baz (glob)
113 118 - | fo (re)
114 119 + foo
115 120
116 121 ERROR: test-failure.t output changed
117 122 !
118 123 Failed test-failure.t: output changed
119 124 # Ran 1 tests, 0 skipped, 1 failed.
120 125 python hash seed: * (glob)
121 126 [1]
122 127
123 128 test how multiple globs gets matched with lines in output
124 129 $ cat > test-failure-globs.t <<EOF
125 130 > $ echo "context"; echo "context"; \
126 131 > echo "key: 1"; echo "value: not a"; \
127 132 > echo "key: 2"; echo "value: not b"; \
128 133 > echo "key: 3"; echo "value: c"; \
129 134 > echo "key: 4"; echo "value: d"
130 135 > context
131 136 > context
132 137 > key: 1
133 138 > value: a
134 139 > key: 2
135 140 > value: b
136 141 > key: 3
137 142 > value: * (glob)
138 143 > key: 4
139 144 > value: * (glob)
140 145 > EOF
141 146 $ rt test-failure-globs.t
147 running 1 tests using 1 parallel processes
142 148
143 149 --- $TESTTMP/test-failure-globs.t
144 150 +++ $TESTTMP/test-failure-globs.t.err
145 151 @@ -2,9 +2,9 @@
146 152 context
147 153 context
148 154 key: 1
149 155 - value: a
150 156 + value: not a
151 157 key: 2
152 158 - value: b
153 159 + value: not b
154 160 key: 3
155 161 value: * (glob)
156 162 key: 4
157 163
158 164 ERROR: test-failure-globs.t output changed
159 165 !
160 166 Failed test-failure-globs.t: output changed
161 167 # Ran 1 tests, 0 skipped, 1 failed.
162 168 python hash seed: * (glob)
163 169 [1]
164 170 $ rm test-failure-globs.t
165 171
166 172 test diff colorisation
167 173
168 174 #if no-windows pygments
169 175 $ rt test-failure.t --color always
170 176
171 177 \x1b[38;5;124m--- $TESTTMP/test-failure.t\x1b[39m (esc)
172 178 \x1b[38;5;34m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc)
173 179 \x1b[38;5;90;01m@@ -1,4 +1,4 @@\x1b[39;00m (esc)
174 180 $ echo "bar-baz"; echo "bar-bad"; echo foo
175 181 \x1b[38;5;34m+ bar*baz (glob)\x1b[39m (esc)
176 182 bar*bad (glob)
177 183 \x1b[38;5;124m- bar*baz (glob)\x1b[39m (esc)
178 184 \x1b[38;5;124m- | fo (re)\x1b[39m (esc)
179 185 \x1b[38;5;34m+ foo\x1b[39m (esc)
180 186
181 187 \x1b[38;5;88mERROR: \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m output changed\x1b[39m (esc)
182 188 !
183 189 \x1b[38;5;88mFailed \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m: output changed\x1b[39m (esc)
184 190 # Ran 1 tests, 0 skipped, 1 failed.
185 191 python hash seed: * (glob)
186 192 [1]
187 193
188 194 $ rt test-failure.t 2> tmp.log
189 195 [1]
190 196 $ cat tmp.log
191 197
192 198 --- $TESTTMP/test-failure.t
193 199 +++ $TESTTMP/test-failure.t.err
194 200 @@ -1,4 +1,4 @@
195 201 $ echo "bar-baz"; echo "bar-bad"; echo foo
196 202 + bar*baz (glob)
197 203 bar*bad (glob)
198 204 - bar*baz (glob)
199 205 - | fo (re)
200 206 + foo
201 207
202 208 ERROR: test-failure.t output changed
203 209 !
204 210 Failed test-failure.t: output changed
205 211 # Ran 1 tests, 0 skipped, 1 failed.
206 212 python hash seed: * (glob)
207 213 #endif
208 214
209 215 $ cat > test-failure.t << EOF
210 216 > $ true
211 217 > should go away (true !)
212 218 > $ true
213 219 > should stay (false !)
214 220 >
215 221 > Should remove first line, not second or third
216 222 > $ echo 'testing'
217 223 > baz*foo (glob) (true !)
218 224 > foobar*foo (glob) (false !)
219 225 > te*ting (glob) (true !)
220 226 >
221 227 > Should keep first two lines, remove third and last
222 228 > $ echo 'testing'
223 229 > test.ng (re) (true !)
224 230 > foo.ar (re) (false !)
225 231 > b.r (re) (true !)
226 232 > missing (?)
227 233 > awol (true !)
228 234 >
229 235 > The "missing" line should stay, even though awol is dropped
230 236 > $ echo 'testing'
231 237 > test.ng (re) (true !)
232 238 > foo.ar (?)
233 239 > awol
234 240 > missing (?)
235 241 > EOF
236 242 $ rt test-failure.t
243 running 1 tests using 1 parallel processes
237 244
238 245 --- $TESTTMP/test-failure.t
239 246 +++ $TESTTMP/test-failure.t.err
240 247 @@ -1,11 +1,9 @@
241 248 $ true
242 249 - should go away (true !)
243 250 $ true
244 251 should stay (false !)
245 252
246 253 Should remove first line, not second or third
247 254 $ echo 'testing'
248 255 - baz*foo (glob) (true !)
249 256 foobar*foo (glob) (false !)
250 257 te*ting (glob) (true !)
251 258
252 259 foo.ar (re) (false !)
253 260 missing (?)
254 261 @@ -13,13 +11,10 @@
255 262 $ echo 'testing'
256 263 test.ng (re) (true !)
257 264 foo.ar (re) (false !)
258 265 - b.r (re) (true !)
259 266 missing (?)
260 267 - awol (true !)
261 268
262 269 The "missing" line should stay, even though awol is dropped
263 270 $ echo 'testing'
264 271 test.ng (re) (true !)
265 272 foo.ar (?)
266 273 - awol
267 274 missing (?)
268 275
269 276 ERROR: test-failure.t output changed
270 277 !
271 278 Failed test-failure.t: output changed
272 279 # Ran 1 tests, 0 skipped, 1 failed.
273 280 python hash seed: * (glob)
274 281 [1]
275 282
276 283 basic failing test
277 284 $ cat > test-failure.t << EOF
278 285 > $ echo babar
279 286 > rataxes
280 287 > This is a noop statement so that
281 288 > this test is still more bytes than success.
282 289 > pad pad pad pad............................................................
283 290 > pad pad pad pad............................................................
284 291 > pad pad pad pad............................................................
285 292 > pad pad pad pad............................................................
286 293 > pad pad pad pad............................................................
287 294 > pad pad pad pad............................................................
288 295 > EOF
289 296
290 297 >>> fh = open('test-failure-unicode.t', 'wb')
291 298 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
292 299 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
293 300
294 301 $ rt
302 running 3 tests using 1 parallel processes
295 303
296 304 --- $TESTTMP/test-failure.t
297 305 +++ $TESTTMP/test-failure.t.err
298 306 @@ -1,5 +1,5 @@
299 307 $ echo babar
300 308 - rataxes
301 309 + babar
302 310 This is a noop statement so that
303 311 this test is still more bytes than success.
304 312 pad pad pad pad............................................................
305 313
306 314 ERROR: test-failure.t output changed
307 315 !.
308 316 --- $TESTTMP/test-failure-unicode.t
309 317 +++ $TESTTMP/test-failure-unicode.t.err
310 318 @@ -1,2 +1,2 @@
311 319 $ echo babar\xce\xb1 (esc)
312 320 - l\xce\xb5\xce\xb5t (esc)
313 321 + babar\xce\xb1 (esc)
314 322
315 323 ERROR: test-failure-unicode.t output changed
316 324 !
317 325 Failed test-failure.t: output changed
318 326 Failed test-failure-unicode.t: output changed
319 327 # Ran 3 tests, 0 skipped, 2 failed.
320 328 python hash seed: * (glob)
321 329 [1]
322 330
323 331 test --outputdir
324 332 $ mkdir output
325 333 $ rt --outputdir output
334 running 3 tests using 1 parallel processes
326 335
327 336 --- $TESTTMP/test-failure.t
328 337 +++ $TESTTMP/output/test-failure.t.err
329 338 @@ -1,5 +1,5 @@
330 339 $ echo babar
331 340 - rataxes
332 341 + babar
333 342 This is a noop statement so that
334 343 this test is still more bytes than success.
335 344 pad pad pad pad............................................................
336 345
337 346 ERROR: test-failure.t output changed
338 347 !.
339 348 --- $TESTTMP/test-failure-unicode.t
340 349 +++ $TESTTMP/output/test-failure-unicode.t.err
341 350 @@ -1,2 +1,2 @@
342 351 $ echo babar\xce\xb1 (esc)
343 352 - l\xce\xb5\xce\xb5t (esc)
344 353 + babar\xce\xb1 (esc)
345 354
346 355 ERROR: test-failure-unicode.t output changed
347 356 !
348 357 Failed test-failure.t: output changed
349 358 Failed test-failure-unicode.t: output changed
350 359 # Ran 3 tests, 0 skipped, 2 failed.
351 360 python hash seed: * (glob)
352 361 [1]
353 362 $ ls -a output
354 363 .
355 364 ..
356 365 .testtimes
357 366 test-failure-unicode.t.err
358 367 test-failure.t.err
359 368
360 369 test --xunit support
361 370 $ rt --xunit=xunit.xml
371 running 3 tests using 1 parallel processes
362 372
363 373 --- $TESTTMP/test-failure.t
364 374 +++ $TESTTMP/test-failure.t.err
365 375 @@ -1,5 +1,5 @@
366 376 $ echo babar
367 377 - rataxes
368 378 + babar
369 379 This is a noop statement so that
370 380 this test is still more bytes than success.
371 381 pad pad pad pad............................................................
372 382
373 383 ERROR: test-failure.t output changed
374 384 !.
375 385 --- $TESTTMP/test-failure-unicode.t
376 386 +++ $TESTTMP/test-failure-unicode.t.err
377 387 @@ -1,2 +1,2 @@
378 388 $ echo babar\xce\xb1 (esc)
379 389 - l\xce\xb5\xce\xb5t (esc)
380 390 + babar\xce\xb1 (esc)
381 391
382 392 ERROR: test-failure-unicode.t output changed
383 393 !
384 394 Failed test-failure.t: output changed
385 395 Failed test-failure-unicode.t: output changed
386 396 # Ran 3 tests, 0 skipped, 2 failed.
387 397 python hash seed: * (glob)
388 398 [1]
389 399 $ cat xunit.xml
390 400 <?xml version="1.0" encoding="utf-8"?>
391 401 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
392 402 <testcase name="test-success.t" time="*"/> (glob)
393 403 <testcase name="test-failure-unicode.t" time="*"> (glob)
394 404 <failure message="output changed" type="output-mismatch">
395 405 <![CDATA[--- $TESTTMP/test-failure-unicode.t
396 406 +++ $TESTTMP/test-failure-unicode.t.err
397 407 @@ -1,2 +1,2 @@
398 408 $ echo babar\xce\xb1 (esc)
399 409 - l\xce\xb5\xce\xb5t (esc)
400 410 + babar\xce\xb1 (esc)
401 411 ]]> </failure>
402 412 </testcase>
403 413 <testcase name="test-failure.t" time="*"> (glob)
404 414 <failure message="output changed" type="output-mismatch">
405 415 <![CDATA[--- $TESTTMP/test-failure.t
406 416 +++ $TESTTMP/test-failure.t.err
407 417 @@ -1,5 +1,5 @@
408 418 $ echo babar
409 419 - rataxes
410 420 + babar
411 421 This is a noop statement so that
412 422 this test is still more bytes than success.
413 423 pad pad pad pad............................................................
414 424 ]]> </failure>
415 425 </testcase>
416 426 </testsuite>
417 427
418 428 $ cat .testtimes
419 429 test-empty.t * (glob)
420 430 test-failure-globs.t * (glob)
421 431 test-failure-unicode.t * (glob)
422 432 test-failure.t * (glob)
423 433 test-success.t * (glob)
424 434
425 435 $ rt --list-tests
426 436 test-failure-unicode.t
427 437 test-failure.t
428 438 test-success.t
429 439
430 440 $ rt --list-tests --json
431 441 test-failure-unicode.t
432 442 test-failure.t
433 443 test-success.t
434 444 $ cat report.json
435 445 testreport ={
436 446 "test-failure-unicode.t": {
437 447 "result": "success"
438 448 },
439 449 "test-failure.t": {
440 450 "result": "success"
441 451 },
442 452 "test-success.t": {
443 453 "result": "success"
444 454 }
445 455 } (no-eol)
446 456
447 457 $ rt --list-tests --xunit=xunit.xml
448 458 test-failure-unicode.t
449 459 test-failure.t
450 460 test-success.t
451 461 $ cat xunit.xml
452 462 <?xml version="1.0" encoding="utf-8"?>
453 463 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
454 464 <testcase name="test-failure-unicode.t"/>
455 465 <testcase name="test-failure.t"/>
456 466 <testcase name="test-success.t"/>
457 467 </testsuite>
458 468
459 469 $ rt --list-tests test-failure* --json --xunit=xunit.xml --outputdir output
460 470 test-failure-unicode.t
461 471 test-failure.t
462 472 $ cat output/report.json
463 473 testreport ={
464 474 "test-failure-unicode.t": {
465 475 "result": "success"
466 476 },
467 477 "test-failure.t": {
468 478 "result": "success"
469 479 }
470 480 } (no-eol)
471 481 $ cat xunit.xml
472 482 <?xml version="1.0" encoding="utf-8"?>
473 483 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
474 484 <testcase name="test-failure-unicode.t"/>
475 485 <testcase name="test-failure.t"/>
476 486 </testsuite>
477 487
478 488 $ rm test-failure-unicode.t
479 489
480 490 test for --retest
481 491 ====================
482 492
483 493 $ rt --retest
494 running 2 tests using 1 parallel processes
484 495
485 496 --- $TESTTMP/test-failure.t
486 497 +++ $TESTTMP/test-failure.t.err
487 498 @@ -1,5 +1,5 @@
488 499 $ echo babar
489 500 - rataxes
490 501 + babar
491 502 This is a noop statement so that
492 503 this test is still more bytes than success.
493 504 pad pad pad pad............................................................
494 505
495 506 ERROR: test-failure.t output changed
496 507 !
497 508 Failed test-failure.t: output changed
498 509 # Ran 2 tests, 1 skipped, 1 failed.
499 510 python hash seed: * (glob)
500 511 [1]
501 512
502 513 --retest works with --outputdir
503 514 $ rm -r output
504 515 $ mkdir output
505 516 $ mv test-failure.t.err output
506 517 $ rt --retest --outputdir output
518 running 2 tests using 1 parallel processes
507 519
508 520 --- $TESTTMP/test-failure.t
509 521 +++ $TESTTMP/output/test-failure.t.err
510 522 @@ -1,5 +1,5 @@
511 523 $ echo babar
512 524 - rataxes
513 525 + babar
514 526 This is a noop statement so that
515 527 this test is still more bytes than success.
516 528 pad pad pad pad............................................................
517 529
518 530 ERROR: test-failure.t output changed
519 531 !
520 532 Failed test-failure.t: output changed
521 533 # Ran 2 tests, 1 skipped, 1 failed.
522 534 python hash seed: * (glob)
523 535 [1]
524 536
525 537 Selecting Tests To Run
526 538 ======================
527 539
528 540 successful
529 541
530 542 $ rt test-success.t
543 running 1 tests using 1 parallel processes
531 544 .
532 545 # Ran 1 tests, 0 skipped, 0 failed.
533 546
534 547 success w/ keyword
535 548 $ rt -k xyzzy
549 running 2 tests using 1 parallel processes
536 550 .
537 551 # Ran 2 tests, 1 skipped, 0 failed.
538 552
539 553 failed
540 554
541 555 $ rt test-failure.t
556 running 1 tests using 1 parallel processes
542 557
543 558 --- $TESTTMP/test-failure.t
544 559 +++ $TESTTMP/test-failure.t.err
545 560 @@ -1,5 +1,5 @@
546 561 $ echo babar
547 562 - rataxes
548 563 + babar
549 564 This is a noop statement so that
550 565 this test is still more bytes than success.
551 566 pad pad pad pad............................................................
552 567
553 568 ERROR: test-failure.t output changed
554 569 !
555 570 Failed test-failure.t: output changed
556 571 # Ran 1 tests, 0 skipped, 1 failed.
557 572 python hash seed: * (glob)
558 573 [1]
559 574
560 575 failure w/ keyword
561 576 $ rt -k rataxes
577 running 2 tests using 1 parallel processes
562 578
563 579 --- $TESTTMP/test-failure.t
564 580 +++ $TESTTMP/test-failure.t.err
565 581 @@ -1,5 +1,5 @@
566 582 $ echo babar
567 583 - rataxes
568 584 + babar
569 585 This is a noop statement so that
570 586 this test is still more bytes than success.
571 587 pad pad pad pad............................................................
572 588
573 589 ERROR: test-failure.t output changed
574 590 !
575 591 Failed test-failure.t: output changed
576 592 # Ran 2 tests, 1 skipped, 1 failed.
577 593 python hash seed: * (glob)
578 594 [1]
579 595
580 596 Verify that when a process fails to start we show a useful message
581 597 ==================================================================
582 598
583 599 $ cat > test-serve-fail.t <<EOF
584 600 > $ echo 'abort: child process failed to start blah'
585 601 > EOF
586 602 $ rt test-serve-fail.t
603 running 1 tests using 1 parallel processes
587 604
588 605 --- $TESTTMP/test-serve-fail.t
589 606 +++ $TESTTMP/test-serve-fail.t.err
590 607 @@ -1* +1,2 @@ (glob)
591 608 $ echo 'abort: child process failed to start blah'
592 609 + abort: child process failed to start blah
593 610
594 611 ERROR: test-serve-fail.t output changed
595 612 !
596 613 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
597 614 # Ran 1 tests, 0 skipped, 1 failed.
598 615 python hash seed: * (glob)
599 616 [1]
600 617 $ rm test-serve-fail.t
601 618
602 619 Verify that we can try other ports
603 620 ===================================
604 621
605 622 Extensions aren't inherited by the invoked run-tests.py. An extension
606 623 introducing a repository requirement could cause this to fail. So we force
607 624 HGRCPATH to get a clean environment.
608 625
609 626 $ HGRCPATH= hg init inuse
610 627 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
611 628 $ cat blocks.pid >> $DAEMON_PIDS
612 629 $ cat > test-serve-inuse.t <<EOF
613 630 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
614 631 > $ cat hg.pid >> \$DAEMON_PIDS
615 632 > EOF
616 633 $ rt test-serve-inuse.t
634 running 1 tests using 1 parallel processes
617 635 .
618 636 # Ran 1 tests, 0 skipped, 0 failed.
619 637 $ rm test-serve-inuse.t
620 638 $ killdaemons.py $DAEMON_PIDS
621 639
622 640 Running In Debug Mode
623 641 ======================
624 642
625 643 $ rt --debug 2>&1 | grep -v pwd
644 running 2 tests using 1 parallel processes
626 645 + echo *SALT* 0 0 (glob)
627 646 *SALT* 0 0 (glob)
628 647 + echo babar
629 648 babar
630 649 + echo *SALT* 10 0 (glob)
631 650 *SALT* 10 0 (glob)
632 651 *+ echo *SALT* 0 0 (glob)
633 652 *SALT* 0 0 (glob)
634 653 + echo babar
635 654 babar
636 655 + echo *SALT* 2 0 (glob)
637 656 *SALT* 2 0 (glob)
638 657 + echo xyzzy
639 658 xyzzy
640 659 + echo *SALT* 9 0 (glob)
641 660 *SALT* 9 0 (glob)
642 661 + printf *abc\ndef\nxyz\n* (glob)
643 662 abc
644 663 def
645 664 xyz
646 665 + echo *SALT* 15 0 (glob)
647 666 *SALT* 15 0 (glob)
648 667 + printf *zyx\nwvu\ntsr\n* (glob)
649 668 zyx
650 669 wvu
651 670 tsr
652 671 + echo *SALT* 22 0 (glob)
653 672 *SALT* 22 0 (glob)
654 673 .
655 674 # Ran 2 tests, 0 skipped, 0 failed.
656 675
657 676 Parallel runs
658 677 ==============
659 678
660 679 (duplicate the failing test to get predictable output)
661 680 $ cp test-failure.t test-failure-copy.t
662 681
663 682 $ rt --jobs 2 test-failure*.t -n
683 running 2 tests using 2 parallel processes
664 684 !!
665 685 Failed test-failure*.t: output changed (glob)
666 686 Failed test-failure*.t: output changed (glob)
667 687 # Ran 2 tests, 0 skipped, 2 failed.
668 688 python hash seed: * (glob)
669 689 [1]
670 690
671 691 failures in parallel with --first should only print one failure
672 692 $ rt --jobs 2 --first test-failure*.t
693 running 2 tests using 2 parallel processes
673 694
674 695 --- $TESTTMP/test-failure*.t (glob)
675 696 +++ $TESTTMP/test-failure*.t.err (glob)
676 697 @@ -1,5 +1,5 @@
677 698 $ echo babar
678 699 - rataxes
679 700 + babar
680 701 This is a noop statement so that
681 702 this test is still more bytes than success.
682 703 pad pad pad pad............................................................
683 704
684 705 Failed test-failure*.t: output changed (glob)
685 706 Failed test-failure*.t: output changed (glob)
686 707 # Ran 2 tests, 0 skipped, 2 failed.
687 708 python hash seed: * (glob)
688 709 [1]
689 710
690 711
691 712 (delete the duplicated test file)
692 713 $ rm test-failure-copy.t
693 714
694 715
695 716 Interactive run
696 717 ===============
697 718
698 719 (backup the failing test)
699 720 $ cp test-failure.t backup
700 721
701 722 Refuse the fix
702 723
703 724 $ echo 'n' | rt -i
725 running 2 tests using 1 parallel processes
704 726
705 727 --- $TESTTMP/test-failure.t
706 728 +++ $TESTTMP/test-failure.t.err
707 729 @@ -1,5 +1,5 @@
708 730 $ echo babar
709 731 - rataxes
710 732 + babar
711 733 This is a noop statement so that
712 734 this test is still more bytes than success.
713 735 pad pad pad pad............................................................
714 736 Accept this change? [n]
715 737 ERROR: test-failure.t output changed
716 738 !.
717 739 Failed test-failure.t: output changed
718 740 # Ran 2 tests, 0 skipped, 1 failed.
719 741 python hash seed: * (glob)
720 742 [1]
721 743
722 744 $ cat test-failure.t
723 745 $ echo babar
724 746 rataxes
725 747 This is a noop statement so that
726 748 this test is still more bytes than success.
727 749 pad pad pad pad............................................................
728 750 pad pad pad pad............................................................
729 751 pad pad pad pad............................................................
730 752 pad pad pad pad............................................................
731 753 pad pad pad pad............................................................
732 754 pad pad pad pad............................................................
733 755
734 756 Interactive with custom view
735 757
736 758 $ echo 'n' | rt -i --view echo
759 running 2 tests using 1 parallel processes
737 760 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
738 761 Accept this change? [n]* (glob)
739 762 ERROR: test-failure.t output changed
740 763 !.
741 764 Failed test-failure.t: output changed
742 765 # Ran 2 tests, 0 skipped, 1 failed.
743 766 python hash seed: * (glob)
744 767 [1]
745 768
746 769 View the fix
747 770
748 771 $ echo 'y' | rt --view echo
772 running 2 tests using 1 parallel processes
749 773 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
750 774
751 775 ERROR: test-failure.t output changed
752 776 !.
753 777 Failed test-failure.t: output changed
754 778 # Ran 2 tests, 0 skipped, 1 failed.
755 779 python hash seed: * (glob)
756 780 [1]
757 781
758 782 Accept the fix
759 783
760 784 $ cat >> test-failure.t <<EOF
761 785 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
762 786 > saved backup bundle to \$TESTTMP/foo.hg
763 787 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
764 788 > saved backup bundle to $TESTTMP\\foo.hg
765 789 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
766 790 > saved backup bundle to \$TESTTMP/*.hg (glob)
767 791 > EOF
768 792 $ echo 'y' | rt -i 2>&1
793 running 2 tests using 1 parallel processes
769 794
770 795 --- $TESTTMP/test-failure.t
771 796 +++ $TESTTMP/test-failure.t.err
772 797 @@ -1,5 +1,5 @@
773 798 $ echo babar
774 799 - rataxes
775 800 + babar
776 801 This is a noop statement so that
777 802 this test is still more bytes than success.
778 803 pad pad pad pad............................................................
779 804 @@ -11,6 +11,6 @@
780 805 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
781 806 saved backup bundle to $TESTTMP/foo.hg
782 807 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
783 808 - saved backup bundle to $TESTTMP\foo.hg
784 809 + saved backup bundle to $TESTTMP/foo.hg
785 810 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
786 811 saved backup bundle to $TESTTMP/*.hg (glob)
787 812 Accept this change? [n] ..
788 813 # Ran 2 tests, 0 skipped, 0 failed.
789 814
790 815 $ sed -e 's,(glob)$,&<,g' test-failure.t
791 816 $ echo babar
792 817 babar
793 818 This is a noop statement so that
794 819 this test is still more bytes than success.
795 820 pad pad pad pad............................................................
796 821 pad pad pad pad............................................................
797 822 pad pad pad pad............................................................
798 823 pad pad pad pad............................................................
799 824 pad pad pad pad............................................................
800 825 pad pad pad pad............................................................
801 826 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
802 827 saved backup bundle to $TESTTMP/foo.hg
803 828 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
804 829 saved backup bundle to $TESTTMP/foo.hg
805 830 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
806 831 saved backup bundle to $TESTTMP/*.hg (glob)<
807 832
808 833 Race condition - test file was modified when test is running
809 834
810 835 $ TESTRACEDIR=`pwd`
811 836 $ export TESTRACEDIR
812 837 $ cat > test-race.t <<EOF
813 838 > $ echo 1
814 839 > $ echo "# a new line" >> $TESTRACEDIR/test-race.t
815 840 > EOF
816 841
817 842 $ rt -i test-race.t
843 running 1 tests using 1 parallel processes
818 844
819 845 --- $TESTTMP/test-race.t
820 846 +++ $TESTTMP/test-race.t.err
821 847 @@ -1,2 +1,3 @@
822 848 $ echo 1
823 849 + 1
824 850 $ echo "# a new line" >> $TESTTMP/test-race.t
825 851 Reference output has changed (run again to prompt changes)
826 852 ERROR: test-race.t output changed
827 853 !
828 854 Failed test-race.t: output changed
829 855 # Ran 1 tests, 0 skipped, 1 failed.
830 856 python hash seed: * (glob)
831 857 [1]
832 858
833 859 $ rm test-race.t
834 860
835 861 When "#testcases" is used in .t files
836 862
837 863 $ cat >> test-cases.t <<EOF
838 864 > #testcases a b
839 865 > #if a
840 866 > $ echo 1
841 867 > #endif
842 868 > #if b
843 869 > $ echo 2
844 870 > #endif
845 871 > EOF
846 872
847 873 $ cat <<EOF | rt -i test-cases.t 2>&1
848 874 > y
849 875 > y
850 876 > EOF
877 running 2 tests using 1 parallel processes
851 878
852 879 --- $TESTTMP/test-cases.t
853 880 +++ $TESTTMP/test-cases.t#a.err
854 881 @@ -1,6 +1,7 @@
855 882 #testcases a b
856 883 #if a
857 884 $ echo 1
858 885 + 1
859 886 #endif
860 887 #if b
861 888 $ echo 2
862 889 Accept this change? [n] .
863 890 --- $TESTTMP/test-cases.t
864 891 +++ $TESTTMP/test-cases.t#b.err
865 892 @@ -5,4 +5,5 @@
866 893 #endif
867 894 #if b
868 895 $ echo 2
869 896 + 2
870 897 #endif
871 898 Accept this change? [n] .
872 899 # Ran 2 tests, 0 skipped, 0 failed.
873 900
874 901 $ cat test-cases.t
875 902 #testcases a b
876 903 #if a
877 904 $ echo 1
878 905 1
879 906 #endif
880 907 #if b
881 908 $ echo 2
882 909 2
883 910 #endif
884 911
885 912 $ cat >> test-cases.t <<'EOF'
886 913 > #if a
887 914 > $ NAME=A
888 915 > #else
889 916 > $ NAME=B
890 917 > #endif
891 918 > $ echo $NAME
892 919 > A (a !)
893 920 > B (b !)
894 921 > EOF
895 922 $ rt test-cases.t
923 running 2 tests using 1 parallel processes
896 924 ..
897 925 # Ran 2 tests, 0 skipped, 0 failed.
898 926
899 927 When using multiple dimensions of "#testcases" in .t files
900 928
901 929 $ cat > test-cases.t <<'EOF'
902 930 > #testcases a b
903 931 > #testcases c d
904 932 > #if a d
905 933 > $ echo $TESTCASE
906 934 > a#d
907 935 > #endif
908 936 > #if b c
909 937 > $ echo yes
910 938 > no
911 939 > #endif
912 940 > EOF
913 941 $ rt test-cases.t
942 running 4 tests using 1 parallel processes
914 943 ..
915 944 --- $TESTTMP/test-cases.t
916 945 +++ $TESTTMP/test-cases.t#b#c.err
917 946 @@ -6,5 +6,5 @@
918 947 #endif
919 948 #if b c
920 949 $ echo yes
921 950 - no
922 951 + yes
923 952 #endif
924 953
925 954 ERROR: test-cases.t#b#c output changed
926 955 !.
927 956 Failed test-cases.t#b#c: output changed
928 957 # Ran 4 tests, 0 skipped, 1 failed.
929 958 python hash seed: * (glob)
930 959 [1]
931 960
932 961 $ rm test-cases.t#b#c.err
933 962 $ rm test-cases.t
934 963
935 964 (reinstall)
936 965 $ mv backup test-failure.t
937 966
938 967 No Diff
939 968 ===============
940 969
941 970 $ rt --nodiff
971 running 2 tests using 1 parallel processes
942 972 !.
943 973 Failed test-failure.t: output changed
944 974 # Ran 2 tests, 0 skipped, 1 failed.
945 975 python hash seed: * (glob)
946 976 [1]
947 977
948 978 test --tmpdir support
949 979 $ rt --tmpdir=$TESTTMP/keep test-success.t
980 running 1 tests using 1 parallel processes
950 981
951 982 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t
952 983 Keeping threadtmp dir: $TESTTMP/keep/child1
953 984 .
954 985 # Ran 1 tests, 0 skipped, 0 failed.
955 986
956 987 timeouts
957 988 ========
958 989 $ cat > test-timeout.t <<EOF
959 990 > $ sleep 2
960 991 > $ echo pass
961 992 > pass
962 993 > EOF
963 994 > echo '#require slow' > test-slow-timeout.t
964 995 > cat test-timeout.t >> test-slow-timeout.t
965 996 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
997 running 2 tests using 1 parallel processes
966 998 st
967 999 Skipped test-slow-timeout.t: missing feature: allow slow tests (use --allow-slow-tests)
968 1000 Failed test-timeout.t: timed out
969 1001 # Ran 1 tests, 1 skipped, 1 failed.
970 1002 python hash seed: * (glob)
971 1003 [1]
972 1004 $ rt --timeout=1 --slowtimeout=3 \
973 1005 > test-timeout.t test-slow-timeout.t --allow-slow-tests
1006 running 2 tests using 1 parallel processes
974 1007 .t
975 1008 Failed test-timeout.t: timed out
976 1009 # Ran 2 tests, 0 skipped, 1 failed.
977 1010 python hash seed: * (glob)
978 1011 [1]
979 1012 $ rm test-timeout.t test-slow-timeout.t
980 1013
981 1014 test for --time
982 1015 ==================
983 1016
984 1017 $ rt test-success.t --time
1018 running 1 tests using 1 parallel processes
985 1019 .
986 1020 # Ran 1 tests, 0 skipped, 0 failed.
987 1021 # Producing time report
988 1022 start end cuser csys real Test
989 1023 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
990 1024
991 1025 test for --time with --job enabled
992 1026 ====================================
993 1027
994 1028 $ rt test-success.t --time --jobs 2
1029 running 1 tests using 1 parallel processes
995 1030 .
996 1031 # Ran 1 tests, 0 skipped, 0 failed.
997 1032 # Producing time report
998 1033 start end cuser csys real Test
999 1034 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
1000 1035
1001 1036 Skips
1002 1037 ================
1003 1038 $ cat > test-skip.t <<EOF
1004 1039 > $ echo xyzzy
1005 1040 > #if true
1006 1041 > #require false
1007 1042 > #end
1008 1043 > EOF
1009 1044 $ cat > test-noskip.t <<EOF
1010 1045 > #if false
1011 1046 > #require false
1012 1047 > #endif
1013 1048 > EOF
1014 1049 $ rt --nodiff
1050 running 4 tests using 1 parallel processes
1015 1051 !.s.
1016 1052 Skipped test-skip.t: missing feature: nail clipper
1017 1053 Failed test-failure.t: output changed
1018 1054 # Ran 3 tests, 1 skipped, 1 failed.
1019 1055 python hash seed: * (glob)
1020 1056 [1]
1021 1057
1022 1058 $ rm test-noskip.t
1023 1059 $ rt --keyword xyzzy
1060 running 3 tests using 1 parallel processes
1024 1061 .s
1025 1062 Skipped test-skip.t: missing feature: nail clipper
1026 1063 # Ran 2 tests, 2 skipped, 0 failed.
1027 1064
1028 1065 Skips with xml
1029 1066 $ rt --keyword xyzzy \
1030 1067 > --xunit=xunit.xml
1068 running 3 tests using 1 parallel processes
1031 1069 .s
1032 1070 Skipped test-skip.t: missing feature: nail clipper
1033 1071 # Ran 2 tests, 2 skipped, 0 failed.
1034 1072 $ cat xunit.xml
1035 1073 <?xml version="1.0" encoding="utf-8"?>
1036 1074 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
1037 1075 <testcase name="test-success.t" time="*"/> (glob)
1038 1076 <testcase name="test-skip.t">
1039 1077 <skipped>
1040 1078 <![CDATA[missing feature: nail clipper]]> </skipped>
1041 1079 </testcase>
1042 1080 </testsuite>
1043 1081
1044 1082 Missing skips or blacklisted skips don't count as executed:
1045 1083 $ echo test-failure.t > blacklist
1046 1084 $ rt --blacklist=blacklist --json\
1047 1085 > test-failure.t test-bogus.t
1086 running 2 tests using 1 parallel processes
1048 1087 ss
1049 1088 Skipped test-bogus.t: Doesn't exist
1050 1089 Skipped test-failure.t: blacklisted
1051 1090 # Ran 0 tests, 2 skipped, 0 failed.
1052 1091 $ cat report.json
1053 1092 testreport ={
1054 1093 "test-bogus.t": {
1055 1094 "result": "skip"
1056 1095 },
1057 1096 "test-failure.t": {
1058 1097 "result": "skip"
1059 1098 }
1060 1099 } (no-eol)
1061 1100
1062 1101 Whitelist trumps blacklist
1063 1102 $ echo test-failure.t > whitelist
1064 1103 $ rt --blacklist=blacklist --whitelist=whitelist --json\
1065 1104 > test-failure.t test-bogus.t
1105 running 2 tests using 1 parallel processes
1066 1106 s
1067 1107 --- $TESTTMP/test-failure.t
1068 1108 +++ $TESTTMP/test-failure.t.err
1069 1109 @@ -1,5 +1,5 @@
1070 1110 $ echo babar
1071 1111 - rataxes
1072 1112 + babar
1073 1113 This is a noop statement so that
1074 1114 this test is still more bytes than success.
1075 1115 pad pad pad pad............................................................
1076 1116
1077 1117 ERROR: test-failure.t output changed
1078 1118 !
1079 1119 Skipped test-bogus.t: Doesn't exist
1080 1120 Failed test-failure.t: output changed
1081 1121 # Ran 1 tests, 1 skipped, 1 failed.
1082 1122 python hash seed: * (glob)
1083 1123 [1]
1084 1124
1085 1125 Ensure that --test-list causes only the tests listed in that file to
1086 1126 be executed.
1087 1127 $ echo test-success.t >> onlytest
1088 1128 $ rt --test-list=onlytest
1129 running 1 tests using 1 parallel processes
1089 1130 .
1090 1131 # Ran 1 tests, 0 skipped, 0 failed.
1091 1132 $ echo test-bogus.t >> anothertest
1092 1133 $ rt --test-list=onlytest --test-list=anothertest
1134 running 2 tests using 1 parallel processes
1093 1135 s.
1094 1136 Skipped test-bogus.t: Doesn't exist
1095 1137 # Ran 1 tests, 1 skipped, 0 failed.
1096 1138 $ rm onlytest anothertest
1097 1139
1098 1140 test for --json
1099 1141 ==================
1100 1142
1101 1143 $ rt --json
1144 running 3 tests using 1 parallel processes
1102 1145
1103 1146 --- $TESTTMP/test-failure.t
1104 1147 +++ $TESTTMP/test-failure.t.err
1105 1148 @@ -1,5 +1,5 @@
1106 1149 $ echo babar
1107 1150 - rataxes
1108 1151 + babar
1109 1152 This is a noop statement so that
1110 1153 this test is still more bytes than success.
1111 1154 pad pad pad pad............................................................
1112 1155
1113 1156 ERROR: test-failure.t output changed
1114 1157 !.s
1115 1158 Skipped test-skip.t: missing feature: nail clipper
1116 1159 Failed test-failure.t: output changed
1117 1160 # Ran 2 tests, 1 skipped, 1 failed.
1118 1161 python hash seed: * (glob)
1119 1162 [1]
1120 1163
1121 1164 $ cat report.json
1122 1165 testreport ={
1123 1166 "test-failure.t": [\{] (re)
1124 1167 "csys": "\s*[\d\.]{4,5}", ? (re)
1125 1168 "cuser": "\s*[\d\.]{4,5}", ? (re)
1126 1169 "diff": "---.+\+\+\+.+", ? (re)
1127 1170 "end": "\s*[\d\.]{4,5}", ? (re)
1128 1171 "result": "failure", ? (re)
1129 1172 "start": "\s*[\d\.]{4,5}", ? (re)
1130 1173 "time": "\s*[\d\.]{4,5}" (re)
1131 1174 }, ? (re)
1132 1175 "test-skip.t": {
1133 1176 "csys": "\s*[\d\.]{4,5}", ? (re)
1134 1177 "cuser": "\s*[\d\.]{4,5}", ? (re)
1135 1178 "diff": "", ? (re)
1136 1179 "end": "\s*[\d\.]{4,5}", ? (re)
1137 1180 "result": "skip", ? (re)
1138 1181 "start": "\s*[\d\.]{4,5}", ? (re)
1139 1182 "time": "\s*[\d\.]{4,5}" (re)
1140 1183 }, ? (re)
1141 1184 "test-success.t": [\{] (re)
1142 1185 "csys": "\s*[\d\.]{4,5}", ? (re)
1143 1186 "cuser": "\s*[\d\.]{4,5}", ? (re)
1144 1187 "diff": "", ? (re)
1145 1188 "end": "\s*[\d\.]{4,5}", ? (re)
1146 1189 "result": "success", ? (re)
1147 1190 "start": "\s*[\d\.]{4,5}", ? (re)
1148 1191 "time": "\s*[\d\.]{4,5}" (re)
1149 1192 }
1150 1193 } (no-eol)
1151 1194 --json with --outputdir
1152 1195
1153 1196 $ rm report.json
1154 1197 $ rm -r output
1155 1198 $ mkdir output
1156 1199 $ rt --json --outputdir output
1200 running 3 tests using 1 parallel processes
1157 1201
1158 1202 --- $TESTTMP/test-failure.t
1159 1203 +++ $TESTTMP/output/test-failure.t.err
1160 1204 @@ -1,5 +1,5 @@
1161 1205 $ echo babar
1162 1206 - rataxes
1163 1207 + babar
1164 1208 This is a noop statement so that
1165 1209 this test is still more bytes than success.
1166 1210 pad pad pad pad............................................................
1167 1211
1168 1212 ERROR: test-failure.t output changed
1169 1213 !.s
1170 1214 Skipped test-skip.t: missing feature: nail clipper
1171 1215 Failed test-failure.t: output changed
1172 1216 # Ran 2 tests, 1 skipped, 1 failed.
1173 1217 python hash seed: * (glob)
1174 1218 [1]
1175 1219 $ f report.json
1176 1220 report.json: file not found
1177 1221 $ cat output/report.json
1178 1222 testreport ={
1179 1223 "test-failure.t": [\{] (re)
1180 1224 "csys": "\s*[\d\.]{4,5}", ? (re)
1181 1225 "cuser": "\s*[\d\.]{4,5}", ? (re)
1182 1226 "diff": "---.+\+\+\+.+", ? (re)
1183 1227 "end": "\s*[\d\.]{4,5}", ? (re)
1184 1228 "result": "failure", ? (re)
1185 1229 "start": "\s*[\d\.]{4,5}", ? (re)
1186 1230 "time": "\s*[\d\.]{4,5}" (re)
1187 1231 }, ? (re)
1188 1232 "test-skip.t": {
1189 1233 "csys": "\s*[\d\.]{4,5}", ? (re)
1190 1234 "cuser": "\s*[\d\.]{4,5}", ? (re)
1191 1235 "diff": "", ? (re)
1192 1236 "end": "\s*[\d\.]{4,5}", ? (re)
1193 1237 "result": "skip", ? (re)
1194 1238 "start": "\s*[\d\.]{4,5}", ? (re)
1195 1239 "time": "\s*[\d\.]{4,5}" (re)
1196 1240 }, ? (re)
1197 1241 "test-success.t": [\{] (re)
1198 1242 "csys": "\s*[\d\.]{4,5}", ? (re)
1199 1243 "cuser": "\s*[\d\.]{4,5}", ? (re)
1200 1244 "diff": "", ? (re)
1201 1245 "end": "\s*[\d\.]{4,5}", ? (re)
1202 1246 "result": "success", ? (re)
1203 1247 "start": "\s*[\d\.]{4,5}", ? (re)
1204 1248 "time": "\s*[\d\.]{4,5}" (re)
1205 1249 }
1206 1250 } (no-eol)
1207 1251 $ ls -a output
1208 1252 .
1209 1253 ..
1210 1254 .testtimes
1211 1255 report.json
1212 1256 test-failure.t.err
1213 1257
1214 1258 Test that failed test accepted through interactive are properly reported:
1215 1259
1216 1260 $ cp test-failure.t backup
1217 1261 $ echo y | rt --json -i
1262 running 3 tests using 1 parallel processes
1218 1263
1219 1264 --- $TESTTMP/test-failure.t
1220 1265 +++ $TESTTMP/test-failure.t.err
1221 1266 @@ -1,5 +1,5 @@
1222 1267 $ echo babar
1223 1268 - rataxes
1224 1269 + babar
1225 1270 This is a noop statement so that
1226 1271 this test is still more bytes than success.
1227 1272 pad pad pad pad............................................................
1228 1273 Accept this change? [n] ..s
1229 1274 Skipped test-skip.t: missing feature: nail clipper
1230 1275 # Ran 2 tests, 1 skipped, 0 failed.
1231 1276
1232 1277 $ cat report.json
1233 1278 testreport ={
1234 1279 "test-failure.t": [\{] (re)
1235 1280 "csys": "\s*[\d\.]{4,5}", ? (re)
1236 1281 "cuser": "\s*[\d\.]{4,5}", ? (re)
1237 1282 "diff": "", ? (re)
1238 1283 "end": "\s*[\d\.]{4,5}", ? (re)
1239 1284 "result": "success", ? (re)
1240 1285 "start": "\s*[\d\.]{4,5}", ? (re)
1241 1286 "time": "\s*[\d\.]{4,5}" (re)
1242 1287 }, ? (re)
1243 1288 "test-skip.t": {
1244 1289 "csys": "\s*[\d\.]{4,5}", ? (re)
1245 1290 "cuser": "\s*[\d\.]{4,5}", ? (re)
1246 1291 "diff": "", ? (re)
1247 1292 "end": "\s*[\d\.]{4,5}", ? (re)
1248 1293 "result": "skip", ? (re)
1249 1294 "start": "\s*[\d\.]{4,5}", ? (re)
1250 1295 "time": "\s*[\d\.]{4,5}" (re)
1251 1296 }, ? (re)
1252 1297 "test-success.t": [\{] (re)
1253 1298 "csys": "\s*[\d\.]{4,5}", ? (re)
1254 1299 "cuser": "\s*[\d\.]{4,5}", ? (re)
1255 1300 "diff": "", ? (re)
1256 1301 "end": "\s*[\d\.]{4,5}", ? (re)
1257 1302 "result": "success", ? (re)
1258 1303 "start": "\s*[\d\.]{4,5}", ? (re)
1259 1304 "time": "\s*[\d\.]{4,5}" (re)
1260 1305 }
1261 1306 } (no-eol)
1262 1307 $ mv backup test-failure.t
1263 1308
1264 1309 backslash on end of line with glob matching is handled properly
1265 1310
1266 1311 $ cat > test-glob-backslash.t << EOF
1267 1312 > $ echo 'foo bar \\'
1268 1313 > foo * \ (glob)
1269 1314 > EOF
1270 1315
1271 1316 $ rt test-glob-backslash.t
1317 running 1 tests using 1 parallel processes
1272 1318 .
1273 1319 # Ran 1 tests, 0 skipped, 0 failed.
1274 1320
1275 1321 $ rm -f test-glob-backslash.t
1276 1322
1277 1323 Test globbing of local IP addresses
1278 1324 $ echo 172.16.18.1
1279 1325 $LOCALIP (glob)
1280 1326 $ echo dead:beef::1
1281 1327 $LOCALIP (glob)
1282 1328
1283 1329 Add support for external test formatter
1284 1330 =======================================
1285 1331
1286 1332 $ CUSTOM_TEST_RESULT=basic_test_result "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` "$@" test-success.t test-failure.t
1333 running 2 tests using 1 parallel processes
1287 1334
1288 1335 # Ran 2 tests, 0 skipped, 0 failed.
1289 1336 ON_START! <__main__.TestSuite tests=[<__main__.TTest testMethod=test-failure.t>, <__main__.TTest testMethod=test-success.t>]>
1290 1337 FAILURE! test-failure.t output changed
1291 1338 SUCCESS! test-success.t
1292 1339 ON_END!
1293 1340
1294 1341 Test reusability for third party tools
1295 1342 ======================================
1296 1343
1297 1344 $ mkdir "$TESTTMP"/anothertests
1298 1345 $ cd "$TESTTMP"/anothertests
1299 1346
1300 1347 test that `run-tests.py` can execute hghave, even if it runs not in
1301 1348 Mercurial source tree.
1302 1349
1303 1350 $ cat > test-hghave.t <<EOF
1304 1351 > #require true
1305 1352 > $ echo foo
1306 1353 > foo
1307 1354 > EOF
1308 1355 $ rt test-hghave.t
1356 running 1 tests using 1 parallel processes
1309 1357 .
1310 1358 # Ran 1 tests, 0 skipped, 0 failed.
1311 1359
1312 1360 test that RUNTESTDIR refers the directory, in which `run-tests.py` now
1313 1361 running is placed.
1314 1362
1315 1363 $ cat > test-runtestdir.t <<EOF
1316 1364 > - $TESTDIR, in which test-run-tests.t is placed
1317 1365 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
1318 1366 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
1319 1367 >
1320 1368 > #if windows
1321 1369 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
1322 1370 > #else
1323 1371 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
1324 1372 > #endif
1325 1373 > If this prints a path, that means RUNTESTDIR didn't equal
1326 1374 > TESTDIR as it should have.
1327 1375 > $ test "\$RUNTESTDIR" = "$TESTDIR" || echo "\$RUNTESTDIR"
1328 1376 > This should print the start of check-code. If this passes but the
1329 1377 > previous check failed, that means we found a copy of check-code at whatever
1330 1378 > RUNTESTSDIR ended up containing, even though it doesn't match TESTDIR.
1331 1379 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py | sed 's@.!.*python@#!USRBINENVPY@'
1332 1380 > #!USRBINENVPY
1333 1381 > #
1334 1382 > # check-code - a style and portability checker for Mercurial
1335 1383 > EOF
1336 1384 $ rt test-runtestdir.t
1385 running 1 tests using 1 parallel processes
1337 1386 .
1338 1387 # Ran 1 tests, 0 skipped, 0 failed.
1339 1388
1340 1389 #if execbit
1341 1390
1342 1391 test that TESTDIR is referred in PATH
1343 1392
1344 1393 $ cat > custom-command.sh <<EOF
1345 1394 > #!/bin/sh
1346 1395 > echo "hello world"
1347 1396 > EOF
1348 1397 $ chmod +x custom-command.sh
1349 1398 $ cat > test-testdir-path.t <<EOF
1350 1399 > $ custom-command.sh
1351 1400 > hello world
1352 1401 > EOF
1353 1402 $ rt test-testdir-path.t
1403 running 1 tests using 1 parallel processes
1354 1404 .
1355 1405 # Ran 1 tests, 0 skipped, 0 failed.
1356 1406
1357 1407 #endif
1358 1408
1359 1409 test support for --allow-slow-tests
1360 1410 $ cat > test-very-slow-test.t <<EOF
1361 1411 > #require slow
1362 1412 > $ echo pass
1363 1413 > pass
1364 1414 > EOF
1365 1415 $ rt test-very-slow-test.t
1416 running 1 tests using 1 parallel processes
1366 1417 s
1367 1418 Skipped test-very-slow-test.t: missing feature: allow slow tests (use --allow-slow-tests)
1368 1419 # Ran 0 tests, 1 skipped, 0 failed.
1369 1420 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
1421 running 1 tests using 1 parallel processes
1370 1422 .
1371 1423 # Ran 1 tests, 0 skipped, 0 failed.
1372 1424
1373 1425 support for running a test outside the current directory
1374 1426 $ mkdir nonlocal
1375 1427 $ cat > nonlocal/test-is-not-here.t << EOF
1376 1428 > $ echo pass
1377 1429 > pass
1378 1430 > EOF
1379 1431 $ rt nonlocal/test-is-not-here.t
1432 running 1 tests using 1 parallel processes
1380 1433 .
1381 1434 # Ran 1 tests, 0 skipped, 0 failed.
1382 1435
1383 1436 support for automatically discovering test if arg is a folder
1384 1437 $ mkdir tmp && cd tmp
1385 1438
1386 1439 $ cat > test-uno.t << EOF
1387 1440 > $ echo line
1388 1441 > line
1389 1442 > EOF
1390 1443
1391 1444 $ cp test-uno.t test-dos.t
1392 1445 $ cd ..
1393 1446 $ cp -R tmp tmpp
1394 1447 $ cp tmp/test-uno.t test-solo.t
1395 1448
1396 1449 $ rt tmp/ test-solo.t tmpp
1450 running 5 tests using 1 parallel processes
1397 1451 .....
1398 1452 # Ran 5 tests, 0 skipped, 0 failed.
1399 1453 $ rm -rf tmp tmpp
1400 1454
1401 1455 support for running run-tests.py from another directory
1402 1456 $ mkdir tmp && cd tmp
1403 1457
1404 1458 $ cat > useful-file.sh << EOF
1405 1459 > important command
1406 1460 > EOF
1407 1461
1408 1462 $ cat > test-folder.t << EOF
1409 1463 > $ cat \$TESTDIR/useful-file.sh
1410 1464 > important command
1411 1465 > EOF
1412 1466
1413 1467 $ cat > test-folder-fail.t << EOF
1414 1468 > $ cat \$TESTDIR/useful-file.sh
1415 1469 > important commando
1416 1470 > EOF
1417 1471
1418 1472 $ cd ..
1419 1473 $ rt tmp/test-*.t
1474 running 2 tests using 1 parallel processes
1420 1475
1421 1476 --- $TESTTMP/anothertests/tmp/test-folder-fail.t
1422 1477 +++ $TESTTMP/anothertests/tmp/test-folder-fail.t.err
1423 1478 @@ -1,2 +1,2 @@
1424 1479 $ cat $TESTDIR/useful-file.sh
1425 1480 - important commando
1426 1481 + important command
1427 1482
1428 1483 ERROR: test-folder-fail.t output changed
1429 1484 !.
1430 1485 Failed test-folder-fail.t: output changed
1431 1486 # Ran 2 tests, 0 skipped, 1 failed.
1432 1487 python hash seed: * (glob)
1433 1488 [1]
1434 1489
1435 1490 support for bisecting failed tests automatically
1436 1491 $ hg init bisect
1437 1492 $ cd bisect
1438 1493 $ cat >> test-bisect.t <<EOF
1439 1494 > $ echo pass
1440 1495 > pass
1441 1496 > EOF
1442 1497 $ hg add test-bisect.t
1443 1498 $ hg ci -m 'good'
1444 1499 $ cat >> test-bisect.t <<EOF
1445 1500 > $ echo pass
1446 1501 > fail
1447 1502 > EOF
1448 1503 $ hg ci -m 'bad'
1449 1504 $ rt --known-good-rev=0 test-bisect.t
1505 running 1 tests using 1 parallel processes
1450 1506
1451 1507 --- $TESTTMP/anothertests/bisect/test-bisect.t
1452 1508 +++ $TESTTMP/anothertests/bisect/test-bisect.t.err
1453 1509 @@ -1,4 +1,4 @@
1454 1510 $ echo pass
1455 1511 pass
1456 1512 $ echo pass
1457 1513 - fail
1458 1514 + pass
1459 1515
1460 1516 ERROR: test-bisect.t output changed
1461 1517 !
1462 1518 Failed test-bisect.t: output changed
1463 1519 test-bisect.t broken by 72cbf122d116 (bad)
1464 1520 # Ran 1 tests, 0 skipped, 1 failed.
1465 1521 python hash seed: * (glob)
1466 1522 [1]
1467 1523
1468 1524 $ cd ..
1469 1525
1470 1526 support bisecting a separate repo
1471 1527
1472 1528 $ hg init bisect-dependent
1473 1529 $ cd bisect-dependent
1474 1530 $ cat > test-bisect-dependent.t <<EOF
1475 1531 > $ tail -1 \$TESTDIR/../bisect/test-bisect.t
1476 1532 > pass
1477 1533 > EOF
1478 1534 $ hg commit -Am dependent test-bisect-dependent.t
1479 1535
1480 1536 $ rt --known-good-rev=0 test-bisect-dependent.t
1537 running 1 tests using 1 parallel processes
1481 1538
1482 1539 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1483 1540 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1484 1541 @@ -1,2 +1,2 @@
1485 1542 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1486 1543 - pass
1487 1544 + fail
1488 1545
1489 1546 ERROR: test-bisect-dependent.t output changed
1490 1547 !
1491 1548 Failed test-bisect-dependent.t: output changed
1492 1549 Failed to identify failure point for test-bisect-dependent.t
1493 1550 # Ran 1 tests, 0 skipped, 1 failed.
1494 1551 python hash seed: * (glob)
1495 1552 [1]
1496 1553
1497 1554 $ rt --bisect-repo=../test-bisect test-bisect-dependent.t
1498 1555 usage: run-tests.py [options] [tests]
1499 1556 run-tests.py: error: --bisect-repo cannot be used without --known-good-rev
1500 1557 [2]
1501 1558
1502 1559 $ rt --known-good-rev=0 --bisect-repo=../bisect test-bisect-dependent.t
1560 running 1 tests using 1 parallel processes
1503 1561
1504 1562 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1505 1563 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1506 1564 @@ -1,2 +1,2 @@
1507 1565 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1508 1566 - pass
1509 1567 + fail
1510 1568
1511 1569 ERROR: test-bisect-dependent.t output changed
1512 1570 !
1513 1571 Failed test-bisect-dependent.t: output changed
1514 1572 test-bisect-dependent.t broken by 72cbf122d116 (bad)
1515 1573 # Ran 1 tests, 0 skipped, 1 failed.
1516 1574 python hash seed: * (glob)
1517 1575 [1]
1518 1576
1519 1577 $ cd ..
1520 1578
1521 1579 Test a broken #if statement doesn't break run-tests threading.
1522 1580 ==============================================================
1523 1581 $ mkdir broken
1524 1582 $ cd broken
1525 1583 $ cat > test-broken.t <<EOF
1526 1584 > true
1527 1585 > #if notarealhghavefeature
1528 1586 > $ false
1529 1587 > #endif
1530 1588 > EOF
1531 1589 $ for f in 1 2 3 4 ; do
1532 1590 > cat > test-works-$f.t <<EOF
1533 1591 > This is test case $f
1534 1592 > $ sleep 1
1535 1593 > EOF
1536 1594 > done
1537 1595 $ rt -j 2
1596 running 5 tests using 2 parallel processes
1538 1597 ....
1539 1598 # Ran 5 tests, 0 skipped, 0 failed.
1540 1599 skipped: unknown feature: notarealhghavefeature
1541 1600
1542 1601 $ cd ..
1543 1602 $ rm -rf broken
1544 1603
1545 1604 Test cases in .t files
1546 1605 ======================
1547 1606 $ mkdir cases
1548 1607 $ cd cases
1549 1608 $ cat > test-cases-abc.t <<'EOF'
1550 1609 > #testcases A B C
1551 1610 > $ V=B
1552 1611 > #if A
1553 1612 > $ V=A
1554 1613 > #endif
1555 1614 > #if C
1556 1615 > $ V=C
1557 1616 > #endif
1558 1617 > $ echo $V | sed 's/A/C/'
1559 1618 > C
1560 1619 > #if C
1561 1620 > $ [ $V = C ]
1562 1621 > #endif
1563 1622 > #if A
1564 1623 > $ [ $V = C ]
1565 1624 > [1]
1566 1625 > #endif
1567 1626 > #if no-C
1568 1627 > $ [ $V = C ]
1569 1628 > [1]
1570 1629 > #endif
1571 1630 > $ [ $V = D ]
1572 1631 > [1]
1573 1632 > EOF
1574 1633 $ rt
1634 running 3 tests using 1 parallel processes
1575 1635 .
1576 1636 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1577 1637 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1578 1638 @@ -7,7 +7,7 @@
1579 1639 $ V=C
1580 1640 #endif
1581 1641 $ echo $V | sed 's/A/C/'
1582 1642 - C
1583 1643 + B
1584 1644 #if C
1585 1645 $ [ $V = C ]
1586 1646 #endif
1587 1647
1588 1648 ERROR: test-cases-abc.t#B output changed
1589 1649 !.
1590 1650 Failed test-cases-abc.t#B: output changed
1591 1651 # Ran 3 tests, 0 skipped, 1 failed.
1592 1652 python hash seed: * (glob)
1593 1653 [1]
1594 1654
1595 1655 --restart works
1596 1656
1597 1657 $ rt --restart
1658 running 2 tests using 1 parallel processes
1598 1659
1599 1660 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1600 1661 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1601 1662 @@ -7,7 +7,7 @@
1602 1663 $ V=C
1603 1664 #endif
1604 1665 $ echo $V | sed 's/A/C/'
1605 1666 - C
1606 1667 + B
1607 1668 #if C
1608 1669 $ [ $V = C ]
1609 1670 #endif
1610 1671
1611 1672 ERROR: test-cases-abc.t#B output changed
1612 1673 !.
1613 1674 Failed test-cases-abc.t#B: output changed
1614 1675 # Ran 2 tests, 0 skipped, 1 failed.
1615 1676 python hash seed: * (glob)
1616 1677 [1]
1617 1678
1618 1679 --restart works with outputdir
1619 1680
1620 1681 $ mkdir output
1621 1682 $ mv test-cases-abc.t#B.err output
1622 1683 $ rt --restart --outputdir output
1684 running 2 tests using 1 parallel processes
1623 1685
1624 1686 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1625 1687 +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t#B.err
1626 1688 @@ -7,7 +7,7 @@
1627 1689 $ V=C
1628 1690 #endif
1629 1691 $ echo $V | sed 's/A/C/'
1630 1692 - C
1631 1693 + B
1632 1694 #if C
1633 1695 $ [ $V = C ]
1634 1696 #endif
1635 1697
1636 1698 ERROR: test-cases-abc.t#B output changed
1637 1699 !.
1638 1700 Failed test-cases-abc.t#B: output changed
1639 1701 # Ran 2 tests, 0 skipped, 1 failed.
1640 1702 python hash seed: * (glob)
1641 1703 [1]
1642 1704
1643 1705 Test TESTCASE variable
1644 1706
1645 1707 $ cat > test-cases-ab.t <<'EOF'
1646 1708 > $ dostuff() {
1647 1709 > > echo "In case $TESTCASE"
1648 1710 > > }
1649 1711 > #testcases A B
1650 1712 > #if A
1651 1713 > $ dostuff
1652 1714 > In case A
1653 1715 > #endif
1654 1716 > #if B
1655 1717 > $ dostuff
1656 1718 > In case B
1657 1719 > #endif
1658 1720 > EOF
1659 1721 $ rt test-cases-ab.t
1722 running 2 tests using 1 parallel processes
1660 1723 ..
1661 1724 # Ran 2 tests, 0 skipped, 0 failed.
1662 1725
1663 1726 Support running a specific test case
1664 1727
1665 1728 $ rt "test-cases-abc.t#B"
1729 running 1 tests using 1 parallel processes
1666 1730
1667 1731 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1668 1732 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1669 1733 @@ -7,7 +7,7 @@
1670 1734 $ V=C
1671 1735 #endif
1672 1736 $ echo $V | sed 's/A/C/'
1673 1737 - C
1674 1738 + B
1675 1739 #if C
1676 1740 $ [ $V = C ]
1677 1741 #endif
1678 1742
1679 1743 ERROR: test-cases-abc.t#B output changed
1680 1744 !
1681 1745 Failed test-cases-abc.t#B: output changed
1682 1746 # Ran 1 tests, 0 skipped, 1 failed.
1683 1747 python hash seed: * (glob)
1684 1748 [1]
1685 1749
1686 1750 Support running multiple test cases in the same file
1687 1751
1688 1752 $ rt test-cases-abc.t#B test-cases-abc.t#C
1753 running 2 tests using 1 parallel processes
1689 1754
1690 1755 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1691 1756 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1692 1757 @@ -7,7 +7,7 @@
1693 1758 $ V=C
1694 1759 #endif
1695 1760 $ echo $V | sed 's/A/C/'
1696 1761 - C
1697 1762 + B
1698 1763 #if C
1699 1764 $ [ $V = C ]
1700 1765 #endif
1701 1766
1702 1767 ERROR: test-cases-abc.t#B output changed
1703 1768 !.
1704 1769 Failed test-cases-abc.t#B: output changed
1705 1770 # Ran 2 tests, 0 skipped, 1 failed.
1706 1771 python hash seed: * (glob)
1707 1772 [1]
1708 1773
1709 1774 Support ignoring invalid test cases
1710 1775
1711 1776 $ rt test-cases-abc.t#B test-cases-abc.t#D
1777 running 1 tests using 1 parallel processes
1712 1778
1713 1779 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1714 1780 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1715 1781 @@ -7,7 +7,7 @@
1716 1782 $ V=C
1717 1783 #endif
1718 1784 $ echo $V | sed 's/A/C/'
1719 1785 - C
1720 1786 + B
1721 1787 #if C
1722 1788 $ [ $V = C ]
1723 1789 #endif
1724 1790
1725 1791 ERROR: test-cases-abc.t#B output changed
1726 1792 !
1727 1793 Failed test-cases-abc.t#B: output changed
1728 1794 # Ran 1 tests, 0 skipped, 1 failed.
1729 1795 python hash seed: * (glob)
1730 1796 [1]
1731 1797
1732 1798 Support running complex test cases names
1733 1799
1734 1800 $ cat > test-cases-advanced-cases.t <<'EOF'
1735 1801 > #testcases simple case-with-dashes casewith_-.chars
1736 1802 > $ echo $TESTCASE
1737 1803 > simple
1738 1804 > EOF
1739 1805
1740 1806 $ cat test-cases-advanced-cases.t
1741 1807 #testcases simple case-with-dashes casewith_-.chars
1742 1808 $ echo $TESTCASE
1743 1809 simple
1744 1810
1745 1811 $ rt test-cases-advanced-cases.t
1812 running 3 tests using 1 parallel processes
1746 1813
1747 1814 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1748 1815 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
1749 1816 @@ -1,3 +1,3 @@
1750 1817 #testcases simple case-with-dashes casewith_-.chars
1751 1818 $ echo $TESTCASE
1752 1819 - simple
1753 1820 + case-with-dashes
1754 1821
1755 1822 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1756 1823 !
1757 1824 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1758 1825 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
1759 1826 @@ -1,3 +1,3 @@
1760 1827 #testcases simple case-with-dashes casewith_-.chars
1761 1828 $ echo $TESTCASE
1762 1829 - simple
1763 1830 + casewith_-.chars
1764 1831
1765 1832 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1766 1833 !.
1767 1834 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1768 1835 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1769 1836 # Ran 3 tests, 0 skipped, 2 failed.
1770 1837 python hash seed: * (glob)
1771 1838 [1]
1772 1839
1773 1840 $ rt "test-cases-advanced-cases.t#case-with-dashes"
1841 running 1 tests using 1 parallel processes
1774 1842
1775 1843 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1776 1844 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
1777 1845 @@ -1,3 +1,3 @@
1778 1846 #testcases simple case-with-dashes casewith_-.chars
1779 1847 $ echo $TESTCASE
1780 1848 - simple
1781 1849 + case-with-dashes
1782 1850
1783 1851 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1784 1852 !
1785 1853 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1786 1854 # Ran 1 tests, 0 skipped, 1 failed.
1787 1855 python hash seed: * (glob)
1788 1856 [1]
1789 1857
1790 1858 $ rt "test-cases-advanced-cases.t#casewith_-.chars"
1859 running 1 tests using 1 parallel processes
1791 1860
1792 1861 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1793 1862 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
1794 1863 @@ -1,3 +1,3 @@
1795 1864 #testcases simple case-with-dashes casewith_-.chars
1796 1865 $ echo $TESTCASE
1797 1866 - simple
1798 1867 + casewith_-.chars
1799 1868
1800 1869 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1801 1870 !
1802 1871 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1803 1872 # Ran 1 tests, 0 skipped, 1 failed.
1804 1873 python hash seed: * (glob)
1805 1874 [1]
1806 1875
1807 1876 Test automatic pattern replacement
1808 1877 ==================================
1809 1878
1810 1879 $ cat << EOF >> common-pattern.py
1811 1880 > substitutions = [
1812 1881 > (br'foo-(.*)\\b',
1813 1882 > br'\$XXX=\\1\$'),
1814 1883 > (br'bar\\n',
1815 1884 > br'\$YYY$\\n'),
1816 1885 > ]
1817 1886 > EOF
1818 1887
1819 1888 $ cat << EOF >> test-substitution.t
1820 1889 > $ echo foo-12
1821 1890 > \$XXX=12$
1822 1891 > $ echo foo-42
1823 1892 > \$XXX=42$
1824 1893 > $ echo bar prior
1825 1894 > bar prior
1826 1895 > $ echo lastbar
1827 1896 > last\$YYY$
1828 1897 > $ echo foo-bar foo-baz
1829 1898 > EOF
1830 1899
1831 1900 $ rt test-substitution.t
1901 running 1 tests using 1 parallel processes
1832 1902
1833 1903 --- $TESTTMP/anothertests/cases/test-substitution.t
1834 1904 +++ $TESTTMP/anothertests/cases/test-substitution.t.err
1835 1905 @@ -7,3 +7,4 @@
1836 1906 $ echo lastbar
1837 1907 last$YYY$
1838 1908 $ echo foo-bar foo-baz
1839 1909 + $XXX=bar foo-baz$
1840 1910
1841 1911 ERROR: test-substitution.t output changed
1842 1912 !
1843 1913 Failed test-substitution.t: output changed
1844 1914 # Ran 1 tests, 0 skipped, 1 failed.
1845 1915 python hash seed: * (glob)
1846 1916 [1]
1847 1917
1848 1918 --extra-config-opt works
1849 1919
1850 1920 $ cat << EOF >> test-config-opt.t
1851 1921 > $ hg init test-config-opt
1852 1922 > $ hg -R test-config-opt purge
1853 1923 > EOF
1854 1924
1855 1925 $ rt --extra-config-opt extensions.purge= test-config-opt.t
1926 running 1 tests using 1 parallel processes
1856 1927 .
1857 1928 # Ran 1 tests, 0 skipped, 0 failed.
General Comments 0
You need to be logged in to leave comments. Login now