run-tests: run tests with as many processes as cores by default...
Gregory Szorc
r40281:e7e70c03 default
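
The behavioral change in this commit is confined to the `defaults` table below: -j/--jobs now defaults to the machine's core count instead of 1, and HGTEST_JOBS still overrides it. A minimal sketch of the before/after default resolution, standard library only:

    import multiprocessing
    import os

    # Before this commit: one test process unless HGTEST_JOBS said otherwise.
    jobs_before = int(os.environ.get('HGTEST_JOBS', 1))

    # After this commit: one process per core, same override path.
    jobs_after = int(os.environ.get('HGTEST_JOBS', multiprocessing.cpu_count()))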
@@ -1,3239 +1,3240 @@
1 1 #!/usr/bin/env python
2 2 #
3 3 # run-tests.py - Run a set of tests on Mercurial
4 4 #
5 5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 # Modifying this script is tricky because it has many modes:
11 11 # - serial (default) vs parallel (-jN, N > 1)
12 12 # - no coverage (default) vs coverage (-c, -C, -s)
13 13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 14 # - tests are a mix of shell scripts and Python scripts
15 15 #
16 16 # If you change this script, it is recommended that you ensure you
17 17 # haven't broken it by running it in various modes with a representative
18 18 # sample of test scripts. For example:
19 19 #
20 20 # 1) serial, no coverage, temp install:
21 21 # ./run-tests.py test-s*
22 22 # 2) serial, no coverage, local hg:
23 23 # ./run-tests.py --local test-s*
24 24 # 3) serial, coverage, temp install:
25 25 # ./run-tests.py -c test-s*
26 26 # 4) serial, coverage, local hg:
27 27 # ./run-tests.py -c --local test-s* # unsupported
28 28 # 5) parallel, no coverage, temp install:
29 29 # ./run-tests.py -j2 test-s*
30 30 # 6) parallel, no coverage, local hg:
31 31 # ./run-tests.py -j2 --local test-s*
32 32 # 7) parallel, coverage, temp install:
33 33 # ./run-tests.py -j2 -c test-s* # currently broken
34 34 # 8) parallel, coverage, local install:
35 35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 36 # 9) parallel, custom tmp dir:
37 37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 38 # 10) parallel, pure, tests that call run-tests:
39 39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 40 #
41 41 # (You could use any subset of the tests: test-s* happens to match
42 42 # enough that it's worth doing parallel runs, few enough that it
43 43 # completes fairly quickly, includes both shell and Python scripts, and
44 44 # includes some scripts that run daemon processes.)
45 45
46 46 from __future__ import absolute_import, print_function
47 47
48 48 import argparse
49 49 import collections
50 50 import difflib
51 51 import distutils.version as version
52 52 import errno
53 53 import json
54 import multiprocessing
54 55 import os
55 56 import random
56 57 import re
57 58 import shutil
58 59 import signal
59 60 import socket
60 61 import subprocess
61 62 import sys
62 63 import sysconfig
63 64 import tempfile
64 65 import threading
65 66 import time
66 67 import unittest
67 68 import uuid
68 69 import xml.dom.minidom as minidom
69 70
70 71 try:
71 72 import Queue as queue
72 73 except ImportError:
73 74 import queue
74 75
75 76 try:
76 77 import shlex
77 78 shellquote = shlex.quote
78 79 except (ImportError, AttributeError):
79 80 import pipes
80 81 shellquote = pipes.quote
81 82
82 83 if os.environ.get('RTUNICODEPEDANTRY', False):
83 84 try:
84 85 reload(sys)
85 86 sys.setdefaultencoding("undefined")
86 87 except NameError:
87 88 pass
88 89
89 90 processlock = threading.Lock()
90 91
91 92 pygmentspresent = False
92 93 # ANSI color is unsupported prior to Windows 10
93 94 if os.name != 'nt':
94 95 try: # is pygments installed
95 96 import pygments
96 97 import pygments.lexers as lexers
97 98 import pygments.lexer as lexer
98 99 import pygments.formatters as formatters
99 100 import pygments.token as token
100 101 import pygments.style as style
101 102 pygmentspresent = True
102 103 difflexer = lexers.DiffLexer()
103 104 terminal256formatter = formatters.Terminal256Formatter()
104 105 except ImportError:
105 106 pass
106 107
107 108 if pygmentspresent:
108 109 class TestRunnerStyle(style.Style):
109 110 default_style = ""
110 111 skipped = token.string_to_tokentype("Token.Generic.Skipped")
111 112 failed = token.string_to_tokentype("Token.Generic.Failed")
112 113 skippedname = token.string_to_tokentype("Token.Generic.SName")
113 114 failedname = token.string_to_tokentype("Token.Generic.FName")
114 115 styles = {
115 116 skipped: '#e5e5e5',
116 117 skippedname: '#00ffff',
117 118 failed: '#7f0000',
118 119 failedname: '#ff0000',
119 120 }
120 121
121 122 class TestRunnerLexer(lexer.RegexLexer):
122 123 testpattern = r'[\w-]+\.(t|py)(#[a-zA-Z0-9_\-\.]+)?'
123 124 tokens = {
124 125 'root': [
125 126 (r'^Skipped', token.Generic.Skipped, 'skipped'),
126 127 (r'^Failed ', token.Generic.Failed, 'failed'),
127 128 (r'^ERROR: ', token.Generic.Failed, 'failed'),
128 129 ],
129 130 'skipped': [
130 131 (testpattern, token.Generic.SName),
131 132 (r':.*', token.Generic.Skipped),
132 133 ],
133 134 'failed': [
134 135 (testpattern, token.Generic.FName),
135 136 (r'(:| ).*', token.Generic.Failed),
136 137 ]
137 138 }
138 139
139 140 runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
140 141 runnerlexer = TestRunnerLexer()
141 142
142 143 origenviron = os.environ.copy()
143 144
144 145 if sys.version_info > (3, 5, 0):
145 146 PYTHON3 = True
146 147 xrange = range # we use xrange in one place, and we'd rather not use range
147 148 def _bytespath(p):
148 149 if p is None:
149 150 return p
150 151 return p.encode('utf-8')
151 152
152 153 def _strpath(p):
153 154 if p is None:
154 155 return p
155 156 return p.decode('utf-8')
156 157
157 158 osenvironb = getattr(os, 'environb', None)
158 159 if osenvironb is None:
159 160 # Windows lacks os.environb, for instance. A proxy over the real thing
160 161 # instead of a copy allows the environment to be updated via bytes on
161 162 # all platforms.
162 163 class environbytes(object):
163 164 def __init__(self, strenv):
164 165 self.__len__ = strenv.__len__
165 166 self.clear = strenv.clear
166 167 self._strenv = strenv
167 168 def __getitem__(self, k):
168 169 v = self._strenv.__getitem__(_strpath(k))
169 170 return _bytespath(v)
170 171 def __setitem__(self, k, v):
171 172 self._strenv.__setitem__(_strpath(k), _strpath(v))
172 173 def __delitem__(self, k):
173 174 self._strenv.__delitem__(_strpath(k))
174 175 def __contains__(self, k):
175 176 return self._strenv.__contains__(_strpath(k))
176 177 def __iter__(self):
177 178 return iter([_bytespath(k) for k in iter(self._strenv)])
178 179 def get(self, k, default=None):
179 180 v = self._strenv.get(_strpath(k), _strpath(default))
180 181 return _bytespath(v)
181 182 def pop(self, k, default=None):
182 183 v = self._strenv.pop(_strpath(k), _strpath(default))
183 184 return _bytespath(v)
184 185
185 186 osenvironb = environbytes(os.environ)
186 187
187 188 getcwdb = getattr(os, 'getcwdb')
188 189 if not getcwdb or os.name == 'nt':
189 190 getcwdb = lambda: _bytespath(os.getcwd())
190 191
191 192 elif sys.version_info >= (3, 0, 0):
192 193 print('%s is only supported on Python 3.5+ and 2.7, not %s' %
193 194 (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
194 195 sys.exit(70) # EX_SOFTWARE from `man 3 sysexits`
195 196 else:
196 197 PYTHON3 = False
197 198
198 199 # In python 2.x, path operations are generally done using
199 200 # bytestrings by default, so we don't have to do any extra
200 201 # fiddling there. We define the wrapper functions anyway just to
201 202 # help keep code consistent between platforms.
202 203 def _bytespath(p):
203 204 return p
204 205
205 206 _strpath = _bytespath
206 207 osenvironb = os.environ
207 208 getcwdb = os.getcwd
208 209
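
A note on the bytes/str strategy above: the harness keeps paths and environment data as bytes on Python 3 and converts at the os.environ boundary, which is what the environbytes proxy does per key. A self-contained sketch of that round trip (the RT_DEMO variable is invented, and Python 3 is assumed):

    import os

    def _bytespath(p):
        return p.encode('utf-8') if p is not None else None

    def _strpath(p):
        return p.decode('utf-8') if p is not None else None

    # What environbytes.__setitem__/__getitem__ do for a single key:
    os.environ[_strpath(b'RT_DEMO')] = _strpath(b'value')  # stored as str
    assert _bytespath(os.environ['RT_DEMO']) == b'value'   # read back as bytes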
209 210 # For Windows support
210 211 wifexited = getattr(os, "WIFEXITED", lambda x: False)
211 212
212 213 # Whether to use IPv6
213 214 def checksocketfamily(name, port=20058):
214 215 """return true if we can listen on localhost using family=name
215 216
216 217 name should be either 'AF_INET', or 'AF_INET6'.
217 218 port being used is okay - EADDRINUSE is considered successful.
218 219 """
219 220 family = getattr(socket, name, None)
220 221 if family is None:
221 222 return False
222 223 try:
223 224 s = socket.socket(family, socket.SOCK_STREAM)
224 225 s.bind(('localhost', port))
225 226 s.close()
226 227 return True
227 228 except socket.error as exc:
228 229 if exc.errno == errno.EADDRINUSE:
229 230 return True
230 231 elif exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
231 232 return False
232 233 else:
233 234 raise
234 235 else:
235 236 return False
236 237
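
The EADDRINUSE-as-success rule matters for the IPv6 autodetection in parseargs() further down, which prefers IPv4 and falls back to IPv6 only when IPv4 is missing. A condensed, runnable sketch of that decision (unlike the original, this version returns False instead of re-raising on unexpected errors):

    import errno
    import socket

    def checksocketfamily(name, port=20058):
        family = getattr(socket, name, None)
        if family is None:
            return False
        try:
            s = socket.socket(family, socket.SOCK_STREAM)
            s.bind(('localhost', port))
            s.close()
            return True
        except socket.error as exc:
            # A busy port still proves the family is usable.
            return exc.errno == errno.EADDRINUSE

    # parseargs() default: only use IPv6 if IPv4 is unavailable.
    useipv6 = (not checksocketfamily('AF_INET')) and checksocketfamily('AF_INET6')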
237 238 # useipv6 will be set by parseargs
238 239 useipv6 = None
239 240
240 241 def checkportisavailable(port):
241 242 """return true if a port seems free to bind on localhost"""
242 243 if useipv6:
243 244 family = socket.AF_INET6
244 245 else:
245 246 family = socket.AF_INET
246 247 try:
247 248 s = socket.socket(family, socket.SOCK_STREAM)
248 249 s.bind(('localhost', port))
249 250 s.close()
250 251 return True
251 252 except socket.error as exc:
252 253 if exc.errno not in (errno.EADDRINUSE, errno.EADDRNOTAVAIL,
253 254 errno.EPROTONOSUPPORT):
254 255 raise
255 256 return False
256 257
257 258 closefds = os.name == 'posix'
258 259 def Popen4(cmd, wd, timeout, env=None):
259 260 processlock.acquire()
260 261 p = subprocess.Popen(_strpath(cmd), shell=True, bufsize=-1,
261 262 cwd=_strpath(wd), env=env,
262 263 close_fds=closefds,
263 264 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
264 265 stderr=subprocess.STDOUT)
265 266 processlock.release()
266 267
267 268 p.fromchild = p.stdout
268 269 p.tochild = p.stdin
269 270 p.childerr = p.stderr
270 271
271 272 p.timeout = False
272 273 if timeout:
273 274 def t():
274 275 start = time.time()
275 276 while time.time() - start < timeout and p.returncode is None:
276 277 time.sleep(.1)
277 278 p.timeout = True
278 279 if p.returncode is None:
279 280 terminate(p)
280 281 threading.Thread(target=t).start()
281 282
282 283 return p
283 284
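
Popen4 bolts a watchdog thread onto the child process: the thread waits out the deadline, and if the child is still running it flags p.timeout and terminates it, so callers can report 'timeout' rather than a normal exit status. A self-contained variant of the same pattern (command and helper name invented; this version polls with poll() rather than reading p.returncode):

    import subprocess
    import threading
    import time

    def run_with_timeout(cmd, timeout):
        p = subprocess.Popen(cmd, shell=True)
        p.timed_out = False
        def watchdog():
            deadline = time.time() + timeout
            while time.time() < deadline and p.poll() is None:
                time.sleep(.1)
            if p.poll() is None:
                p.timed_out = True
                p.terminate()
        threading.Thread(target=watchdog).start()
        ret = p.wait()
        return 'timeout' if p.timed_out else ret

    print(run_with_timeout('sleep 5', 1))  # prints: timeout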
284 285 PYTHON = _bytespath(sys.executable.replace('\\', '/'))
285 286 IMPL_PATH = b'PYTHONPATH'
286 287 if 'java' in sys.platform:
287 288 IMPL_PATH = b'JYTHONPATH'
288 289
289 290 defaults = {
290 'jobs': ('HGTEST_JOBS', 1),
291 'jobs': ('HGTEST_JOBS', multiprocessing.cpu_count()),
291 292 'timeout': ('HGTEST_TIMEOUT', 180),
292 293 'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500),
293 294 'port': ('HGTEST_PORT', 20059),
294 295 'shell': ('HGTEST_SHELL', 'sh'),
295 296 }
296 297
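
Each defaults entry pairs an environment variable with a fallback value; getparser() below coerces any override to the fallback's type, so HGTEST_JOBS=8 becomes the integer 8 rather than the string '8'. A small sketch of that coercion with an invented table:

    import os

    defaults = {'jobs': ('HGTEST_JOBS', 4), 'shell': ('HGTEST_SHELL', 'sh')}
    os.environ['HGTEST_JOBS'] = '8'

    resolved = {}
    for option, (envvar, default) in defaults.items():
        # type(default) is int or str, so env strings are coerced to match.
        resolved[option] = type(default)(os.environ.get(envvar, default))

    assert resolved == {'jobs': 8, 'shell': 'sh'}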
297 298 def canonpath(path):
298 299 return os.path.realpath(os.path.expanduser(path))
299 300
300 301 def parselistfiles(files, listtype, warn=True):
301 302 entries = dict()
302 303 for filename in files:
303 304 try:
304 305 path = os.path.expanduser(os.path.expandvars(filename))
305 306 f = open(path, "rb")
306 307 except IOError as err:
307 308 if err.errno != errno.ENOENT:
308 309 raise
309 310 if warn:
310 311 print("warning: no such %s file: %s" % (listtype, filename))
311 312 continue
312 313
313 314 for line in f.readlines():
314 315 line = line.split(b'#', 1)[0].strip()
315 316 if line:
316 317 entries[line] = filename
317 318
318 319 f.close()
319 320 return entries
320 321
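
The list files accepted here (for --blacklist, --whitelist, and --test-list) hold one test name per line, with '#' starting a comment. A sketch of the parsing loop against an in-memory file (test names invented; the string 'blacklist' stands in for the real filename):

    import io

    blacklist = io.BytesIO(b'# tests known to hang on this platform\n'
                           b'test-convert.t  # see tracker\n'
                           b'test-https.t\n')

    entries = {}
    for line in blacklist.readlines():
        line = line.split(b'#', 1)[0].strip()
        if line:
            entries[line] = 'blacklist'

    assert entries == {b'test-convert.t': 'blacklist',
                       b'test-https.t': 'blacklist'}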
321 322 def parsettestcases(path):
322 323 """read a .t test file, return a list of test case name lists
323 324
324 325 If path does not exist, return an empty set.
325 326 """
326 327 cases = []
327 328 try:
328 329 with open(path, 'rb') as f:
329 330 for l in f:
330 331 if l.startswith(b'#testcases '):
331 332 cases.append(sorted(l[11:].split()))
332 333 except IOError as ex:
333 334 if ex.errno != errno.ENOENT:
334 335 raise
335 336 return cases
336 337
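
For reference, a .t file can declare several test-case axes, one '#testcases' line per axis, and each line becomes one sorted name list. A sketch with an invented test body:

    import io

    ttest = io.BytesIO(b'#testcases flat tree\n'
                       b'#testcases v1 v2\n'
                       b'  $ echo hi\n'
                       b'  hi\n')

    cases = []
    for l in ttest:
        if l.startswith(b'#testcases '):
            cases.append(sorted(l[11:].split()))

    assert cases == [[b'flat', b'tree'], [b'v1', b'v2']]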
337 338 def getparser():
338 339 """Obtain the OptionParser used by the CLI."""
339 340 parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]')
340 341
341 342 selection = parser.add_argument_group('Test Selection')
342 343 selection.add_argument('--allow-slow-tests', action='store_true',
343 344 help='allow extremely slow tests')
344 345 selection.add_argument("--blacklist", action="append",
345 346 help="skip tests listed in the specified blacklist file")
346 347 selection.add_argument("--changed",
347 348 help="run tests that are changed in parent rev or working directory")
348 349 selection.add_argument("-k", "--keywords",
349 350 help="run tests matching keywords")
350 351 selection.add_argument("-r", "--retest", action="store_true",
351 352 help = "retest failed tests")
352 353 selection.add_argument("--test-list", action="append",
353 354 help="read tests to run from the specified file")
354 355 selection.add_argument("--whitelist", action="append",
355 356 help="always run tests listed in the specified whitelist file")
356 357 selection.add_argument('tests', metavar='TESTS', nargs='*',
357 358 help='Tests to run')
358 359
359 360 harness = parser.add_argument_group('Test Harness Behavior')
360 361 harness.add_argument('--bisect-repo',
361 362 metavar='bisect_repo',
362 363 help=("Path of a repo to bisect. Use together with "
363 364 "--known-good-rev"))
364 365 harness.add_argument("-d", "--debug", action="store_true",
365 366 help="debug mode: write output of test scripts to console"
366 367 " rather than capturing and diffing it (disables timeout)")
367 368 harness.add_argument("-f", "--first", action="store_true",
368 369 help="exit on the first test failure")
369 370 harness.add_argument("-i", "--interactive", action="store_true",
370 371 help="prompt to accept changed output")
371 372 harness.add_argument("-j", "--jobs", type=int,
372 373 help="number of jobs to run in parallel"
373 374 " (default: $%s or %d)" % defaults['jobs'])
374 375 harness.add_argument("--keep-tmpdir", action="store_true",
375 376 help="keep temporary directory after running tests")
376 377 harness.add_argument('--known-good-rev',
377 378 metavar="known_good_rev",
378 379 help=("Automatically bisect any failures using this "
379 380 "revision as a known-good revision."))
380 381 harness.add_argument("--list-tests", action="store_true",
381 382 help="list tests instead of running them")
382 383 harness.add_argument("--loop", action="store_true",
383 384 help="loop tests repeatedly")
384 385 harness.add_argument('--random', action="store_true",
385 386 help='run tests in random order')
386 387 harness.add_argument('--order-by-runtime', action="store_true",
387 388 help='run slowest tests first, according to .testtimes')
388 389 harness.add_argument("-p", "--port", type=int,
389 390 help="port on which servers should listen"
390 391 " (default: $%s or %d)" % defaults['port'])
391 392 harness.add_argument('--profile-runner', action='store_true',
392 393 help='run statprof on run-tests')
393 394 harness.add_argument("-R", "--restart", action="store_true",
394 395 help="restart at last error")
395 396 harness.add_argument("--runs-per-test", type=int, dest="runs_per_test",
396 397 help="run each test N times (default=1)", default=1)
397 398 harness.add_argument("--shell",
398 399 help="shell to use (default: $%s or %s)" % defaults['shell'])
399 400 harness.add_argument('--showchannels', action='store_true',
400 401 help='show scheduling channels')
401 402 harness.add_argument("--slowtimeout", type=int,
402 403 help="kill errant slow tests after SLOWTIMEOUT seconds"
403 404 " (default: $%s or %d)" % defaults['slowtimeout'])
404 405 harness.add_argument("-t", "--timeout", type=int,
405 406 help="kill errant tests after TIMEOUT seconds"
406 407 " (default: $%s or %d)" % defaults['timeout'])
407 408 harness.add_argument("--tmpdir",
408 409 help="run tests in the given temporary directory"
409 410 " (implies --keep-tmpdir)")
410 411 harness.add_argument("-v", "--verbose", action="store_true",
411 412 help="output verbose messages")
412 413
413 414 hgconf = parser.add_argument_group('Mercurial Configuration')
414 415 hgconf.add_argument("--chg", action="store_true",
415 416 help="install and use chg wrapper in place of hg")
416 417 hgconf.add_argument("--compiler",
417 418 help="compiler to build with")
418 419 hgconf.add_argument('--extra-config-opt', action="append", default=[],
419 420 help='set the given config opt in the test hgrc')
420 421 hgconf.add_argument("-l", "--local", action="store_true",
421 422 help="shortcut for --with-hg=<testdir>/../hg, "
422 423 "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set")
423 424 hgconf.add_argument("--ipv6", action="store_true",
424 425 help="prefer IPv6 to IPv4 for network related tests")
425 426 hgconf.add_argument("--pure", action="store_true",
426 427 help="use pure Python code instead of C extensions")
427 428 hgconf.add_argument("-3", "--py3k-warnings", action="store_true",
428 429 help="enable Py3k warnings on Python 2.7+")
429 430 hgconf.add_argument("--with-chg", metavar="CHG",
430 431 help="use specified chg wrapper in place of hg")
431 432 hgconf.add_argument("--with-hg",
432 433 metavar="HG",
433 434 help="test using specified hg script rather than a "
434 435 "temporary installation")
435 436
436 437 reporting = parser.add_argument_group('Results Reporting')
437 438 reporting.add_argument("-C", "--annotate", action="store_true",
438 439 help="output files annotated with coverage")
439 440 reporting.add_argument("--color", choices=["always", "auto", "never"],
440 441 default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
441 442 help="colorisation: always|auto|never (default: auto)")
442 443 reporting.add_argument("-c", "--cover", action="store_true",
443 444 help="print a test coverage report")
444 445 reporting.add_argument('--exceptions', action='store_true',
445 446 help='log all exceptions and generate an exception report')
446 447 reporting.add_argument("-H", "--htmlcov", action="store_true",
447 448 help="create an HTML report of the coverage of the files")
448 449 reporting.add_argument("--json", action="store_true",
449 450 help="store test result data in 'report.json' file")
450 451 reporting.add_argument("--outputdir",
451 452 help="directory to write error logs to (default=test directory)")
452 453 reporting.add_argument("-n", "--nodiff", action="store_true",
453 454 help="skip showing test changes")
454 455 reporting.add_argument("-S", "--noskips", action="store_true",
455 456 help="don't report skip tests verbosely")
456 457 reporting.add_argument("--time", action="store_true",
457 458 help="time how long each test takes")
458 459 reporting.add_argument("--view",
459 460 help="external diff viewer")
460 461 reporting.add_argument("--xunit",
461 462 help="record xunit results at specified path")
462 463
463 464 for option, (envvar, default) in defaults.items():
464 465 defaults[option] = type(default)(os.environ.get(envvar, default))
465 466 parser.set_defaults(**defaults)
466 467
467 468 return parser
468 469
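
The parser above leans on two argparse features worth noting: argument groups for the sectioned --help output, and set_defaults() to merge the environment-derived defaults in one call. A minimal self-contained sketch of the same shape (options reduced to two for brevity):

    import argparse

    parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]')
    harness = parser.add_argument_group('Test Harness Behavior')
    harness.add_argument('-j', '--jobs', type=int,
                         help='number of jobs to run in parallel')
    parser.add_argument('tests', metavar='TESTS', nargs='*')
    parser.set_defaults(jobs=4)  # stand-in for the env-derived default

    options = parser.parse_args(['-j', '2', 'test-demo.t'])
    assert options.jobs == 2 and options.tests == ['test-demo.t']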
469 470 def parseargs(args, parser):
470 471 """Parse arguments with our OptionParser and validate results."""
471 472 options = parser.parse_args(args)
472 473
473 474 # jython is always pure
474 475 if 'java' in sys.platform or '__pypy__' in sys.modules:
475 476 options.pure = True
476 477
477 478 if options.with_hg:
478 479 options.with_hg = canonpath(_bytespath(options.with_hg))
479 480 if not (os.path.isfile(options.with_hg) and
480 481 os.access(options.with_hg, os.X_OK)):
481 482 parser.error('--with-hg must specify an executable hg script')
482 483 if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
483 484 sys.stderr.write('warning: --with-hg should specify an hg script\n')
484 485 if options.local:
485 486 testdir = os.path.dirname(_bytespath(canonpath(sys.argv[0])))
486 487 reporootdir = os.path.dirname(testdir)
487 488 pathandattrs = [(b'hg', 'with_hg')]
488 489 if options.chg:
489 490 pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
490 491 for relpath, attr in pathandattrs:
491 492 binpath = os.path.join(reporootdir, relpath)
492 493 if os.name != 'nt' and not os.access(binpath, os.X_OK):
493 494 parser.error('--local specified, but %r not found or '
494 495 'not executable' % binpath)
495 496 setattr(options, attr, binpath)
496 497
497 498 if (options.chg or options.with_chg) and os.name == 'nt':
498 499 parser.error('chg does not work on %s' % os.name)
499 500 if options.with_chg:
500 501 options.chg = False # no installation to temporary location
501 502 options.with_chg = canonpath(_bytespath(options.with_chg))
502 503 if not (os.path.isfile(options.with_chg) and
503 504 os.access(options.with_chg, os.X_OK)):
504 505 parser.error('--with-chg must specify a chg executable')
505 506 if options.chg and options.with_hg:
506 507 # chg shares installation location with hg
507 508 parser.error('--chg does not work when --with-hg is specified '
508 509 '(use --with-chg instead)')
509 510
510 511 if options.color == 'always' and not pygmentspresent:
511 512 sys.stderr.write('warning: --color=always ignored because '
512 513 'pygments is not installed\n')
513 514
514 515 if options.bisect_repo and not options.known_good_rev:
515 516 parser.error("--bisect-repo cannot be used without --known-good-rev")
516 517
517 518 global useipv6
518 519 if options.ipv6:
519 520 useipv6 = checksocketfamily('AF_INET6')
520 521 else:
521 522 # only use IPv6 if IPv4 is unavailable and IPv6 is available
522 523 useipv6 = ((not checksocketfamily('AF_INET'))
523 524 and checksocketfamily('AF_INET6'))
524 525
525 526 options.anycoverage = options.cover or options.annotate or options.htmlcov
526 527 if options.anycoverage:
527 528 try:
528 529 import coverage
529 530 covver = version.StrictVersion(coverage.__version__).version
530 531 if covver < (3, 3):
531 532 parser.error('coverage options require coverage 3.3 or later')
532 533 except ImportError:
533 534 parser.error('coverage options now require the coverage package')
534 535
535 536 if options.anycoverage and options.local:
536 537 # this needs some path mangling somewhere, I guess
537 538 parser.error("sorry, coverage options do not work when --local "
538 539 "is specified")
539 540
540 541 if options.anycoverage and options.with_hg:
541 542 parser.error("sorry, coverage options do not work when --with-hg "
542 543 "is specified")
543 544
544 545 global verbose
545 546 if options.verbose:
546 547 verbose = ''
547 548
548 549 if options.tmpdir:
549 550 options.tmpdir = canonpath(options.tmpdir)
550 551
551 552 if options.jobs < 1:
552 553 parser.error('--jobs must be positive')
553 554 if options.interactive and options.debug:
554 555 parser.error("-i/--interactive and -d/--debug are incompatible")
555 556 if options.debug:
556 557 if options.timeout != defaults['timeout']:
557 558 sys.stderr.write(
558 559 'warning: --timeout option ignored with --debug\n')
559 560 if options.slowtimeout != defaults['slowtimeout']:
560 561 sys.stderr.write(
561 562 'warning: --slowtimeout option ignored with --debug\n')
562 563 options.timeout = 0
563 564 options.slowtimeout = 0
564 565 if options.py3k_warnings:
565 566 if PYTHON3:
566 567 parser.error(
567 568 '--py3k-warnings can only be used on Python 2.7')
568 569
569 570 if options.blacklist:
570 571 options.blacklist = parselistfiles(options.blacklist, 'blacklist')
571 572 if options.whitelist:
572 573 options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
573 574 else:
574 575 options.whitelisted = {}
575 576
576 577 if options.showchannels:
577 578 options.nodiff = True
578 579
579 580 return options
580 581
581 582 def rename(src, dst):
582 583 """Like os.rename(), but trades atomicity and open-file friendliness
583 584 for support of an existing destination.
584 585 """
585 586 shutil.copy(src, dst)
586 587 os.remove(src)
587 588
588 589 _unified_diff = difflib.unified_diff
589 590 if PYTHON3:
590 591 import functools
591 592 _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
592 593
593 594 def getdiff(expected, output, ref, err):
594 595 servefail = False
595 596 lines = []
596 597 for line in _unified_diff(expected, output, ref, err):
597 598 if line.startswith(b'+++') or line.startswith(b'---'):
598 599 line = line.replace(b'\\', b'/')
599 600 if line.endswith(b' \n'):
600 601 line = line[:-2] + b'\n'
601 602 lines.append(line)
602 603 if not servefail and line.startswith(
603 604 b'+ abort: child process failed to start'):
604 605 servefail = True
605 606
606 607 return servefail, lines
607 608
608 609 verbose = False
609 610 def vlog(*msg):
610 611 """Log only when in verbose mode."""
611 612 if verbose is False:
612 613 return
613 614
614 615 return log(*msg)
615 616
616 617 # Bytes that break XML even in a CDATA block: control characters 0-31
617 618 # sans \t, \n and \r
618 619 CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")
619 620
620 621 # Match feature conditionalized output lines in the form, capturing the feature
621 622 # list in group 2, and the preceding line output in group 1:
622 623 #
623 624 # output..output (feature !)\n
624 625 optline = re.compile(b'(.*) \((.+?) !\)\n$')
625 626
626 627 def cdatasafe(data):
627 628 """Make a string safe to include in a CDATA block.
628 629
629 630 Certain control characters are illegal in a CDATA block, and
630 631 there's no way to include a ]]> in a CDATA either. This function
631 632 replaces illegal bytes with ? and adds a space between the ]] so
632 633 that it won't break the CDATA block.
633 634 """
634 635 return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>')
635 636
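
Concretely, cdatasafe() applies two rules: the problematic control bytes become '?', and ']]>' gains a space so it can no longer close the CDATA block. Two invented examples:

    import re

    CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")

    def cdatasafe(data):
        return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>')

    assert cdatasafe(b'ok\x07bell') == b'ok?bell'
    assert cdatasafe(b'end]]>of block') == b'end] ]>of block'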
636 637 def log(*msg):
637 638 """Log something to stdout.
638 639
639 640 Arguments are strings to print.
640 641 """
641 642 with iolock:
642 643 if verbose:
643 644 print(verbose, end=' ')
644 645 for m in msg:
645 646 print(m, end=' ')
646 647 print()
647 648 sys.stdout.flush()
648 649
649 650 def highlightdiff(line, color):
650 651 if not color:
651 652 return line
652 653 assert pygmentspresent
653 654 return pygments.highlight(line.decode('latin1'), difflexer,
654 655 terminal256formatter).encode('latin1')
655 656
656 657 def highlightmsg(msg, color):
657 658 if not color:
658 659 return msg
659 660 assert pygmentspresent
660 661 return pygments.highlight(msg, runnerlexer, runnerformatter)
661 662
662 663 def terminate(proc):
663 664 """Terminate subprocess"""
664 665 vlog('# Terminating process %d' % proc.pid)
665 666 try:
666 667 proc.terminate()
667 668 except OSError:
668 669 pass
669 670
670 671 def killdaemons(pidfile):
671 672 import killdaemons as killmod
672 673 return killmod.killdaemons(pidfile, tryhard=False, remove=True,
673 674 logfn=vlog)
674 675
675 676 class Test(unittest.TestCase):
676 677 """Encapsulates a single, runnable test.
677 678
678 679 While this class conforms to the unittest.TestCase API, it differs in that
679 680 instances need to be instantiated manually. (Typically, unittest.TestCase
680 681 classes are instantiated automatically by scanning modules.)
681 682 """
682 683
683 684 # Status code reserved for skipped tests (used by hghave).
684 685 SKIPPED_STATUS = 80
685 686
686 687 def __init__(self, path, outputdir, tmpdir, keeptmpdir=False,
687 688 debug=False,
688 689 first=False,
689 690 timeout=None,
690 691 startport=None, extraconfigopts=None,
691 692 py3kwarnings=False, shell=None, hgcommand=None,
692 693 slowtimeout=None, usechg=False,
693 694 useipv6=False):
694 695 """Create a test from parameters.
695 696
696 697 path is the full path to the file defining the test.
697 698
698 699 tmpdir is the main temporary directory to use for this test.
699 700
700 701 keeptmpdir determines whether to keep the test's temporary directory
701 702 after execution. It defaults to removal (False).
702 703
703 704 debug mode will make the test execute verbosely, with unfiltered
704 705 output.
705 706
706 707 timeout controls the maximum run time of the test. It is ignored when
707 708 debug is True. See slowtimeout for tests with #require slow.
708 709
709 710 slowtimeout overrides timeout if the test has #require slow.
710 711
711 712 startport controls the starting port number to use for this test. Each
712 713 test will reserve 3 port numbers for execution. It is the caller's
713 714 responsibility to allocate a non-overlapping port range to Test
714 715 instances.
715 716
716 717 extraconfigopts is an iterable of extra hgrc config options. Values
717 718 must have the form "key=value" (something understood by hgrc). Values
718 719 of the form "foo.key=value" will result in "[foo] key=value".
719 720
720 721 py3kwarnings enables Py3k warnings.
721 722
722 723 shell is the shell to execute tests in.
723 724 """
724 725 if timeout is None:
725 726 timeout = defaults['timeout']
726 727 if startport is None:
727 728 startport = defaults['port']
728 729 if slowtimeout is None:
729 730 slowtimeout = defaults['slowtimeout']
730 731 self.path = path
731 732 self.bname = os.path.basename(path)
732 733 self.name = _strpath(self.bname)
733 734 self._testdir = os.path.dirname(path)
734 735 self._outputdir = outputdir
735 736 self._tmpname = os.path.basename(path)
736 737 self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)
737 738
738 739 self._threadtmp = tmpdir
739 740 self._keeptmpdir = keeptmpdir
740 741 self._debug = debug
741 742 self._first = first
742 743 self._timeout = timeout
743 744 self._slowtimeout = slowtimeout
744 745 self._startport = startport
745 746 self._extraconfigopts = extraconfigopts or []
746 747 self._py3kwarnings = py3kwarnings
747 748 self._shell = _bytespath(shell)
748 749 self._hgcommand = hgcommand or b'hg'
749 750 self._usechg = usechg
750 751 self._useipv6 = useipv6
751 752
752 753 self._aborted = False
753 754 self._daemonpids = []
754 755 self._finished = None
755 756 self._ret = None
756 757 self._out = None
757 758 self._skipped = None
758 759 self._testtmp = None
759 760 self._chgsockdir = None
760 761
761 762 self._refout = self.readrefout()
762 763
763 764 def readrefout(self):
764 765 """read reference output"""
765 766 # If we're not in --debug mode and reference output file exists,
766 767 # check test output against it.
767 768 if self._debug:
768 769 return None # to match "out is None"
769 770 elif os.path.exists(self.refpath):
770 771 with open(self.refpath, 'rb') as f:
771 772 return f.read().splitlines(True)
772 773 else:
773 774 return []
774 775
775 776 # needed to get base class __repr__ running
776 777 @property
777 778 def _testMethodName(self):
778 779 return self.name
779 780
780 781 def __str__(self):
781 782 return self.name
782 783
783 784 def shortDescription(self):
784 785 return self.name
785 786
786 787 def setUp(self):
787 788 """Tasks to perform before run()."""
788 789 self._finished = False
789 790 self._ret = None
790 791 self._out = None
791 792 self._skipped = None
792 793
793 794 try:
794 795 os.mkdir(self._threadtmp)
795 796 except OSError as e:
796 797 if e.errno != errno.EEXIST:
797 798 raise
798 799
799 800 name = self._tmpname
800 801 self._testtmp = os.path.join(self._threadtmp, name)
801 802 os.mkdir(self._testtmp)
802 803
803 804 # Remove any previous output files.
804 805 if os.path.exists(self.errpath):
805 806 try:
806 807 os.remove(self.errpath)
807 808 except OSError as e:
808 809 # We might have raced another test to clean up a .err
809 810 # file, so ignore ENOENT when removing a previous .err
810 811 # file.
811 812 if e.errno != errno.ENOENT:
812 813 raise
813 814
814 815 if self._usechg:
815 816 self._chgsockdir = os.path.join(self._threadtmp,
816 817 b'%s.chgsock' % name)
817 818 os.mkdir(self._chgsockdir)
818 819
819 820 def run(self, result):
820 821 """Run this test and report results against a TestResult instance."""
821 822 # This function is extremely similar to unittest.TestCase.run(). Once
822 823 # we require Python 2.7 (or at least its version of unittest), this
823 824 # function can largely go away.
824 825 self._result = result
825 826 result.startTest(self)
826 827 try:
827 828 try:
828 829 self.setUp()
829 830 except (KeyboardInterrupt, SystemExit):
830 831 self._aborted = True
831 832 raise
832 833 except Exception:
833 834 result.addError(self, sys.exc_info())
834 835 return
835 836
836 837 success = False
837 838 try:
838 839 self.runTest()
839 840 except KeyboardInterrupt:
840 841 self._aborted = True
841 842 raise
842 843 except unittest.SkipTest as e:
843 844 result.addSkip(self, str(e))
844 845 # The base class will have already counted this as a
845 846 # test we "ran", but we want to exclude skipped tests
846 847 # from the count of tests actually run.
847 848 result.testsRun -= 1
848 849 except self.failureException as e:
849 850 # This differs from unittest in that we don't capture
850 851 # the stack trace. This is for historical reasons and
851 852 # this decision could be revisited in the future,
852 853 # especially for PythonTest instances.
853 854 if result.addFailure(self, str(e)):
854 855 success = True
855 856 except Exception:
856 857 result.addError(self, sys.exc_info())
857 858 else:
858 859 success = True
859 860
860 861 try:
861 862 self.tearDown()
862 863 except (KeyboardInterrupt, SystemExit):
863 864 self._aborted = True
864 865 raise
865 866 except Exception:
866 867 result.addError(self, sys.exc_info())
867 868 success = False
868 869
869 870 if success:
870 871 result.addSuccess(self)
871 872 finally:
872 873 result.stopTest(self, interrupted=self._aborted)
873 874
874 875 def runTest(self):
875 876 """Run this test instance.
876 877
877 878 This will return a tuple describing the result of the test.
878 879 """
879 880 env = self._getenv()
880 881 self._genrestoreenv(env)
881 882 self._daemonpids.append(env['DAEMON_PIDS'])
882 883 self._createhgrc(env['HGRCPATH'])
883 884
884 885 vlog('# Test', self.name)
885 886
886 887 ret, out = self._run(env)
887 888 self._finished = True
888 889 self._ret = ret
889 890 self._out = out
890 891
891 892 def describe(ret):
892 893 if ret < 0:
893 894 return 'killed by signal: %d' % -ret
894 895 return 'returned error code %d' % ret
895 896
896 897 self._skipped = False
897 898
898 899 if ret == self.SKIPPED_STATUS:
899 900 if out is None: # Debug mode, nothing to parse.
900 901 missing = ['unknown']
901 902 failed = None
902 903 else:
903 904 missing, failed = TTest.parsehghaveoutput(out)
904 905
905 906 if not missing:
906 907 missing = ['skipped']
907 908
908 909 if failed:
909 910 self.fail('hg have failed checking for %s' % failed[-1])
910 911 else:
911 912 self._skipped = True
912 913 raise unittest.SkipTest(missing[-1])
913 914 elif ret == 'timeout':
914 915 self.fail('timed out')
915 916 elif ret is False:
916 917 self.fail('no result code from test')
917 918 elif out != self._refout:
918 919 # Diff generation may rely on written .err file.
919 920 if (ret != 0 or out != self._refout) and not self._skipped \
920 921 and not self._debug:
921 922 with open(self.errpath, 'wb') as f:
922 923 for line in out:
923 924 f.write(line)
924 925
925 926 # The result object handles diff calculation for us.
926 927 with firstlock:
927 928 if self._result.addOutputMismatch(self, ret, out, self._refout):
928 929 # change was accepted, skip failing
929 930 return
930 931 if self._first:
931 932 global firsterror
932 933 firsterror = True
933 934
934 935 if ret:
935 936 msg = 'output changed and ' + describe(ret)
936 937 else:
937 938 msg = 'output changed'
938 939
939 940 self.fail(msg)
940 941 elif ret:
941 942 self.fail(describe(ret))
942 943
943 944 def tearDown(self):
944 945 """Tasks to perform after run()."""
945 946 for entry in self._daemonpids:
946 947 killdaemons(entry)
947 948 self._daemonpids = []
948 949
949 950 if self._keeptmpdir:
950 951 log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
951 952 (self._testtmp.decode('utf-8'),
952 953 self._threadtmp.decode('utf-8')))
953 954 else:
954 955 shutil.rmtree(self._testtmp, True)
955 956 shutil.rmtree(self._threadtmp, True)
956 957
957 958 if self._usechg:
958 959 # chgservers will stop automatically after they find the socket
959 960 # files are deleted
960 961 shutil.rmtree(self._chgsockdir, True)
961 962
962 963 if (self._ret != 0 or self._out != self._refout) and not self._skipped \
963 964 and not self._debug and self._out:
964 965 with open(self.errpath, 'wb') as f:
965 966 for line in self._out:
966 967 f.write(line)
967 968
968 969 vlog("# Ret was:", self._ret, '(%s)' % self.name)
969 970
970 971 def _run(self, env):
971 972 # This should be implemented in child classes to run tests.
972 973 raise unittest.SkipTest('unknown test type')
973 974
974 975 def abort(self):
975 976 """Terminate execution of this test."""
976 977 self._aborted = True
977 978
978 979 def _portmap(self, i):
979 980 offset = b'' if i == 0 else b'%d' % i
980 981 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
981 982
982 983 def _getreplacements(self):
983 984 """Obtain a mapping of text replacements to apply to test output.
984 985
985 986 Test output needs to be normalized so it can be compared to expected
986 987 output. This function defines how some of that normalization will
987 988 occur.
988 989 """
989 990 r = [
990 991 # This list should be parallel to defineport in _getenv
991 992 self._portmap(0),
992 993 self._portmap(1),
993 994 self._portmap(2),
994 995 (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
995 996 (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
996 997 ]
997 998 r.append((self._escapepath(self._testtmp), b'$TESTTMP'))
998 999
999 1000 replacementfile = os.path.join(self._testdir, b'common-pattern.py')
1000 1001
1001 1002 if os.path.exists(replacementfile):
1002 1003 data = {}
1003 1004 with open(replacementfile, mode='rb') as source:
1004 1005 # the intermediate 'compile' step helps with debugging
1005 1006 code = compile(source.read(), replacementfile, 'exec')
1006 1007 exec(code, data)
1007 1008 for value in data.get('substitutions', ()):
1008 1009 if len(value) != 2:
1009 1010 msg = 'malformatted substitution in %s: %r'
1010 1011 msg %= (replacementfile, value)
1011 1012 raise ValueError(msg)
1012 1013 r.append(value)
1013 1014 return r
1014 1015
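
The optional common-pattern.py hook loaded above is expected to define a module-level 'substitutions' list of two-item (pattern, replacement) pairs, which are appended to the built-in replacements and later applied with re.sub(). A sketch of such a file (both patterns invented):

    # common-pattern.py, executed by _getreplacements() via compile() + exec()
    substitutions = [
        # (bytes regex, bytes replacement) pairs
        (br'\d{4}-\d{2}-\d{2}', b'$ISODATE$'),
        (br'bound to port \d+', b'bound to port $PORT$'),
    ]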
1015 1016 def _escapepath(self, p):
1016 1017 if os.name == 'nt':
1017 1018 return (
1018 1019 (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
1019 1020 c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
1020 1021 for c in [p[i:i + 1] for i in range(len(p))]))
1021 1022 )
1022 1023 else:
1023 1024 return re.escape(p)
1024 1025
1025 1026 def _localip(self):
1026 1027 if self._useipv6:
1027 1028 return b'::1'
1028 1029 else:
1029 1030 return b'127.0.0.1'
1030 1031
1031 1032 def _genrestoreenv(self, testenv):
1032 1033 """Generate a script that can be used by tests to restore the original
1033 1034 environment."""
1034 1035 # Put the restoreenv script inside self._threadtmp
1035 1036 scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
1036 1037 testenv['HGTEST_RESTOREENV'] = _strpath(scriptpath)
1037 1038
1038 1039 # Only restore environment variable names that the shell allows
1039 1040 # us to export.
1040 1041 name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')
1041 1042
1042 1043 # Do not restore these variables; otherwise tests would fail.
1043 1044 reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}
1044 1045
1045 1046 with open(scriptpath, 'w') as envf:
1046 1047 for name, value in origenviron.items():
1047 1048 if not name_regex.match(name):
1048 1049 # Skip environment variables with unusual names not
1049 1050 # allowed by most shells.
1050 1051 continue
1051 1052 if name in reqnames:
1052 1053 continue
1053 1054 envf.write('%s=%s\n' % (name, shellquote(value)))
1054 1055
1055 1056 for name in testenv:
1056 1057 if name in origenviron or name in reqnames:
1057 1058 continue
1058 1059 envf.write('unset %s\n' % (name,))
1059 1060
1060 1061 def _getenv(self):
1061 1062 """Obtain environment variables to use during test execution."""
1062 1063 def defineport(i):
1063 1064 offset = '' if i == 0 else '%s' % i
1064 1065 env["HGPORT%s" % offset] = '%s' % (self._startport + i)
1065 1066 env = os.environ.copy()
1066 1067 env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
1067 1068 env['HGEMITWARNINGS'] = '1'
1068 1069 env['TESTTMP'] = _strpath(self._testtmp)
1069 1070 env['TESTNAME'] = self.name
1070 1071 env['HOME'] = _strpath(self._testtmp)
1071 1072 # This number should match portneeded in _getport
1072 1073 for port in xrange(3):
1073 1074 # This list should be parallel to _portmap in _getreplacements
1074 1075 defineport(port)
1075 1076 env["HGRCPATH"] = _strpath(os.path.join(self._threadtmp, b'.hgrc'))
1076 1077 env["DAEMON_PIDS"] = _strpath(os.path.join(self._threadtmp,
1077 1078 b'daemon.pids'))
1078 1079 env["HGEDITOR"] = ('"' + sys.executable + '"'
1079 1080 + ' -c "import sys; sys.exit(0)"')
1080 1081 env["HGMERGE"] = "internal:merge"
1081 1082 env["HGUSER"] = "test"
1082 1083 env["HGENCODING"] = "ascii"
1083 1084 env["HGENCODINGMODE"] = "strict"
1084 1085 env["HGHOSTNAME"] = "test-hostname"
1085 1086 env['HGIPV6'] = str(int(self._useipv6))
1086 1087 if 'HGCATAPULTSERVERPIPE' not in env:
1087 1088 env['HGCATAPULTSERVERPIPE'] = os.devnull
1088 1089
1089 1090 extraextensions = []
1090 1091 for opt in self._extraconfigopts:
1091 1092 section, key = opt.encode('utf-8').split(b'.', 1)
1092 1093 if section != 'extensions':
1093 1094 continue
1094 1095 name = key.split(b'=', 1)[0]
1095 1096 extraextensions.append(name)
1096 1097
1097 1098 if extraextensions:
1098 1099 env['HGTESTEXTRAEXTENSIONS'] = b' '.join(extraextensions)
1099 1100
1100 1101 # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
1101 1102 # IP addresses.
1102 1103 env['LOCALIP'] = _strpath(self._localip())
1103 1104
1104 1105 # Reset some environment variables to well-known values so that
1105 1106 # the tests produce repeatable output.
1106 1107 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
1107 1108 env['TZ'] = 'GMT'
1108 1109 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
1109 1110 env['COLUMNS'] = '80'
1110 1111 env['TERM'] = 'xterm'
1111 1112
1112 1113 for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
1113 1114 'HGPLAIN HGPLAINEXCEPT EDITOR VISUAL PAGER ' +
1114 1115 'NO_PROXY CHGDEBUG').split():
1115 1116 if k in env:
1116 1117 del env[k]
1117 1118
1118 1119 # unset env related to hooks
1119 1120 for k in list(env):
1120 1121 if k.startswith('HG_'):
1121 1122 del env[k]
1122 1123
1123 1124 if self._usechg:
1124 1125 env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
1125 1126
1126 1127 return env
1127 1128
1128 1129 def _createhgrc(self, path):
1129 1130 """Create an hgrc file for this test."""
1130 1131 with open(path, 'wb') as hgrc:
1131 1132 hgrc.write(b'[ui]\n')
1132 1133 hgrc.write(b'slash = True\n')
1133 1134 hgrc.write(b'interactive = False\n')
1134 1135 hgrc.write(b'mergemarkers = detailed\n')
1135 1136 hgrc.write(b'promptecho = True\n')
1136 1137 hgrc.write(b'[defaults]\n')
1137 1138 hgrc.write(b'[devel]\n')
1138 1139 hgrc.write(b'all-warnings = true\n')
1139 1140 hgrc.write(b'default-date = 0 0\n')
1140 1141 hgrc.write(b'[largefiles]\n')
1141 1142 hgrc.write(b'usercache = %s\n' %
1142 1143 (os.path.join(self._testtmp, b'.cache/largefiles')))
1143 1144 hgrc.write(b'[lfs]\n')
1144 1145 hgrc.write(b'usercache = %s\n' %
1145 1146 (os.path.join(self._testtmp, b'.cache/lfs')))
1146 1147 hgrc.write(b'[web]\n')
1147 1148 hgrc.write(b'address = localhost\n')
1148 1149 hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii'))
1149 1150 hgrc.write(b'server-header = testing stub value\n')
1150 1151
1151 1152 for opt in self._extraconfigopts:
1152 1153 section, key = opt.encode('utf-8').split(b'.', 1)
1153 1154 assert b'=' in key, ('extra config opt %s must '
1154 1155 'have an = for assignment' % opt)
1155 1156 hgrc.write(b'[%s]\n%s\n' % (section, key))
1156 1157
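
This is where the 'foo.key=value' convention documented in Test.__init__ lands: the option splits on the first dot into a section and an assignment. A sketch of the transformation for one invented option:

    opt = 'web.allow-push=*'
    section, key = opt.encode('utf-8').split(b'.', 1)
    assert (section, key) == (b'web', b'allow-push=*')

    # Written into the generated hgrc as:
    #   [web]
    #   allow-push=*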
1157 1158 def fail(self, msg):
1158 1159 # unittest differentiates between errored and failed.
1159 1160 # Failed is denoted by AssertionError (by default at least).
1160 1161 raise AssertionError(msg)
1161 1162
1162 1163 def _runcommand(self, cmd, env, normalizenewlines=False):
1163 1164 """Run command in a sub-process, capturing the output (stdout and
1164 1165 stderr).
1165 1166
1166 1167 Return a tuple (exitcode, output). output is None in debug mode.
1167 1168 """
1168 1169 if self._debug:
1169 1170 proc = subprocess.Popen(_strpath(cmd), shell=True,
1170 1171 cwd=_strpath(self._testtmp),
1171 1172 env=env)
1172 1173 ret = proc.wait()
1173 1174 return (ret, None)
1174 1175
1175 1176 proc = Popen4(cmd, self._testtmp, self._timeout, env)
1176 1177 def cleanup():
1177 1178 terminate(proc)
1178 1179 ret = proc.wait()
1179 1180 if ret == 0:
1180 1181 ret = signal.SIGTERM << 8
1181 1182 killdaemons(env['DAEMON_PIDS'])
1182 1183 return ret
1183 1184
1184 1185 output = b''
1185 1186 proc.tochild.close()
1186 1187
1187 1188 try:
1188 1189 output = proc.fromchild.read()
1189 1190 except KeyboardInterrupt:
1190 1191 vlog('# Handling keyboard interrupt')
1191 1192 cleanup()
1192 1193 raise
1193 1194
1194 1195 ret = proc.wait()
1195 1196 if wifexited(ret):
1196 1197 ret = os.WEXITSTATUS(ret)
1197 1198
1198 1199 if proc.timeout:
1199 1200 ret = 'timeout'
1200 1201
1201 1202 if ret:
1202 1203 killdaemons(env['DAEMON_PIDS'])
1203 1204
1204 1205 for s, r in self._getreplacements():
1205 1206 output = re.sub(s, r, output)
1206 1207
1207 1208 if normalizenewlines:
1208 1209 output = output.replace(b'\r\n', b'\n')
1209 1210
1210 1211 return ret, output.splitlines(True)
1211 1212
1212 1213 class PythonTest(Test):
1213 1214 """A Python-based test."""
1214 1215
1215 1216 @property
1216 1217 def refpath(self):
1217 1218 return os.path.join(self._testdir, b'%s.out' % self.bname)
1218 1219
1219 1220 def _run(self, env):
1220 1221 py3kswitch = self._py3kwarnings and b' -3' or b''
1221 1222 cmd = b'"%s"%s "%s"' % (PYTHON, py3kswitch, self.path)
1222 1223 vlog("# Running", cmd)
1223 1224 normalizenewlines = os.name == 'nt'
1224 1225 result = self._runcommand(cmd, env,
1225 1226 normalizenewlines=normalizenewlines)
1226 1227 if self._aborted:
1227 1228 raise KeyboardInterrupt()
1228 1229
1229 1230 return result
1230 1231
1231 1232 # Some glob patterns apply only in some circumstances, so the script
1232 1233 # might want to remove (glob) annotations that otherwise should be
1233 1234 # retained.
1234 1235 checkcodeglobpats = [
1235 1236 # On Windows it looks like \ doesn't require a (glob), but we know
1236 1237 # better.
1237 1238 re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
1238 1239 re.compile(br'^moving \S+/.*[^)]$'),
1239 1240 re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
1240 1241 # Not all platforms have 127.0.0.1 as loopback (though most do),
1241 1242 # so we always glob that too.
1242 1243 re.compile(br'.*\$LOCALIP.*$'),
1243 1244 ]
1244 1245
1245 1246 bchr = chr
1246 1247 if PYTHON3:
1247 1248 bchr = lambda x: bytes([x])
1248 1249
1249 1250 class TTest(Test):
1250 1251 """A "t test" is a test backed by a .t file."""
1251 1252
1252 1253 SKIPPED_PREFIX = b'skipped: '
1253 1254 FAILED_PREFIX = b'hghave check failed: '
1254 1255 NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search
1255 1256
1256 1257 ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
1257 1258 ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
1258 1259 ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
1259 1260
1260 1261 def __init__(self, path, *args, **kwds):
1261 1262 # accept an extra "case" parameter
1262 1263 case = kwds.pop('case', [])
1263 1264 self._case = case
1264 1265 self._allcases = {x for y in parsettestcases(path) for x in y}
1265 1266 super(TTest, self).__init__(path, *args, **kwds)
1266 1267 if case:
1267 1268 casepath = b'#'.join(case)
1268 1269 self.name = '%s#%s' % (self.name, _strpath(casepath))
1269 1270 self.errpath = b'%s#%s.err' % (self.errpath[:-4], casepath)
1270 1271 self._tmpname += b'-%s' % casepath
1271 1272 self._have = {}
1272 1273
1273 1274 @property
1274 1275 def refpath(self):
1275 1276 return os.path.join(self._testdir, self.bname)
1276 1277
1277 1278 def _run(self, env):
1278 1279 with open(self.path, 'rb') as f:
1279 1280 lines = f.readlines()
1280 1281
1281 1282 # .t file is both reference output and the test input, keep reference
1282 1283 # output updated with the test input. This avoids some race
1283 1284 # conditions where the reference output does not match the actual test.
1284 1285 if self._refout is not None:
1285 1286 self._refout = lines
1286 1287
1287 1288 salt, script, after, expected = self._parsetest(lines)
1288 1289
1289 1290 # Write out the generated script.
1290 1291 fname = b'%s.sh' % self._testtmp
1291 1292 with open(fname, 'wb') as f:
1292 1293 for l in script:
1293 1294 f.write(l)
1294 1295
1295 1296 cmd = b'%s "%s"' % (self._shell, fname)
1296 1297 vlog("# Running", cmd)
1297 1298
1298 1299 exitcode, output = self._runcommand(cmd, env)
1299 1300
1300 1301 if self._aborted:
1301 1302 raise KeyboardInterrupt()
1302 1303
1303 1304 # Do not merge output if skipped. Return hghave message instead.
1304 1305 # Similarly, with --debug, output is None.
1305 1306 if exitcode == self.SKIPPED_STATUS or output is None:
1306 1307 return exitcode, output
1307 1308
1308 1309 return self._processoutput(exitcode, output, salt, after, expected)
1309 1310
1310 1311 def _hghave(self, reqs):
1311 1312 allreqs = b' '.join(reqs)
1312 1313 if allreqs in self._have:
1313 1314 return self._have.get(allreqs)
1314 1315
1315 1316 # TODO do something smarter when all other uses of hghave are gone.
1316 1317 runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
1317 1318 tdir = runtestdir.replace(b'\\', b'/')
1318 1319 proc = Popen4(b'%s -c "%s/hghave %s"' %
1319 1320 (self._shell, tdir, allreqs),
1320 1321 self._testtmp, 0, self._getenv())
1321 1322 stdout, stderr = proc.communicate()
1322 1323 ret = proc.wait()
1323 1324 if wifexited(ret):
1324 1325 ret = os.WEXITSTATUS(ret)
1325 1326 if ret == 2:
1326 1327 print(stdout.decode('utf-8'))
1327 1328 sys.exit(1)
1328 1329
1329 1330 if ret != 0:
1330 1331 self._have[allreqs] = (False, stdout)
1331 1332 return False, stdout
1332 1333
1333 1334 if b'slow' in reqs:
1334 1335 self._timeout = self._slowtimeout
1335 1336
1336 1337 self._have[allreqs] = (True, None)
1337 1338 return True, None
1338 1339
1339 1340 def _iftest(self, args):
1340 1341 # implements "#if"
1341 1342 reqs = []
1342 1343 for arg in args:
1343 1344 if arg.startswith(b'no-') and arg[3:] in self._allcases:
1344 1345 if arg[3:] in self._case:
1345 1346 return False
1346 1347 elif arg in self._allcases:
1347 1348 if arg not in self._case:
1348 1349 return False
1349 1350 else:
1350 1351 reqs.append(arg)
1351 1352 return self._hghave(reqs)[0]
1352 1353
1353 1354 def _parsetest(self, lines):
1354 1355 # We generate a shell script which outputs unique markers to line
1355 1356 # up script results with our source. These markers include input
1356 1357 # line number and the last return code.
1357 1358 salt = b"SALT%d" % time.time()
1358 1359 def addsalt(line, inpython):
1359 1360 if inpython:
1360 1361 script.append(b'%s %d 0\n' % (salt, line))
1361 1362 else:
1362 1363 script.append(b'echo %s %d $?\n' % (salt, line))
1363 1364 active = []
1364 1365 session = str(uuid.uuid4())
1365 1366 if PYTHON3:
1366 1367 session = session.encode('ascii')
1367 1368 def toggletrace(cmd):
1368 1369 if isinstance(cmd, str):
1369 1370 quoted = shellquote(cmd.strip())
1370 1371 else:
1371 1372 quoted = shellquote(cmd.strip().decode('utf8')).encode('utf8')
1372 1373 quoted = quoted.replace(b'\\', b'\\\\')
1373 1374 if active:
1374 1375 script.append(
1375 1376 b'echo END %s %s >> "$HGCATAPULTSERVERPIPE"\n' % (
1376 1377 session, active[0]))
1377 1378 script.append(
1378 1379 b'echo START %s %s >> "$HGCATAPULTSERVERPIPE"\n' % (
1379 1380 session, quoted))
1380 1381 active[0:] = [quoted]
1381 1382
1382 1383 script = []
1383 1384
1384 1385 # After we run the shell script, we re-unify the script output
1385 1386 # with non-active parts of the source, with synchronization by our
1386 1387 # SALT line number markers. The after table contains the non-active
1387 1388 # components, ordered by line number.
1388 1389 after = {}
1389 1390
1390 1391 # Expected shell script output.
1391 1392 expected = {}
1392 1393
1393 1394 pos = prepos = -1
1394 1395
1395 1396 # True or False when in a true or false conditional section
1396 1397 skipping = None
1397 1398
1398 1399 # We keep track of whether or not we're in a Python block so we
1399 1400 # can generate the surrounding doctest magic.
1400 1401 inpython = False
1401 1402
1402 1403 if self._debug:
1403 1404 script.append(b'set -x\n')
1404 1405 if self._hgcommand != b'hg':
1405 1406 script.append(b'alias hg="%s"\n' % self._hgcommand)
1406 1407 if os.getenv('MSYSTEM'):
1407 1408 script.append(b'alias pwd="pwd -W"\n')
1408 1409
1409 1410 hgcatapult = os.getenv('HGCATAPULTSERVERPIPE')
1410 1411 if hgcatapult and hgcatapult != os.devnull:
1411 1412 # Kludge: use a while loop to keep the pipe from getting
1412 1413 # closed by our echo commands. The still-running file gets
1413 1414 # reaped at the end of the script, which causes the while
1414 1415 # loop to exit and closes the pipe. Sigh.
1415 1416 script.append(
1416 1417 b'rtendtracing() {\n'
1417 1418 b' echo END %(session)s %(name)s >> $HGCATAPULTSERVERPIPE\n'
1418 1419 b' rm -f "$TESTTMP/.still-running"\n'
1419 1420 b'}\n'
1420 1421 b'trap "rtendtracing" 0\n'
1421 1422 b'touch "$TESTTMP/.still-running"\n'
1422 1423 b'while [ -f "$TESTTMP/.still-running" ]; do sleep 1; done '
1423 1424 b'> $HGCATAPULTSERVERPIPE &\n'
1424 1425 b'HGCATAPULTSESSION=%(session)s ; export HGCATAPULTSESSION\n'
1425 1426 b'echo START %(session)s %(name)s >> $HGCATAPULTSERVERPIPE\n'
1426 1427 % {
1427 1428 'name': self.name,
1428 1429 'session': session,
1429 1430 }
1430 1431 )
1431 1432
1432 1433 if self._case:
1433 1434 casestr = b'#'.join(self._case)
1434 1435 if isinstance(self._case, str):
1435 1436 quoted = shellquote(casestr)
1436 1437 else:
1437 1438 quoted = shellquote(casestr.decode('utf8')).encode('utf8')
1438 1439 script.append(b'TESTCASE=%s\n' % quoted)
1439 1440 script.append(b'export TESTCASE\n')
1440 1441
1441 1442 n = 0
1442 1443 for n, l in enumerate(lines):
1443 1444 if not l.endswith(b'\n'):
1444 1445 l += b'\n'
1445 1446 if l.startswith(b'#require'):
1446 1447 lsplit = l.split()
1447 1448 if len(lsplit) < 2 or lsplit[0] != b'#require':
1448 1449 after.setdefault(pos, []).append(' !!! invalid #require\n')
1449 1450 if not skipping:
1450 1451 haveresult, message = self._hghave(lsplit[1:])
1451 1452 if not haveresult:
1452 1453 script = [b'echo "%s"\nexit 80\n' % message]
1453 1454 break
1454 1455 after.setdefault(pos, []).append(l)
1455 1456 elif l.startswith(b'#if'):
1456 1457 lsplit = l.split()
1457 1458 if len(lsplit) < 2 or lsplit[0] != b'#if':
1458 1459 after.setdefault(pos, []).append(' !!! invalid #if\n')
1459 1460 if skipping is not None:
1460 1461 after.setdefault(pos, []).append(' !!! nested #if\n')
1461 1462 skipping = not self._iftest(lsplit[1:])
1462 1463 after.setdefault(pos, []).append(l)
1463 1464 elif l.startswith(b'#else'):
1464 1465 if skipping is None:
1465 1466 after.setdefault(pos, []).append(' !!! missing #if\n')
1466 1467 skipping = not skipping
1467 1468 after.setdefault(pos, []).append(l)
1468 1469 elif l.startswith(b'#endif'):
1469 1470 if skipping is None:
1470 1471 after.setdefault(pos, []).append(' !!! missing #if\n')
1471 1472 skipping = None
1472 1473 after.setdefault(pos, []).append(l)
1473 1474 elif skipping:
1474 1475 after.setdefault(pos, []).append(l)
1475 1476 elif l.startswith(b' >>> '): # python inlines
1476 1477 after.setdefault(pos, []).append(l)
1477 1478 prepos = pos
1478 1479 pos = n
1479 1480 if not inpython:
1480 1481 # We've just entered a Python block. Add the header.
1481 1482 inpython = True
1482 1483 addsalt(prepos, False) # Make sure we report the exit code.
1483 1484 script.append(b'"%s" -m heredoctest <<EOF\n' % PYTHON)
1484 1485 addsalt(n, True)
1485 1486 script.append(l[2:])
1486 1487 elif l.startswith(b' ... '): # python inlines
1487 1488 after.setdefault(prepos, []).append(l)
1488 1489 script.append(l[2:])
1489 1490 elif l.startswith(b' $ '): # commands
1490 1491 if inpython:
1491 1492 script.append(b'EOF\n')
1492 1493 inpython = False
1493 1494 after.setdefault(pos, []).append(l)
1494 1495 prepos = pos
1495 1496 pos = n
1496 1497 addsalt(n, False)
1497 1498 rawcmd = l[4:]
1498 1499 cmd = rawcmd.split()
1499 1500 toggletrace(rawcmd)
1500 1501 if len(cmd) == 2 and cmd[0] == b'cd':
1501 1502 l = b' $ cd %s || exit 1\n' % cmd[1]
1502 1503 script.append(rawcmd)
1503 1504 elif l.startswith(b' > '): # continuations
1504 1505 after.setdefault(prepos, []).append(l)
1505 1506 script.append(l[4:])
1506 1507 elif l.startswith(b' '): # results
1507 1508 # Queue up a list of expected results.
1508 1509 expected.setdefault(pos, []).append(l[2:])
1509 1510 else:
1510 1511 if inpython:
1511 1512 script.append(b'EOF\n')
1512 1513 inpython = False
1513 1514 # Non-command/result. Queue up for merged output.
1514 1515 after.setdefault(pos, []).append(l)
1515 1516
1516 1517 if inpython:
1517 1518 script.append(b'EOF\n')
1518 1519 if skipping is not None:
1519 1520             after.setdefault(pos, []).append(b' !!! missing #endif\n')
1520 1521 addsalt(n + 1, False)
1521 1522 return salt, script, after, expected
1522 1523
1523 1524 def _processoutput(self, exitcode, output, salt, after, expected):
1524 1525 # Merge the script output back into a unified test.
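        # The raw output interleaves command output with "salt" marker
        # lines; each marker carries the source line number and the exit
        # code of the command that preceded it (split out as lcmd below).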
1525 1526 warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
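        # warnonly is a small state machine: 1 means undecided; a
        # glob-only mismatch can upgrade it to 2 (finish with a warning
        # rather than a failure); any hard mismatch or a nonzero exit
        # code pins it at 3 (a real failure).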
1526 1527 if exitcode != 0:
1527 1528 warnonly = 3
1528 1529
1529 1530 pos = -1
1530 1531 postout = []
1531 1532 for l in output:
1532 1533 lout, lcmd = l, None
1533 1534 if salt in l:
1534 1535 lout, lcmd = l.split(salt, 1)
1535 1536
1536 1537 while lout:
1537 1538 if not lout.endswith(b'\n'):
1538 1539 lout += b' (no-eol)\n'
1539 1540
1540 1541 # Find the expected output at the current position.
1541 1542 els = [None]
1542 1543 if expected.get(pos, None):
1543 1544 els = expected[pos]
1544 1545
1545 1546 optional = []
1546 1547 for i, el in enumerate(els):
1547 1548 r = False
1548 1549 if el:
1549 1550 r, exact = self.linematch(el, lout)
1550 1551 if isinstance(r, str):
1551 1552 if r == '-glob':
1552 1553 lout = ''.join(el.rsplit(' (glob)', 1))
1553 1554 r = '' # Warn only this line.
1554 1555 elif r == "retry":
1555 1556 postout.append(b' ' + el)
1556 1557 else:
1557 1558 log('\ninfo, unknown linematch result: %r\n' % r)
1558 1559 r = False
1559 1560 if r:
1560 1561 els.pop(i)
1561 1562 break
1562 1563 if el:
1563 1564 if el.endswith(b" (?)\n"):
1564 1565 optional.append(i)
1565 1566 else:
1566 1567 m = optline.match(el)
1567 1568 if m:
1568 1569 conditions = [
1569 1570 c for c in m.group(2).split(b' ')]
1570 1571
1571 1572 if not self._iftest(conditions):
1572 1573 optional.append(i)
1573 1574 if exact:
1574 1575                             # Don't allow the line to be matched against a later
1575 1576 # line in the output
1576 1577 els.pop(i)
1577 1578 break
1578 1579
1579 1580 if r:
1580 1581 if r == "retry":
1581 1582 continue
1582 1583 # clean up any optional leftovers
1583 1584 for i in optional:
1584 1585 postout.append(b' ' + els[i])
1585 1586 for i in reversed(optional):
1586 1587 del els[i]
1587 1588 postout.append(b' ' + el)
1588 1589 else:
1589 1590 if self.NEEDESCAPE(lout):
1590 1591 lout = TTest._stringescape(b'%s (esc)\n' %
1591 1592 lout.rstrip(b'\n'))
1592 1593 postout.append(b' ' + lout) # Let diff deal with it.
1593 1594 if r != '': # If line failed.
1594 1595 warnonly = 3 # for sure not
1595 1596 elif warnonly == 1: # Is "not yet" and line is warn only.
1596 1597 warnonly = 2 # Yes do warn.
1597 1598 break
1598 1599 else:
1599 1600 # clean up any optional leftovers
1600 1601 while expected.get(pos, None):
1601 1602 el = expected[pos].pop(0)
1602 1603 if el:
1603 1604 if not el.endswith(b" (?)\n"):
1604 1605 m = optline.match(el)
1605 1606 if m:
1606 1607 conditions = [c for c in m.group(2).split(b' ')]
1607 1608
1608 1609 if self._iftest(conditions):
1609 1610 # Don't append as optional line
1610 1611 continue
1611 1612 else:
1612 1613 continue
1613 1614 postout.append(b' ' + el)
1614 1615
1615 1616 if lcmd:
1616 1617 # Add on last return code.
1617 1618 ret = int(lcmd.split()[1])
1618 1619 if ret != 0:
1619 1620 postout.append(b' [%d]\n' % ret)
1620 1621 if pos in after:
1621 1622 # Merge in non-active test bits.
1622 1623 postout += after.pop(pos)
1623 1624 pos = int(lcmd.split()[0])
1624 1625
1625 1626 if pos in after:
1626 1627 postout += after.pop(pos)
1627 1628
1628 1629 if warnonly == 2:
1629 1630 exitcode = False # Set exitcode to warned.
1630 1631
1631 1632 return exitcode, postout
1632 1633
1633 1634 @staticmethod
1634 1635 def rematch(el, l):
1635 1636 try:
1636 1637 el = b'(?:' + el + b')'
1637 1638 # use \Z to ensure that the regex matches to the end of the string
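            # e.g. re.match(br'(?:foo)', b'foobar\n') would accept a mere
            # prefix; anchoring with \n\Z (or \r?\n\Z on Windows) forces
            # the pattern to consume the whole line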
1638 1639 if os.name == 'nt':
1639 1640 return re.match(el + br'\r?\n\Z', l)
1640 1641 return re.match(el + br'\n\Z', l)
1641 1642 except re.error:
1642 1643 # el is an invalid regex
1643 1644 return False
1644 1645
1645 1646 @staticmethod
1646 1647 def globmatch(el, l):
1647 1648         # The only supported special characters are * and ? plus /, which also
1648 1649         # matches \ on Windows. Escaping of these characters is supported.
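        # Illustrative translation: el = b'child*/x?.hg' becomes roughly
        # br'child.*[/\\]x.\.hg' (the [/\\] form only when os.altsep is
        # set) and is then fed through rematch() above.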
1649 1650 if el + b'\n' == l:
1650 1651 if os.altsep:
1651 1652 # matching on "/" is not needed for this line
1652 1653 for pat in checkcodeglobpats:
1653 1654 if pat.match(el):
1654 1655 return True
1655 1656 return b'-glob'
1656 1657 return True
1657 1658 el = el.replace(b'$LOCALIP', b'*')
1658 1659 i, n = 0, len(el)
1659 1660 res = b''
1660 1661 while i < n:
1661 1662 c = el[i:i + 1]
1662 1663 i += 1
1663 1664 if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
1664 1665 res += el[i - 1:i + 1]
1665 1666 i += 1
1666 1667 elif c == b'*':
1667 1668 res += b'.*'
1668 1669 elif c == b'?':
1669 1670 res += b'.'
1670 1671 elif c == b'/' and os.altsep:
1671 1672 res += b'[/\\\\]'
1672 1673 else:
1673 1674 res += re.escape(c)
1674 1675 return TTest.rematch(res, l)
1675 1676
1676 1677 def linematch(self, el, l):
1677 1678 if el == l: # perfect match (fast)
1678 1679 return True, True
1679 1680 retry = False
1680 1681 if el.endswith(b" (?)\n"):
1681 1682 retry = "retry"
1682 1683 el = el[:-5] + b"\n"
1683 1684 else:
1684 1685 m = optline.match(el)
1685 1686 if m:
1686 1687 conditions = [c for c in m.group(2).split(b' ')]
1687 1688
1688 1689 el = m.group(1) + b"\n"
1689 1690 if not self._iftest(conditions):
1690 1691 retry = "retry" # Not required by listed features
1691 1692
1692 1693 if el.endswith(b" (esc)\n"):
1693 1694 if PYTHON3:
1694 1695 el = el[:-7].decode('unicode_escape') + '\n'
1695 1696 el = el.encode('utf-8')
1696 1697 else:
1697 1698 el = el[:-7].decode('string-escape') + '\n'
1698 1699 if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
1699 1700 return True, True
1700 1701 if el.endswith(b" (re)\n"):
1701 1702 return (TTest.rematch(el[:-6], l) or retry), False
1702 1703 if el.endswith(b" (glob)\n"):
1703 1704 # ignore '(glob)' added to l by 'replacements'
1704 1705 if l.endswith(b" (glob)\n"):
1705 1706 l = l[:-8] + b"\n"
1706 1707 return (TTest.globmatch(el[:-8], l) or retry), False
1707 1708 if os.altsep:
1708 1709 _l = l.replace(b'\\', b'/')
1709 1710 if el == _l or os.name == 'nt' and el[:-1] + b'\r\n' == _l:
1710 1711 return True, True
1711 1712 return retry, True
1712 1713
1713 1714 @staticmethod
1714 1715 def parsehghaveoutput(lines):
1715 1716 '''Parse hghave log lines.
1716 1717
1717 1718 Return tuple of lists (missing, failed):
1718 1719 * the missing/unknown features
1719 1720 * the features for which existence check failed'''
1720 1721 missing = []
1721 1722 failed = []
1722 1723 for line in lines:
1723 1724 if line.startswith(TTest.SKIPPED_PREFIX):
1724 1725 line = line.splitlines()[0]
1725 1726 missing.append(line[len(TTest.SKIPPED_PREFIX):].decode('utf-8'))
1726 1727 elif line.startswith(TTest.FAILED_PREFIX):
1727 1728 line = line.splitlines()[0]
1728 1729 failed.append(line[len(TTest.FAILED_PREFIX):].decode('utf-8'))
1729 1730
1730 1731 return missing, failed
1731 1732
1732 1733 @staticmethod
1733 1734 def _escapef(m):
1734 1735 return TTest.ESCAPEMAP[m.group(0)]
1735 1736
1736 1737 @staticmethod
1737 1738 def _stringescape(s):
1738 1739 return TTest.ESCAPESUB(TTest._escapef, s)
1739 1740
1740 1741 iolock = threading.RLock()
1741 1742 firstlock = threading.RLock()
1742 1743 firsterror = False
1743 1744
1744 1745 class TestResult(unittest._TextTestResult):
1745 1746 """Holds results when executing via unittest."""
1746 1747 # Don't worry too much about accessing the non-public _TextTestResult.
1747 1748 # It is relatively common in Python testing tools.
1748 1749 def __init__(self, options, *args, **kwargs):
1749 1750 super(TestResult, self).__init__(*args, **kwargs)
1750 1751
1751 1752 self._options = options
1752 1753
1753 1754 # unittest.TestResult didn't have skipped until 2.7. We need to
1754 1755 # polyfill it.
1755 1756 self.skipped = []
1756 1757
1757 1758 # We have a custom "ignored" result that isn't present in any Python
1758 1759 # unittest implementation. It is very similar to skipped. It may make
1759 1760 # sense to map it into skip some day.
1760 1761 self.ignored = []
1761 1762
1762 1763 self.times = []
1763 1764 self._firststarttime = None
1764 1765 # Data stored for the benefit of generating xunit reports.
1765 1766 self.successes = []
1766 1767 self.faildata = {}
1767 1768
1768 1769 if options.color == 'auto':
1769 1770 self.color = pygmentspresent and self.stream.isatty()
1770 1771 elif options.color == 'never':
1771 1772 self.color = False
1772 1773 else: # 'always', for testing purposes
1773 1774 self.color = pygmentspresent
1774 1775
1775 1776 def onStart(self, test):
1776 1777         """ Can be overridden by a custom TestResult
1777 1778 """
1778 1779
1779 1780 def onEnd(self):
1780 1781         """ Can be overridden by a custom TestResult
1781 1782 """
1782 1783
1783 1784 def addFailure(self, test, reason):
1784 1785 self.failures.append((test, reason))
1785 1786
1786 1787 if self._options.first:
1787 1788 self.stop()
1788 1789 else:
1789 1790 with iolock:
1790 1791 if reason == "timed out":
1791 1792 self.stream.write('t')
1792 1793 else:
1793 1794 if not self._options.nodiff:
1794 1795 self.stream.write('\n')
1795 1796 # Exclude the '\n' from highlighting to lex correctly
1796 1797 formatted = 'ERROR: %s output changed\n' % test
1797 1798 self.stream.write(highlightmsg(formatted, self.color))
1798 1799 self.stream.write('!')
1799 1800
1800 1801 self.stream.flush()
1801 1802
1802 1803 def addSuccess(self, test):
1803 1804 with iolock:
1804 1805 super(TestResult, self).addSuccess(test)
1805 1806 self.successes.append(test)
1806 1807
1807 1808 def addError(self, test, err):
1808 1809 super(TestResult, self).addError(test, err)
1809 1810 if self._options.first:
1810 1811 self.stop()
1811 1812
1812 1813 # Polyfill.
1813 1814 def addSkip(self, test, reason):
1814 1815 self.skipped.append((test, reason))
1815 1816 with iolock:
1816 1817 if self.showAll:
1817 1818 self.stream.writeln('skipped %s' % reason)
1818 1819 else:
1819 1820 self.stream.write('s')
1820 1821 self.stream.flush()
1821 1822
1822 1823 def addIgnore(self, test, reason):
1823 1824 self.ignored.append((test, reason))
1824 1825 with iolock:
1825 1826 if self.showAll:
1826 1827 self.stream.writeln('ignored %s' % reason)
1827 1828 else:
1828 1829 if reason not in ('not retesting', "doesn't match keyword"):
1829 1830 self.stream.write('i')
1830 1831 else:
1831 1832 self.testsRun += 1
1832 1833 self.stream.flush()
1833 1834
1834 1835 def addOutputMismatch(self, test, ret, got, expected):
1835 1836 """Record a mismatch in test output for a particular test."""
1836 1837 if self.shouldStop or firsterror:
1837 1838             # Don't print; some other test case already failed and
1838 1839             # printed, so we're just stale and probably failed because
1839 1840             # our temp dir got cleaned up.
1840 1841 return
1841 1842
1842 1843 accepted = False
1843 1844 lines = []
1844 1845
1845 1846 with iolock:
1846 1847 if self._options.nodiff:
1847 1848 pass
1848 1849 elif self._options.view:
1849 1850 v = self._options.view
1850 1851 os.system(r"%s %s %s" %
1851 1852 (v, _strpath(test.refpath), _strpath(test.errpath)))
1852 1853 else:
1853 1854 servefail, lines = getdiff(expected, got,
1854 1855 test.refpath, test.errpath)
1855 1856 self.stream.write('\n')
1856 1857 for line in lines:
1857 1858 line = highlightdiff(line, self.color)
1858 1859 if PYTHON3:
1859 1860 self.stream.flush()
1860 1861 self.stream.buffer.write(line)
1861 1862 self.stream.buffer.flush()
1862 1863 else:
1863 1864 self.stream.write(line)
1864 1865 self.stream.flush()
1865 1866
1866 1867 if servefail:
1867 1868 raise test.failureException(
1868 1869 'server failed to start (HGPORT=%s)' % test._startport)
1869 1870
1870 1871 # handle interactive prompt without releasing iolock
1871 1872 if self._options.interactive:
1872 1873 if test.readrefout() != expected:
1873 1874 self.stream.write(
1874 1875 'Reference output has changed (run again to prompt '
1875 1876 'changes)')
1876 1877 else:
1877 1878 self.stream.write('Accept this change? [n] ')
1878 1879 self.stream.flush()
1879 1880 answer = sys.stdin.readline().strip()
1880 1881 if answer.lower() in ('y', 'yes'):
1881 1882 if test.path.endswith(b'.t'):
1882 1883 rename(test.errpath, test.path)
1883 1884 else:
1884 1885 rename(test.errpath, '%s.out' % test.path)
1885 1886 accepted = True
1886 1887 if not accepted:
1887 1888 self.faildata[test.name] = b''.join(lines)
1888 1889
1889 1890 return accepted
1890 1891
1891 1892 def startTest(self, test):
1892 1893 super(TestResult, self).startTest(test)
1893 1894
1894 1895         # os.times() reports the user and system CPU time consumed by the
1895 1896         # process and its children, along with the real elapsed time.
1896 1897         # One limitation: the child-process times are only reported on
1897 1898         # POSIX systems, not on Windows.
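        # os.times() returns a 5-tuple (user, system, children_user,
        # children_system, elapsed); indices 2, 3 and 4 are used below.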
1898 1899 test.started = os.times()
1899 1900 if self._firststarttime is None: # thread racy but irrelevant
1900 1901 self._firststarttime = test.started[4]
1901 1902
1902 1903 def stopTest(self, test, interrupted=False):
1903 1904 super(TestResult, self).stopTest(test)
1904 1905
1905 1906 test.stopped = os.times()
1906 1907
1907 1908 starttime = test.started
1908 1909 endtime = test.stopped
1909 1910 origin = self._firststarttime
1910 1911 self.times.append((test.name,
1911 1912 endtime[2] - starttime[2], # user space CPU time
1912 1913 endtime[3] - starttime[3], # sys space CPU time
1913 1914 endtime[4] - starttime[4], # real time
1914 1915 starttime[4] - origin, # start date in run context
1915 1916 endtime[4] - origin, # end date in run context
1916 1917 ))
1917 1918
1918 1919 if interrupted:
1919 1920 with iolock:
1920 1921 self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
1921 1922 test.name, self.times[-1][3]))
1922 1923
1923 1924 def getTestResult():
1924 1925 """
1925 1926     Return the TestResult class to use, honoring $CUSTOM_TEST_RESULT
1926 1927 """
1927 1928 if "CUSTOM_TEST_RESULT" in os.environ:
1928 1929 testresultmodule = __import__(os.environ["CUSTOM_TEST_RESULT"])
1929 1930 return testresultmodule.TestResult
1930 1931 else:
1931 1932 return TestResult
1932 1933
1933 1934 class TestSuite(unittest.TestSuite):
1934 1935 """Custom unittest TestSuite that knows how to execute Mercurial tests."""
1935 1936
1936 1937 def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
1937 1938 retest=False, keywords=None, loop=False, runs_per_test=1,
1938 1939 loadtest=None, showchannels=False,
1939 1940 *args, **kwargs):
1940 1941 """Create a new instance that can run tests with a configuration.
1941 1942
1942 1943 testdir specifies the directory where tests are executed from. This
1943 1944 is typically the ``tests`` directory from Mercurial's source
1944 1945 repository.
1945 1946
1946 1947 jobs specifies the number of jobs to run concurrently. Each test
1947 1948 executes on its own thread. Tests actually spawn new processes, so
1948 1949 state mutation should not be an issue.
1949 1950
1950 1951 If there is only one job, it will use the main thread.
1951 1952
1952 1953 whitelist and blacklist denote tests that have been whitelisted and
1953 1954 blacklisted, respectively. These arguments don't belong in TestSuite.
1954 1955 Instead, whitelist and blacklist should be handled by the thing that
1955 1956 populates the TestSuite with tests. They are present to preserve
1956 1957 backwards compatible behavior which reports skipped tests as part
1957 1958 of the results.
1958 1959
1959 1960 retest denotes whether to retest failed tests. This arguably belongs
1960 1961 outside of TestSuite.
1961 1962
1962 1963 keywords denotes key words that will be used to filter which tests
1963 1964 to execute. This arguably belongs outside of TestSuite.
1964 1965
1965 1966 loop denotes whether to loop over tests forever.
1966 1967 """
1967 1968 super(TestSuite, self).__init__(*args, **kwargs)
1968 1969
1969 1970 self._jobs = jobs
1970 1971 self._whitelist = whitelist
1971 1972 self._blacklist = blacklist
1972 1973 self._retest = retest
1973 1974 self._keywords = keywords
1974 1975 self._loop = loop
1975 1976 self._runs_per_test = runs_per_test
1976 1977 self._loadtest = loadtest
1977 1978 self._showchannels = showchannels
1978 1979
1979 1980 def run(self, result):
1980 1981 # We have a number of filters that need to be applied. We do this
1981 1982 # here instead of inside Test because it makes the running logic for
1982 1983 # Test simpler.
1983 1984 tests = []
1984 1985 num_tests = [0]
1985 1986 for test in self._tests:
1986 1987 def get():
1987 1988 num_tests[0] += 1
1988 1989 if getattr(test, 'should_reload', False):
1989 1990 return self._loadtest(test, num_tests[0])
1990 1991 return test
1991 1992 if not os.path.exists(test.path):
1992 1993 result.addSkip(test, "Doesn't exist")
1993 1994 continue
1994 1995
1995 1996 if not (self._whitelist and test.bname in self._whitelist):
1996 1997 if self._blacklist and test.bname in self._blacklist:
1997 1998 result.addSkip(test, 'blacklisted')
1998 1999 continue
1999 2000
2000 2001 if self._retest and not os.path.exists(test.errpath):
2001 2002 result.addIgnore(test, 'not retesting')
2002 2003 continue
2003 2004
2004 2005 if self._keywords:
2005 2006 with open(test.path, 'rb') as f:
2006 2007 t = f.read().lower() + test.bname.lower()
2007 2008 ignored = False
2008 2009 for k in self._keywords.lower().split():
2009 2010 if k not in t:
2010 2011 result.addIgnore(test, "doesn't match keyword")
2011 2012 ignored = True
2012 2013 break
2013 2014
2014 2015 if ignored:
2015 2016 continue
2016 2017 for _ in xrange(self._runs_per_test):
2017 2018 tests.append(get())
2018 2019
2019 2020 runtests = list(tests)
2020 2021 done = queue.Queue()
2021 2022 running = 0
2022 2023
2023 2024 channels = [""] * self._jobs
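        # One "channel" per job: each slot holds a short marquee string
        # for the test occupying it; the stat() thread below drains one
        # character per tick when --showchannels is in effect.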
2024 2025
2025 2026 def job(test, result):
2026 2027 for n, v in enumerate(channels):
2027 2028 if not v:
2028 2029 channel = n
2029 2030 break
2030 2031 else:
2031 2032 raise ValueError('Could not find output channel')
2032 2033 channels[channel] = "=" + test.name[5:].split(".")[0]
2033 2034 try:
2034 2035 test(result)
2035 2036 done.put(None)
2036 2037 except KeyboardInterrupt:
2037 2038 pass
2038 2039 except: # re-raises
2039 2040 done.put(('!', test, 'run-test raised an error, see traceback'))
2040 2041 raise
2041 2042 finally:
2042 2043 try:
2043 2044 channels[channel] = ''
2044 2045 except IndexError:
2045 2046 pass
2046 2047
2047 2048 def stat():
2048 2049 count = 0
2049 2050 while channels:
2050 2051 d = '\n%03s ' % count
2051 2052 for n, v in enumerate(channels):
2052 2053 if v:
2053 2054 d += v[0]
2054 2055 channels[n] = v[1:] or '.'
2055 2056 else:
2056 2057 d += ' '
2057 2058 d += ' '
2058 2059 with iolock:
2059 2060 sys.stdout.write(d + ' ')
2060 2061 sys.stdout.flush()
2061 2062 for x in xrange(10):
2062 2063 if channels:
2063 2064 time.sleep(.1)
2064 2065 count += 1
2065 2066
2066 2067 stoppedearly = False
2067 2068
2068 2069 if self._showchannels:
2069 2070 statthread = threading.Thread(target=stat, name="stat")
2070 2071 statthread.start()
2071 2072
2072 2073 try:
2073 2074 while tests or running:
2074 2075 if not done.empty() or running == self._jobs or not tests:
2075 2076 try:
2076 2077 done.get(True, 1)
2077 2078 running -= 1
2078 2079 if result and result.shouldStop:
2079 2080 stoppedearly = True
2080 2081 break
2081 2082 except queue.Empty:
2082 2083 continue
2083 2084 if tests and not running == self._jobs:
2084 2085 test = tests.pop(0)
2085 2086 if self._loop:
2086 2087 if getattr(test, 'should_reload', False):
2087 2088 num_tests[0] += 1
2088 2089 tests.append(
2089 2090 self._loadtest(test, num_tests[0]))
2090 2091 else:
2091 2092 tests.append(test)
2092 2093 if self._jobs == 1:
2093 2094 job(test, result)
2094 2095 else:
2095 2096 t = threading.Thread(target=job, name=test.name,
2096 2097 args=(test, result))
2097 2098 t.start()
2098 2099 running += 1
2099 2100
2100 2101 # If we stop early we still need to wait on started tests to
2101 2102 # finish. Otherwise, there is a race between the test completing
2102 2103 # and the test's cleanup code running. This could result in the
2103 2104             # test reporting incorrect results.
2104 2105 if stoppedearly:
2105 2106 while running:
2106 2107 try:
2107 2108 done.get(True, 1)
2108 2109 running -= 1
2109 2110 except queue.Empty:
2110 2111 continue
2111 2112 except KeyboardInterrupt:
2112 2113 for test in runtests:
2113 2114 test.abort()
2114 2115
2115 2116 channels = []
2116 2117
2117 2118 return result
2118 2119
2119 2120 # Save the most recent 5 wall-clock runtimes of each test to a
2120 2121 # human-readable text file named .testtimes. Tests are sorted
2121 2122 # alphabetically, while times for each test are listed from oldest to
2122 2123 # newest.
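# Each line has the form "<test> <t1> <t2> ...", oldest time first; e.g.
# (illustrative): test-commit.t 2.013 1.998 2.120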
2123 2124
2124 2125 def loadtimes(outputdir):
2125 2126 times = []
2126 2127 try:
2127 2128 with open(os.path.join(outputdir, b'.testtimes')) as fp:
2128 2129 for line in fp:
2129 2130 m = re.match('(.*?) ([0-9. ]+)', line)
2130 2131 times.append((m.group(1),
2131 2132 [float(t) for t in m.group(2).split()]))
2132 2133 except IOError as err:
2133 2134 if err.errno != errno.ENOENT:
2134 2135 raise
2135 2136 return times
2136 2137
2137 2138 def savetimes(outputdir, result):
2138 2139 saved = dict(loadtimes(outputdir))
2139 2140 maxruns = 5
2140 2141 skipped = set([str(t[0]) for t in result.skipped])
2141 2142 for tdata in result.times:
2142 2143 test, real = tdata[0], tdata[3]
2143 2144 if test not in skipped:
2144 2145 ts = saved.setdefault(test, [])
2145 2146 ts.append(real)
2146 2147 ts[:] = ts[-maxruns:]
2147 2148
2148 2149 fd, tmpname = tempfile.mkstemp(prefix=b'.testtimes',
2149 2150 dir=outputdir, text=True)
2150 2151 with os.fdopen(fd, 'w') as fp:
2151 2152 for name, ts in sorted(saved.items()):
2152 2153 fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts])))
2153 2154 timepath = os.path.join(outputdir, b'.testtimes')
2154 2155 try:
2155 2156 os.unlink(timepath)
2156 2157 except OSError:
2157 2158 pass
2158 2159 try:
2159 2160 os.rename(tmpname, timepath)
2160 2161 except OSError:
2161 2162 pass
2162 2163
2163 2164 class TextTestRunner(unittest.TextTestRunner):
2164 2165 """Custom unittest test runner that uses appropriate settings."""
2165 2166
2166 2167 def __init__(self, runner, *args, **kwargs):
2167 2168 super(TextTestRunner, self).__init__(*args, **kwargs)
2168 2169
2169 2170 self._runner = runner
2170 2171
2171 2172 self._result = getTestResult()(self._runner.options, self.stream,
2172 2173 self.descriptions, self.verbosity)
2173 2174
2174 2175 def listtests(self, test):
2175 2176 test = sorted(test, key=lambda t: t.name)
2176 2177
2177 2178 self._result.onStart(test)
2178 2179
2179 2180 for t in test:
2180 2181 print(t.name)
2181 2182 self._result.addSuccess(t)
2182 2183
2183 2184 if self._runner.options.xunit:
2184 2185 with open(self._runner.options.xunit, "wb") as xuf:
2185 2186 self._writexunit(self._result, xuf)
2186 2187
2187 2188 if self._runner.options.json:
2188 2189 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2189 2190 with open(jsonpath, 'w') as fp:
2190 2191 self._writejson(self._result, fp)
2191 2192
2192 2193 return self._result
2193 2194
2194 2195 def run(self, test):
2195 2196 self._result.onStart(test)
2196 2197 test(self._result)
2197 2198
2198 2199 failed = len(self._result.failures)
2199 2200 skipped = len(self._result.skipped)
2200 2201 ignored = len(self._result.ignored)
2201 2202
2202 2203 with iolock:
2203 2204 self.stream.writeln('')
2204 2205
2205 2206 if not self._runner.options.noskips:
2206 2207 for test, msg in self._result.skipped:
2207 2208 formatted = 'Skipped %s: %s\n' % (test.name, msg)
2208 2209 msg = highlightmsg(formatted, self._result.color)
2209 2210 self.stream.write(msg)
2210 2211 for test, msg in self._result.failures:
2211 2212 formatted = 'Failed %s: %s\n' % (test.name, msg)
2212 2213 self.stream.write(highlightmsg(formatted, self._result.color))
2213 2214 for test, msg in self._result.errors:
2214 2215 self.stream.writeln('Errored %s: %s' % (test.name, msg))
2215 2216
2216 2217 if self._runner.options.xunit:
2217 2218 with open(self._runner.options.xunit, "wb") as xuf:
2218 2219 self._writexunit(self._result, xuf)
2219 2220
2220 2221 if self._runner.options.json:
2221 2222 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2222 2223 with open(jsonpath, 'w') as fp:
2223 2224 self._writejson(self._result, fp)
2224 2225
2225 2226 self._runner._checkhglib('Tested')
2226 2227
2227 2228 savetimes(self._runner._outputdir, self._result)
2228 2229
2229 2230 if failed and self._runner.options.known_good_rev:
2230 2231 self._bisecttests(t for t, m in self._result.failures)
2231 2232 self.stream.writeln(
2232 2233 '# Ran %d tests, %d skipped, %d failed.'
2233 2234 % (self._result.testsRun, skipped + ignored, failed))
2234 2235 if failed:
2235 2236 self.stream.writeln('python hash seed: %s' %
2236 2237 os.environ['PYTHONHASHSEED'])
2237 2238 if self._runner.options.time:
2238 2239 self.printtimes(self._result.times)
2239 2240
2240 2241 if self._runner.options.exceptions:
2241 2242 exceptions = aggregateexceptions(
2242 2243 os.path.join(self._runner._outputdir, b'exceptions'))
2243 2244
2244 2245 self.stream.writeln('Exceptions Report:')
2245 2246 self.stream.writeln('%d total from %d frames' %
2246 2247 (exceptions['total'],
2247 2248 len(exceptions['exceptioncounts'])))
2248 2249 combined = exceptions['combined']
2249 2250 for key in sorted(combined, key=combined.get, reverse=True):
2250 2251 frame, line, exc = key
2251 2252 totalcount, testcount, leastcount, leasttest = combined[key]
2252 2253
2253 2254 self.stream.writeln('%d (%d tests)\t%s: %s (%s - %d total)'
2254 2255 % (totalcount,
2255 2256 testcount,
2256 2257 frame, exc,
2257 2258 leasttest, leastcount))
2258 2259
2259 2260 self.stream.flush()
2260 2261
2261 2262 return self._result
2262 2263
2263 2264 def _bisecttests(self, tests):
2264 2265 bisectcmd = ['hg', 'bisect']
2265 2266 bisectrepo = self._runner.options.bisect_repo
2266 2267 if bisectrepo:
2267 2268 bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
2268 2269 def pread(args):
2269 2270 env = os.environ.copy()
2270 2271 env['HGPLAIN'] = '1'
2271 2272 p = subprocess.Popen(args, stderr=subprocess.STDOUT,
2272 2273 stdout=subprocess.PIPE, env=env)
2273 2274 data = p.stdout.read()
2274 2275 p.wait()
2275 2276 return data
2276 2277 for test in tests:
2277 2278             pread(bisectcmd + ['--reset'])
2278 2279 pread(bisectcmd + ['--bad', '.'])
2279 2280 pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
2280 2281 # TODO: we probably need to forward more options
2281 2282 # that alter hg's behavior inside the tests.
2282 2283 opts = ''
2283 2284 withhg = self._runner.options.with_hg
2284 2285 if withhg:
2285 2286 opts += ' --with-hg=%s ' % shellquote(_strpath(withhg))
2286 2287 rtc = '%s %s %s %s' % (sys.executable, sys.argv[0], opts,
2287 2288 test)
2288 2289 data = pread(bisectcmd + ['--command', rtc])
2289 2290 m = re.search(
2290 2291 (br'\nThe first (?P<goodbad>bad|good) revision '
2291 2292 br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
2292 2293 br'summary: +(?P<summary>[^\n]+)\n'),
2293 2294 data, (re.MULTILINE | re.DOTALL))
2294 2295 if m is None:
2295 2296 self.stream.writeln(
2296 2297 'Failed to identify failure point for %s' % test)
2297 2298 continue
2298 2299 dat = m.groupdict()
2299 2300 verb = 'broken' if dat['goodbad'] == b'bad' else 'fixed'
2300 2301 self.stream.writeln(
2301 2302 '%s %s by %s (%s)' % (
2302 2303 test, verb, dat['node'].decode('ascii'),
2303 2304 dat['summary'].decode('utf8', 'ignore')))
2304 2305
2305 2306 def printtimes(self, times):
2306 2307 # iolock held by run
2307 2308 self.stream.writeln('# Producing time report')
2308 2309 times.sort(key=lambda t: (t[3]))
2309 2310 cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
2310 2311 self.stream.writeln('%-7s %-7s %-7s %-7s %-7s %s' %
2311 2312 ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
2312 2313 for tdata in times:
2313 2314 test = tdata[0]
2314 2315 cuser, csys, real, start, end = tdata[1:6]
2315 2316 self.stream.writeln(cols % (start, end, cuser, csys, real, test))
2316 2317
2317 2318 @staticmethod
2318 2319 def _writexunit(result, outf):
2319 2320 # See http://llg.cubic.org/docs/junit/ for a reference.
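        # The emitted document looks roughly like (illustrative):
        #   <testsuite name="run-tests" tests="12" errors="0" ...>
        #     <testcase name="test-foo.t" time="1.234"/>
        #     <testcase name="test-bar.t"><failure ...>diff as CDATA
        #     </failure></testcase>
        #   </testsuite>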
2320 2321 timesd = dict((t[0], t[3]) for t in result.times)
2321 2322 doc = minidom.Document()
2322 2323 s = doc.createElement('testsuite')
2323 2324 s.setAttribute('name', 'run-tests')
2324 2325 s.setAttribute('tests', str(result.testsRun))
2325 2326 s.setAttribute('errors', "0") # TODO
2326 2327 s.setAttribute('failures', str(len(result.failures)))
2327 2328 s.setAttribute('skipped', str(len(result.skipped) +
2328 2329 len(result.ignored)))
2329 2330 doc.appendChild(s)
2330 2331 for tc in result.successes:
2331 2332 t = doc.createElement('testcase')
2332 2333 t.setAttribute('name', tc.name)
2333 2334 tctime = timesd.get(tc.name)
2334 2335 if tctime is not None:
2335 2336 t.setAttribute('time', '%.3f' % tctime)
2336 2337 s.appendChild(t)
2337 2338 for tc, err in sorted(result.faildata.items()):
2338 2339 t = doc.createElement('testcase')
2339 2340 t.setAttribute('name', tc)
2340 2341 tctime = timesd.get(tc)
2341 2342 if tctime is not None:
2342 2343 t.setAttribute('time', '%.3f' % tctime)
2343 2344             # createCDATASection expects a unicode string or it will
2344 2345             # convert using default conversion rules, which will
2345 2346             # fail if the string isn't ASCII.
2346 2347 err = cdatasafe(err).decode('utf-8', 'replace')
2347 2348 cd = doc.createCDATASection(err)
2348 2349 # Use 'failure' here instead of 'error' to match errors = 0,
2349 2350 # failures = len(result.failures) in the testsuite element.
2350 2351 failelem = doc.createElement('failure')
2351 2352 failelem.setAttribute('message', 'output changed')
2352 2353 failelem.setAttribute('type', 'output-mismatch')
2353 2354 failelem.appendChild(cd)
2354 2355 t.appendChild(failelem)
2355 2356 s.appendChild(t)
2356 2357 for tc, message in result.skipped:
2357 2358 # According to the schema, 'skipped' has no attributes. So store
2358 2359 # the skip message as a text node instead.
2359 2360 t = doc.createElement('testcase')
2360 2361 t.setAttribute('name', tc.name)
2361 2362 binmessage = message.encode('utf-8')
2362 2363 message = cdatasafe(binmessage).decode('utf-8', 'replace')
2363 2364 cd = doc.createCDATASection(message)
2364 2365 skipelem = doc.createElement('skipped')
2365 2366 skipelem.appendChild(cd)
2366 2367 t.appendChild(skipelem)
2367 2368 s.appendChild(t)
2368 2369 outf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
2369 2370
2370 2371 @staticmethod
2371 2372 def _writejson(result, outf):
2372 2373 timesd = {}
2373 2374 for tdata in result.times:
2374 2375 test = tdata[0]
2375 2376 timesd[test] = tdata[1:]
2376 2377
2377 2378 outcome = {}
2378 2379 groups = [('success', ((tc, None)
2379 2380 for tc in result.successes)),
2380 2381 ('failure', result.failures),
2381 2382 ('skip', result.skipped)]
2382 2383 for res, testcases in groups:
2383 2384 for tc, __ in testcases:
2384 2385 if tc.name in timesd:
2385 2386 diff = result.faildata.get(tc.name, b'')
2386 2387 try:
2387 2388 diff = diff.decode('unicode_escape')
2388 2389 except UnicodeDecodeError as e:
2389 2390 diff = '%r decoding diff, sorry' % e
2390 2391 tres = {'result': res,
2391 2392 'time': ('%0.3f' % timesd[tc.name][2]),
2392 2393 'cuser': ('%0.3f' % timesd[tc.name][0]),
2393 2394 'csys': ('%0.3f' % timesd[tc.name][1]),
2394 2395 'start': ('%0.3f' % timesd[tc.name][3]),
2395 2396 'end': ('%0.3f' % timesd[tc.name][4]),
2396 2397 'diff': diff,
2397 2398 }
2398 2399 else:
2399 2400 # blacklisted test
2400 2401 tres = {'result': res}
2401 2402
2402 2403 outcome[tc.name] = tres
2403 2404 jsonout = json.dumps(outcome, sort_keys=True, indent=4,
2404 2405 separators=(',', ': '))
2405 2406 outf.writelines(("testreport =", jsonout))
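        # The result is a JavaScript assignment ("testreport = {...}")
        # rather than bare JSON, presumably so an HTML report page can
        # source it directly.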
2406 2407
2407 2408 def sorttests(testdescs, previoustimes, shuffle=False):
2408 2409 """Do an in-place sort of tests."""
2409 2410 if shuffle:
2410 2411 random.shuffle(testdescs)
2411 2412 return
2412 2413
2413 2414 if previoustimes:
2414 2415 def sortkey(f):
2415 2416 f = f['path']
2416 2417 if f in previoustimes:
2417 2418 # Use most recent time as estimate
2418 2419 return -previoustimes[f][-1]
2419 2420 else:
2420 2421 # Default to a rather arbitrary value of 1 second for new tests
2421 2422 return -1.0
2422 2423 else:
2423 2424 # keywords for slow tests
2424 2425 slow = {b'svn': 10,
2425 2426 b'cvs': 10,
2426 2427 b'hghave': 10,
2427 2428 b'largefiles-update': 10,
2428 2429 b'run-tests': 10,
2429 2430 b'corruption': 10,
2430 2431 b'race': 10,
2431 2432 b'i18n': 10,
2432 2433 b'check': 100,
2433 2434 b'gendoc': 100,
2434 2435 b'contrib-perf': 200,
2435 2436 }
2436 2437 perf = {}
2437 2438
2438 2439 def sortkey(f):
2439 2440 # run largest tests first, as they tend to take the longest
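            # Illustrative arithmetic: a 4096-byte test whose name
            # contains b'check' gets key -4096 * 100 / 1000.0 = -409.6,
            # sorting it ahead of smaller, unweighted tests.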
2440 2441 f = f['path']
2441 2442 try:
2442 2443 return perf[f]
2443 2444 except KeyError:
2444 2445 try:
2445 2446 val = -os.stat(f).st_size
2446 2447 except OSError as e:
2447 2448 if e.errno != errno.ENOENT:
2448 2449 raise
2449 2450 perf[f] = -1e9 # file does not exist, tell early
2450 2451 return -1e9
2451 2452 for kw, mul in slow.items():
2452 2453 if kw in f:
2453 2454 val *= mul
2454 2455 if f.endswith(b'.py'):
2455 2456 val /= 10.0
2456 2457 perf[f] = val / 1000.0
2457 2458 return perf[f]
2458 2459
2459 2460 testdescs.sort(key=sortkey)
2460 2461
2461 2462 class TestRunner(object):
2462 2463 """Holds context for executing tests.
2463 2464
2464 2465 Tests rely on a lot of state. This object holds it for them.
2465 2466 """
2466 2467
2467 2468 # Programs required to run tests.
2468 2469 REQUIREDTOOLS = [
2469 2470 b'diff',
2470 2471 b'grep',
2471 2472 b'unzip',
2472 2473 b'gunzip',
2473 2474 b'bunzip2',
2474 2475 b'sed',
2475 2476 ]
2476 2477
2477 2478 # Maps file extensions to test class.
2478 2479 TESTTYPES = [
2479 2480 (b'.py', PythonTest),
2480 2481 (b'.t', TTest),
2481 2482 ]
2482 2483
2483 2484 def __init__(self):
2484 2485 self.options = None
2485 2486 self._hgroot = None
2486 2487 self._testdir = None
2487 2488 self._outputdir = None
2488 2489 self._hgtmp = None
2489 2490 self._installdir = None
2490 2491 self._bindir = None
2491 2492         self._tmpbindir = None
2492 2493 self._pythondir = None
2493 2494 self._coveragefile = None
2494 2495 self._createdfiles = []
2495 2496 self._hgcommand = None
2496 2497 self._hgpath = None
2497 2498 self._portoffset = 0
2498 2499 self._ports = {}
2499 2500
2500 2501 def run(self, args, parser=None):
2501 2502 """Run the test suite."""
2502 2503 oldmask = os.umask(0o22)
2503 2504 try:
2504 2505 parser = parser or getparser()
2505 2506 options = parseargs(args, parser)
2506 2507 tests = [_bytespath(a) for a in options.tests]
2507 2508 if options.test_list is not None:
2508 2509 for listfile in options.test_list:
2509 2510 with open(listfile, 'rb') as f:
2510 2511 tests.extend(t for t in f.read().splitlines() if t)
2511 2512 self.options = options
2512 2513
2513 2514 self._checktools()
2514 2515 testdescs = self.findtests(tests)
2515 2516 if options.profile_runner:
2516 2517 import statprof
2517 2518 statprof.start()
2518 2519 result = self._run(testdescs)
2519 2520 if options.profile_runner:
2520 2521 statprof.stop()
2521 2522 statprof.display()
2522 2523 return result
2523 2524
2524 2525 finally:
2525 2526 os.umask(oldmask)
2526 2527
2527 2528 def _run(self, testdescs):
2528 2529 self._testdir = osenvironb[b'TESTDIR'] = getcwdb()
2529 2530 # assume all tests in same folder for now
2530 2531 if testdescs:
2531 2532 pathname = os.path.dirname(testdescs[0]['path'])
2532 2533 if pathname:
2533 2534 osenvironb[b'TESTDIR'] = os.path.join(osenvironb[b'TESTDIR'],
2534 2535 pathname)
2535 2536 if self.options.outputdir:
2536 2537 self._outputdir = canonpath(_bytespath(self.options.outputdir))
2537 2538 else:
2538 2539 self._outputdir = self._testdir
2539 2540 if testdescs and pathname:
2540 2541 self._outputdir = os.path.join(self._outputdir, pathname)
2541 2542 previoustimes = {}
2542 2543 if self.options.order_by_runtime:
2543 2544 previoustimes = dict(loadtimes(self._outputdir))
2544 2545 sorttests(testdescs, previoustimes, shuffle=self.options.random)
2545 2546
2546 2547 if 'PYTHONHASHSEED' not in os.environ:
2547 2548             # use a random python hash seed all the time;
2548 2549             # we do the randomness ourselves so we know what seed is used
2549 2550 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
2550 2551
2551 2552 if self.options.tmpdir:
2552 2553 self.options.keep_tmpdir = True
2553 2554 tmpdir = _bytespath(self.options.tmpdir)
2554 2555 if os.path.exists(tmpdir):
2555 2556 # Meaning of tmpdir has changed since 1.3: we used to create
2556 2557 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
2557 2558 # tmpdir already exists.
2558 2559 print("error: temp dir %r already exists" % tmpdir)
2559 2560 return 1
2560 2561
2561 2562 os.makedirs(tmpdir)
2562 2563 else:
2563 2564 d = None
2564 2565 if os.name == 'nt':
2565 2566 # without this, we get the default temp dir location, but
2566 2567             # in all lowercase, which causes trouble with paths (issue3490)
2567 2568 d = osenvironb.get(b'TMP', None)
2568 2569 tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)
2569 2570
2570 2571 self._hgtmp = osenvironb[b'HGTMP'] = (
2571 2572 os.path.realpath(tmpdir))
2572 2573
2573 2574 if self.options.with_hg:
2574 2575 self._installdir = None
2575 2576 whg = self.options.with_hg
2576 2577 self._bindir = os.path.dirname(os.path.realpath(whg))
2577 2578 assert isinstance(self._bindir, bytes)
2578 2579 self._hgcommand = os.path.basename(whg)
2579 2580 self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
2580 2581 os.makedirs(self._tmpbindir)
2581 2582
2582 2583 normbin = os.path.normpath(os.path.abspath(whg))
2583 2584 normbin = normbin.replace(os.sep.encode('ascii'), b'/')
2584 2585
2585 2586 # Other Python scripts in the test harness need to
2586 2587 # `import mercurial`. If `hg` is a Python script, we assume
2587 2588 # the Mercurial modules are relative to its path and tell the tests
2588 2589 # to load Python modules from its directory.
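            # (e.g. an `hg` script whose first line is
            # "#!/usr/bin/env python" matches the b'#!.*python' test below)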
2589 2590 with open(whg, 'rb') as fh:
2590 2591 initial = fh.read(1024)
2591 2592
2592 2593 if re.match(b'#!.*python', initial):
2593 2594 self._pythondir = self._bindir
2594 2595 # If it looks like our in-repo Rust binary, use the source root.
2595 2596 # This is a bit hacky. But rhg is still not supported outside the
2596 2597 # source directory. So until it is, do the simple thing.
2597 2598 elif re.search(b'/rust/target/[^/]+/hg', normbin):
2598 2599 self._pythondir = os.path.dirname(self._testdir)
2599 2600 # Fall back to the legacy behavior.
2600 2601 else:
2601 2602 self._pythondir = self._bindir
2602 2603
2603 2604 else:
2604 2605 self._installdir = os.path.join(self._hgtmp, b"install")
2605 2606 self._bindir = os.path.join(self._installdir, b"bin")
2606 2607 self._hgcommand = b'hg'
2607 2608 self._tmpbindir = self._bindir
2608 2609 self._pythondir = os.path.join(self._installdir, b"lib", b"python")
2609 2610
2610 2611 # set CHGHG, then replace "hg" command by "chg"
2611 2612 chgbindir = self._bindir
2612 2613 if self.options.chg or self.options.with_chg:
2613 2614 osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
2614 2615 else:
2615 2616 osenvironb.pop(b'CHGHG', None) # drop flag for hghave
2616 2617 if self.options.chg:
2617 2618 self._hgcommand = b'chg'
2618 2619 elif self.options.with_chg:
2619 2620 chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
2620 2621 self._hgcommand = os.path.basename(self.options.with_chg)
2621 2622
2622 2623 osenvironb[b"BINDIR"] = self._bindir
2623 2624 osenvironb[b"PYTHON"] = PYTHON
2624 2625
2625 2626 fileb = _bytespath(__file__)
2626 2627 runtestdir = os.path.abspath(os.path.dirname(fileb))
2627 2628 osenvironb[b'RUNTESTDIR'] = runtestdir
2628 2629 if PYTHON3:
2629 2630 sepb = _bytespath(os.pathsep)
2630 2631 else:
2631 2632 sepb = os.pathsep
2632 2633 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
2633 2634 if os.path.islink(__file__):
2634 2635 # test helper will likely be at the end of the symlink
2635 2636 realfile = os.path.realpath(fileb)
2636 2637 realdir = os.path.abspath(os.path.dirname(realfile))
2637 2638 path.insert(2, realdir)
2638 2639 if chgbindir != self._bindir:
2639 2640 path.insert(1, chgbindir)
2640 2641 if self._testdir != runtestdir:
2641 2642 path = [self._testdir] + path
2642 2643 if self._tmpbindir != self._bindir:
2643 2644 path = [self._tmpbindir] + path
2644 2645 osenvironb[b"PATH"] = sepb.join(path)
2645 2646
2646 2647 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
2647 2648 # can run .../tests/run-tests.py test-foo where test-foo
2648 2649         # adds an extension to HGRC. Also include the run-tests.py directory to
2649 2650 # import modules like heredoctest.
2650 2651 pypath = [self._pythondir, self._testdir, runtestdir]
2651 2652 # We have to augment PYTHONPATH, rather than simply replacing
2652 2653 # it, in case external libraries are only available via current
2653 2654 # PYTHONPATH. (In particular, the Subversion bindings on OS X
2654 2655 # are in /opt/subversion.)
2655 2656 oldpypath = osenvironb.get(IMPL_PATH)
2656 2657 if oldpypath:
2657 2658 pypath.append(oldpypath)
2658 2659 osenvironb[IMPL_PATH] = sepb.join(pypath)
2659 2660
2660 2661 if self.options.pure:
2661 2662 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
2662 2663 os.environ["HGMODULEPOLICY"] = "py"
2663 2664
2664 2665 if self.options.allow_slow_tests:
2665 2666 os.environ["HGTEST_SLOW"] = "slow"
2666 2667 elif 'HGTEST_SLOW' in os.environ:
2667 2668 del os.environ['HGTEST_SLOW']
2668 2669
2669 2670 self._coveragefile = os.path.join(self._testdir, b'.coverage')
2670 2671
2671 2672 if self.options.exceptions:
2672 2673 exceptionsdir = os.path.join(self._outputdir, b'exceptions')
2673 2674 try:
2674 2675 os.makedirs(exceptionsdir)
2675 2676 except OSError as e:
2676 2677 if e.errno != errno.EEXIST:
2677 2678 raise
2678 2679
2679 2680 # Remove all existing exception reports.
2680 2681 for f in os.listdir(exceptionsdir):
2681 2682 os.unlink(os.path.join(exceptionsdir, f))
2682 2683
2683 2684 osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir
2684 2685 logexceptions = os.path.join(self._testdir, b'logexceptions.py')
2685 2686 self.options.extra_config_opt.append(
2686 2687 'extensions.logexceptions=%s' % logexceptions.decode('utf-8'))
2687 2688
2688 2689 vlog("# Using TESTDIR", self._testdir)
2689 2690 vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
2690 2691 vlog("# Using HGTMP", self._hgtmp)
2691 2692 vlog("# Using PATH", os.environ["PATH"])
2692 2693 vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
2693 2694 vlog("# Writing to directory", self._outputdir)
2694 2695
2695 2696 try:
2696 2697 return self._runtests(testdescs) or 0
2697 2698 finally:
2698 2699 time.sleep(.1)
2699 2700 self._cleanup()
2700 2701
2701 2702 def findtests(self, args):
2702 2703 """Finds possible test files from arguments.
2703 2704
2704 2705 If you wish to inject custom tests into the test harness, this would
2705 2706 be a good function to monkeypatch or override in a derived class.
2706 2707 """
2707 2708 if not args:
2708 2709 if self.options.changed:
2709 2710 proc = Popen4('hg st --rev "%s" -man0 .' %
2710 2711 self.options.changed, None, 0)
2711 2712 stdout, stderr = proc.communicate()
2712 2713 args = stdout.strip(b'\0').split(b'\0')
2713 2714 else:
2714 2715 args = os.listdir(b'.')
2715 2716
2716 2717 expanded_args = []
2717 2718 for arg in args:
2718 2719 if os.path.isdir(arg):
2719 2720 if not arg.endswith(b'/'):
2720 2721 arg += b'/'
2721 2722 expanded_args.extend([arg + a for a in os.listdir(arg)])
2722 2723 else:
2723 2724 expanded_args.append(arg)
2724 2725 args = expanded_args
2725 2726
2726 2727 testcasepattern = re.compile(br'([\w-]+\.t|py)(#([a-zA-Z0-9_\-\.#]+))')
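        # Matches e.g. b'test-foo.t#casea#caseb', separating the test
        # path from its '#'-joined case names.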
2727 2728 tests = []
2728 2729 for t in args:
2729 2730 case = []
2730 2731
2731 2732 if not (os.path.basename(t).startswith(b'test-')
2732 2733 and (t.endswith(b'.py') or t.endswith(b'.t'))):
2733 2734
2734 2735 m = testcasepattern.match(t)
2735 2736 if m is not None:
2736 2737 t, _, casestr = m.groups()
2737 2738 if casestr:
2738 2739 case = casestr.split(b'#')
2739 2740 else:
2740 2741 continue
2741 2742
2742 2743 if t.endswith(b'.t'):
2743 2744 # .t file may contain multiple test cases
2744 2745 casedimensions = parsettestcases(t)
2745 2746 if casedimensions:
2746 2747 cases = []
2747 2748 def addcases(case, casedimensions):
2748 2749 if not casedimensions:
2749 2750 cases.append(case)
2750 2751 else:
2751 2752 for c in casedimensions[0]:
2752 2753 addcases(case + [c], casedimensions[1:])
2753 2754 addcases([], casedimensions)
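                    # e.g. dimensions [[b'a', b'b'], [b'x', b'y']]
                    # expand to cases [a, x], [a, y], [b, x], [b, y]
                    # (illustrative)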
2754 2755 if case and case in cases:
2755 2756 cases = [case]
2756 2757 elif case:
2757 2758 # Ignore invalid cases
2758 2759 cases = []
2759 2760 else:
2760 2761 pass
2761 2762 tests += [{'path': t, 'case': c} for c in sorted(cases)]
2762 2763 else:
2763 2764 tests.append({'path': t})
2764 2765 else:
2765 2766 tests.append({'path': t})
2766 2767 return tests
2767 2768
2768 2769 def _runtests(self, testdescs):
2769 2770 def _reloadtest(test, i):
2770 2771 # convert a test back to its description dict
2771 2772 desc = {'path': test.path}
2772 2773 case = getattr(test, '_case', [])
2773 2774 if case:
2774 2775 desc['case'] = case
2775 2776 return self._gettest(desc, i)
2776 2777
2777 2778 try:
2778 2779 if self.options.restart:
2779 2780 orig = list(testdescs)
2780 2781 while testdescs:
2781 2782 desc = testdescs[0]
2782 2783 # desc['path'] is a relative path
2783 2784 if 'case' in desc:
2784 2785 casestr = b'#'.join(desc['case'])
2785 2786 errpath = b'%s#%s.err' % (desc['path'], casestr)
2786 2787 else:
2787 2788 errpath = b'%s.err' % desc['path']
2788 2789 errpath = os.path.join(self._outputdir, errpath)
2789 2790 if os.path.exists(errpath):
2790 2791 break
2791 2792 testdescs.pop(0)
2792 2793 if not testdescs:
2793 2794 print("running all tests")
2794 2795 testdescs = orig
2795 2796
2796 2797 tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]
2797 2798
2798 2799 jobs = min(len(tests), self.options.jobs)
2799 2800
2800 2801 failed = False
2801 2802 kws = self.options.keywords
2802 2803 if kws is not None and PYTHON3:
2803 2804 kws = kws.encode('utf-8')
2804 2805
2805 2806 suite = TestSuite(self._testdir,
2806 2807 jobs=jobs,
2807 2808 whitelist=self.options.whitelisted,
2808 2809 blacklist=self.options.blacklist,
2809 2810 retest=self.options.retest,
2810 2811 keywords=kws,
2811 2812 loop=self.options.loop,
2812 2813 runs_per_test=self.options.runs_per_test,
2813 2814 showchannels=self.options.showchannels,
2814 2815 tests=tests, loadtest=_reloadtest)
2815 2816 verbosity = 1
2816 2817 if self.options.list_tests:
2817 2818 verbosity = 0
2818 2819 elif self.options.verbose:
2819 2820 verbosity = 2
2820 2821 runner = TextTestRunner(self, verbosity=verbosity)
2821 2822
2822 2823 if self.options.list_tests:
2823 2824 result = runner.listtests(suite)
2824 2825 else:
2825 2826 if self._installdir:
2826 2827 self._installhg()
2827 2828 self._checkhglib("Testing")
2828 2829 else:
2829 2830 self._usecorrectpython()
2830 2831 if self.options.chg:
2831 2832 assert self._installdir
2832 2833 self._installchg()
2833 2834
2834 2835 log('running %d tests using %d parallel processes' % (
2835 2836 len(tests), jobs))
2836 2837
2837 2838 result = runner.run(suite)
2838 2839
2839 2840 if result.failures:
2840 2841 failed = True
2841 2842
2842 2843 result.onEnd()
2843 2844
2844 2845 if self.options.anycoverage:
2845 2846 self._outputcoverage()
2846 2847 except KeyboardInterrupt:
2847 2848 failed = True
2848 2849 print("\ninterrupted!")
2849 2850
2850 2851 if failed:
2851 2852 return 1
2852 2853
2853 2854 def _getport(self, count):
2854 2855 port = self._ports.get(count) # do we have a cached entry?
2855 2856 if port is None:
2856 2857 portneeded = 3
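            # Each test is handed a block of 3 consecutive ports; the .t
            # harness exposes them to tests as $HGPORT, $HGPORT1 and
            # $HGPORT2.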
2857 2858             # after 100 tries we just give up and let the test report failure
2858 2859 for tries in xrange(100):
2859 2860 allfree = True
2860 2861 port = self.options.port + self._portoffset
2861 2862 for idx in xrange(portneeded):
2862 2863 if not checkportisavailable(port + idx):
2863 2864 allfree = False
2864 2865 break
2865 2866 self._portoffset += portneeded
2866 2867 if allfree:
2867 2868 break
2868 2869 self._ports[count] = port
2869 2870 return port
2870 2871
2871 2872 def _gettest(self, testdesc, count):
2872 2873 """Obtain a Test by looking at its filename.
2873 2874
2874 2875 Returns a Test instance. The Test may not be runnable if it doesn't
2875 2876 map to a known type.
2876 2877 """
2877 2878 path = testdesc['path']
2878 2879 lctest = path.lower()
2879 2880 testcls = Test
2880 2881
2881 2882 for ext, cls in self.TESTTYPES:
2882 2883 if lctest.endswith(ext):
2883 2884 testcls = cls
2884 2885 break
2885 2886
2886 2887 refpath = os.path.join(self._testdir, path)
2887 2888 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
2888 2889
2889 2890 # extra keyword parameters. 'case' is used by .t tests
2890 2891 kwds = dict((k, testdesc[k]) for k in ['case'] if k in testdesc)
2891 2892
2892 2893 t = testcls(refpath, self._outputdir, tmpdir,
2893 2894 keeptmpdir=self.options.keep_tmpdir,
2894 2895 debug=self.options.debug,
2895 2896 first=self.options.first,
2896 2897 timeout=self.options.timeout,
2897 2898 startport=self._getport(count),
2898 2899 extraconfigopts=self.options.extra_config_opt,
2899 2900 py3kwarnings=self.options.py3k_warnings,
2900 2901 shell=self.options.shell,
2901 2902 hgcommand=self._hgcommand,
2902 2903 usechg=bool(self.options.with_chg or self.options.chg),
2903 2904 useipv6=useipv6, **kwds)
2904 2905 t.should_reload = True
2905 2906 return t
2906 2907
2907 2908 def _cleanup(self):
2908 2909 """Clean up state from this test invocation."""
2909 2910 if self.options.keep_tmpdir:
2910 2911 return
2911 2912
2912 2913 vlog("# Cleaning up HGTMP", self._hgtmp)
2913 2914 shutil.rmtree(self._hgtmp, True)
2914 2915 for f in self._createdfiles:
2915 2916 try:
2916 2917 os.remove(f)
2917 2918 except OSError:
2918 2919 pass
2919 2920
2920 2921 def _usecorrectpython(self):
2921 2922 """Configure the environment to use the appropriate Python in tests."""
2922 2923 # Tests must use the same interpreter as us or bad things will happen.
2923 2924 pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
2924 2925
2925 2926         # os.symlink() exists on Windows under Python 3, but it requires
2926 2927 # Administrator rights.
2927 2928 if getattr(os, 'symlink', None) and os.name != 'nt':
2928 2929 vlog("# Making python executable in test path a symlink to '%s'" %
2929 2930 sys.executable)
2930 2931 mypython = os.path.join(self._tmpbindir, pyexename)
2931 2932 try:
2932 2933 if os.readlink(mypython) == sys.executable:
2933 2934 return
2934 2935 os.unlink(mypython)
2935 2936 except OSError as err:
2936 2937 if err.errno != errno.ENOENT:
2937 2938 raise
2938 2939 if self._findprogram(pyexename) != sys.executable:
2939 2940 try:
2940 2941 os.symlink(sys.executable, mypython)
2941 2942 self._createdfiles.append(mypython)
2942 2943 except OSError as err:
2943 2944 # child processes may race, which is harmless
2944 2945 if err.errno != errno.EEXIST:
2945 2946 raise
2946 2947 else:
2947 2948 exedir, exename = os.path.split(sys.executable)
2948 2949 vlog("# Modifying search path to find %s as %s in '%s'" %
2949 2950 (exename, pyexename, exedir))
2950 2951 path = os.environ['PATH'].split(os.pathsep)
2951 2952 while exedir in path:
2952 2953 path.remove(exedir)
2953 2954 os.environ['PATH'] = os.pathsep.join([exedir] + path)
2954 2955 if not self._findprogram(pyexename):
2955 2956 print("WARNING: Cannot find %s in search path" % pyexename)
2956 2957
2957 2958 def _installhg(self):
2958 2959 """Install hg into the test environment.
2959 2960
2960 2961 This will also configure hg with the appropriate testing settings.
2961 2962 """
2962 2963 vlog("# Performing temporary installation of HG")
2963 2964 installerrs = os.path.join(self._hgtmp, b"install.err")
2964 2965 compiler = ''
2965 2966 if self.options.compiler:
2966 2967 compiler = '--compiler ' + self.options.compiler
2967 2968 if self.options.pure:
2968 2969 pure = b"--pure"
2969 2970 else:
2970 2971 pure = b""
2971 2972
2972 2973 # Run installer in hg root
2973 2974 script = os.path.realpath(sys.argv[0])
2974 2975 exe = sys.executable
2975 2976 if PYTHON3:
2976 2977 compiler = _bytespath(compiler)
2977 2978 script = _bytespath(script)
2978 2979 exe = _bytespath(exe)
2979 2980 hgroot = os.path.dirname(os.path.dirname(script))
2980 2981 self._hgroot = hgroot
2981 2982 os.chdir(hgroot)
2982 2983 nohome = b'--home=""'
2983 2984 if os.name == 'nt':
2984 2985 # The --home="" trick works only on OS where os.sep == '/'
2985 2986             # The --home="" trick works only on OSes where os.sep == '/'
2986 2987 # least on Windows for now, deal with .pydistutils.cfg bugs
2987 2988 # when they happen.
2988 2989 nohome = b''
2989 2990 cmd = (b'%(exe)s setup.py %(pure)s clean --all'
2990 2991 b' build %(compiler)s --build-base="%(base)s"'
2991 2992 b' install --force --prefix="%(prefix)s"'
2992 2993 b' --install-lib="%(libdir)s"'
2993 2994 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
2994 2995 % {b'exe': exe, b'pure': pure,
2995 2996 b'compiler': compiler,
2996 2997 b'base': os.path.join(self._hgtmp, b"build"),
2997 2998 b'prefix': self._installdir, b'libdir': self._pythondir,
2998 2999 b'bindir': self._bindir,
2999 3000 b'nohome': nohome, b'logfile': installerrs})
3000 3001
3001 3002 # setuptools requires install directories to exist.
3002 3003 def makedirs(p):
3003 3004 try:
3004 3005 os.makedirs(p)
3005 3006 except OSError as e:
3006 3007 if e.errno != errno.EEXIST:
3007 3008 raise
3008 3009 makedirs(self._pythondir)
3009 3010 makedirs(self._bindir)
3010 3011
3011 3012 vlog("# Running", cmd)
3012 3013 if os.system(_strpath(cmd)) == 0:
3013 3014 if not self.options.verbose:
3014 3015 try:
3015 3016 os.remove(installerrs)
3016 3017 except OSError as e:
3017 3018 if e.errno != errno.ENOENT:
3018 3019 raise
3019 3020 else:
3020 3021 with open(installerrs, 'rb') as f:
3021 3022 for line in f:
3022 3023 if PYTHON3:
3023 3024 sys.stdout.buffer.write(line)
3024 3025 else:
3025 3026 sys.stdout.write(line)
3026 3027 sys.exit(1)
3027 3028 os.chdir(self._testdir)
3028 3029
3029 3030 self._usecorrectpython()
3030 3031
3031 3032 if self.options.py3k_warnings and not self.options.anycoverage:
3032 3033 vlog("# Updating hg command to enable Py3k Warnings switch")
3033 3034 with open(os.path.join(self._bindir, 'hg'), 'rb') as f:
3034 3035 lines = [line.rstrip() for line in f]
3035 3036 lines[0] += ' -3'
3036 3037 with open(os.path.join(self._bindir, 'hg'), 'wb') as f:
3037 3038 for line in lines:
3038 3039 f.write(line + '\n')
3039 3040
3040 3041 hgbat = os.path.join(self._bindir, b'hg.bat')
3041 3042 if os.path.isfile(hgbat):
3042 3043             # hg.bat expects to be put in bin/scripts, while the run-tests.py
3043 3044             # installation layout puts it in bin/ directly. Fix it
3044 3045 with open(hgbat, 'rb') as f:
3045 3046 data = f.read()
3046 3047 if b'"%~dp0..\python" "%~dp0hg" %*' in data:
3047 3048 data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
3048 3049 b'"%~dp0python" "%~dp0hg" %*')
3049 3050 with open(hgbat, 'wb') as f:
3050 3051 f.write(data)
3051 3052 else:
3052 3053 print('WARNING: cannot fix hg.bat reference to python.exe')
3053 3054
3054 3055 if self.options.anycoverage:
3055 3056 custom = os.path.join(self._testdir, 'sitecustomize.py')
3056 3057 target = os.path.join(self._pythondir, 'sitecustomize.py')
3057 3058 vlog('# Installing coverage trigger to %s' % target)
3058 3059 shutil.copyfile(custom, target)
3059 3060 rc = os.path.join(self._testdir, '.coveragerc')
3060 3061 vlog('# Installing coverage rc to %s' % rc)
3061 3062 os.environ['COVERAGE_PROCESS_START'] = rc
3062 3063 covdir = os.path.join(self._installdir, '..', 'coverage')
3063 3064 try:
3064 3065 os.mkdir(covdir)
3065 3066 except OSError as e:
3066 3067 if e.errno != errno.EEXIST:
3067 3068 raise
3068 3069
3069 3070 os.environ['COVERAGE_DIR'] = covdir
3070 3071
3071 3072 def _checkhglib(self, verb):
3072 3073 """Ensure that the 'mercurial' package imported by python is
3073 3074 the one we expect it to be. If not, print a warning to stderr."""
3074 3075 if ((self._bindir == self._pythondir) and
3075 3076 (self._bindir != self._tmpbindir)):
3076 3077 # The pythondir has been inferred from --with-hg flag.
3077 3078 # We cannot expect anything sensible here.
3078 3079 return
3079 3080 expecthg = os.path.join(self._pythondir, b'mercurial')
3080 3081 actualhg = self._gethgpath()
3081 3082 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
3082 3083 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
3083 3084 ' (expected %s)\n'
3084 3085 % (verb, actualhg, expecthg))
3085 3086 def _gethgpath(self):
3086 3087 """Return the path to the mercurial package that is actually found by
3087 3088 the current Python interpreter."""
3088 3089 if self._hgpath is not None:
3089 3090 return self._hgpath
3090 3091
3091 3092 cmd = b'%s -c "import mercurial; print (mercurial.__path__[0])"'
3092 3093 cmd = cmd % PYTHON
3093 3094 if PYTHON3:
3094 3095 cmd = _strpath(cmd)
3095 3096 pipe = os.popen(cmd)
3096 3097 try:
3097 3098 self._hgpath = _bytespath(pipe.read().strip())
3098 3099 finally:
3099 3100 pipe.close()
3100 3101
3101 3102 return self._hgpath
3102 3103
3103 3104 def _installchg(self):
3104 3105 """Install chg into the test environment"""
3105 3106 vlog('# Performing temporary installation of CHG')
3106 3107 assert os.path.dirname(self._bindir) == self._installdir
3107 3108 assert self._hgroot, 'must be called after _installhg()'
3108 3109 cmd = (b'"%(make)s" clean install PREFIX="%(prefix)s"'
3109 3110 % {b'make': 'make', # TODO: switch by option or environment?
3110 3111 b'prefix': self._installdir})
3111 3112 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
3112 3113 vlog("# Running", cmd)
3113 3114 proc = subprocess.Popen(cmd, shell=True, cwd=cwd,
3114 3115 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
3115 3116 stderr=subprocess.STDOUT)
3116 3117 out, _err = proc.communicate()
3117 3118 if proc.returncode != 0:
3118 3119 if PYTHON3:
3119 3120 sys.stdout.buffer.write(out)
3120 3121 else:
3121 3122 sys.stdout.write(out)
3122 3123 sys.exit(1)
3123 3124
3124 3125 def _outputcoverage(self):
3125 3126 """Produce code coverage output."""
3126 3127 import coverage
3127 3128 coverage = coverage.coverage
3128 3129
3129 3130 vlog('# Producing coverage report')
3130 3131 # chdir is the easiest way to get short, relative paths in the
3131 3132 # output.
3132 3133 os.chdir(self._hgroot)
3133 3134 covdir = os.path.join(self._installdir, '..', 'coverage')
3134 3135 cov = coverage(data_file=os.path.join(covdir, 'cov'))
3135 3136
3136 3137 # Map install directory paths back to source directory.
3137 3138 cov.config.paths['srcdir'] = ['.', self._pythondir]
3138 3139
3139 3140 cov.combine()
3140 3141
3141 3142 omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
3142 3143 cov.report(ignore_errors=True, omit=omit)
3143 3144
3144 3145 if self.options.htmlcov:
3145 3146 htmldir = os.path.join(self._outputdir, 'htmlcov')
3146 3147 cov.html_report(directory=htmldir, omit=omit)
3147 3148 if self.options.annotate:
3148 3149 adir = os.path.join(self._outputdir, 'annotated')
3149 3150 if not os.path.isdir(adir):
3150 3151 os.mkdir(adir)
3151 3152 cov.annotate(directory=adir, omit=omit)
3152 3153
3153 3154 def _findprogram(self, program):
3154 3155 """Search PATH for a executable program"""
3155 3156 dpb = _bytespath(os.defpath)
3156 3157 sepb = _bytespath(os.pathsep)
3157 3158 for p in osenvironb.get(b'PATH', dpb).split(sepb):
3158 3159 name = os.path.join(p, program)
3159 3160 if os.name == 'nt' or os.access(name, os.X_OK):
3160 3161 return name
3161 3162 return None
3162 3163
3163 3164 def _checktools(self):
3164 3165 """Ensure tools required to run tests are present."""
3165 3166 for p in self.REQUIREDTOOLS:
3166 3167 if os.name == 'nt' and not p.endswith(b'.exe'):
3167 3168 p += b'.exe'
3168 3169 found = self._findprogram(p)
3169 3170 if found:
3170 3171 vlog("# Found prerequisite", p, "at", found)
3171 3172 else:
3172 3173 print("WARNING: Did not find prerequisite tool: %s " %
3173 3174 p.decode("utf-8"))
3174 3175
3175 3176 def aggregateexceptions(path):
3176 3177 exceptioncounts = collections.Counter()
3177 3178 testsbyfailure = collections.defaultdict(set)
3178 3179 failuresbytest = collections.defaultdict(set)
3179 3180
3180 3181 for f in os.listdir(path):
3181 3182 with open(os.path.join(path, f), 'rb') as fh:
3182 3183 data = fh.read().split(b'\0')
3183 3184 if len(data) != 5:
3184 3185 continue
3185 3186
3186 3187 exc, mainframe, hgframe, hgline, testname = data
3187 3188 exc = exc.decode('utf-8')
3188 3189 mainframe = mainframe.decode('utf-8')
3189 3190 hgframe = hgframe.decode('utf-8')
3190 3191 hgline = hgline.decode('utf-8')
3191 3192 testname = testname.decode('utf-8')
3192 3193
3193 3194 key = (hgframe, hgline, exc)
3194 3195 exceptioncounts[key] += 1
3195 3196 testsbyfailure[key].add(testname)
3196 3197 failuresbytest[testname].add(key)
3197 3198
3198 3199 # Find the test with the fewest failures for each failure.
3199 3200 leastfailing = {}
3200 3201 for key, tests in testsbyfailure.items():
3201 3202 fewesttest = None
3202 3203 fewestcount = 99999999
3203 3204 for test in sorted(tests):
3204 3205 if len(failuresbytest[test]) < fewestcount:
3205 3206 fewesttest = test
3206 3207 fewestcount = len(failuresbytest[test])
3207 3208
3208 3209 leastfailing[key] = (fewestcount, fewesttest)
3209 3210
3210 3211 # Create a combined counter so we can sort by total occurrences and
3211 3212 # impacted tests.
3212 3213 combined = {}
3213 3214 for key in exceptioncounts:
3214 3215 combined[key] = (exceptioncounts[key],
3215 3216 len(testsbyfailure[key]),
3216 3217 leastfailing[key][0],
3217 3218 leastfailing[key][1])
3218 3219
3219 3220 return {
3220 3221 'exceptioncounts': exceptioncounts,
3221 3222 'total': sum(exceptioncounts.values()),
3222 3223 'combined': combined,
3223 3224 'leastfailing': leastfailing,
3224 3225 'byfailure': testsbyfailure,
3225 3226 'bytest': failuresbytest,
3226 3227 }
3227 3228
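# Illustrative usage only (not part of this script): the mapping returned by
# aggregateexceptions() above lends itself to a simple frequency report, e.g.:
#
#     agg = aggregateexceptions(path)
#     for key, (count, ntests, fewestcount, fewesttest) in sorted(
#             agg['combined'].items(), key=lambda i: i[1], reverse=True):
#         print('%d occurrences of %s (simplest reproducer: %s)'
#               % (count, key[2], fewesttest))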
3228 3229 if __name__ == '__main__':
3229 3230 runner = TestRunner()
3230 3231
3231 3232 try:
3232 3233 import msvcrt
3233 3234 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
3234 3235 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
3235 3236 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
3236 3237 except ImportError:
3237 3238 pass
3238 3239
3239 3240 sys.exit(runner.run(sys.argv[1:]))
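A note on the test changes that follow: the rt() helper below pins -j1
because, as of this commit, the default job count follows the machine's CPU
count, so unpinned runs would no longer produce deterministic "using N
parallel processes" output. Schematically, the new default presumably reduces
to:

    import multiprocessing
    defaultjobs = multiprocessing.cpu_count()  # assumed basis for the -j default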
@@ -1,1928 +1,1928 b''
1 1 This file tests the behavior of run-tests.py itself.
2 2
3 3 Avoid interference from actual test env:
4 4
5 5 $ . "$TESTDIR/helper-runtests.sh"
6 6
7 7 Smoke test with install
8 8 ============
9 9 $ "$PYTHON" $TESTDIR/run-tests.py $HGTEST_RUN_TESTS_PURE -l
10 10 running 0 tests using 0 parallel processes
11 11
12 12 # Ran 0 tests, 0 skipped, 0 failed.
13 13
14 14 Define a helper to avoid the install step
15 15 =============
16 16 $ rt()
17 17 > {
18 > "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` "$@"
18 > "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` -j1 "$@"
19 19 > }
20 20
21 21 error paths
22 22
23 23 #if symlink
24 24 $ ln -s `which true` hg
25 25 $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
26 26 warning: --with-hg should specify an hg script
27 27 running 0 tests using 0 parallel processes
28 28
29 29 # Ran 0 tests, 0 skipped, 0 failed.
30 30 $ rm hg
31 31 #endif
32 32
33 33 #if execbit
34 34 $ touch hg
35 35 $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
36 36 usage: run-tests.py [options] [tests]
37 37 run-tests.py: error: --with-hg must specify an executable hg script
38 38 [2]
39 39 $ rm hg
40 40 #endif
41 41
42 42 Features for testing optional lines
43 43 ===================================
44 44
45 45 $ cat > hghaveaddon.py <<EOF
46 46 > import hghave
47 47 > @hghave.check("custom", "custom hghave feature")
48 48 > def has_custom():
49 49 > return True
50 50 > @hghave.check("missing", "missing hghave feature")
51 51 > def has_missing():
52 52 > return False
53 53 > EOF
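The hghave.check decorator used above registers the predicate under the given
feature name; in hghave this is essentially a registry keyed by name, roughly
(a sketch, not hghave's verbatim code):

    checks = {}

    def check(name, desc):
        def register(func):
            checks[name] = (func, desc)
            return func
        return register

Features registered this way can then gate output lines, as the "(custom !)"
and "(no-custom !)" annotations do below.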
54 54
55 55 an empty test
56 56 =======================
57 57
58 58 $ touch test-empty.t
59 59 $ rt
60 60 running 1 tests using 1 parallel processes
61 61 .
62 62 # Ran 1 tests, 0 skipped, 0 failed.
63 63 $ rm test-empty.t
64 64
65 65 a successful test
66 66 =======================
67 67
68 68 $ cat > test-success.t << EOF
69 69 > $ echo babar
70 70 > babar
71 71 > $ echo xyzzy
72 72 > dont_print (?)
73 73 > nothing[42]line (re) (?)
74 74 > never*happens (glob) (?)
75 75 > more_nothing (?)
76 76 > xyzzy
77 77 > nor this (?)
78 78 > $ printf 'abc\ndef\nxyz\n'
79 79 > 123 (?)
80 80 > abc
81 81 > def (?)
82 82 > 456 (?)
83 83 > xyz
84 84 > $ printf 'zyx\nwvu\ntsr\n'
85 85 > abc (?)
86 86 > zyx (custom !)
87 87 > wvu
88 88 > no_print (no-custom !)
89 89 > tsr (no-missing !)
90 90 > missing (missing !)
91 91 > EOF
92 92
93 93 $ rt
94 94 running 1 tests using 1 parallel processes
95 95 .
96 96 # Ran 1 tests, 0 skipped, 0 failed.
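A note on the annotations exercised above: "(?)" marks an expected line as
optional, "(re)" and "(glob)" select regex and glob matching, and
"(feature !)" / "(no-feature !)" keep a line only when the hghave feature is
(or is not) present. A minimal sketch of how optional lines can be consumed
(assumed helper names, not run-tests.py's real internals):

    import fnmatch
    import re

    def linematches(expected, actual):
        # dispatch on the trailing annotation
        if expected.endswith(' (re)'):
            return re.match(expected[:-len(' (re)')] + r'\Z', actual) is not None
        if expected.endswith(' (glob)'):
            return fnmatch.fnmatch(actual, expected[:-len(' (glob)')])
        return expected == actual

    def consume(expectedlines, actuallines):
        # optional "(?)" lines are skipped when the real output has moved on
        out = iter(actuallines)
        cur = next(out, None)
        for exp in expectedlines:
            optional = exp.endswith(' (?)')
            if optional:
                exp = exp[:-len(' (?)')]
            if cur is not None and linematches(exp, cur):
                cur = next(out, None)
            elif not optional:
                return False
        return cur is None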
97 97
98 98 failing test
99 99 ==================
100 100
101 101 test churn with globs
102 102 $ cat > test-failure.t <<EOF
103 103 > $ echo "bar-baz"; echo "bar-bad"; echo foo
104 104 > bar*bad (glob)
105 105 > bar*baz (glob)
106 106 > | fo (re)
107 107 > EOF
108 108 $ rt test-failure.t
109 109 running 1 tests using 1 parallel processes
110 110
111 111 --- $TESTTMP/test-failure.t
112 112 +++ $TESTTMP/test-failure.t.err
113 113 @@ -1,4 +1,4 @@
114 114 $ echo "bar-baz"; echo "bar-bad"; echo foo
115 115 + bar*baz (glob)
116 116 bar*bad (glob)
117 117 - bar*baz (glob)
118 118 - | fo (re)
119 119 + foo
120 120
121 121 ERROR: test-failure.t output changed
122 122 !
123 123 Failed test-failure.t: output changed
124 124 # Ran 1 tests, 0 skipped, 1 failed.
125 125 python hash seed: * (glob)
126 126 [1]
127 127
128 128 test how multiple globs gets matched with lines in output
129 129 $ cat > test-failure-globs.t <<EOF
130 130 > $ echo "context"; echo "context"; \
131 131 > echo "key: 1"; echo "value: not a"; \
132 132 > echo "key: 2"; echo "value: not b"; \
133 133 > echo "key: 3"; echo "value: c"; \
134 134 > echo "key: 4"; echo "value: d"
135 135 > context
136 136 > context
137 137 > key: 1
138 138 > value: a
139 139 > key: 2
140 140 > value: b
141 141 > key: 3
142 142 > value: * (glob)
143 143 > key: 4
144 144 > value: * (glob)
145 145 > EOF
146 146 $ rt test-failure-globs.t
147 147 running 1 tests using 1 parallel processes
148 148
149 149 --- $TESTTMP/test-failure-globs.t
150 150 +++ $TESTTMP/test-failure-globs.t.err
151 151 @@ -2,9 +2,9 @@
152 152 context
153 153 context
154 154 key: 1
155 155 - value: a
156 156 + value: not a
157 157 key: 2
158 158 - value: b
159 159 + value: not b
160 160 key: 3
161 161 value: * (glob)
162 162 key: 4
163 163
164 164 ERROR: test-failure-globs.t output changed
165 165 !
166 166 Failed test-failure-globs.t: output changed
167 167 # Ran 1 tests, 0 skipped, 1 failed.
168 168 python hash seed: * (glob)
169 169 [1]
170 170 $ rm test-failure-globs.t
171 171
172 172 test diff colorisation
173 173
174 174 #if no-windows pygments
175 175 $ rt test-failure.t --color always
176 176
177 177 \x1b[38;5;124m--- $TESTTMP/test-failure.t\x1b[39m (esc)
178 178 \x1b[38;5;34m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc)
179 179 \x1b[38;5;90;01m@@ -1,4 +1,4 @@\x1b[39;00m (esc)
180 180 $ echo "bar-baz"; echo "bar-bad"; echo foo
181 181 \x1b[38;5;34m+ bar*baz (glob)\x1b[39m (esc)
182 182 bar*bad (glob)
183 183 \x1b[38;5;124m- bar*baz (glob)\x1b[39m (esc)
184 184 \x1b[38;5;124m- | fo (re)\x1b[39m (esc)
185 185 \x1b[38;5;34m+ foo\x1b[39m (esc)
186 186
187 187 \x1b[38;5;88mERROR: \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m output changed\x1b[39m (esc)
188 188 !
189 189 \x1b[38;5;88mFailed \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m: output changed\x1b[39m (esc)
190 190 # Ran 1 tests, 0 skipped, 1 failed.
191 191 python hash seed: * (glob)
192 192 [1]
193 193
194 194 $ rt test-failure.t 2> tmp.log
195 195 [1]
196 196 $ cat tmp.log
197 197
198 198 --- $TESTTMP/test-failure.t
199 199 +++ $TESTTMP/test-failure.t.err
200 200 @@ -1,4 +1,4 @@
201 201 $ echo "bar-baz"; echo "bar-bad"; echo foo
202 202 + bar*baz (glob)
203 203 bar*bad (glob)
204 204 - bar*baz (glob)
205 205 - | fo (re)
206 206 + foo
207 207
208 208 ERROR: test-failure.t output changed
209 209 !
210 210 Failed test-failure.t: output changed
211 211 # Ran 1 tests, 0 skipped, 1 failed.
212 212 python hash seed: * (glob)
213 213 #endif
214 214
215 215 $ cat > test-failure.t << EOF
216 216 > $ true
217 217 > should go away (true !)
218 218 > $ true
219 219 > should stay (false !)
220 220 >
221 221 > Should remove first line, not second or third
222 222 > $ echo 'testing'
223 223 > baz*foo (glob) (true !)
224 224 > foobar*foo (glob) (false !)
225 225 > te*ting (glob) (true !)
226 226 >
227 227 > Should keep first two lines, remove third and last
228 228 > $ echo 'testing'
229 229 > test.ng (re) (true !)
230 230 > foo.ar (re) (false !)
231 231 > b.r (re) (true !)
232 232 > missing (?)
233 233 > awol (true !)
234 234 >
235 235 > The "missing" line should stay, even though awol is dropped
236 236 > $ echo 'testing'
237 237 > test.ng (re) (true !)
238 238 > foo.ar (?)
239 239 > awol
240 240 > missing (?)
241 241 > EOF
242 242 $ rt test-failure.t
243 243 running 1 tests using 1 parallel processes
244 244
245 245 --- $TESTTMP/test-failure.t
246 246 +++ $TESTTMP/test-failure.t.err
247 247 @@ -1,11 +1,9 @@
248 248 $ true
249 249 - should go away (true !)
250 250 $ true
251 251 should stay (false !)
252 252
253 253 Should remove first line, not second or third
254 254 $ echo 'testing'
255 255 - baz*foo (glob) (true !)
256 256 foobar*foo (glob) (false !)
257 257 te*ting (glob) (true !)
258 258
259 259 foo.ar (re) (false !)
260 260 missing (?)
261 261 @@ -13,13 +11,10 @@
262 262 $ echo 'testing'
263 263 test.ng (re) (true !)
264 264 foo.ar (re) (false !)
265 265 - b.r (re) (true !)
266 266 missing (?)
267 267 - awol (true !)
268 268
269 269 The "missing" line should stay, even though awol is dropped
270 270 $ echo 'testing'
271 271 test.ng (re) (true !)
272 272 foo.ar (?)
273 273 - awol
274 274 missing (?)
275 275
276 276 ERROR: test-failure.t output changed
277 277 !
278 278 Failed test-failure.t: output changed
279 279 # Ran 1 tests, 0 skipped, 1 failed.
280 280 python hash seed: * (glob)
281 281 [1]
282 282
283 283 basic failing test
284 284 $ cat > test-failure.t << EOF
285 285 > $ echo babar
286 286 > rataxes
287 287 > This is a noop statement so that
288 288 > this test is still more bytes than success.
289 289 > pad pad pad pad............................................................
290 290 > pad pad pad pad............................................................
291 291 > pad pad pad pad............................................................
292 292 > pad pad pad pad............................................................
293 293 > pad pad pad pad............................................................
294 294 > pad pad pad pad............................................................
295 295 > EOF
296 296
297 297 >>> fh = open('test-failure-unicode.t', 'wb')
298 298 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
299 299 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
300 300
301 301 $ rt
302 302 running 3 tests using 1 parallel processes
303 303
304 304 --- $TESTTMP/test-failure.t
305 305 +++ $TESTTMP/test-failure.t.err
306 306 @@ -1,5 +1,5 @@
307 307 $ echo babar
308 308 - rataxes
309 309 + babar
310 310 This is a noop statement so that
311 311 this test is still more bytes than success.
312 312 pad pad pad pad............................................................
313 313
314 314 ERROR: test-failure.t output changed
315 315 !.
316 316 --- $TESTTMP/test-failure-unicode.t
317 317 +++ $TESTTMP/test-failure-unicode.t.err
318 318 @@ -1,2 +1,2 @@
319 319 $ echo babar\xce\xb1 (esc)
320 320 - l\xce\xb5\xce\xb5t (esc)
321 321 + babar\xce\xb1 (esc)
322 322
323 323 ERROR: test-failure-unicode.t output changed
324 324 !
325 325 Failed test-failure.t: output changed
326 326 Failed test-failure-unicode.t: output changed
327 327 # Ran 3 tests, 0 skipped, 2 failed.
328 328 python hash seed: * (glob)
329 329 [1]
330 330
331 331 test --outputdir
332 332 $ mkdir output
333 333 $ rt --outputdir output
334 334 running 3 tests using 1 parallel processes
335 335
336 336 --- $TESTTMP/test-failure.t
337 337 +++ $TESTTMP/output/test-failure.t.err
338 338 @@ -1,5 +1,5 @@
339 339 $ echo babar
340 340 - rataxes
341 341 + babar
342 342 This is a noop statement so that
343 343 this test is still more bytes than success.
344 344 pad pad pad pad............................................................
345 345
346 346 ERROR: test-failure.t output changed
347 347 !.
348 348 --- $TESTTMP/test-failure-unicode.t
349 349 +++ $TESTTMP/output/test-failure-unicode.t.err
350 350 @@ -1,2 +1,2 @@
351 351 $ echo babar\xce\xb1 (esc)
352 352 - l\xce\xb5\xce\xb5t (esc)
353 353 + babar\xce\xb1 (esc)
354 354
355 355 ERROR: test-failure-unicode.t output changed
356 356 !
357 357 Failed test-failure.t: output changed
358 358 Failed test-failure-unicode.t: output changed
359 359 # Ran 3 tests, 0 skipped, 2 failed.
360 360 python hash seed: * (glob)
361 361 [1]
362 362 $ ls -a output
363 363 .
364 364 ..
365 365 .testtimes
366 366 test-failure-unicode.t.err
367 367 test-failure.t.err
368 368
369 369 test --xunit support
370 370 $ rt --xunit=xunit.xml
371 371 running 3 tests using 1 parallel processes
372 372
373 373 --- $TESTTMP/test-failure.t
374 374 +++ $TESTTMP/test-failure.t.err
375 375 @@ -1,5 +1,5 @@
376 376 $ echo babar
377 377 - rataxes
378 378 + babar
379 379 This is a noop statement so that
380 380 this test is still more bytes than success.
381 381 pad pad pad pad............................................................
382 382
383 383 ERROR: test-failure.t output changed
384 384 !.
385 385 --- $TESTTMP/test-failure-unicode.t
386 386 +++ $TESTTMP/test-failure-unicode.t.err
387 387 @@ -1,2 +1,2 @@
388 388 $ echo babar\xce\xb1 (esc)
389 389 - l\xce\xb5\xce\xb5t (esc)
390 390 + babar\xce\xb1 (esc)
391 391
392 392 ERROR: test-failure-unicode.t output changed
393 393 !
394 394 Failed test-failure.t: output changed
395 395 Failed test-failure-unicode.t: output changed
396 396 # Ran 3 tests, 0 skipped, 2 failed.
397 397 python hash seed: * (glob)
398 398 [1]
399 399 $ cat xunit.xml
400 400 <?xml version="1.0" encoding="utf-8"?>
401 401 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
402 402 <testcase name="test-success.t" time="*"/> (glob)
403 403 <testcase name="test-failure-unicode.t" time="*"> (glob)
404 404 <failure message="output changed" type="output-mismatch">
405 405 <![CDATA[--- $TESTTMP/test-failure-unicode.t
406 406 +++ $TESTTMP/test-failure-unicode.t.err
407 407 @@ -1,2 +1,2 @@
408 408 $ echo babar\xce\xb1 (esc)
409 409 - l\xce\xb5\xce\xb5t (esc)
410 410 + babar\xce\xb1 (esc)
411 411 ]]> </failure>
412 412 </testcase>
413 413 <testcase name="test-failure.t" time="*"> (glob)
414 414 <failure message="output changed" type="output-mismatch">
415 415 <![CDATA[--- $TESTTMP/test-failure.t
416 416 +++ $TESTTMP/test-failure.t.err
417 417 @@ -1,5 +1,5 @@
418 418 $ echo babar
419 419 - rataxes
420 420 + babar
421 421 This is a noop statement so that
422 422 this test is still more bytes than success.
423 423 pad pad pad pad............................................................
424 424 ]]> </failure>
425 425 </testcase>
426 426 </testsuite>
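Since run-tests already depends on xml.dom.minidom, the xunit report is easy
to post-process; a consumer sketch (assumed filename):

    import xml.dom.minidom as minidom

    doc = minidom.parse('xunit.xml')
    suite = doc.documentElement
    print('failures: %s' % suite.getAttribute('failures'))
    for case in suite.getElementsByTagName('testcase'):
        print(case.getAttribute('name'))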
427 427
428 428 $ cat .testtimes
429 429 test-empty.t * (glob)
430 430 test-failure-globs.t * (glob)
431 431 test-failure-unicode.t * (glob)
432 432 test-failure.t * (glob)
433 433 test-success.t * (glob)
434 434
435 435 $ rt --list-tests
436 436 test-failure-unicode.t
437 437 test-failure.t
438 438 test-success.t
439 439
440 440 $ rt --list-tests --json
441 441 test-failure-unicode.t
442 442 test-failure.t
443 443 test-success.t
444 444 $ cat report.json
445 445 testreport ={
446 446 "test-failure-unicode.t": {
447 447 "result": "success"
448 448 },
449 449 "test-failure.t": {
450 450 "result": "success"
451 451 },
452 452 "test-success.t": {
453 453 "result": "success"
454 454 }
455 455 } (no-eol)
456 456
457 457 $ rt --list-tests --xunit=xunit.xml
458 458 test-failure-unicode.t
459 459 test-failure.t
460 460 test-success.t
461 461 $ cat xunit.xml
462 462 <?xml version="1.0" encoding="utf-8"?>
463 463 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
464 464 <testcase name="test-failure-unicode.t"/>
465 465 <testcase name="test-failure.t"/>
466 466 <testcase name="test-success.t"/>
467 467 </testsuite>
468 468
469 469 $ rt --list-tests test-failure* --json --xunit=xunit.xml --outputdir output
470 470 test-failure-unicode.t
471 471 test-failure.t
472 472 $ cat output/report.json
473 473 testreport ={
474 474 "test-failure-unicode.t": {
475 475 "result": "success"
476 476 },
477 477 "test-failure.t": {
478 478 "result": "success"
479 479 }
480 480 } (no-eol)
481 481 $ cat xunit.xml
482 482 <?xml version="1.0" encoding="utf-8"?>
483 483 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
484 484 <testcase name="test-failure-unicode.t"/>
485 485 <testcase name="test-failure.t"/>
486 486 </testsuite>
487 487
488 488 $ rm test-failure-unicode.t
489 489
490 490 test for --retest
491 491 ====================
492 492
493 493 $ rt --retest
494 494 running 2 tests using 1 parallel processes
495 495
496 496 --- $TESTTMP/test-failure.t
497 497 +++ $TESTTMP/test-failure.t.err
498 498 @@ -1,5 +1,5 @@
499 499 $ echo babar
500 500 - rataxes
501 501 + babar
502 502 This is a noop statement so that
503 503 this test is still more bytes than success.
504 504 pad pad pad pad............................................................
505 505
506 506 ERROR: test-failure.t output changed
507 507 !
508 508 Failed test-failure.t: output changed
509 509 # Ran 2 tests, 1 skipped, 1 failed.
510 510 python hash seed: * (glob)
511 511 [1]
512 512
513 513 --retest works with --outputdir
514 514 $ rm -r output
515 515 $ mkdir output
516 516 $ mv test-failure.t.err output
517 517 $ rt --retest --outputdir output
518 518 running 2 tests using 1 parallel processes
519 519
520 520 --- $TESTTMP/test-failure.t
521 521 +++ $TESTTMP/output/test-failure.t.err
522 522 @@ -1,5 +1,5 @@
523 523 $ echo babar
524 524 - rataxes
525 525 + babar
526 526 This is a noop statement so that
527 527 this test is still more bytes than success.
528 528 pad pad pad pad............................................................
529 529
530 530 ERROR: test-failure.t output changed
531 531 !
532 532 Failed test-failure.t: output changed
533 533 # Ran 2 tests, 1 skipped, 1 failed.
534 534 python hash seed: * (glob)
535 535 [1]
536 536
537 537 Selecting Tests To Run
538 538 ======================
539 539
540 540 successful
541 541
542 542 $ rt test-success.t
543 543 running 1 tests using 1 parallel processes
544 544 .
545 545 # Ran 1 tests, 0 skipped, 0 failed.
546 546
547 547 success w/ keyword
548 548 $ rt -k xyzzy
549 549 running 2 tests using 1 parallel processes
550 550 .
551 551 # Ran 2 tests, 1 skipped, 0 failed.
552 552
553 553 failed
554 554
555 555 $ rt test-failure.t
556 556 running 1 tests using 1 parallel processes
557 557
558 558 --- $TESTTMP/test-failure.t
559 559 +++ $TESTTMP/test-failure.t.err
560 560 @@ -1,5 +1,5 @@
561 561 $ echo babar
562 562 - rataxes
563 563 + babar
564 564 This is a noop statement so that
565 565 this test is still more bytes than success.
566 566 pad pad pad pad............................................................
567 567
568 568 ERROR: test-failure.t output changed
569 569 !
570 570 Failed test-failure.t: output changed
571 571 # Ran 1 tests, 0 skipped, 1 failed.
572 572 python hash seed: * (glob)
573 573 [1]
574 574
575 575 failure w/ keyword
576 576 $ rt -k rataxes
577 577 running 2 tests using 1 parallel processes
578 578
579 579 --- $TESTTMP/test-failure.t
580 580 +++ $TESTTMP/test-failure.t.err
581 581 @@ -1,5 +1,5 @@
582 582 $ echo babar
583 583 - rataxes
584 584 + babar
585 585 This is a noop statement so that
586 586 this test is still more bytes than success.
587 587 pad pad pad pad............................................................
588 588
589 589 ERROR: test-failure.t output changed
590 590 !
591 591 Failed test-failure.t: output changed
592 592 # Ran 2 tests, 1 skipped, 1 failed.
593 593 python hash seed: * (glob)
594 594 [1]
595 595
596 596 Verify that when a process fails to start we show a useful message
597 597 ==================================================================
598 598
599 599 $ cat > test-serve-fail.t <<EOF
600 600 > $ echo 'abort: child process failed to start blah'
601 601 > EOF
602 602 $ rt test-serve-fail.t
603 603 running 1 tests using 1 parallel processes
604 604
605 605 --- $TESTTMP/test-serve-fail.t
606 606 +++ $TESTTMP/test-serve-fail.t.err
607 607 @@ -1* +1,2 @@ (glob)
608 608 $ echo 'abort: child process failed to start blah'
609 609 + abort: child process failed to start blah
610 610
611 611 ERROR: test-serve-fail.t output changed
612 612 !
613 613 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
614 614 # Ran 1 tests, 0 skipped, 1 failed.
615 615 python hash seed: * (glob)
616 616 [1]
617 617 $ rm test-serve-fail.t
618 618
619 619 Verify that we can try other ports
620 620 ===================================
621 621
622 622 Extensions aren't inherited by the invoked run-tests.py. An extension
623 623 introducing a repository requirement could cause this to fail. So we clear
624 624 HGRCPATH to get a clean environment.
625 625
626 626 $ HGRCPATH= hg init inuse
627 627 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
628 628 $ cat blocks.pid >> $DAEMON_PIDS
629 629 $ cat > test-serve-inuse.t <<EOF
630 630 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
631 631 > $ cat hg.pid >> \$DAEMON_PIDS
632 632 > EOF
633 633 $ rt test-serve-inuse.t
634 634 running 1 tests using 1 parallel processes
635 635 .
636 636 # Ran 1 tests, 0 skipped, 0 failed.
637 637 $ rm test-serve-inuse.t
638 638 $ killdaemons.py $DAEMON_PIDS
639 639
640 640 Running In Debug Mode
641 641 ======================
642 642
643 643 $ rt --debug 2>&1 | grep -v pwd
644 644 running 2 tests using 1 parallel processes
645 645 + echo *SALT* 0 0 (glob)
646 646 *SALT* 0 0 (glob)
647 647 + echo babar
648 648 babar
649 649 + echo *SALT* 10 0 (glob)
650 650 *SALT* 10 0 (glob)
651 651 *+ echo *SALT* 0 0 (glob)
652 652 *SALT* 0 0 (glob)
653 653 + echo babar
654 654 babar
655 655 + echo *SALT* 2 0 (glob)
656 656 *SALT* 2 0 (glob)
657 657 + echo xyzzy
658 658 xyzzy
659 659 + echo *SALT* 9 0 (glob)
660 660 *SALT* 9 0 (glob)
661 661 + printf *abc\ndef\nxyz\n* (glob)
662 662 abc
663 663 def
664 664 xyz
665 665 + echo *SALT* 15 0 (glob)
666 666 *SALT* 15 0 (glob)
667 667 + printf *zyx\nwvu\ntsr\n* (glob)
668 668 zyx
669 669 wvu
670 670 tsr
671 671 + echo *SALT* 22 0 (glob)
672 672 *SALT* 22 0 (glob)
673 673 .
674 674 # Ran 2 tests, 0 skipped, 0 failed.
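The *SALT* lines in the debug output above are markers the runner interleaves
with the test's commands so the combined shell output can be split back into
per-command chunks (the two trailing numbers are a line number and the
previous command's exit status). Schematically (a simplified sketch with a
toy command list, not the runner's exact code):

    import os

    commands = [(0, 'echo babar\n'), (2, 'echo xyzzy\n')]  # toy example
    salt = 'SALT%d' % os.getpid()
    script = []
    for lineno, cmd in commands:
        script.append('echo %s %d $?\n' % (salt, lineno))
        script.append(cmd)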
675 675
676 676 Parallel runs
677 677 ==============
678 678
679 679 (duplicate the failing test to get predictable output)
680 680 $ cp test-failure.t test-failure-copy.t
681 681
682 682 $ rt --jobs 2 test-failure*.t -n
683 683 running 2 tests using 2 parallel processes
684 684 !!
685 685 Failed test-failure*.t: output changed (glob)
686 686 Failed test-failure*.t: output changed (glob)
687 687 # Ran 2 tests, 0 skipped, 2 failed.
688 688 python hash seed: * (glob)
689 689 [1]
690 690
691 691 failures in parallel with --first should only print one failure
692 692 $ rt --jobs 2 --first test-failure*.t
693 693 running 2 tests using 2 parallel processes
694 694
695 695 --- $TESTTMP/test-failure*.t (glob)
696 696 +++ $TESTTMP/test-failure*.t.err (glob)
697 697 @@ -1,5 +1,5 @@
698 698 $ echo babar
699 699 - rataxes
700 700 + babar
701 701 This is a noop statement so that
702 702 this test is still more bytes than success.
703 703 pad pad pad pad............................................................
704 704
705 705 Failed test-failure*.t: output changed (glob)
706 706 Failed test-failure*.t: output changed (glob)
707 707 # Ran 2 tests, 0 skipped, 2 failed.
708 708 python hash seed: * (glob)
709 709 [1]
710 710
711 711
712 712 (delete the duplicated test file)
713 713 $ rm test-failure-copy.t
714 714
715 715
716 716 Interactive run
717 717 ===============
718 718
719 719 (backup the failing test)
720 720 $ cp test-failure.t backup
721 721
722 722 Refuse the fix
723 723
724 724 $ echo 'n' | rt -i
725 725 running 2 tests using 1 parallel processes
726 726
727 727 --- $TESTTMP/test-failure.t
728 728 +++ $TESTTMP/test-failure.t.err
729 729 @@ -1,5 +1,5 @@
730 730 $ echo babar
731 731 - rataxes
732 732 + babar
733 733 This is a noop statement so that
734 734 this test is still more bytes than success.
735 735 pad pad pad pad............................................................
736 736 Accept this change? [n]
737 737 ERROR: test-failure.t output changed
738 738 !.
739 739 Failed test-failure.t: output changed
740 740 # Ran 2 tests, 0 skipped, 1 failed.
741 741 python hash seed: * (glob)
742 742 [1]
743 743
744 744 $ cat test-failure.t
745 745 $ echo babar
746 746 rataxes
747 747 This is a noop statement so that
748 748 this test is still more bytes than success.
749 749 pad pad pad pad............................................................
750 750 pad pad pad pad............................................................
751 751 pad pad pad pad............................................................
752 752 pad pad pad pad............................................................
753 753 pad pad pad pad............................................................
754 754 pad pad pad pad............................................................
755 755
756 756 Interactive with custom view
757 757
758 758 $ echo 'n' | rt -i --view echo
759 759 running 2 tests using 1 parallel processes
760 760 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
761 761 Accept this change? [n]* (glob)
762 762 ERROR: test-failure.t output changed
763 763 !.
764 764 Failed test-failure.t: output changed
765 765 # Ran 2 tests, 0 skipped, 1 failed.
766 766 python hash seed: * (glob)
767 767 [1]
768 768
769 769 View the fix
770 770
771 771 $ echo 'y' | rt --view echo
772 772 running 2 tests using 1 parallel processes
773 773 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
774 774
775 775 ERROR: test-failure.t output changed
776 776 !.
777 777 Failed test-failure.t: output changed
778 778 # Ran 2 tests, 0 skipped, 1 failed.
779 779 python hash seed: * (glob)
780 780 [1]
781 781
782 782 Accept the fix
783 783
784 784 $ cat >> test-failure.t <<EOF
785 785 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
786 786 > saved backup bundle to \$TESTTMP/foo.hg
787 787 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
788 788 > saved backup bundle to $TESTTMP\\foo.hg
789 789 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
790 790 > saved backup bundle to \$TESTTMP/*.hg (glob)
791 791 > EOF
792 792 $ echo 'y' | rt -i 2>&1
793 793 running 2 tests using 1 parallel processes
794 794
795 795 --- $TESTTMP/test-failure.t
796 796 +++ $TESTTMP/test-failure.t.err
797 797 @@ -1,5 +1,5 @@
798 798 $ echo babar
799 799 - rataxes
800 800 + babar
801 801 This is a noop statement so that
802 802 this test is still more bytes than success.
803 803 pad pad pad pad............................................................
804 804 @@ -11,6 +11,6 @@
805 805 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
806 806 saved backup bundle to $TESTTMP/foo.hg
807 807 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
808 808 - saved backup bundle to $TESTTMP\foo.hg
809 809 + saved backup bundle to $TESTTMP/foo.hg
810 810 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
811 811 saved backup bundle to $TESTTMP/*.hg (glob)
812 812 Accept this change? [n] ..
813 813 # Ran 2 tests, 0 skipped, 0 failed.
814 814
815 815 $ sed -e 's,(glob)$,&<,g' test-failure.t
816 816 $ echo babar
817 817 babar
818 818 This is a noop statement so that
819 819 this test is still more bytes than success.
820 820 pad pad pad pad............................................................
821 821 pad pad pad pad............................................................
822 822 pad pad pad pad............................................................
823 823 pad pad pad pad............................................................
824 824 pad pad pad pad............................................................
825 825 pad pad pad pad............................................................
826 826 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
827 827 saved backup bundle to $TESTTMP/foo.hg
828 828 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
829 829 saved backup bundle to $TESTTMP/foo.hg
830 830 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
831 831 saved backup bundle to $TESTTMP/*.hg (glob)<
832 832
833 833 Race condition - test file was modified while the test was running
834 834
835 835 $ TESTRACEDIR=`pwd`
836 836 $ export TESTRACEDIR
837 837 $ cat > test-race.t <<EOF
838 838 > $ echo 1
839 839 > $ echo "# a new line" >> $TESTRACEDIR/test-race.t
840 840 > EOF
841 841
842 842 $ rt -i test-race.t
843 843 running 1 tests using 1 parallel processes
844 844
845 845 --- $TESTTMP/test-race.t
846 846 +++ $TESTTMP/test-race.t.err
847 847 @@ -1,2 +1,3 @@
848 848 $ echo 1
849 849 + 1
850 850 $ echo "# a new line" >> $TESTTMP/test-race.t
851 851 Reference output has changed (run again to prompt changes)
852 852 ERROR: test-race.t output changed
853 853 !
854 854 Failed test-race.t: output changed
855 855 # Ran 1 tests, 0 skipped, 1 failed.
856 856 python hash seed: * (glob)
857 857 [1]
858 858
859 859 $ rm test-race.t
860 860
861 861 When "#testcases" is used in .t files
862 862
863 863 $ cat >> test-cases.t <<EOF
864 864 > #testcases a b
865 865 > #if a
866 866 > $ echo 1
867 867 > #endif
868 868 > #if b
869 869 > $ echo 2
870 870 > #endif
871 871 > EOF
872 872
873 873 $ cat <<EOF | rt -i test-cases.t 2>&1
874 874 > y
875 875 > y
876 876 > EOF
877 877 running 2 tests using 1 parallel processes
878 878
879 879 --- $TESTTMP/test-cases.t
880 880 +++ $TESTTMP/test-cases.t#a.err
881 881 @@ -1,6 +1,7 @@
882 882 #testcases a b
883 883 #if a
884 884 $ echo 1
885 885 + 1
886 886 #endif
887 887 #if b
888 888 $ echo 2
889 889 Accept this change? [n] .
890 890 --- $TESTTMP/test-cases.t
891 891 +++ $TESTTMP/test-cases.t#b.err
892 892 @@ -5,4 +5,5 @@
893 893 #endif
894 894 #if b
895 895 $ echo 2
896 896 + 2
897 897 #endif
898 898 Accept this change? [n] .
899 899 # Ran 2 tests, 0 skipped, 0 failed.
900 900
901 901 $ cat test-cases.t
902 902 #testcases a b
903 903 #if a
904 904 $ echo 1
905 905 1
906 906 #endif
907 907 #if b
908 908 $ echo 2
909 909 2
910 910 #endif
911 911
912 912 $ cat >> test-cases.t <<'EOF'
913 913 > #if a
914 914 > $ NAME=A
915 915 > #else
916 916 > $ NAME=B
917 917 > #endif
918 918 > $ echo $NAME
919 919 > A (a !)
920 920 > B (b !)
921 921 > EOF
922 922 $ rt test-cases.t
923 923 running 2 tests using 1 parallel processes
924 924 ..
925 925 # Ran 2 tests, 0 skipped, 0 failed.
926 926
927 927 When using multiple dimensions of "#testcases" in .t files
928 928
929 929 $ cat > test-cases.t <<'EOF'
930 930 > #testcases a b
931 931 > #testcases c d
932 932 > #if a d
933 933 > $ echo $TESTCASE
934 934 > a#d
935 935 > #endif
936 936 > #if b c
937 937 > $ echo yes
938 938 > no
939 939 > #endif
940 940 > EOF
941 941 $ rt test-cases.t
942 942 running 4 tests using 1 parallel processes
943 943 ..
944 944 --- $TESTTMP/test-cases.t
945 945 +++ $TESTTMP/test-cases.t#b#c.err
946 946 @@ -6,5 +6,5 @@
947 947 #endif
948 948 #if b c
949 949 $ echo yes
950 950 - no
951 951 + yes
952 952 #endif
953 953
954 954 ERROR: test-cases.t#b#c output changed
955 955 !.
956 956 Failed test-cases.t#b#c: output changed
957 957 # Ran 4 tests, 0 skipped, 1 failed.
958 958 python hash seed: * (glob)
959 959 [1]
960 960
961 961 $ rm test-cases.t#b#c.err
962 962 $ rm test-cases.t
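The case expansion demonstrated above is a cartesian product of the declared
groups; a sketch of the idea (itertools, not the runner's actual code):

    import itertools

    groups = [['a', 'b'], ['c', 'd']]   # the two #testcases lines
    cases = ['#'.join(combo) for combo in itertools.product(*groups)]
    # cases == ['a#c', 'a#d', 'b#c', 'b#d'], matching names like test-cases.t#b#c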
963 963
964 964 (reinstall)
965 965 $ mv backup test-failure.t
966 966
967 967 No Diff
968 968 ===============
969 969
970 970 $ rt --nodiff
971 971 running 2 tests using 1 parallel processes
972 972 !.
973 973 Failed test-failure.t: output changed
974 974 # Ran 2 tests, 0 skipped, 1 failed.
975 975 python hash seed: * (glob)
976 976 [1]
977 977
978 978 test --tmpdir support
979 979 $ rt --tmpdir=$TESTTMP/keep test-success.t
980 980 running 1 tests using 1 parallel processes
981 981
982 982 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t
983 983 Keeping threadtmp dir: $TESTTMP/keep/child1
984 984 .
985 985 # Ran 1 tests, 0 skipped, 0 failed.
986 986
987 987 timeouts
988 988 ========
989 989 $ cat > test-timeout.t <<EOF
990 990 > $ sleep 2
991 991 > $ echo pass
992 992 > pass
993 993 > EOF
994 994 > echo '#require slow' > test-slow-timeout.t
995 995 > cat test-timeout.t >> test-slow-timeout.t
996 996 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
997 997 running 2 tests using 1 parallel processes
998 998 st
999 999 Skipped test-slow-timeout.t: missing feature: allow slow tests (use --allow-slow-tests)
1000 1000 Failed test-timeout.t: timed out
1001 1001 # Ran 1 tests, 1 skipped, 1 failed.
1002 1002 python hash seed: * (glob)
1003 1003 [1]
1004 1004 $ rt --timeout=1 --slowtimeout=3 \
1005 1005 > test-timeout.t test-slow-timeout.t --allow-slow-tests
1006 1006 running 2 tests using 1 parallel processes
1007 1007 .t
1008 1008 Failed test-timeout.t: timed out
1009 1009 # Ran 2 tests, 0 skipped, 1 failed.
1010 1010 python hash seed: * (glob)
1011 1011 [1]
1012 1012 $ rm test-timeout.t test-slow-timeout.t
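A per-test timeout of this kind is typically enforced by a watchdog that
kills the child process once the budget elapses; roughly (a sketch, not the
runner's exact code):

    import subprocess
    import threading

    def runwithtimeout(cmd, timeout):
        proc = subprocess.Popen(cmd, shell=True)
        timer = threading.Timer(timeout, proc.kill)  # fire after 'timeout' seconds
        timer.start()
        try:
            return proc.wait()
        finally:
            timer.cancel()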
1013 1013
1014 1014 test for --time
1015 1015 ==================
1016 1016
1017 1017 $ rt test-success.t --time
1018 1018 running 1 tests using 1 parallel processes
1019 1019 .
1020 1020 # Ran 1 tests, 0 skipped, 0 failed.
1021 1021 # Producing time report
1022 1022 start end cuser csys real Test
1023 1023 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
1024 1024
1025 1025 test for --time with --job enabled
1026 1026 ====================================
1027 1027
1028 1028 $ rt test-success.t --time --jobs 2
1029 1029 running 1 tests using 1 parallel processes
1030 1030 .
1031 1031 # Ran 1 tests, 0 skipped, 0 failed.
1032 1032 # Producing time report
1033 1033 start end cuser csys real Test
1034 1034 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
1035 1035
1036 1036 Skips
1037 1037 ================
1038 1038 $ cat > test-skip.t <<EOF
1039 1039 > $ echo xyzzy
1040 1040 > #if true
1041 1041 > #require false
1042 1042 > #end
1043 1043 > EOF
1044 1044 $ cat > test-noskip.t <<EOF
1045 1045 > #if false
1046 1046 > #require false
1047 1047 > #endif
1048 1048 > EOF
1049 1049 $ rt --nodiff
1050 1050 running 4 tests using 1 parallel processes
1051 1051 !.s.
1052 1052 Skipped test-skip.t: missing feature: nail clipper
1053 1053 Failed test-failure.t: output changed
1054 1054 # Ran 3 tests, 1 skipped, 1 failed.
1055 1055 python hash seed: * (glob)
1056 1056 [1]
1057 1057
1058 1058 $ rm test-noskip.t
1059 1059 $ rt --keyword xyzzy
1060 1060 running 3 tests using 1 parallel processes
1061 1061 .s
1062 1062 Skipped test-skip.t: missing feature: nail clipper
1063 1063 # Ran 2 tests, 2 skipped, 0 failed.
1064 1064
1065 1065 Skips with xml
1066 1066 $ rt --keyword xyzzy \
1067 1067 > --xunit=xunit.xml
1068 1068 running 3 tests using 1 parallel processes
1069 1069 .s
1070 1070 Skipped test-skip.t: missing feature: nail clipper
1071 1071 # Ran 2 tests, 2 skipped, 0 failed.
1072 1072 $ cat xunit.xml
1073 1073 <?xml version="1.0" encoding="utf-8"?>
1074 1074 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
1075 1075 <testcase name="test-success.t" time="*"/> (glob)
1076 1076 <testcase name="test-skip.t">
1077 1077 <skipped>
1078 1078 <![CDATA[missing feature: nail clipper]]> </skipped>
1079 1079 </testcase>
1080 1080 </testsuite>
1081 1081
1082 1082 Missing skips or blacklisted skips don't count as executed:
1083 1083 $ echo test-failure.t > blacklist
1084 1084 $ rt --blacklist=blacklist --json\
1085 1085 > test-failure.t test-bogus.t
1086 1086 running 2 tests using 1 parallel processes
1087 1087 ss
1088 1088 Skipped test-bogus.t: Doesn't exist
1089 1089 Skipped test-failure.t: blacklisted
1090 1090 # Ran 0 tests, 2 skipped, 0 failed.
1091 1091 $ cat report.json
1092 1092 testreport ={
1093 1093 "test-bogus.t": {
1094 1094 "result": "skip"
1095 1095 },
1096 1096 "test-failure.t": {
1097 1097 "result": "skip"
1098 1098 }
1099 1099 } (no-eol)
1100 1100
1101 1101 Whitelist trumps blacklist
1102 1102 $ echo test-failure.t > whitelist
1103 1103 $ rt --blacklist=blacklist --whitelist=whitelist --json\
1104 1104 > test-failure.t test-bogus.t
1105 1105 running 2 tests using 1 parallel processes
1106 1106 s
1107 1107 --- $TESTTMP/test-failure.t
1108 1108 +++ $TESTTMP/test-failure.t.err
1109 1109 @@ -1,5 +1,5 @@
1110 1110 $ echo babar
1111 1111 - rataxes
1112 1112 + babar
1113 1113 This is a noop statement so that
1114 1114 this test is still more bytes than success.
1115 1115 pad pad pad pad............................................................
1116 1116
1117 1117 ERROR: test-failure.t output changed
1118 1118 !
1119 1119 Skipped test-bogus.t: Doesn't exist
1120 1120 Failed test-failure.t: output changed
1121 1121 # Ran 1 tests, 1 skipped, 1 failed.
1122 1122 python hash seed: * (glob)
1123 1123 [1]
1124 1124
1125 1125 Ensure that --test-list causes only the tests listed in that file to
1126 1126 be executed.
1127 1127 $ echo test-success.t >> onlytest
1128 1128 $ rt --test-list=onlytest
1129 1129 running 1 tests using 1 parallel processes
1130 1130 .
1131 1131 # Ran 1 tests, 0 skipped, 0 failed.
1132 1132 $ echo test-bogus.t >> anothertest
1133 1133 $ rt --test-list=onlytest --test-list=anothertest
1134 1134 running 2 tests using 1 parallel processes
1135 1135 s.
1136 1136 Skipped test-bogus.t: Doesn't exist
1137 1137 # Ran 1 tests, 1 skipped, 0 failed.
1138 1138 $ rm onlytest anothertest
1139 1139
1140 1140 test for --json
1141 1141 ==================
1142 1142
1143 1143 $ rt --json
1144 1144 running 3 tests using 1 parallel processes
1145 1145
1146 1146 --- $TESTTMP/test-failure.t
1147 1147 +++ $TESTTMP/test-failure.t.err
1148 1148 @@ -1,5 +1,5 @@
1149 1149 $ echo babar
1150 1150 - rataxes
1151 1151 + babar
1152 1152 This is a noop statement so that
1153 1153 this test is still more bytes than success.
1154 1154 pad pad pad pad............................................................
1155 1155
1156 1156 ERROR: test-failure.t output changed
1157 1157 !.s
1158 1158 Skipped test-skip.t: missing feature: nail clipper
1159 1159 Failed test-failure.t: output changed
1160 1160 # Ran 2 tests, 1 skipped, 1 failed.
1161 1161 python hash seed: * (glob)
1162 1162 [1]
1163 1163
1164 1164 $ cat report.json
1165 1165 testreport ={
1166 1166 "test-failure.t": [\{] (re)
1167 1167 "csys": "\s*[\d\.]{4,5}", ? (re)
1168 1168 "cuser": "\s*[\d\.]{4,5}", ? (re)
1169 1169 "diff": "---.+\+\+\+.+", ? (re)
1170 1170 "end": "\s*[\d\.]{4,5}", ? (re)
1171 1171 "result": "failure", ? (re)
1172 1172 "start": "\s*[\d\.]{4,5}", ? (re)
1173 1173 "time": "\s*[\d\.]{4,5}" (re)
1174 1174 }, ? (re)
1175 1175 "test-skip.t": {
1176 1176 "csys": "\s*[\d\.]{4,5}", ? (re)
1177 1177 "cuser": "\s*[\d\.]{4,5}", ? (re)
1178 1178 "diff": "", ? (re)
1179 1179 "end": "\s*[\d\.]{4,5}", ? (re)
1180 1180 "result": "skip", ? (re)
1181 1181 "start": "\s*[\d\.]{4,5}", ? (re)
1182 1182 "time": "\s*[\d\.]{4,5}" (re)
1183 1183 }, ? (re)
1184 1184 "test-success.t": [\{] (re)
1185 1185 "csys": "\s*[\d\.]{4,5}", ? (re)
1186 1186 "cuser": "\s*[\d\.]{4,5}", ? (re)
1187 1187 "diff": "", ? (re)
1188 1188 "end": "\s*[\d\.]{4,5}", ? (re)
1189 1189 "result": "success", ? (re)
1190 1190 "start": "\s*[\d\.]{4,5}", ? (re)
1191 1191 "time": "\s*[\d\.]{4,5}" (re)
1192 1192 }
1193 1193 } (no-eol)
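The report is plain JSON after the "testreport =" prefix, so third-party
tooling can consume it directly; a sketch (assumed filename):

    import json

    with open('report.json') as fh:
        report = json.loads(fh.read().split('=', 1)[1])
    failed = [t for t, r in report.items() if r['result'] == 'failure']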
1194 1194 --json with --outputdir
1195 1195
1196 1196 $ rm report.json
1197 1197 $ rm -r output
1198 1198 $ mkdir output
1199 1199 $ rt --json --outputdir output
1200 1200 running 3 tests using 1 parallel processes
1201 1201
1202 1202 --- $TESTTMP/test-failure.t
1203 1203 +++ $TESTTMP/output/test-failure.t.err
1204 1204 @@ -1,5 +1,5 @@
1205 1205 $ echo babar
1206 1206 - rataxes
1207 1207 + babar
1208 1208 This is a noop statement so that
1209 1209 this test is still more bytes than success.
1210 1210 pad pad pad pad............................................................
1211 1211
1212 1212 ERROR: test-failure.t output changed
1213 1213 !.s
1214 1214 Skipped test-skip.t: missing feature: nail clipper
1215 1215 Failed test-failure.t: output changed
1216 1216 # Ran 2 tests, 1 skipped, 1 failed.
1217 1217 python hash seed: * (glob)
1218 1218 [1]
1219 1219 $ f report.json
1220 1220 report.json: file not found
1221 1221 $ cat output/report.json
1222 1222 testreport ={
1223 1223 "test-failure.t": [\{] (re)
1224 1224 "csys": "\s*[\d\.]{4,5}", ? (re)
1225 1225 "cuser": "\s*[\d\.]{4,5}", ? (re)
1226 1226 "diff": "---.+\+\+\+.+", ? (re)
1227 1227 "end": "\s*[\d\.]{4,5}", ? (re)
1228 1228 "result": "failure", ? (re)
1229 1229 "start": "\s*[\d\.]{4,5}", ? (re)
1230 1230 "time": "\s*[\d\.]{4,5}" (re)
1231 1231 }, ? (re)
1232 1232 "test-skip.t": {
1233 1233 "csys": "\s*[\d\.]{4,5}", ? (re)
1234 1234 "cuser": "\s*[\d\.]{4,5}", ? (re)
1235 1235 "diff": "", ? (re)
1236 1236 "end": "\s*[\d\.]{4,5}", ? (re)
1237 1237 "result": "skip", ? (re)
1238 1238 "start": "\s*[\d\.]{4,5}", ? (re)
1239 1239 "time": "\s*[\d\.]{4,5}" (re)
1240 1240 }, ? (re)
1241 1241 "test-success.t": [\{] (re)
1242 1242 "csys": "\s*[\d\.]{4,5}", ? (re)
1243 1243 "cuser": "\s*[\d\.]{4,5}", ? (re)
1244 1244 "diff": "", ? (re)
1245 1245 "end": "\s*[\d\.]{4,5}", ? (re)
1246 1246 "result": "success", ? (re)
1247 1247 "start": "\s*[\d\.]{4,5}", ? (re)
1248 1248 "time": "\s*[\d\.]{4,5}" (re)
1249 1249 }
1250 1250 } (no-eol)
1251 1251 $ ls -a output
1252 1252 .
1253 1253 ..
1254 1254 .testtimes
1255 1255 report.json
1256 1256 test-failure.t.err
1257 1257
1258 1258 Test that failed tests accepted interactively are properly reported:
1259 1259
1260 1260 $ cp test-failure.t backup
1261 1261 $ echo y | rt --json -i
1262 1262 running 3 tests using 1 parallel processes
1263 1263
1264 1264 --- $TESTTMP/test-failure.t
1265 1265 +++ $TESTTMP/test-failure.t.err
1266 1266 @@ -1,5 +1,5 @@
1267 1267 $ echo babar
1268 1268 - rataxes
1269 1269 + babar
1270 1270 This is a noop statement so that
1271 1271 this test is still more bytes than success.
1272 1272 pad pad pad pad............................................................
1273 1273 Accept this change? [n] ..s
1274 1274 Skipped test-skip.t: missing feature: nail clipper
1275 1275 # Ran 2 tests, 1 skipped, 0 failed.
1276 1276
1277 1277 $ cat report.json
1278 1278 testreport ={
1279 1279 "test-failure.t": [\{] (re)
1280 1280 "csys": "\s*[\d\.]{4,5}", ? (re)
1281 1281 "cuser": "\s*[\d\.]{4,5}", ? (re)
1282 1282 "diff": "", ? (re)
1283 1283 "end": "\s*[\d\.]{4,5}", ? (re)
1284 1284 "result": "success", ? (re)
1285 1285 "start": "\s*[\d\.]{4,5}", ? (re)
1286 1286 "time": "\s*[\d\.]{4,5}" (re)
1287 1287 }, ? (re)
1288 1288 "test-skip.t": {
1289 1289 "csys": "\s*[\d\.]{4,5}", ? (re)
1290 1290 "cuser": "\s*[\d\.]{4,5}", ? (re)
1291 1291 "diff": "", ? (re)
1292 1292 "end": "\s*[\d\.]{4,5}", ? (re)
1293 1293 "result": "skip", ? (re)
1294 1294 "start": "\s*[\d\.]{4,5}", ? (re)
1295 1295 "time": "\s*[\d\.]{4,5}" (re)
1296 1296 }, ? (re)
1297 1297 "test-success.t": [\{] (re)
1298 1298 "csys": "\s*[\d\.]{4,5}", ? (re)
1299 1299 "cuser": "\s*[\d\.]{4,5}", ? (re)
1300 1300 "diff": "", ? (re)
1301 1301 "end": "\s*[\d\.]{4,5}", ? (re)
1302 1302 "result": "success", ? (re)
1303 1303 "start": "\s*[\d\.]{4,5}", ? (re)
1304 1304 "time": "\s*[\d\.]{4,5}" (re)
1305 1305 }
1306 1306 } (no-eol)
1307 1307 $ mv backup test-failure.t
1308 1308
1309 1309 backslash at end of line with glob matching is handled properly
1310 1310
1311 1311 $ cat > test-glob-backslash.t << EOF
1312 1312 > $ echo 'foo bar \\'
1313 1313 > foo * \ (glob)
1314 1314 > EOF
1315 1315
1316 1316 $ rt test-glob-backslash.t
1317 1317 running 1 tests using 1 parallel processes
1318 1318 .
1319 1319 # Ran 1 tests, 0 skipped, 0 failed.
1320 1320
1321 1321 $ rm -f test-glob-backslash.t
1322 1322
1323 1323 Test globbing of local IP addresses
1324 1324 $ echo 172.16.18.1
1325 1325 $LOCALIP (glob)
1326 1326 $ echo dead:beef::1
1327 1327 $LOCALIP (glob)
1328 1328
1329 1329 Add support for external test formatter
1330 1330 =======================================
1331 1331
1332 $ CUSTOM_TEST_RESULT=basic_test_result "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` "$@" test-success.t test-failure.t
1332 $ CUSTOM_TEST_RESULT=basic_test_result "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` -j1 "$@" test-success.t test-failure.t
1333 1333 running 2 tests using 1 parallel processes
1334 1334
1335 1335 # Ran 2 tests, 0 skipped, 0 failed.
1336 1336 ON_START! <__main__.TestSuite tests=[<__main__.TTest testMethod=test-failure.t>, <__main__.TTest testMethod=test-success.t>]>
1337 1337 FAILURE! test-failure.t output changed
1338 1338 SUCCESS! test-success.t
1339 1339 ON_END!
1340 1340
1341 1341 Test reusability for third party tools
1342 1342 ======================================
1343 1343
1344 1344 $ mkdir "$TESTTMP"/anothertests
1345 1345 $ cd "$TESTTMP"/anothertests
1346 1346
1347 1347 test that `run-tests.py` can execute hghave, even if it is not run in the
1348 1348 Mercurial source tree.
1349 1349
1350 1350 $ cat > test-hghave.t <<EOF
1351 1351 > #require true
1352 1352 > $ echo foo
1353 1353 > foo
1354 1354 > EOF
1355 1355 $ rt test-hghave.t
1356 1356 running 1 tests using 1 parallel processes
1357 1357 .
1358 1358 # Ran 1 tests, 0 skipped, 0 failed.
1359 1359
1360 1360 test that RUNTESTDIR refers to the directory in which the currently
1361 1361 running `run-tests.py` is placed.
1362 1362
1363 1363 $ cat > test-runtestdir.t <<EOF
1364 1364 > - $TESTDIR, in which test-run-tests.t is placed
1365 1365 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
1366 1366 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
1367 1367 >
1368 1368 > #if windows
1369 1369 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
1370 1370 > #else
1371 1371 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
1372 1372 > #endif
1373 1373 > If this prints a path, that means RUNTESTDIR didn't equal
1374 1374 > TESTDIR as it should have.
1375 1375 > $ test "\$RUNTESTDIR" = "$TESTDIR" || echo "\$RUNTESTDIR"
1376 1376 > This should print the start of check-code. If this passes but the
1377 1377 > previous check failed, that means we found a copy of check-code at whatever
1378 1378 > RUNTESTDIR ended up containing, even though it doesn't match TESTDIR.
1379 1379 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py | sed 's@.!.*python@#!USRBINENVPY@'
1380 1380 > #!USRBINENVPY
1381 1381 > #
1382 1382 > # check-code - a style and portability checker for Mercurial
1383 1383 > EOF
1384 1384 $ rt test-runtestdir.t
1385 1385 running 1 tests using 1 parallel processes
1386 1386 .
1387 1387 # Ran 1 tests, 0 skipped, 0 failed.
1388 1388
1389 1389 #if execbit
1390 1390
1391 1391 test that TESTDIR is included in PATH
1392 1392
1393 1393 $ cat > custom-command.sh <<EOF
1394 1394 > #!/bin/sh
1395 1395 > echo "hello world"
1396 1396 > EOF
1397 1397 $ chmod +x custom-command.sh
1398 1398 $ cat > test-testdir-path.t <<EOF
1399 1399 > $ custom-command.sh
1400 1400 > hello world
1401 1401 > EOF
1402 1402 $ rt test-testdir-path.t
1403 1403 running 1 tests using 1 parallel processes
1404 1404 .
1405 1405 # Ran 1 tests, 0 skipped, 0 failed.
1406 1406
1407 1407 #endif
1408 1408
1409 1409 test support for --allow-slow-tests
1410 1410 $ cat > test-very-slow-test.t <<EOF
1411 1411 > #require slow
1412 1412 > $ echo pass
1413 1413 > pass
1414 1414 > EOF
1415 1415 $ rt test-very-slow-test.t
1416 1416 running 1 tests using 1 parallel processes
1417 1417 s
1418 1418 Skipped test-very-slow-test.t: missing feature: allow slow tests (use --allow-slow-tests)
1419 1419 # Ran 0 tests, 1 skipped, 0 failed.
1420 1420 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
1421 1421 running 1 tests using 1 parallel processes
1422 1422 .
1423 1423 # Ran 1 tests, 0 skipped, 0 failed.
1424 1424
1425 1425 support for running a test outside the current directory
1426 1426 $ mkdir nonlocal
1427 1427 $ cat > nonlocal/test-is-not-here.t << EOF
1428 1428 > $ echo pass
1429 1429 > pass
1430 1430 > EOF
1431 1431 $ rt nonlocal/test-is-not-here.t
1432 1432 running 1 tests using 1 parallel processes
1433 1433 .
1434 1434 # Ran 1 tests, 0 skipped, 0 failed.
1435 1435
1436 1436 support for automatically discovering tests if the arg is a folder
1437 1437 $ mkdir tmp && cd tmp
1438 1438
1439 1439 $ cat > test-uno.t << EOF
1440 1440 > $ echo line
1441 1441 > line
1442 1442 > EOF
1443 1443
1444 1444 $ cp test-uno.t test-dos.t
1445 1445 $ cd ..
1446 1446 $ cp -R tmp tmpp
1447 1447 $ cp tmp/test-uno.t test-solo.t
1448 1448
1449 1449 $ rt tmp/ test-solo.t tmpp
1450 1450 running 5 tests using 1 parallel processes
1451 1451 .....
1452 1452 # Ran 5 tests, 0 skipped, 0 failed.
1453 1453 $ rm -rf tmp tmpp
1454 1454
1455 1455 support for running run-tests.py from another directory
1456 1456 $ mkdir tmp && cd tmp
1457 1457
1458 1458 $ cat > useful-file.sh << EOF
1459 1459 > important command
1460 1460 > EOF
1461 1461
1462 1462 $ cat > test-folder.t << EOF
1463 1463 > $ cat \$TESTDIR/useful-file.sh
1464 1464 > important command
1465 1465 > EOF
1466 1466
1467 1467 $ cat > test-folder-fail.t << EOF
1468 1468 > $ cat \$TESTDIR/useful-file.sh
1469 1469 > important commando
1470 1470 > EOF
1471 1471
1472 1472 $ cd ..
1473 1473 $ rt tmp/test-*.t
1474 1474 running 2 tests using 1 parallel processes
1475 1475
1476 1476 --- $TESTTMP/anothertests/tmp/test-folder-fail.t
1477 1477 +++ $TESTTMP/anothertests/tmp/test-folder-fail.t.err
1478 1478 @@ -1,2 +1,2 @@
1479 1479 $ cat $TESTDIR/useful-file.sh
1480 1480 - important commando
1481 1481 + important command
1482 1482
1483 1483 ERROR: test-folder-fail.t output changed
1484 1484 !.
1485 1485 Failed test-folder-fail.t: output changed
1486 1486 # Ran 2 tests, 0 skipped, 1 failed.
1487 1487 python hash seed: * (glob)
1488 1488 [1]
1489 1489
1490 1490 support for bisecting failed tests automatically
1491 1491 $ hg init bisect
1492 1492 $ cd bisect
1493 1493 $ cat >> test-bisect.t <<EOF
1494 1494 > $ echo pass
1495 1495 > pass
1496 1496 > EOF
1497 1497 $ hg add test-bisect.t
1498 1498 $ hg ci -m 'good'
1499 1499 $ cat >> test-bisect.t <<EOF
1500 1500 > $ echo pass
1501 1501 > fail
1502 1502 > EOF
1503 1503 $ hg ci -m 'bad'
1504 1504 $ rt --known-good-rev=0 test-bisect.t
1505 1505 running 1 tests using 1 parallel processes
1506 1506
1507 1507 --- $TESTTMP/anothertests/bisect/test-bisect.t
1508 1508 +++ $TESTTMP/anothertests/bisect/test-bisect.t.err
1509 1509 @@ -1,4 +1,4 @@
1510 1510 $ echo pass
1511 1511 pass
1512 1512 $ echo pass
1513 1513 - fail
1514 1514 + pass
1515 1515
1516 1516 ERROR: test-bisect.t output changed
1517 1517 !
1518 1518 Failed test-bisect.t: output changed
1519 1519 test-bisect.t broken by 72cbf122d116 (bad)
1520 1520 # Ran 1 tests, 0 skipped, 1 failed.
1521 1521 python hash seed: * (glob)
1522 1522 [1]
1523 1523
1524 1524 $ cd ..
1525 1525
1526 1526 support bisecting a separate repo
1527 1527
1528 1528 $ hg init bisect-dependent
1529 1529 $ cd bisect-dependent
1530 1530 $ cat > test-bisect-dependent.t <<EOF
1531 1531 > $ tail -1 \$TESTDIR/../bisect/test-bisect.t
1532 1532 > pass
1533 1533 > EOF
1534 1534 $ hg commit -Am dependent test-bisect-dependent.t
1535 1535
1536 1536 $ rt --known-good-rev=0 test-bisect-dependent.t
1537 1537 running 1 tests using 1 parallel processes
1538 1538
1539 1539 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1540 1540 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1541 1541 @@ -1,2 +1,2 @@
1542 1542 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1543 1543 - pass
1544 1544 + fail
1545 1545
1546 1546 ERROR: test-bisect-dependent.t output changed
1547 1547 !
1548 1548 Failed test-bisect-dependent.t: output changed
1549 1549 Failed to identify failure point for test-bisect-dependent.t
1550 1550 # Ran 1 tests, 0 skipped, 1 failed.
1551 1551 python hash seed: * (glob)
1552 1552 [1]
1553 1553
1554 1554 $ rt --bisect-repo=../test-bisect test-bisect-dependent.t
1555 1555 usage: run-tests.py [options] [tests]
1556 1556 run-tests.py: error: --bisect-repo cannot be used without --known-good-rev
1557 1557 [2]
1558 1558
1559 1559 $ rt --known-good-rev=0 --bisect-repo=../bisect test-bisect-dependent.t
1560 1560 running 1 tests using 1 parallel processes
1561 1561
1562 1562 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1563 1563 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1564 1564 @@ -1,2 +1,2 @@
1565 1565 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1566 1566 - pass
1567 1567 + fail
1568 1568
1569 1569 ERROR: test-bisect-dependent.t output changed
1570 1570 !
1571 1571 Failed test-bisect-dependent.t: output changed
1572 1572 test-bisect-dependent.t broken by 72cbf122d116 (bad)
1573 1573 # Ran 1 tests, 0 skipped, 1 failed.
1574 1574 python hash seed: * (glob)
1575 1575 [1]
1576 1576
1577 1577 $ cd ..
1578 1578
1579 1579 Test that a broken #if statement doesn't break run-tests threading.
1580 1580 ===================================================================
1581 1581 $ mkdir broken
1582 1582 $ cd broken
1583 1583 $ cat > test-broken.t <<EOF
1584 1584 > true
1585 1585 > #if notarealhghavefeature
1586 1586 > $ false
1587 1587 > #endif
1588 1588 > EOF
1589 1589 $ for f in 1 2 3 4 ; do
1590 1590 > cat > test-works-$f.t <<EOF
1591 1591 > This is test case $f
1592 1592 > $ sleep 1
1593 1593 > EOF
1594 1594 > done
1595 1595 $ rt -j 2
1596 1596 running 5 tests using 2 parallel processes
1597 1597 ....
1598 1598 # Ran 5 tests, 0 skipped, 0 failed.
1599 1599 skipped: unknown feature: notarealhghavefeature
1600 1600
1601 1601 $ cd ..
1602 1602 $ rm -rf broken
1603 1603
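The point of the exercise above: an unknown feature name in '#if' has to
degrade into a per-test skip rather than an unhandled exception, or it
would take a worker thread down with it while the other four tests run.
A sketch of that guard — 'checks', 'SkipTest' and 'evalfeature' are
invented names for illustration, not the harness's real API:

  import os

  checks = {
      'symlink': lambda: hasattr(os, 'symlink'),
  }

  class SkipTest(Exception):
      """One test opts out; parallel workers keep running."""

  def evalfeature(name):
      if name not in checks:
          # An ordinary exception here could kill the worker thread;
          # a skip only retires this one test.
          raise SkipTest('unknown feature: %s' % name)
      return checks[name]()
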
1604 1604 Test cases in .t files
1605 1605 ======================
1606 1606 $ mkdir cases
1607 1607 $ cd cases
1608 1608 $ cat > test-cases-abc.t <<'EOF'
1609 1609 > #testcases A B C
1610 1610 > $ V=B
1611 1611 > #if A
1612 1612 > $ V=A
1613 1613 > #endif
1614 1614 > #if C
1615 1615 > $ V=C
1616 1616 > #endif
1617 1617 > $ echo $V | sed 's/A/C/'
1618 1618 > C
1619 1619 > #if C
1620 1620 > $ [ $V = C ]
1621 1621 > #endif
1622 1622 > #if A
1623 1623 > $ [ $V = C ]
1624 1624 > [1]
1625 1625 > #endif
1626 1626 > #if no-C
1627 1627 > $ [ $V = C ]
1628 1628 > [1]
1629 1629 > #endif
1630 1630 > $ [ $V = D ]
1631 1631 > [1]
1632 1632 > EOF
1633 1633 $ rt
1634 1634 running 3 tests using 1 parallel processes
1635 1635 .
1636 1636 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1637 1637 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1638 1638 @@ -7,7 +7,7 @@
1639 1639 $ V=C
1640 1640 #endif
1641 1641 $ echo $V | sed 's/A/C/'
1642 1642 - C
1643 1643 + B
1644 1644 #if C
1645 1645 $ [ $V = C ]
1646 1646 #endif
1647 1647
1648 1648 ERROR: test-cases-abc.t#B output changed
1649 1649 !.
1650 1650 Failed test-cases-abc.t#B: output changed
1651 1651 # Ran 3 tests, 0 skipped, 1 failed.
1652 1652 python hash seed: * (glob)
1653 1653 [1]
1654 1654
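What the run above shows: a single '#testcases A B C' line fans the file
out into three tests ('running 3 tests'), one per case, and '#if' blocks
naming a case apply only inside that case's instance. A sketch of the
expansion step, with illustrative names:

  def expand_cases(path, lines):
      # One '#testcases' line turns one file into one test per case.
      for line in lines:
          if line.startswith('#testcases '):
              return ['%s#%s' % (path, case) for case in line.split()[1:]]
      return [path]

  # expand_cases('test-cases-abc.t', ['#testcases A B C'])
  # -> ['test-cases-abc.t#A', 'test-cases-abc.t#B', 'test-cases-abc.t#C']
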
1655 1655 --restart works
1656 1656
1657 1657 $ rt --restart
1658 1658 running 2 tests using 1 parallel processes
1659 1659
1660 1660 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1661 1661 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1662 1662 @@ -7,7 +7,7 @@
1663 1663 $ V=C
1664 1664 #endif
1665 1665 $ echo $V | sed 's/A/C/'
1666 1666 - C
1667 1667 + B
1668 1668 #if C
1669 1669 $ [ $V = C ]
1670 1670 #endif
1671 1671
1672 1672 ERROR: test-cases-abc.t#B output changed
1673 1673 !.
1674 1674 Failed test-cases-abc.t#B: output changed
1675 1675 # Ran 2 tests, 0 skipped, 1 failed.
1676 1676 python hash seed: * (glob)
1677 1677 [1]
1678 1678
1679 1679 --restart works with outputdir
1680 1680
1681 1681 $ mkdir output
1682 1682 $ mv test-cases-abc.t#B.err output
1683 1683 $ rt --restart --outputdir output
1684 1684 running 2 tests using 1 parallel processes
1685 1685
1686 1686 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1687 1687 +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t#B.err
1688 1688 @@ -7,7 +7,7 @@
1689 1689 $ V=C
1690 1690 #endif
1691 1691 $ echo $V | sed 's/A/C/'
1692 1692 - C
1693 1693 + B
1694 1694 #if C
1695 1695 $ [ $V = C ]
1696 1696 #endif
1697 1697
1698 1698 ERROR: test-cases-abc.t#B output changed
1699 1699 !.
1700 1700 Failed test-cases-abc.t#B: output changed
1701 1701 # Ran 2 tests, 0 skipped, 1 failed.
1702 1702 python hash seed: * (glob)
1703 1703 [1]
1704 1704
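Both --restart runs are consistent with a simple rule: drop tests from
the front of the sorted list until one has a leftover .err file, then
rerun everything from there — hence 'running 2 tests' for #B and #C.
--outputdir only changes where the .err files are looked up. A rough
sketch under that assumption:

  import os

  def restart_point(tests, outputdir):
      # Skip tests until the first one with a leftover .err file.
      tests = list(tests)
      while tests and not os.path.exists(
              os.path.join(outputdir, tests[0] + '.err')):
          tests.pop(0)
      return tests
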
1705 1705 Test TESTCASE variable
1706 1706
1707 1707 $ cat > test-cases-ab.t <<'EOF'
1708 1708 > $ dostuff() {
1709 1709 > > echo "In case $TESTCASE"
1710 1710 > > }
1711 1711 > #testcases A B
1712 1712 > #if A
1713 1713 > $ dostuff
1714 1714 > In case A
1715 1715 > #endif
1716 1716 > #if B
1717 1717 > $ dostuff
1718 1718 > In case B
1719 1719 > #endif
1720 1720 > EOF
1721 1721 $ rt test-cases-ab.t
1722 1722 running 2 tests using 1 parallel processes
1723 1723 ..
1724 1724 # Ran 2 tests, 0 skipped, 0 failed.
1725 1725
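The dostuff() helper above can branch per case because each generated
test runs with its case name exported as $TESTCASE. A sketch of that
setup — the subprocess call merely stands in for however the harness
really executes a compiled .t script:

  import os
  import subprocess

  def run_case(script_cmd, case):
      # Export the active case name to the script under test.
      env = dict(os.environ, TESTCASE=case)
      return subprocess.call(script_cmd, env=env)
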
1726 1726 Support running a specific test case
1727 1727
1728 1728 $ rt "test-cases-abc.t#B"
1729 1729 running 1 tests using 1 parallel processes
1730 1730
1731 1731 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1732 1732 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1733 1733 @@ -7,7 +7,7 @@
1734 1734 $ V=C
1735 1735 #endif
1736 1736 $ echo $V | sed 's/A/C/'
1737 1737 - C
1738 1738 + B
1739 1739 #if C
1740 1740 $ [ $V = C ]
1741 1741 #endif
1742 1742
1743 1743 ERROR: test-cases-abc.t#B output changed
1744 1744 !
1745 1745 Failed test-cases-abc.t#B: output changed
1746 1746 # Ran 1 tests, 0 skipped, 1 failed.
1747 1747 python hash seed: * (glob)
1748 1748 [1]
1749 1749
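The 'test-cases-abc.t#B' selector splits at '#': the left half names the
file, the right half the case, and (as the 'ignoring invalid test cases'
example below shows) selectors naming an unknown case are simply
dropped. A sketch with invented helper names:

  def parse_selector(selector):
      # 'path#case' -> (path, case); a bare path has no case part.
      path, _, case = selector.partition('#')
      return path, case or None

  def select(selectors, known_cases):
      picked = []
      for sel in selectors:
          path, case = parse_selector(sel)
          if case is None or case in known_cases:
              picked.append((path, case))
      return picked
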
1750 1750 Support running multiple test cases in the same file
1751 1751
1752 1752 $ rt test-cases-abc.t#B test-cases-abc.t#C
1753 1753 running 2 tests using 1 parallel processes
1754 1754
1755 1755 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1756 1756 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1757 1757 @@ -7,7 +7,7 @@
1758 1758 $ V=C
1759 1759 #endif
1760 1760 $ echo $V | sed 's/A/C/'
1761 1761 - C
1762 1762 + B
1763 1763 #if C
1764 1764 $ [ $V = C ]
1765 1765 #endif
1766 1766
1767 1767 ERROR: test-cases-abc.t#B output changed
1768 1768 !.
1769 1769 Failed test-cases-abc.t#B: output changed
1770 1770 # Ran 2 tests, 0 skipped, 1 failed.
1771 1771 python hash seed: * (glob)
1772 1772 [1]
1773 1773
1774 1774 Support ignoring invalid test cases
1775 1775
1776 1776 $ rt test-cases-abc.t#B test-cases-abc.t#D
1777 1777 running 1 tests using 1 parallel processes
1778 1778
1779 1779 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1780 1780 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1781 1781 @@ -7,7 +7,7 @@
1782 1782 $ V=C
1783 1783 #endif
1784 1784 $ echo $V | sed 's/A/C/'
1785 1785 - C
1786 1786 + B
1787 1787 #if C
1788 1788 $ [ $V = C ]
1789 1789 #endif
1790 1790
1791 1791 ERROR: test-cases-abc.t#B output changed
1792 1792 !
1793 1793 Failed test-cases-abc.t#B: output changed
1794 1794 # Ran 1 tests, 0 skipped, 1 failed.
1795 1795 python hash seed: * (glob)
1796 1796 [1]
1797 1797
1798 1798 Support running complex test case names
1799 1799
1800 1800 $ cat > test-cases-advanced-cases.t <<'EOF'
1801 1801 > #testcases simple case-with-dashes casewith_-.chars
1802 1802 > $ echo $TESTCASE
1803 1803 > simple
1804 1804 > EOF
1805 1805
1806 1806 $ cat test-cases-advanced-cases.t
1807 1807 #testcases simple case-with-dashes casewith_-.chars
1808 1808 $ echo $TESTCASE
1809 1809 simple
1810 1810
1811 1811 $ rt test-cases-advanced-cases.t
1812 1812 running 3 tests using 1 parallel processes
1813 1813
1814 1814 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1815 1815 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
1816 1816 @@ -1,3 +1,3 @@
1817 1817 #testcases simple case-with-dashes casewith_-.chars
1818 1818 $ echo $TESTCASE
1819 1819 - simple
1820 1820 + case-with-dashes
1821 1821
1822 1822 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1823 1823 !
1824 1824 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1825 1825 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
1826 1826 @@ -1,3 +1,3 @@
1827 1827 #testcases simple case-with-dashes casewith_-.chars
1828 1828 $ echo $TESTCASE
1829 1829 - simple
1830 1830 + casewith_-.chars
1831 1831
1832 1832 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1833 1833 !.
1834 1834 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1835 1835 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1836 1836 # Ran 3 tests, 0 skipped, 2 failed.
1837 1837 python hash seed: * (glob)
1838 1838 [1]
1839 1839
1840 1840 $ rt "test-cases-advanced-cases.t#case-with-dashes"
1841 1841 running 1 tests using 1 parallel processes
1842 1842
1843 1843 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1844 1844 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
1845 1845 @@ -1,3 +1,3 @@
1846 1846 #testcases simple case-with-dashes casewith_-.chars
1847 1847 $ echo $TESTCASE
1848 1848 - simple
1849 1849 + case-with-dashes
1850 1850
1851 1851 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1852 1852 !
1853 1853 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1854 1854 # Ran 1 tests, 0 skipped, 1 failed.
1855 1855 python hash seed: * (glob)
1856 1856 [1]
1857 1857
1858 1858 $ rt "test-cases-advanced-cases.t#casewith_-.chars"
1859 1859 running 1 tests using 1 parallel processes
1860 1860
1861 1861 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1862 1862 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
1863 1863 @@ -1,3 +1,3 @@
1864 1864 #testcases simple case-with-dashes casewith_-.chars
1865 1865 $ echo $TESTCASE
1866 1866 - simple
1867 1867 + casewith_-.chars
1868 1868
1869 1869 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1870 1870 !
1871 1871 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1872 1872 # Ran 1 tests, 0 skipped, 1 failed.
1873 1873 python hash seed: * (glob)
1874 1874 [1]
1875 1875
1876 1876 Test automatic pattern replacement
1877 1877 ==================================
1878 1878
1879 1879 $ cat << EOF >> common-pattern.py
1880 1880 > substitutions = [
1881 1881 > (br'foo-(.*)\\b',
1882 1882 > br'\$XXX=\\1\$'),
1883 1883 > (br'bar\\n',
1884 1884 > br'\$YYY$\\n'),
1885 1885 > ]
1886 1886 > EOF
1887 1887
1888 1888 $ cat << EOF >> test-substitution.t
1889 1889 > $ echo foo-12
1890 1890 > \$XXX=12$
1891 1891 > $ echo foo-42
1892 1892 > \$XXX=42$
1893 1893 > $ echo bar prior
1894 1894 > bar prior
1895 1895 > $ echo lastbar
1896 1896 > last\$YYY$
1897 1897 > $ echo foo-bar foo-baz
1898 1898 > EOF
1899 1899
1900 1900 $ rt test-substitution.t
1901 1901 running 1 tests using 1 parallel processes
1902 1902
1903 1903 --- $TESTTMP/anothertests/cases/test-substitution.t
1904 1904 +++ $TESTTMP/anothertests/cases/test-substitution.t.err
1905 1905 @@ -7,3 +7,4 @@
1906 1906 $ echo lastbar
1907 1907 last$YYY$
1908 1908 $ echo foo-bar foo-baz
1909 1909 + $XXX=bar foo-baz$
1910 1910
1911 1911 ERROR: test-substitution.t output changed
1912 1912 !
1913 1913 Failed test-substitution.t: output changed
1914 1914 # Ran 1 tests, 0 skipped, 1 failed.
1915 1915 python hash seed: * (glob)
1916 1916 [1]
1917 1917
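The mechanism behind the run above: 'substitutions' in common-pattern.py
is a list of (pattern, replacement) byte-regex pairs applied to every
line of actual output before it is compared against the expected output,
which is why 'foo-12' appears as '$XXX=12$' on both sides of the test.
A standalone sketch of that normalization:

  import re

  substitutions = [
      (br'foo-(.*)\b', br'$XXX=\1$'),
      (br'bar\n', br'$YYY$\n'),
  ]

  def normalize(line):
      # Rewrite one line of raw test output before comparison.
      for frompat, topat in substitutions:
          line = re.sub(frompat, topat, line)
      return line

  # normalize(b'foo-12\n')    -> b'$XXX=12$\n'
  # normalize(b'bar prior\n') is unchanged: br'bar\n' needs a newline
  # directly after 'bar', which is why 'bar prior' survives above.
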
1918 1918 --extra-config-opt works
1919 1919
1920 1920 $ cat << EOF >> test-config-opt.t
1921 1921 > $ hg init test-config-opt
1922 1922 > $ hg -R test-config-opt purge
1923 1923 > EOF
1924 1924
1925 1925 $ rt --extra-config-opt extensions.purge= test-config-opt.t
1926 1926 running 1 tests using 1 parallel processes
1927 1927 .
1928 1928 # Ran 1 tests, 0 skipped, 0 failed.
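
--extra-config-opt takes 'section.name=value' strings and makes them
effective for every hg invocation inside every test, which is how the
purge extension is enabled above without the test doing it. One
plausible way to wire that up is appending to the per-test hgrc — a
sketch only; run-tests.py's actual plumbing may differ:

  def append_extra_config(hgrcpath, extra_opts):
      # extra_opts look like 'extensions.purge='; group by section.
      with open(hgrcpath, 'a') as fp:
          for opt in extra_opts:
              section, item = opt.split('.', 1)
              fp.write('[%s]\n%s\n' % (section, item))

  # append_extra_config(hgrc, ['extensions.purge=']) appends:
  #   [extensions]
  #   purge=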