run-tests: sort the skip, failure and error lists in the final output...
Matt Harbison
r41532:bb98870a default draft
@@ -1,3305 +1,3308
1 1 #!/usr/bin/env python
2 2 #
3 3 # run-tests.py - Run a set of tests on Mercurial
4 4 #
5 5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 # Modifying this script is tricky because it has many modes:
11 11 # - serial (default) vs parallel (-jN, N > 1)
12 12 # - no coverage (default) vs coverage (-c, -C, -s)
13 13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 14 # - tests are a mix of shell scripts and Python scripts
15 15 #
16 16 # If you change this script, it is recommended that you ensure you
17 17 # haven't broken it by running it in various modes with a representative
18 18 # sample of test scripts. For example:
19 19 #
20 20 # 1) serial, no coverage, temp install:
21 21 # ./run-tests.py test-s*
22 22 # 2) serial, no coverage, local hg:
23 23 # ./run-tests.py --local test-s*
24 24 # 3) serial, coverage, temp install:
25 25 # ./run-tests.py -c test-s*
26 26 # 4) serial, coverage, local hg:
27 27 # ./run-tests.py -c --local test-s* # unsupported
28 28 # 5) parallel, no coverage, temp install:
29 29 # ./run-tests.py -j2 test-s*
30 30 # 6) parallel, no coverage, local hg:
31 31 # ./run-tests.py -j2 --local test-s*
32 32 # 7) parallel, coverage, temp install:
33 33 # ./run-tests.py -j2 -c test-s* # currently broken
34 34 # 8) parallel, coverage, local install:
35 35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 36 # 9) parallel, custom tmp dir:
37 37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 38 # 10) parallel, pure, tests that call run-tests:
39 39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 40 #
41 41 # (You could use any subset of the tests: test-s* happens to match
42 42 # enough that it's worth doing parallel runs, few enough that it
43 43 # completes fairly quickly, includes both shell and Python scripts, and
44 44 # includes some scripts that run daemon processes.)
45 45
46 46 from __future__ import absolute_import, print_function
47 47
48 48 import argparse
49 49 import collections
50 50 import difflib
51 51 import distutils.version as version
52 52 import errno
53 53 import json
54 54 import multiprocessing
55 55 import os
56 56 import random
57 57 import re
58 58 import shutil
59 59 import signal
60 60 import socket
61 61 import subprocess
62 62 import sys
63 63 import sysconfig
64 64 import tempfile
65 65 import threading
66 66 import time
67 67 import unittest
68 68 import uuid
69 69 import xml.dom.minidom as minidom
70 70
71 71 try:
72 72 import Queue as queue
73 73 except ImportError:
74 74 import queue
75 75
76 76 try:
77 77 import shlex
78 78 shellquote = shlex.quote
79 79 except (ImportError, AttributeError):
80 80 import pipes
81 81 shellquote = pipes.quote
82 82
83 83 if os.environ.get('RTUNICODEPEDANTRY', False):
84 84 try:
85 85 reload(sys)
86 86 sys.setdefaultencoding("undefined")
87 87 except NameError:
88 88 pass
89 89
90 90 processlock = threading.Lock()
91 91
92 92 pygmentspresent = False
93 93 # ANSI color is unsupported prior to Windows 10
94 94 if os.name != 'nt':
95 95 try: # is pygments installed
96 96 import pygments
97 97 import pygments.lexers as lexers
98 98 import pygments.lexer as lexer
99 99 import pygments.formatters as formatters
100 100 import pygments.token as token
101 101 import pygments.style as style
102 102 pygmentspresent = True
103 103 difflexer = lexers.DiffLexer()
104 104 terminal256formatter = formatters.Terminal256Formatter()
105 105 except ImportError:
106 106 pass
107 107
108 108 if pygmentspresent:
109 109 class TestRunnerStyle(style.Style):
110 110 default_style = ""
111 111 skipped = token.string_to_tokentype("Token.Generic.Skipped")
112 112 failed = token.string_to_tokentype("Token.Generic.Failed")
113 113 skippedname = token.string_to_tokentype("Token.Generic.SName")
114 114 failedname = token.string_to_tokentype("Token.Generic.FName")
115 115 styles = {
116 116 skipped: '#e5e5e5',
117 117 skippedname: '#00ffff',
118 118 failed: '#7f0000',
119 119 failedname: '#ff0000',
120 120 }
121 121
122 122 class TestRunnerLexer(lexer.RegexLexer):
123 123 testpattern = r'[\w-]+\.(t|py)(#[a-zA-Z0-9_\-\.]+)?'
124 124 tokens = {
125 125 'root': [
126 126 (r'^Skipped', token.Generic.Skipped, 'skipped'),
127 127 (r'^Failed ', token.Generic.Failed, 'failed'),
128 128 (r'^ERROR: ', token.Generic.Failed, 'failed'),
129 129 ],
130 130 'skipped': [
131 131 (testpattern, token.Generic.SName),
132 132 (r':.*', token.Generic.Skipped),
133 133 ],
134 134 'failed': [
135 135 (testpattern, token.Generic.FName),
136 136 (r'(:| ).*', token.Generic.Failed),
137 137 ]
138 138 }
139 139
140 140 runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
141 141 runnerlexer = TestRunnerLexer()
142 142
143 143 origenviron = os.environ.copy()
144 144
145 145 if sys.version_info > (3, 5, 0):
146 146 PYTHON3 = True
147 147 xrange = range # we use xrange in one place, and we'd rather not use range
148 148 def _bytespath(p):
149 149 if p is None:
150 150 return p
151 151 return p.encode('utf-8')
152 152
153 153 def _strpath(p):
154 154 if p is None:
155 155 return p
156 156 return p.decode('utf-8')
157 157
158 158 osenvironb = getattr(os, 'environb', None)
159 159 if osenvironb is None:
160 160 # Windows lacks os.environb, for instance. A proxy over the real thing
161 161 # instead of a copy allows the environment to be updated via bytes on
162 162 # all platforms.
163 163 class environbytes(object):
164 164 def __init__(self, strenv):
165 165 self.__len__ = strenv.__len__
166 166 self.clear = strenv.clear
167 167 self._strenv = strenv
168 168 def __getitem__(self, k):
169 169 v = self._strenv.__getitem__(_strpath(k))
170 170 return _bytespath(v)
171 171 def __setitem__(self, k, v):
172 172 self._strenv.__setitem__(_strpath(k), _strpath(v))
173 173 def __delitem__(self, k):
174 174 self._strenv.__delitem__(_strpath(k))
175 175 def __contains__(self, k):
176 176 return self._strenv.__contains__(_strpath(k))
177 177 def __iter__(self):
178 178 return iter([_bytespath(k) for k in iter(self._strenv)])
179 179 def get(self, k, default=None):
180 180 v = self._strenv.get(_strpath(k), _strpath(default))
181 181 return _bytespath(v)
182 182 def pop(self, k, default=None):
183 183 v = self._strenv.pop(_strpath(k), _strpath(default))
184 184 return _bytespath(v)
185 185
186 186 osenvironb = environbytes(os.environ)
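        # A hypothetical usage sketch (the variable name below is an example
        # only, not something the harness sets): the proxy converts keys and
        # values so byte-oriented code can mutate the real str environment.
        #
        #   osenvironb[b'HGTEST_DEMO'] = b'1'
        #   assert os.environ['HGTEST_DEMO'] == '1'
        #   del osenvironb[b'HGTEST_DEMO']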
187 187
188 188 getcwdb = getattr(os, 'getcwdb')
189 189 if not getcwdb or os.name == 'nt':
190 190 getcwdb = lambda: _bytespath(os.getcwd())
191 191
192 192 elif sys.version_info >= (3, 0, 0):
193 193 print('%s is only supported on Python 3.5+ and 2.7, not %s' %
194 194 (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
195 195	 sys.exit(70) # EX_SOFTWARE from `man 3 sysexits`
196 196 else:
197 197 PYTHON3 = False
198 198
199 199 # In python 2.x, path operations are generally done using
200 200 # bytestrings by default, so we don't have to do any extra
201 201 # fiddling there. We define the wrapper functions anyway just to
202 202 # help keep code consistent between platforms.
203 203 def _bytespath(p):
204 204 return p
205 205
206 206 _strpath = _bytespath
207 207 osenvironb = os.environ
208 208 getcwdb = os.getcwd
209 209
210 210 # For Windows support
211 211 wifexited = getattr(os, "WIFEXITED", lambda x: False)
212 212
213 213 # Whether to use IPv6
214 214 def checksocketfamily(name, port=20058):
215 215 """return true if we can listen on localhost using family=name
216 216
217 217 name should be either 'AF_INET', or 'AF_INET6'.
218 218	 a port already being in use is okay - EADDRINUSE is considered successful.
219 219 """
220 220 family = getattr(socket, name, None)
221 221 if family is None:
222 222 return False
223 223 try:
224 224 s = socket.socket(family, socket.SOCK_STREAM)
225 225 s.bind(('localhost', port))
226 226 s.close()
227 227 return True
228 228 except socket.error as exc:
229 229 if exc.errno == errno.EADDRINUSE:
230 230 return True
231 231 elif exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
232 232 return False
233 233 else:
234 234 raise
235 235 else:
236 236 return False
237 237
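# For example (a sketch; actual results depend on the host's network
# configuration):
#
#   checksocketfamily('AF_INET')    # usually True
#   checksocketfamily('AF_INET6')   # True only when ::1 is bindable
#   checksocketfamily('AF_BOGUS')   # False: socket has no such attribute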
238 238 # useipv6 will be set by parseargs
239 239 useipv6 = None
240 240
241 241 def checkportisavailable(port):
242 242 """return true if a port seems free to bind on localhost"""
243 243 if useipv6:
244 244 family = socket.AF_INET6
245 245 else:
246 246 family = socket.AF_INET
247 247 try:
248 248 s = socket.socket(family, socket.SOCK_STREAM)
249 249 s.bind(('localhost', port))
250 250 s.close()
251 251 return True
252 252 except socket.error as exc:
253 253 if exc.errno not in (errno.EADDRINUSE, errno.EADDRNOTAVAIL,
254 254 errno.EPROTONOSUPPORT):
255 255 raise
256 256 return False
257 257
258 258 closefds = os.name == 'posix'
259 259 def Popen4(cmd, wd, timeout, env=None):
260 260 processlock.acquire()
261 261 p = subprocess.Popen(_strpath(cmd), shell=True, bufsize=-1,
262 262 cwd=_strpath(wd), env=env,
263 263 close_fds=closefds,
264 264 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
265 265 stderr=subprocess.STDOUT)
266 266 processlock.release()
267 267
268 268 p.fromchild = p.stdout
269 269 p.tochild = p.stdin
270 270 p.childerr = p.stderr
271 271
272 272 p.timeout = False
273 273 if timeout:
274 274 def t():
275 275 start = time.time()
276 276 while time.time() - start < timeout and p.returncode is None:
277 277 time.sleep(.1)
278 278 p.timeout = True
279 279 if p.returncode is None:
280 280 terminate(p)
281 281 threading.Thread(target=t).start()
282 282
283 283 return p
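# Hypothetical usage sketch (not executed by the harness); note that the
# watchdog thread only sees a non-None returncode once the consumer calls
# wait(), so a stuck child is terminated after `timeout` seconds:
#
#   p = Popen4(b'sleep 60', wd, timeout=5)   # wd: some bytes work dir
#   p.tochild.close()
#   out = p.fromchild.read()   # unblocks when the watchdog terminates it
#   p.wait()
#   assert p.timeout           # callers report 'timeout' instead of a code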
284 284
285 285 PYTHON = _bytespath(sys.executable.replace('\\', '/'))
286 286 IMPL_PATH = b'PYTHONPATH'
287 287 if 'java' in sys.platform:
288 288 IMPL_PATH = b'JYTHONPATH'
289 289
290 290 defaults = {
291 291 'jobs': ('HGTEST_JOBS', multiprocessing.cpu_count()),
292 292 'timeout': ('HGTEST_TIMEOUT', 180),
293 293 'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500),
294 294 'port': ('HGTEST_PORT', 20059),
295 295 'shell': ('HGTEST_SHELL', 'sh'),
296 296 }
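# Each entry above can be overridden from the environment; getparser()
# coerces the override with the type of the hard-coded default. An
# illustrative shell invocation:
#
#   HGTEST_JOBS=4 HGTEST_TIMEOUT=360 ./run-tests.py test-commit.t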
297 297
298 298 def canonpath(path):
299 299 return os.path.realpath(os.path.expanduser(path))
300 300
301 301 def parselistfiles(files, listtype, warn=True):
302 302 entries = dict()
303 303 for filename in files:
304 304 try:
305 305 path = os.path.expanduser(os.path.expandvars(filename))
306 306 f = open(path, "rb")
307 307 except IOError as err:
308 308 if err.errno != errno.ENOENT:
309 309 raise
310 310 if warn:
311 311 print("warning: no such %s file: %s" % (listtype, filename))
312 312 continue
313 313
314 314 for line in f.readlines():
315 315 line = line.split(b'#', 1)[0].strip()
316 316 if line:
317 317 entries[line] = filename
318 318
319 319 f.close()
320 320 return entries
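# A blacklist/whitelist file lists one test name per line, with '#'
# starting a comment. A minimal sketch of such a file:
#
#   # tests that are too slow on this platform
#   test-largefiles.t
#   test-run-tests.t  # flaky here
#
# parselistfiles() maps each listed name to the file it came from.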
321 321
322 322 def parsettestcases(path):
323 323 """read a .t test file, return a set of test case names
324 324
325 325 If path does not exist, return an empty set.
326 326 """
327 327 cases = []
328 328 try:
329 329 with open(path, 'rb') as f:
330 330 for l in f:
331 331 if l.startswith(b'#testcases '):
332 332 cases.append(sorted(l[11:].split()))
333 333 except IOError as ex:
334 334 if ex.errno != errno.ENOENT:
335 335 raise
336 336 return cases
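# For example, a .t file containing the directives
#
#   #testcases simple advanced
#   #testcases flat tree
#
# yields [[b'advanced', b'simple'], [b'flat', b'tree']]: one sorted group
# per directive, which the harness later combines into concrete cases.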
337 337
338 338 def getparser():
339 339 """Obtain the OptionParser used by the CLI."""
340 340 parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]')
341 341
342 342 selection = parser.add_argument_group('Test Selection')
343 343 selection.add_argument('--allow-slow-tests', action='store_true',
344 344 help='allow extremely slow tests')
345 345 selection.add_argument("--blacklist", action="append",
346 346 help="skip tests listed in the specified blacklist file")
347 347 selection.add_argument("--changed",
348 348 help="run tests that are changed in parent rev or working directory")
349 349 selection.add_argument("-k", "--keywords",
350 350 help="run tests matching keywords")
351 351 selection.add_argument("-r", "--retest", action="store_true",
352 352 help = "retest failed tests")
353 353 selection.add_argument("--test-list", action="append",
354 354 help="read tests to run from the specified file")
355 355 selection.add_argument("--whitelist", action="append",
356 356 help="always run tests listed in the specified whitelist file")
357 357 selection.add_argument('tests', metavar='TESTS', nargs='*',
358 358 help='Tests to run')
359 359
360 360 harness = parser.add_argument_group('Test Harness Behavior')
361 361 harness.add_argument('--bisect-repo',
362 362 metavar='bisect_repo',
363 363 help=("Path of a repo to bisect. Use together with "
364 364 "--known-good-rev"))
365 365 harness.add_argument("-d", "--debug", action="store_true",
366 366 help="debug mode: write output of test scripts to console"
367 367 " rather than capturing and diffing it (disables timeout)")
368 368 harness.add_argument("-f", "--first", action="store_true",
369 369 help="exit on the first test failure")
370 370 harness.add_argument("-i", "--interactive", action="store_true",
371 371 help="prompt to accept changed output")
372 372 harness.add_argument("-j", "--jobs", type=int,
373 373 help="number of jobs to run in parallel"
374 374 " (default: $%s or %d)" % defaults['jobs'])
375 375 harness.add_argument("--keep-tmpdir", action="store_true",
376 376 help="keep temporary directory after running tests")
377 377 harness.add_argument('--known-good-rev',
378 378 metavar="known_good_rev",
379 379 help=("Automatically bisect any failures using this "
380 380 "revision as a known-good revision."))
381 381 harness.add_argument("--list-tests", action="store_true",
382 382 help="list tests instead of running them")
383 383 harness.add_argument("--loop", action="store_true",
384 384 help="loop tests repeatedly")
385 385 harness.add_argument('--random', action="store_true",
386 386 help='run tests in random order')
387 387 harness.add_argument('--order-by-runtime', action="store_true",
388 388 help='run slowest tests first, according to .testtimes')
389 389 harness.add_argument("-p", "--port", type=int,
390 390 help="port on which servers should listen"
391 391 " (default: $%s or %d)" % defaults['port'])
392 392 harness.add_argument('--profile-runner', action='store_true',
393 393 help='run statprof on run-tests')
394 394 harness.add_argument("-R", "--restart", action="store_true",
395 395 help="restart at last error")
396 396 harness.add_argument("--runs-per-test", type=int, dest="runs_per_test",
397 397 help="run each test N times (default=1)", default=1)
398 398 harness.add_argument("--shell",
399 399 help="shell to use (default: $%s or %s)" % defaults['shell'])
400 400 harness.add_argument('--showchannels', action='store_true',
401 401 help='show scheduling channels')
402 402 harness.add_argument("--slowtimeout", type=int,
403 403 help="kill errant slow tests after SLOWTIMEOUT seconds"
404 404 " (default: $%s or %d)" % defaults['slowtimeout'])
405 405 harness.add_argument("-t", "--timeout", type=int,
406 406 help="kill errant tests after TIMEOUT seconds"
407 407 " (default: $%s or %d)" % defaults['timeout'])
408 408 harness.add_argument("--tmpdir",
409 409 help="run tests in the given temporary directory"
410 410 " (implies --keep-tmpdir)")
411 411 harness.add_argument("-v", "--verbose", action="store_true",
412 412 help="output verbose messages")
413 413
414 414 hgconf = parser.add_argument_group('Mercurial Configuration')
415 415 hgconf.add_argument("--chg", action="store_true",
416 416 help="install and use chg wrapper in place of hg")
417 417 hgconf.add_argument("--compiler",
418 418 help="compiler to build with")
419 419 hgconf.add_argument('--extra-config-opt', action="append", default=[],
420 420 help='set the given config opt in the test hgrc')
421 421 hgconf.add_argument("-l", "--local", action="store_true",
422 422 help="shortcut for --with-hg=<testdir>/../hg, "
423 423 "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set")
424 424 hgconf.add_argument("--ipv6", action="store_true",
425 425 help="prefer IPv6 to IPv4 for network related tests")
426 426 hgconf.add_argument("--pure", action="store_true",
427 427 help="use pure Python code instead of C extensions")
428 428 hgconf.add_argument("-3", "--py3-warnings", action="store_true",
429 429 help="enable Py3k warnings on Python 2.7+")
430 430 hgconf.add_argument("--with-chg", metavar="CHG",
431 431 help="use specified chg wrapper in place of hg")
432 432 hgconf.add_argument("--with-hg",
433 433 metavar="HG",
434 434 help="test using specified hg script rather than a "
435 435 "temporary installation")
436 436
437 437 reporting = parser.add_argument_group('Results Reporting')
438 438 reporting.add_argument("-C", "--annotate", action="store_true",
439 439 help="output files annotated with coverage")
440 440 reporting.add_argument("--color", choices=["always", "auto", "never"],
441 441 default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
442 442 help="colorisation: always|auto|never (default: auto)")
443 443 reporting.add_argument("-c", "--cover", action="store_true",
444 444 help="print a test coverage report")
445 445 reporting.add_argument('--exceptions', action='store_true',
446 446 help='log all exceptions and generate an exception report')
447 447 reporting.add_argument("-H", "--htmlcov", action="store_true",
448 448 help="create an HTML report of the coverage of the files")
449 449 reporting.add_argument("--json", action="store_true",
450 450 help="store test result data in 'report.json' file")
451 451 reporting.add_argument("--outputdir",
452 452 help="directory to write error logs to (default=test directory)")
453 453 reporting.add_argument("-n", "--nodiff", action="store_true",
454 454 help="skip showing test changes")
455 455 reporting.add_argument("-S", "--noskips", action="store_true",
456 456 help="don't report skip tests verbosely")
457 457 reporting.add_argument("--time", action="store_true",
458 458 help="time how long each test takes")
459 459 reporting.add_argument("--view",
460 460 help="external diff viewer")
461 461 reporting.add_argument("--xunit",
462 462 help="record xunit results at specified path")
463 463
464 464 for option, (envvar, default) in defaults.items():
465 465 defaults[option] = type(default)(os.environ.get(envvar, default))
466 466 parser.set_defaults(**defaults)
467 467
468 468 return parser
469 469
470 470 def parseargs(args, parser):
471 471 """Parse arguments with our OptionParser and validate results."""
472 472 options = parser.parse_args(args)
473 473
474 474 # jython is always pure
475 475 if 'java' in sys.platform or '__pypy__' in sys.modules:
476 476 options.pure = True
477 477
478 478 if options.with_hg:
479 479 options.with_hg = canonpath(_bytespath(options.with_hg))
480 480 if not (os.path.isfile(options.with_hg) and
481 481 os.access(options.with_hg, os.X_OK)):
482 482 parser.error('--with-hg must specify an executable hg script')
483 483 if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
484 484 sys.stderr.write('warning: --with-hg should specify an hg script\n')
485 485 sys.stderr.flush()
486 486 if options.local:
487 487 testdir = os.path.dirname(_bytespath(canonpath(sys.argv[0])))
488 488 reporootdir = os.path.dirname(testdir)
489 489 pathandattrs = [(b'hg', 'with_hg')]
490 490 if options.chg:
491 491 pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
492 492 for relpath, attr in pathandattrs:
493 493 binpath = os.path.join(reporootdir, relpath)
494 494 if os.name != 'nt' and not os.access(binpath, os.X_OK):
495 495 parser.error('--local specified, but %r not found or '
496 496 'not executable' % binpath)
497 497 setattr(options, attr, binpath)
498 498
499 499 if (options.chg or options.with_chg) and os.name == 'nt':
500 500 parser.error('chg does not work on %s' % os.name)
501 501 if options.with_chg:
502 502 options.chg = False # no installation to temporary location
503 503 options.with_chg = canonpath(_bytespath(options.with_chg))
504 504 if not (os.path.isfile(options.with_chg) and
505 505 os.access(options.with_chg, os.X_OK)):
506 506 parser.error('--with-chg must specify a chg executable')
507 507 if options.chg and options.with_hg:
508 508 # chg shares installation location with hg
509 509 parser.error('--chg does not work when --with-hg is specified '
510 510 '(use --with-chg instead)')
511 511
512 512 if options.color == 'always' and not pygmentspresent:
513 513 sys.stderr.write('warning: --color=always ignored because '
514 514 'pygments is not installed\n')
515 515
516 516 if options.bisect_repo and not options.known_good_rev:
517 517 parser.error("--bisect-repo cannot be used without --known-good-rev")
518 518
519 519 global useipv6
520 520 if options.ipv6:
521 521 useipv6 = checksocketfamily('AF_INET6')
522 522 else:
523 523 # only use IPv6 if IPv4 is unavailable and IPv6 is available
524 524 useipv6 = ((not checksocketfamily('AF_INET'))
525 525 and checksocketfamily('AF_INET6'))
526 526
527 527 options.anycoverage = options.cover or options.annotate or options.htmlcov
528 528 if options.anycoverage:
529 529 try:
530 530 import coverage
531 531 covver = version.StrictVersion(coverage.__version__).version
532 532 if covver < (3, 3):
533 533 parser.error('coverage options require coverage 3.3 or later')
534 534 except ImportError:
535 535 parser.error('coverage options now require the coverage package')
536 536
537 537 if options.anycoverage and options.local:
538 538 # this needs some path mangling somewhere, I guess
539 539 parser.error("sorry, coverage options do not work when --local "
540 540 "is specified")
541 541
542 542 if options.anycoverage and options.with_hg:
543 543 parser.error("sorry, coverage options do not work when --with-hg "
544 544 "is specified")
545 545
546 546 global verbose
547 547 if options.verbose:
548 548 verbose = ''
549 549
550 550 if options.tmpdir:
551 551 options.tmpdir = canonpath(options.tmpdir)
552 552
553 553 if options.jobs < 1:
554 554 parser.error('--jobs must be positive')
555 555 if options.interactive and options.debug:
556 556 parser.error("-i/--interactive and -d/--debug are incompatible")
557 557 if options.debug:
558 558 if options.timeout != defaults['timeout']:
559 559 sys.stderr.write(
560 560 'warning: --timeout option ignored with --debug\n')
561 561 if options.slowtimeout != defaults['slowtimeout']:
562 562 sys.stderr.write(
563 563 'warning: --slowtimeout option ignored with --debug\n')
564 564 options.timeout = 0
565 565 options.slowtimeout = 0
566 566 if options.py3_warnings:
567 567 if PYTHON3:
568 568 parser.error(
569 569 '--py3-warnings can only be used on Python 2.7')
570 570
571 571 if options.blacklist:
572 572 options.blacklist = parselistfiles(options.blacklist, 'blacklist')
573 573 if options.whitelist:
574 574 options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
575 575 else:
576 576 options.whitelisted = {}
577 577
578 578 if options.showchannels:
579 579 options.nodiff = True
580 580
581 581 return options
582 582
583 583 def rename(src, dst):
584 584 """Like os.rename(), trade atomicity and opened files friendliness
585 585 for existing destination support.
586 586 """
587 587 shutil.copy(src, dst)
588 588 os.remove(src)
589 589
590 590 def makecleanable(path):
591 591 """Try to fix directory permission recursively so that the entire tree
592 592 can be deleted"""
593 593 for dirpath, dirnames, _filenames in os.walk(path, topdown=True):
594 594 for d in dirnames:
595 595 p = os.path.join(dirpath, d)
596 596 try:
597 597 os.chmod(p, os.stat(p).st_mode & 0o777 | 0o700) # chmod u+rwx
598 598 except OSError:
599 599 pass
600 600
601 601 _unified_diff = difflib.unified_diff
602 602 if PYTHON3:
603 603 import functools
604 604 _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
605 605
606 606 def getdiff(expected, output, ref, err):
607 607 servefail = False
608 608 lines = []
609 609 for line in _unified_diff(expected, output, ref, err):
610 610 if line.startswith(b'+++') or line.startswith(b'---'):
611 611 line = line.replace(b'\\', b'/')
612 612 if line.endswith(b' \n'):
613 613 line = line[:-2] + b'\n'
614 614 lines.append(line)
615 615 if not servefail and line.startswith(
616 616 b'+ abort: child process failed to start'):
617 617 servefail = True
618 618
619 619 return servefail, lines
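# A sketch of the contract (values are hypothetical):
#
#   servefail, lines = getdiff([b'a\n'], [b'b\n'], b'test-x.t', b'test-x.t.err')
#   # lines is a unified diff as bytes, with backslashes in the +++/---
#   # headers normalized to '/'; servefail flags a failed server start.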
620 620
621 621 verbose = False
622 622 def vlog(*msg):
623 623 """Log only when in verbose mode."""
624 624 if verbose is False:
625 625 return
626 626
627 627 return log(*msg)
628 628
629 629 # Bytes that break XML even in a CDATA block: control characters 0-31
630 630 # sans \t, \n and \r
631 631 CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")
632 632
633 633	 # Match feature-conditionalized output lines of the following form, capturing
634 634	 # the feature list in group 2 and the preceding line output in group 1:
635 635 #
636 636 # output..output (feature !)\n
637 637 optline = re.compile(b'(.*) \((.+?) !\)\n$')
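# For example, the expected-output line
#
#   b'phase: draft (obsstore-on !)\n'
#
# matches with group 1 = b'phase: draft' and group 2 = b'obsstore-on'.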
638 638
639 639 def cdatasafe(data):
640 640 """Make a string safe to include in a CDATA block.
641 641
642 642 Certain control characters are illegal in a CDATA block, and
643 643 there's no way to include a ]]> in a CDATA either. This function
644 644 replaces illegal bytes with ? and adds a space between the ]] so
645 645 that it won't break the CDATA block.
646 646 """
647 647 return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>')
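    # A concrete example covering both substitutions:
    #
    #   cdatasafe(b'a\x07b]]>c')  ->  b'a?b] ]>c'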
648 648
649 649 def log(*msg):
650 650 """Log something to stdout.
651 651
652 652 Arguments are strings to print.
653 653 """
654 654 with iolock:
655 655 if verbose:
656 656 print(verbose, end=' ')
657 657 for m in msg:
658 658 print(m, end=' ')
659 659 print()
660 660 sys.stdout.flush()
661 661
662 662 def highlightdiff(line, color):
663 663 if not color:
664 664 return line
665 665 assert pygmentspresent
666 666 return pygments.highlight(line.decode('latin1'), difflexer,
667 667 terminal256formatter).encode('latin1')
668 668
669 669 def highlightmsg(msg, color):
670 670 if not color:
671 671 return msg
672 672 assert pygmentspresent
673 673 return pygments.highlight(msg, runnerlexer, runnerformatter)
674 674
675 675 def terminate(proc):
676 676 """Terminate subprocess"""
677 677 vlog('# Terminating process %d' % proc.pid)
678 678 try:
679 679 proc.terminate()
680 680 except OSError:
681 681 pass
682 682
683 683 def killdaemons(pidfile):
684 684 import killdaemons as killmod
685 685 return killmod.killdaemons(pidfile, tryhard=False, remove=True,
686 686 logfn=vlog)
687 687
688 688 class Test(unittest.TestCase):
689 689 """Encapsulates a single, runnable test.
690 690
691 691 While this class conforms to the unittest.TestCase API, it differs in that
692 692 instances need to be instantiated manually. (Typically, unittest.TestCase
693 693 classes are instantiated automatically by scanning modules.)
694 694 """
695 695
696 696 # Status code reserved for skipped tests (used by hghave).
697 697 SKIPPED_STATUS = 80
698 698
699 699 def __init__(self, path, outputdir, tmpdir, keeptmpdir=False,
700 700 debug=False,
701 701 first=False,
702 702 timeout=None,
703 703 startport=None, extraconfigopts=None,
704 704 py3warnings=False, shell=None, hgcommand=None,
705 705 slowtimeout=None, usechg=False,
706 706 useipv6=False):
707 707 """Create a test from parameters.
708 708
709 709 path is the full path to the file defining the test.
710 710
711 711 tmpdir is the main temporary directory to use for this test.
712 712
713 713 keeptmpdir determines whether to keep the test's temporary directory
714 714 after execution. It defaults to removal (False).
715 715
716 716 debug mode will make the test execute verbosely, with unfiltered
717 717 output.
718 718
719 719 timeout controls the maximum run time of the test. It is ignored when
720 720 debug is True. See slowtimeout for tests with #require slow.
721 721
722 722 slowtimeout overrides timeout if the test has #require slow.
723 723
724 724 startport controls the starting port number to use for this test. Each
725 725 test will reserve 3 port numbers for execution. It is the caller's
726 726 responsibility to allocate a non-overlapping port range to Test
727 727 instances.
728 728
729 729 extraconfigopts is an iterable of extra hgrc config options. Values
730 730 must have the form "key=value" (something understood by hgrc). Values
731 731 of the form "foo.key=value" will result in "[foo] key=value".
732 732
733 733 py3warnings enables Py3k warnings.
734 734
735 735 shell is the shell to execute tests in.
736 736 """
737 737 if timeout is None:
738 738 timeout = defaults['timeout']
739 739 if startport is None:
740 740 startport = defaults['port']
741 741 if slowtimeout is None:
742 742 slowtimeout = defaults['slowtimeout']
743 743 self.path = path
744 744 self.bname = os.path.basename(path)
745 745 self.name = _strpath(self.bname)
746 746 self._testdir = os.path.dirname(path)
747 747 self._outputdir = outputdir
748 748 self._tmpname = os.path.basename(path)
749 749 self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)
750 750
751 751 self._threadtmp = tmpdir
752 752 self._keeptmpdir = keeptmpdir
753 753 self._debug = debug
754 754 self._first = first
755 755 self._timeout = timeout
756 756 self._slowtimeout = slowtimeout
757 757 self._startport = startport
758 758 self._extraconfigopts = extraconfigopts or []
759 759 self._py3warnings = py3warnings
760 760 self._shell = _bytespath(shell)
761 761 self._hgcommand = hgcommand or b'hg'
762 762 self._usechg = usechg
763 763 self._useipv6 = useipv6
764 764
765 765 self._aborted = False
766 766 self._daemonpids = []
767 767 self._finished = None
768 768 self._ret = None
769 769 self._out = None
770 770 self._skipped = None
771 771 self._testtmp = None
772 772 self._chgsockdir = None
773 773
774 774 self._refout = self.readrefout()
775 775
776 776 def readrefout(self):
777 777 """read reference output"""
778 778 # If we're not in --debug mode and reference output file exists,
779 779 # check test output against it.
780 780 if self._debug:
781 781 return None # to match "out is None"
782 782 elif os.path.exists(self.refpath):
783 783 with open(self.refpath, 'rb') as f:
784 784 return f.read().splitlines(True)
785 785 else:
786 786 return []
787 787
788 788 # needed to get base class __repr__ running
789 789 @property
790 790 def _testMethodName(self):
791 791 return self.name
792 792
793 793 def __str__(self):
794 794 return self.name
795 795
796 796 def shortDescription(self):
797 797 return self.name
798 798
799 799 def setUp(self):
800 800 """Tasks to perform before run()."""
801 801 self._finished = False
802 802 self._ret = None
803 803 self._out = None
804 804 self._skipped = None
805 805
806 806 try:
807 807 os.mkdir(self._threadtmp)
808 808 except OSError as e:
809 809 if e.errno != errno.EEXIST:
810 810 raise
811 811
812 812 name = self._tmpname
813 813 self._testtmp = os.path.join(self._threadtmp, name)
814 814 os.mkdir(self._testtmp)
815 815
816 816 # Remove any previous output files.
817 817 if os.path.exists(self.errpath):
818 818 try:
819 819 os.remove(self.errpath)
820 820 except OSError as e:
821 821 # We might have raced another test to clean up a .err
822 822 # file, so ignore ENOENT when removing a previous .err
823 823 # file.
824 824 if e.errno != errno.ENOENT:
825 825 raise
826 826
827 827 if self._usechg:
828 828 self._chgsockdir = os.path.join(self._threadtmp,
829 829 b'%s.chgsock' % name)
830 830 os.mkdir(self._chgsockdir)
831 831
832 832 def run(self, result):
833 833 """Run this test and report results against a TestResult instance."""
834 834 # This function is extremely similar to unittest.TestCase.run(). Once
835 835 # we require Python 2.7 (or at least its version of unittest), this
836 836 # function can largely go away.
837 837 self._result = result
838 838 result.startTest(self)
839 839 try:
840 840 try:
841 841 self.setUp()
842 842 except (KeyboardInterrupt, SystemExit):
843 843 self._aborted = True
844 844 raise
845 845 except Exception:
846 846 result.addError(self, sys.exc_info())
847 847 return
848 848
849 849 success = False
850 850 try:
851 851 self.runTest()
852 852 except KeyboardInterrupt:
853 853 self._aborted = True
854 854 raise
855 855 except unittest.SkipTest as e:
856 856 result.addSkip(self, str(e))
857 857 # The base class will have already counted this as a
858 858 # test we "ran", but we want to exclude skipped tests
859 859	 # from the count of tests run.
860 860 result.testsRun -= 1
861 861 except self.failureException as e:
862 862 # This differs from unittest in that we don't capture
863 863 # the stack trace. This is for historical reasons and
864 864 # this decision could be revisited in the future,
865 865 # especially for PythonTest instances.
866 866 if result.addFailure(self, str(e)):
867 867 success = True
868 868 except Exception:
869 869 result.addError(self, sys.exc_info())
870 870 else:
871 871 success = True
872 872
873 873 try:
874 874 self.tearDown()
875 875 except (KeyboardInterrupt, SystemExit):
876 876 self._aborted = True
877 877 raise
878 878 except Exception:
879 879 result.addError(self, sys.exc_info())
880 880 success = False
881 881
882 882 if success:
883 883 result.addSuccess(self)
884 884 finally:
885 885 result.stopTest(self, interrupted=self._aborted)
886 886
887 887 def runTest(self):
888 888 """Run this test instance.
889 889
890 890 This will return a tuple describing the result of the test.
891 891 """
892 892 env = self._getenv()
893 893 self._genrestoreenv(env)
894 894 self._daemonpids.append(env['DAEMON_PIDS'])
895 895 self._createhgrc(env['HGRCPATH'])
896 896
897 897 vlog('# Test', self.name)
898 898
899 899 ret, out = self._run(env)
900 900 self._finished = True
901 901 self._ret = ret
902 902 self._out = out
903 903
904 904 def describe(ret):
905 905 if ret < 0:
906 906 return 'killed by signal: %d' % -ret
907 907 return 'returned error code %d' % ret
908 908
909 909 self._skipped = False
910 910
911 911 if ret == self.SKIPPED_STATUS:
912 912 if out is None: # Debug mode, nothing to parse.
913 913 missing = ['unknown']
914 914 failed = None
915 915 else:
916 916 missing, failed = TTest.parsehghaveoutput(out)
917 917
918 918 if not missing:
919 919 missing = ['skipped']
920 920
921 921 if failed:
922 922 self.fail('hg have failed checking for %s' % failed[-1])
923 923 else:
924 924 self._skipped = True
925 925 raise unittest.SkipTest(missing[-1])
926 926 elif ret == 'timeout':
927 927 self.fail('timed out')
928 928 elif ret is False:
929 929 self.fail('no result code from test')
930 930 elif out != self._refout:
931 931 # Diff generation may rely on written .err file.
932 932 if (ret != 0 or out != self._refout) and not self._skipped \
933 933 and not self._debug:
934 934 with open(self.errpath, 'wb') as f:
935 935 for line in out:
936 936 f.write(line)
937 937
938 938 # The result object handles diff calculation for us.
939 939 with firstlock:
940 940 if self._result.addOutputMismatch(self, ret, out, self._refout):
941 941 # change was accepted, skip failing
942 942 return
943 943 if self._first:
944 944 global firsterror
945 945 firsterror = True
946 946
947 947 if ret:
948 948 msg = 'output changed and ' + describe(ret)
949 949 else:
950 950 msg = 'output changed'
951 951
952 952 self.fail(msg)
953 953 elif ret:
954 954 self.fail(describe(ret))
955 955
956 956 def tearDown(self):
957 957 """Tasks to perform after run()."""
958 958 for entry in self._daemonpids:
959 959 killdaemons(entry)
960 960 self._daemonpids = []
961 961
962 962 if self._keeptmpdir:
963 963 log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
964 964 (self._testtmp.decode('utf-8'),
965 965 self._threadtmp.decode('utf-8')))
966 966 else:
967 967 try:
968 968 shutil.rmtree(self._testtmp)
969 969 except OSError:
970 970 # unreadable directory may be left in $TESTTMP; fix permission
971 971 # and try again
972 972 makecleanable(self._testtmp)
973 973 shutil.rmtree(self._testtmp, True)
974 974 shutil.rmtree(self._threadtmp, True)
975 975
976 976 if self._usechg:
977 977	 # chg servers stop automatically once they find that their socket
978 978	 # files have been deleted
979 979 shutil.rmtree(self._chgsockdir, True)
980 980
981 981 if (self._ret != 0 or self._out != self._refout) and not self._skipped \
982 982 and not self._debug and self._out:
983 983 with open(self.errpath, 'wb') as f:
984 984 for line in self._out:
985 985 f.write(line)
986 986
987 987 vlog("# Ret was:", self._ret, '(%s)' % self.name)
988 988
989 989 def _run(self, env):
990 990 # This should be implemented in child classes to run tests.
991 991 raise unittest.SkipTest('unknown test type')
992 992
993 993 def abort(self):
994 994 """Terminate execution of this test."""
995 995 self._aborted = True
996 996
997 997 def _portmap(self, i):
998 998 offset = b'' if i == 0 else b'%d' % i
999 999 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
1000 1000
1001 1001 def _getreplacements(self):
1002 1002 """Obtain a mapping of text replacements to apply to test output.
1003 1003
1004 1004 Test output needs to be normalized so it can be compared to expected
1005 1005 output. This function defines how some of that normalization will
1006 1006 occur.
1007 1007 """
1008 1008 r = [
1009 1009 # This list should be parallel to defineport in _getenv
1010 1010 self._portmap(0),
1011 1011 self._portmap(1),
1012 1012 self._portmap(2),
1013 1013 (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
1014 1014 (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
1015 1015 ]
1016 1016 r.append((self._escapepath(self._testtmp), b'$TESTTMP'))
1017 1017
1018 1018 replacementfile = os.path.join(self._testdir, b'common-pattern.py')
1019 1019
1020 1020 if os.path.exists(replacementfile):
1021 1021 data = {}
1022 1022 with open(replacementfile, mode='rb') as source:
1023 1023	 # the intermediate 'compile' step helps with debugging
1024 1024 code = compile(source.read(), replacementfile, 'exec')
1025 1025 exec(code, data)
1026 1026 for value in data.get('substitutions', ()):
1027 1027 if len(value) != 2:
1028 1028 msg = 'malformatted substitution in %s: %r'
1029 1029 msg %= (replacementfile, value)
1030 1030 raise ValueError(msg)
1031 1031 r.append(value)
1032 1032 return r
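        # If present, common-pattern.py is exec'd and may define extra
        # (pattern, replacement) pairs. A minimal sketch of its expected
        # shape (the substitution shown is illustrative only):
        #
        #   substitutions = [
        #       (br'\buser@example\.com\b', br'$TESTUSER$'),
        #   ]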
1033 1033
1034 1034 def _escapepath(self, p):
1035 1035 if os.name == 'nt':
1036 1036 return (
1037 1037 (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
1038 1038 c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
1039 1039 for c in [p[i:i + 1] for i in range(len(p))]))
1040 1040 )
1041 1041 else:
1042 1042 return re.escape(p)
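        # For example, on Windows _escapepath(b'C:\\Tmp') produces a pattern
        # like br'[cC]\:[/\\][tT][mM][pP]': drive letters match either case
        # and both path separators are accepted.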
1043 1043
1044 1044 def _localip(self):
1045 1045 if self._useipv6:
1046 1046 return b'::1'
1047 1047 else:
1048 1048 return b'127.0.0.1'
1049 1049
1050 1050 def _genrestoreenv(self, testenv):
1051 1051 """Generate a script that can be used by tests to restore the original
1052 1052 environment."""
1053 1053 # Put the restoreenv script inside self._threadtmp
1054 1054 scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
1055 1055 testenv['HGTEST_RESTOREENV'] = _strpath(scriptpath)
1056 1056
1057 1057 # Only restore environment variable names that the shell allows
1058 1058 # us to export.
1059 1059 name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')
1060 1060
1061 1061 # Do not restore these variables; otherwise tests would fail.
1062 1062 reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}
1063 1063
1064 1064 with open(scriptpath, 'w') as envf:
1065 1065 for name, value in origenviron.items():
1066 1066 if not name_regex.match(name):
1067 1067 # Skip environment variables with unusual names not
1068 1068 # allowed by most shells.
1069 1069 continue
1070 1070 if name in reqnames:
1071 1071 continue
1072 1072 envf.write('%s=%s\n' % (name, shellquote(value)))
1073 1073
1074 1074 for name in testenv:
1075 1075 if name in origenviron or name in reqnames:
1076 1076 continue
1077 1077 envf.write('unset %s\n' % (name,))
1078 1078
1079 1079 def _getenv(self):
1080 1080 """Obtain environment variables to use during test execution."""
1081 1081 def defineport(i):
1082 1082 offset = '' if i == 0 else '%s' % i
1083 1083 env["HGPORT%s" % offset] = '%s' % (self._startport + i)
1084 1084 env = os.environ.copy()
1085 1085 env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
1086 1086 env['HGEMITWARNINGS'] = '1'
1087 1087 env['TESTTMP'] = _strpath(self._testtmp)
1088 1088 env['TESTNAME'] = self.name
1089 1089 env['HOME'] = _strpath(self._testtmp)
1090 1090 # This number should match portneeded in _getport
1091 1091 for port in xrange(3):
1092 1092 # This list should be parallel to _portmap in _getreplacements
1093 1093 defineport(port)
1094 1094 env["HGRCPATH"] = _strpath(os.path.join(self._threadtmp, b'.hgrc'))
1095 1095 env["DAEMON_PIDS"] = _strpath(os.path.join(self._threadtmp,
1096 1096 b'daemon.pids'))
1097 1097 env["HGEDITOR"] = ('"' + sys.executable + '"'
1098 1098 + ' -c "import sys; sys.exit(0)"')
1099 1099 env["HGUSER"] = "test"
1100 1100 env["HGENCODING"] = "ascii"
1101 1101 env["HGENCODINGMODE"] = "strict"
1102 1102 env["HGHOSTNAME"] = "test-hostname"
1103 1103 env['HGIPV6'] = str(int(self._useipv6))
1104 1104 # See contrib/catapipe.py for how to use this functionality.
1105 1105 if 'HGTESTCATAPULTSERVERPIPE' not in env:
1106 1106 # If we don't have HGTESTCATAPULTSERVERPIPE explicitly set, pull the
1107 1107 # non-test one in as a default, otherwise set to devnull
1108 1108 env['HGTESTCATAPULTSERVERPIPE'] = \
1109 1109 env.get('HGCATAPULTSERVERPIPE', os.devnull)
1110 1110
1111 1111 extraextensions = []
1112 1112 for opt in self._extraconfigopts:
1113 1113 section, key = opt.encode('utf-8').split(b'.', 1)
1114 1114	 if section != b'extensions':
1115 1115 continue
1116 1116 name = key.split(b'=', 1)[0]
1117 1117 extraextensions.append(name)
1118 1118
1119 1119 if extraextensions:
1120 1120 env['HGTESTEXTRAEXTENSIONS'] = b' '.join(extraextensions)
1121 1121
1122 1122 # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
1123 1123 # IP addresses.
1124 1124 env['LOCALIP'] = _strpath(self._localip())
1125 1125
1126 1126 # This has the same effect as Py_LegacyWindowsStdioFlag in exewrapper.c,
1127 1127	 # but it is needed when testing Python scripts like dummyssh,
1128 1128 # dummysmtpd.py, and dumbhttp.py.
1129 1129 if PYTHON3 and os.name == 'nt':
1130 1130 env['PYTHONLEGACYWINDOWSSTDIO'] = '1'
1131 1131
1132 1132 # Reset some environment variables to well-known values so that
1133 1133 # the tests produce repeatable output.
1134 1134 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
1135 1135 env['TZ'] = 'GMT'
1136 1136 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
1137 1137 env['COLUMNS'] = '80'
1138 1138 env['TERM'] = 'xterm'
1139 1139
1140 1140 dropped = [
1141 1141 'CDPATH',
1142 1142 'CHGDEBUG',
1143 1143 'EDITOR',
1144 1144 'GREP_OPTIONS',
1145 1145 'HG',
1146 1146 'HGMERGE',
1147 1147 'HGPLAIN',
1148 1148 'HGPLAINEXCEPT',
1149 1149 'HGPROF',
1150 1150 'http_proxy',
1151 1151 'no_proxy',
1152 1152 'NO_PROXY',
1153 1153 'PAGER',
1154 1154 'VISUAL',
1155 1155 ]
1156 1156
1157 1157 for k in dropped:
1158 1158 if k in env:
1159 1159 del env[k]
1160 1160
1161 1161 # unset env related to hooks
1162 1162 for k in list(env):
1163 1163 if k.startswith('HG_'):
1164 1164 del env[k]
1165 1165
1166 1166 if self._usechg:
1167 1167 env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
1168 1168
1169 1169 return env
1170 1170
1171 1171 def _createhgrc(self, path):
1172 1172 """Create an hgrc file for this test."""
1173 1173 with open(path, 'wb') as hgrc:
1174 1174 hgrc.write(b'[ui]\n')
1175 1175 hgrc.write(b'slash = True\n')
1176 1176 hgrc.write(b'interactive = False\n')
1177 1177 hgrc.write(b'merge = internal:merge\n')
1178 1178 hgrc.write(b'mergemarkers = detailed\n')
1179 1179 hgrc.write(b'promptecho = True\n')
1180 1180 hgrc.write(b'[defaults]\n')
1181 1181 hgrc.write(b'[devel]\n')
1182 1182 hgrc.write(b'all-warnings = true\n')
1183 1183 hgrc.write(b'default-date = 0 0\n')
1184 1184 hgrc.write(b'[largefiles]\n')
1185 1185 hgrc.write(b'usercache = %s\n' %
1186 1186 (os.path.join(self._testtmp, b'.cache/largefiles')))
1187 1187 hgrc.write(b'[lfs]\n')
1188 1188 hgrc.write(b'usercache = %s\n' %
1189 1189 (os.path.join(self._testtmp, b'.cache/lfs')))
1190 1190 hgrc.write(b'[web]\n')
1191 1191 hgrc.write(b'address = localhost\n')
1192 1192 hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii'))
1193 1193 hgrc.write(b'server-header = testing stub value\n')
1194 1194
1195 1195 for opt in self._extraconfigopts:
1196 1196 section, key = opt.encode('utf-8').split(b'.', 1)
1197 1197 assert b'=' in key, ('extra config opt %s must '
1198 1198 'have an = for assignment' % opt)
1199 1199 hgrc.write(b'[%s]\n%s\n' % (section, key))
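        # For example, --extra-config-opt=web.cacerts=/path appends:
        #
        #   [web]
        #   cacerts=/path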
1200 1200
1201 1201 def fail(self, msg):
1202 1202 # unittest differentiates between errored and failed.
1203 1203 # Failed is denoted by AssertionError (by default at least).
1204 1204 raise AssertionError(msg)
1205 1205
1206 1206 def _runcommand(self, cmd, env, normalizenewlines=False):
1207 1207 """Run command in a sub-process, capturing the output (stdout and
1208 1208 stderr).
1209 1209
1210 1210 Return a tuple (exitcode, output). output is None in debug mode.
1211 1211 """
1212 1212 if self._debug:
1213 1213 proc = subprocess.Popen(_strpath(cmd), shell=True,
1214 1214 cwd=_strpath(self._testtmp),
1215 1215 env=env)
1216 1216 ret = proc.wait()
1217 1217 return (ret, None)
1218 1218
1219 1219 proc = Popen4(cmd, self._testtmp, self._timeout, env)
1220 1220 def cleanup():
1221 1221 terminate(proc)
1222 1222 ret = proc.wait()
1223 1223 if ret == 0:
1224 1224 ret = signal.SIGTERM << 8
1225 1225 killdaemons(env['DAEMON_PIDS'])
1226 1226 return ret
1227 1227
1228 1228 proc.tochild.close()
1229 1229
1230 1230 try:
1231 1231 output = proc.fromchild.read()
1232 1232 except KeyboardInterrupt:
1233 1233 vlog('# Handling keyboard interrupt')
1234 1234 cleanup()
1235 1235 raise
1236 1236
1237 1237 ret = proc.wait()
1238 1238 if wifexited(ret):
1239 1239 ret = os.WEXITSTATUS(ret)
1240 1240
1241 1241 if proc.timeout:
1242 1242 ret = 'timeout'
1243 1243
1244 1244 if ret:
1245 1245 killdaemons(env['DAEMON_PIDS'])
1246 1246
1247 1247 for s, r in self._getreplacements():
1248 1248 output = re.sub(s, r, output)
1249 1249
1250 1250 if normalizenewlines:
1251 1251 output = output.replace(b'\r\n', b'\n')
1252 1252
1253 1253 return ret, output.splitlines(True)
1254 1254
1255 1255 class PythonTest(Test):
1256 1256 """A Python-based test."""
1257 1257
1258 1258 @property
1259 1259 def refpath(self):
1260 1260 return os.path.join(self._testdir, b'%s.out' % self.bname)
1261 1261
1262 1262 def _run(self, env):
1263 1263 py3switch = self._py3warnings and b' -3' or b''
1264 1264 # Quote the python(3) executable for Windows
1265 1265 cmd = b'"%s"%s "%s"' % (PYTHON, py3switch, self.path)
1266 1266 vlog("# Running", cmd)
1267 1267 normalizenewlines = os.name == 'nt'
1268 1268 result = self._runcommand(cmd, env,
1269 1269 normalizenewlines=normalizenewlines)
1270 1270 if self._aborted:
1271 1271 raise KeyboardInterrupt()
1272 1272
1273 1273 return result
1274 1274
1275 1275 # Some glob patterns apply only in some circumstances, so the script
1276 1276 # might want to remove (glob) annotations that otherwise should be
1277 1277 # retained.
1278 1278 checkcodeglobpats = [
1279 1279 # On Windows it looks like \ doesn't require a (glob), but we know
1280 1280 # better.
1281 1281 re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
1282 1282 re.compile(br'^moving \S+/.*[^)]$'),
1283 1283 re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
1284 1284 # Not all platforms have 127.0.0.1 as loopback (though most do),
1285 1285 # so we always glob that too.
1286 1286 re.compile(br'.*\$LOCALIP.*$'),
1287 1287 ]
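# For example, 'pushing to $TESTTMP/repo' matches the first pattern above:
# the line looks glob-free, but $TESTTMP may contain backslashes on
# Windows, so it still needs to be treated as a glob line.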
1288 1288
1289 1289 bchr = chr
1290 1290 if PYTHON3:
1291 1291 bchr = lambda x: bytes([x])
1292 1292
1293 1293 class TTest(Test):
1294 1294 """A "t test" is a test backed by a .t file."""
1295 1295
1296 1296 SKIPPED_PREFIX = b'skipped: '
1297 1297 FAILED_PREFIX = b'hghave check failed: '
1298 1298 NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search
1299 1299
1300 1300 ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
1301 1301 ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
1302 1302 ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
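    # An escaping sketch: ESCAPESUB with ESCAPEMAP turns raw control bytes
    # in test output into readable escapes, e.g.
    #
    #   TTest.ESCAPESUB(lambda m: TTest.ESCAPEMAP[m.group()], b'a\x1bb')
    #   # -> b'a\\x1bb'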
1303 1303
1304 1304 def __init__(self, path, *args, **kwds):
1305 1305 # accept an extra "case" parameter
1306 1306 case = kwds.pop('case', [])
1307 1307 self._case = case
1308 1308 self._allcases = {x for y in parsettestcases(path) for x in y}
1309 1309 super(TTest, self).__init__(path, *args, **kwds)
1310 1310 if case:
1311 1311 casepath = b'#'.join(case)
1312 1312 self.name = '%s#%s' % (self.name, _strpath(casepath))
1313 1313 self.errpath = b'%s#%s.err' % (self.errpath[:-4], casepath)
1314 1314 self._tmpname += b'-%s' % casepath
1315 1315 self._have = {}
1316 1316
1317 1317 @property
1318 1318 def refpath(self):
1319 1319 return os.path.join(self._testdir, self.bname)
1320 1320
1321 1321 def _run(self, env):
1322 1322 with open(self.path, 'rb') as f:
1323 1323 lines = f.readlines()
1324 1324
1325 1325 # .t file is both reference output and the test input, keep reference
1326 1326	 # output updated with the test input. This avoids some race
1327 1327 # conditions where the reference output does not match the actual test.
1328 1328 if self._refout is not None:
1329 1329 self._refout = lines
1330 1330
1331 1331 salt, script, after, expected = self._parsetest(lines)
1332 1332
1333 1333 # Write out the generated script.
1334 1334 fname = b'%s.sh' % self._testtmp
1335 1335 with open(fname, 'wb') as f:
1336 1336 for l in script:
1337 1337 f.write(l)
1338 1338
1339 1339 cmd = b'%s "%s"' % (self._shell, fname)
1340 1340 vlog("# Running", cmd)
1341 1341
1342 1342 exitcode, output = self._runcommand(cmd, env)
1343 1343
1344 1344 if self._aborted:
1345 1345 raise KeyboardInterrupt()
1346 1346
1347 1347 # Do not merge output if skipped. Return hghave message instead.
1348 1348 # Similarly, with --debug, output is None.
1349 1349 if exitcode == self.SKIPPED_STATUS or output is None:
1350 1350 return exitcode, output
1351 1351
1352 1352 return self._processoutput(exitcode, output, salt, after, expected)
1353 1353
1354 1354 def _hghave(self, reqs):
1355 1355 allreqs = b' '.join(reqs)
1356 1356 if allreqs in self._have:
1357 1357 return self._have.get(allreqs)
1358 1358
1359 1359 # TODO do something smarter when all other uses of hghave are gone.
1360 1360 runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
1361 1361 tdir = runtestdir.replace(b'\\', b'/')
1362 1362 proc = Popen4(b'%s -c "%s/hghave %s"' %
1363 1363 (self._shell, tdir, allreqs),
1364 1364 self._testtmp, 0, self._getenv())
1365 1365 stdout, stderr = proc.communicate()
1366 1366 ret = proc.wait()
1367 1367 if wifexited(ret):
1368 1368 ret = os.WEXITSTATUS(ret)
1369 1369 if ret == 2:
1370 1370 print(stdout.decode('utf-8'))
1371 1371 sys.exit(1)
1372 1372
1373 1373 if ret != 0:
1374 1374 self._have[allreqs] = (False, stdout)
1375 1375 return False, stdout
1376 1376
1377 1377 if b'slow' in reqs:
1378 1378 self._timeout = self._slowtimeout
1379 1379
1380 1380 self._have[allreqs] = (True, None)
1381 1381 return True, None
1382 1382
1383 1383 def _iftest(self, args):
1384 1384 # implements "#if"
1385 1385 reqs = []
1386 1386 for arg in args:
1387 1387 if arg.startswith(b'no-') and arg[3:] in self._allcases:
1388 1388 if arg[3:] in self._case:
1389 1389 return False
1390 1390 elif arg in self._allcases:
1391 1391 if arg not in self._case:
1392 1392 return False
1393 1393 else:
1394 1394 reqs.append(arg)
1395 1395 return self._hghave(reqs)[0]
1396 1396
1397 1397 def _parsetest(self, lines):
1398 1398 # We generate a shell script which outputs unique markers to line
1399 1399 # up script results with our source. These markers include input
1400 1400 # line number and the last return code.
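        # For illustration, a command taken from source line 12 emits script
        # fragments like (the salt value is time-based):
        #
        #   echo SALT1553679000 12 $?
        #   hg status
        #
        # letting _processoutput() split the captured output back per input
        # line and recover each command's exit status.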
1401 1401 salt = b"SALT%d" % time.time()
1402 1402 def addsalt(line, inpython):
1403 1403 if inpython:
1404 1404 script.append(b'%s %d 0\n' % (salt, line))
1405 1405 else:
1406 1406 script.append(b'echo %s %d $?\n' % (salt, line))
1407 1407 activetrace = []
1408 1408 session = str(uuid.uuid4())
1409 1409 if PYTHON3:
1410 1410 session = session.encode('ascii')
1411 1411 hgcatapult = os.getenv('HGTESTCATAPULTSERVERPIPE') or \
1412 1412 os.getenv('HGCATAPULTSERVERPIPE')
1413 1413 def toggletrace(cmd=None):
1414 1414 if not hgcatapult or hgcatapult == os.devnull:
1415 1415 return
1416 1416
1417 1417 if activetrace:
1418 1418 script.append(
1419 1419 b'echo END %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n' % (
1420 1420 session, activetrace[0]))
1421 1421 if cmd is None:
1422 1422 return
1423 1423
1424 1424 if isinstance(cmd, str):
1425 1425 quoted = shellquote(cmd.strip())
1426 1426 else:
1427 1427 quoted = shellquote(cmd.strip().decode('utf8')).encode('utf8')
1428 1428 quoted = quoted.replace(b'\\', b'\\\\')
1429 1429 script.append(
1430 1430 b'echo START %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n' % (
1431 1431 session, quoted))
1432 1432 activetrace[0:] = [quoted]
1433 1433
1434 1434 script = []
1435 1435
1436 1436 # After we run the shell script, we re-unify the script output
1437 1437 # with non-active parts of the source, with synchronization by our
1438 1438 # SALT line number markers. The after table contains the non-active
1439 1439 # components, ordered by line number.
1440 1440 after = {}
1441 1441
1442 1442 # Expected shell script output.
1443 1443 expected = {}
1444 1444
1445 1445 pos = prepos = -1
1446 1446
1447 1447 # True or False when in a true or false conditional section
1448 1448 skipping = None
1449 1449
1450 1450 # We keep track of whether or not we're in a Python block so we
1451 1451 # can generate the surrounding doctest magic.
1452 1452 inpython = False
1453 1453
1454 1454 if self._debug:
1455 1455 script.append(b'set -x\n')
1456 1456 if self._hgcommand != b'hg':
1457 1457 script.append(b'alias hg="%s"\n' % self._hgcommand)
1458 1458 if os.getenv('MSYSTEM'):
1459 1459 script.append(b'alias pwd="pwd -W"\n')
1460 1460
1461 1461 if hgcatapult and hgcatapult != os.devnull:
1462 1462 # Kludge: use a while loop to keep the pipe from getting
1463 1463 # closed by our echo commands. The still-running file gets
1464 1464 # reaped at the end of the script, which causes the while
1465 1465 # loop to exit and closes the pipe. Sigh.
1466 1466 script.append(
1467 1467 b'rtendtracing() {\n'
1468 1468 b' echo END %(session)s %(name)s >> %(catapult)s\n'
1469 1469 b' rm -f "$TESTTMP/.still-running"\n'
1470 1470 b'}\n'
1471 1471 b'trap "rtendtracing" 0\n'
1472 1472 b'touch "$TESTTMP/.still-running"\n'
1473 1473 b'while [ -f "$TESTTMP/.still-running" ]; do sleep 1; done '
1474 1474 b'> %(catapult)s &\n'
1475 1475 b'HGCATAPULTSESSION=%(session)s ; export HGCATAPULTSESSION\n'
1476 1476 b'echo START %(session)s %(name)s >> %(catapult)s\n'
1477 1477 % {
1478 1478 'name': self.name,
1479 1479 'session': session,
1480 1480 'catapult': hgcatapult,
1481 1481 }
1482 1482 )
1483 1483
1484 1484 if self._case:
1485 1485 casestr = b'#'.join(self._case)
1486 1486 if isinstance(self._case, str):
1487 1487 quoted = shellquote(casestr)
1488 1488 else:
1489 1489 quoted = shellquote(casestr.decode('utf8')).encode('utf8')
1490 1490 script.append(b'TESTCASE=%s\n' % quoted)
1491 1491 script.append(b'export TESTCASE\n')
1492 1492
1493 1493 n = 0
1494 1494 for n, l in enumerate(lines):
1495 1495 if not l.endswith(b'\n'):
1496 1496 l += b'\n'
1497 1497 if l.startswith(b'#require'):
1498 1498 lsplit = l.split()
1499 1499 if len(lsplit) < 2 or lsplit[0] != b'#require':
1500 1500	 after.setdefault(pos, []).append(b' !!! invalid #require\n')
1501 1501 if not skipping:
1502 1502 haveresult, message = self._hghave(lsplit[1:])
1503 1503 if not haveresult:
1504 1504 script = [b'echo "%s"\nexit 80\n' % message]
1505 1505 break
1506 1506 after.setdefault(pos, []).append(l)
1507 1507 elif l.startswith(b'#if'):
1508 1508 lsplit = l.split()
1509 1509 if len(lsplit) < 2 or lsplit[0] != b'#if':
1510 1510	 after.setdefault(pos, []).append(b' !!! invalid #if\n')
1511 1511 if skipping is not None:
1512 1512	 after.setdefault(pos, []).append(b' !!! nested #if\n')
1513 1513 skipping = not self._iftest(lsplit[1:])
1514 1514 after.setdefault(pos, []).append(l)
1515 1515 elif l.startswith(b'#else'):
1516 1516 if skipping is None:
1517 1517	 after.setdefault(pos, []).append(b' !!! missing #if\n')
1518 1518 skipping = not skipping
1519 1519 after.setdefault(pos, []).append(l)
1520 1520 elif l.startswith(b'#endif'):
1521 1521 if skipping is None:
1522 1522	 after.setdefault(pos, []).append(b' !!! missing #if\n')
1523 1523 skipping = None
1524 1524 after.setdefault(pos, []).append(l)
1525 1525 elif skipping:
1526 1526 after.setdefault(pos, []).append(l)
1527 1527 elif l.startswith(b' >>> '): # python inlines
1528 1528 after.setdefault(pos, []).append(l)
1529 1529 prepos = pos
1530 1530 pos = n
1531 1531 if not inpython:
1532 1532 # We've just entered a Python block. Add the header.
1533 1533 inpython = True
1534 1534 addsalt(prepos, False) # Make sure we report the exit code.
1535 1535 script.append(b'"%s" -m heredoctest <<EOF\n' % PYTHON)
1536 1536 addsalt(n, True)
1537 1537 script.append(l[2:])
1538 1538 elif l.startswith(b' ... '): # python inlines
1539 1539 after.setdefault(prepos, []).append(l)
1540 1540 script.append(l[2:])
1541 1541 elif l.startswith(b' $ '): # commands
1542 1542 if inpython:
1543 1543 script.append(b'EOF\n')
1544 1544 inpython = False
1545 1545 after.setdefault(pos, []).append(l)
1546 1546 prepos = pos
1547 1547 pos = n
1548 1548 addsalt(n, False)
1549 1549 rawcmd = l[4:]
1550 1550 cmd = rawcmd.split()
1551 1551 toggletrace(rawcmd)
1552 1552 if len(cmd) == 2 and cmd[0] == b'cd':
1553 1553 rawcmd = b'cd %s || exit 1\n' % cmd[1]
1554 1554 script.append(rawcmd)
1555 1555 elif l.startswith(b' > '): # continuations
1556 1556 after.setdefault(prepos, []).append(l)
1557 1557 script.append(l[4:])
1558 1558 elif l.startswith(b' '): # results
1559 1559 # Queue up a list of expected results.
1560 1560 expected.setdefault(pos, []).append(l[2:])
1561 1561 else:
1562 1562 if inpython:
1563 1563 script.append(b'EOF\n')
1564 1564 inpython = False
1565 1565 # Non-command/result. Queue up for merged output.
1566 1566 after.setdefault(pos, []).append(l)
1567 1567
1568 1568 if inpython:
1569 1569 script.append(b'EOF\n')
1570 1570 if skipping is not None:
1571 1571 after.setdefault(pos, []).append(b' !!! missing #endif\n')
1572 1572 addsalt(n + 1, False)
1573 1573 # Need to end any current per-command trace
1574 1574 if activetrace:
1575 1575 toggletrace()
1576 1576 return salt, script, after, expected
1577 1577
1578 1578 def _processoutput(self, exitcode, output, salt, after, expected):
1579 1579 # Merge the script output back into a unified test.
1580 1580 warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
1581 1581 if exitcode != 0:
1582 1582 warnonly = 3
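# A sketch of how warnonly evolves below: a line that merely
# warn-matches (r == '') bumps 1 ("not yet") to 2 ("yes"); any hard
# mismatch pins it at 3 ("for sure not"), as does the nonzero exit
# code above; only a final value of 2 downgrades the whole run to a
# warning (exitcode = False) at the end of this method.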
1583 1583
1584 1584 pos = -1
1585 1585 postout = []
1586 1586 for l in output:
1587 1587 lout, lcmd = l, None
1588 1588 if salt in l:
1589 1589 lout, lcmd = l.split(salt, 1)
1590 1590
1591 1591 while lout:
1592 1592 if not lout.endswith(b'\n'):
1593 1593 lout += b' (no-eol)\n'
1594 1594
1595 1595 # Find the expected output at the current position.
1596 1596 els = [None]
1597 1597 if expected.get(pos, None):
1598 1598 els = expected[pos]
1599 1599
1600 1600 optional = []
1601 1601 for i, el in enumerate(els):
1602 1602 r = False
1603 1603 if el:
1604 1604 r, exact = self.linematch(el, lout)
1605 1605 if isinstance(r, str):
1606 1606 if r == '-glob':
1607 1607 lout = ''.join(el.rsplit(' (glob)', 1))
1608 1608 r = '' # Warn only this line.
1609 1609 elif r == "retry":
1610 1610 postout.append(b' ' + el)
1611 1611 else:
1612 1612 log('\ninfo, unknown linematch result: %r\n' % r)
1613 1613 r = False
1614 1614 if r:
1615 1615 els.pop(i)
1616 1616 break
1617 1617 if el:
1618 1618 if el.endswith(b" (?)\n"):
1619 1619 optional.append(i)
1620 1620 else:
1621 1621 m = optline.match(el)
1622 1622 if m:
1623 1623 conditions = [
1624 1624 c for c in m.group(2).split(b' ')]
1625 1625
1626 1626 if not self._iftest(conditions):
1627 1627 optional.append(i)
1628 1628 if exact:
1629 1629 # Don't allow the line to be matched against a later
1630 1630 # line in the output
1631 1631 els.pop(i)
1632 1632 break
1633 1633
1634 1634 if r:
1635 1635 if r == "retry":
1636 1636 continue
1637 1637 # clean up any optional leftovers
1638 1638 for i in optional:
1639 1639 postout.append(b' ' + els[i])
1640 1640 for i in reversed(optional):
1641 1641 del els[i]
1642 1642 postout.append(b' ' + el)
1643 1643 else:
1644 1644 if self.NEEDESCAPE(lout):
1645 1645 lout = TTest._stringescape(b'%s (esc)\n' %
1646 1646 lout.rstrip(b'\n'))
1647 1647 postout.append(b' ' + lout) # Let diff deal with it.
1648 1648 if r != '': # If line failed.
1649 1649 warnonly = 3 # for sure not
1650 1650 elif warnonly == 1: # Is "not yet" and line is warn only.
1651 1651 warnonly = 2 # Yes do warn.
1652 1652 break
1653 1653 else:
1654 1654 # clean up any optional leftovers
1655 1655 while expected.get(pos, None):
1656 1656 el = expected[pos].pop(0)
1657 1657 if el:
1658 1658 if not el.endswith(b" (?)\n"):
1659 1659 m = optline.match(el)
1660 1660 if m:
1661 1661 conditions = [c for c in m.group(2).split(b' ')]
1662 1662
1663 1663 if self._iftest(conditions):
1664 1664 # Don't append as optional line
1665 1665 continue
1666 1666 else:
1667 1667 continue
1668 1668 postout.append(b' ' + el)
1669 1669
1670 1670 if lcmd:
1671 1671 # Add on last return code.
1672 1672 ret = int(lcmd.split()[1])
1673 1673 if ret != 0:
1674 1674 postout.append(b' [%d]\n' % ret)
1675 1675 if pos in after:
1676 1676 # Merge in non-active test bits.
1677 1677 postout += after.pop(pos)
1678 1678 pos = int(lcmd.split()[0])
1679 1679
1680 1680 if pos in after:
1681 1681 postout += after.pop(pos)
1682 1682
1683 1683 if warnonly == 2:
1684 1684 exitcode = False # Set exitcode to warned.
1685 1685
1686 1686 return exitcode, postout
1687 1687
1688 1688 @staticmethod
1689 1689 def rematch(el, l):
1690 1690 try:
1691 1691 el = b'(?:' + el + b')'
1692 1692 # use \Z to ensure that the regex matches to the end of the string
1693 1693 if os.name == 'nt':
1694 1694 return re.match(el + br'\r?\n\Z', l)
1695 1695 return re.match(el + br'\n\Z', l)
1696 1696 except re.error:
1697 1697 # el is an invalid regex
1698 1698 return False
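# Illustrative use (a sketch): rematch(b'wrote .* lines', b'wrote 3 lines\n')
# matches, while b'wrote 3 lines and more\n' does not, because \Z pins
# the pattern to the end of the output line.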
1699 1699
1700 1700 @staticmethod
1701 1701 def globmatch(el, l):
1702 1702 # The only supported special characters are * and ? plus / which also
1703 1703 # matches \ on Windows. Escaping of these characters is supported.
1704 1704 if el + b'\n' == l:
1705 1705 if os.altsep:
1706 1706 # matching on "/" is not needed for this line
1707 1707 for pat in checkcodeglobpats:
1708 1708 if pat.match(el):
1709 1709 return True
1710 1710 return b'-glob'
1711 1711 return True
1712 1712 el = el.replace(b'$LOCALIP', b'*')
1713 1713 i, n = 0, len(el)
1714 1714 res = b''
1715 1715 while i < n:
1716 1716 c = el[i:i + 1]
1717 1717 i += 1
1718 1718 if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
1719 1719 res += el[i - 1:i + 1]
1720 1720 i += 1
1721 1721 elif c == b'*':
1722 1722 res += b'.*'
1723 1723 elif c == b'?':
1724 1724 res += b'.'
1725 1725 elif c == b'/' and os.altsep:
1726 1726 res += b'[/\\\\]'
1727 1727 else:
1728 1728 res += re.escape(c)
1729 1729 return TTest.rematch(res, l)
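# A sketch of the translation: an expected line written as
#
#   adding file?.txt (glob)
#
# reaches this function (suffix already stripped by linematch) as
# b'adding file?.txt' and becomes the regex b'adding\ file.\.txt';
# likewise '*' becomes '.*' and, when os.altsep is set, '/' becomes a
# character class matching both '/' and '\'. rematch() then anchors
# the result and applies it.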
1730 1730
1731 1731 def linematch(self, el, l):
1732 1732 if el == l: # perfect match (fast)
1733 1733 return True, True
1734 1734 retry = False
1735 1735 if el.endswith(b" (?)\n"):
1736 1736 retry = "retry"
1737 1737 el = el[:-5] + b"\n"
1738 1738 else:
1739 1739 m = optline.match(el)
1740 1740 if m:
1741 1741 conditions = [c for c in m.group(2).split(b' ')]
1742 1742
1743 1743 el = m.group(1) + b"\n"
1744 1744 if not self._iftest(conditions):
1745 1745 retry = "retry" # Not required by listed features
1746 1746
1747 1747 if el.endswith(b" (esc)\n"):
1748 1748 if PYTHON3:
1749 1749 el = el[:-7].decode('unicode_escape') + '\n'
1750 1750 el = el.encode('utf-8')
1751 1751 else:
1752 1752 el = el[:-7].decode('string-escape') + '\n'
1753 1753 if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
1754 1754 return True, True
1755 1755 if el.endswith(b" (re)\n"):
1756 1756 return (TTest.rematch(el[:-6], l) or retry), False
1757 1757 if el.endswith(b" (glob)\n"):
1758 1758 # ignore '(glob)' added to l by 'replacements'
1759 1759 if l.endswith(b" (glob)\n"):
1760 1760 l = l[:-8] + b"\n"
1761 1761 return (TTest.globmatch(el[:-8], l) or retry), False
1762 1762 if os.altsep:
1763 1763 _l = l.replace(b'\\', b'/')
1764 1764 if el == _l or os.name == 'nt' and el[:-1] + b'\r\n' == _l:
1765 1765 return True, True
1766 1766 return retry, True
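# Return shape sketch: a literal or ' (esc)' hit yields (True, True);
# ' (re)' and ' (glob)' lines yield (match-or-"retry", False); a line
# that matched nothing falls through to (retry, True), where retry is
# "retry" for optional '(?)' lines and False otherwise, letting the
# caller try the optional line against later output.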
1767 1767
1768 1768 @staticmethod
1769 1769 def parsehghaveoutput(lines):
1770 1770 '''Parse hghave log lines.
1771 1771
1772 1772 Return tuple of lists (missing, failed):
1773 1773 * the missing/unknown features
1774 1774 * the features for which existence check failed'''
1775 1775 missing = []
1776 1776 failed = []
1777 1777 for line in lines:
1778 1778 if line.startswith(TTest.SKIPPED_PREFIX):
1779 1779 line = line.splitlines()[0]
1780 1780 missing.append(line[len(TTest.SKIPPED_PREFIX):].decode('utf-8'))
1781 1781 elif line.startswith(TTest.FAILED_PREFIX):
1782 1782 line = line.splitlines()[0]
1783 1783 failed.append(line[len(TTest.FAILED_PREFIX):].decode('utf-8'))
1784 1784
1785 1785 return missing, failed
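# Illustrative sketch (SKIPPED_PREFIX / FAILED_PREFIX stand for the
# class constants defined elsewhere in this file): given log lines
#
#   <SKIPPED_PREFIX>missing feature: serve
#   <FAILED_PREFIX>hghave check failed: svn
#
# this returns (['missing feature: serve'], ['hghave check failed: svn']).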
1786 1786
1787 1787 @staticmethod
1788 1788 def _escapef(m):
1789 1789 return TTest.ESCAPEMAP[m.group(0)]
1790 1790
1791 1791 @staticmethod
1792 1792 def _stringescape(s):
1793 1793 return TTest.ESCAPESUB(TTest._escapef, s)
1794 1794
1795 1795 iolock = threading.RLock()
1796 1796 firstlock = threading.RLock()
1797 1797 firsterror = False
1798 1798
1799 1799 class TestResult(unittest._TextTestResult):
1800 1800 """Holds results when executing via unittest."""
1801 1801 # Don't worry too much about accessing the non-public _TextTestResult.
1802 1802 # It is relatively common in Python testing tools.
1803 1803 def __init__(self, options, *args, **kwargs):
1804 1804 super(TestResult, self).__init__(*args, **kwargs)
1805 1805
1806 1806 self._options = options
1807 1807
1808 1808 # unittest.TestResult didn't have skipped until 2.7. We need to
1809 1809 # polyfill it.
1810 1810 self.skipped = []
1811 1811
1812 1812 # We have a custom "ignored" result that isn't present in any Python
1813 1813 # unittest implementation. It is very similar to skipped. It may make
1814 1814 # sense to map it into skip some day.
1815 1815 self.ignored = []
1816 1816
1817 1817 self.times = []
1818 1818 self._firststarttime = None
1819 1819 # Data stored for the benefit of generating xunit reports.
1820 1820 self.successes = []
1821 1821 self.faildata = {}
1822 1822
1823 1823 if options.color == 'auto':
1824 1824 self.color = pygmentspresent and self.stream.isatty()
1825 1825 elif options.color == 'never':
1826 1826 self.color = False
1827 1827 else: # 'always', for testing purposes
1828 1828 self.color = pygmentspresent
1829 1829
1830 1830 def onStart(self, test):
1831 1831 """ Can be overriden by custom TestResult
1832 1832 """
1833 1833
1834 1834 def onEnd(self):
1835 1835 """ Can be overriden by custom TestResult
1836 1836 """
1837 1837
1838 1838 def addFailure(self, test, reason):
1839 1839 self.failures.append((test, reason))
1840 1840
1841 1841 if self._options.first:
1842 1842 self.stop()
1843 1843 else:
1844 1844 with iolock:
1845 1845 if reason == "timed out":
1846 1846 self.stream.write('t')
1847 1847 else:
1848 1848 if not self._options.nodiff:
1849 1849 self.stream.write('\n')
1850 1850 # Exclude the '\n' from highlighting to lex correctly
1851 1851 formatted = 'ERROR: %s output changed\n' % test
1852 1852 self.stream.write(highlightmsg(formatted, self.color))
1853 1853 self.stream.write('!')
1854 1854
1855 1855 self.stream.flush()
1856 1856
1857 1857 def addSuccess(self, test):
1858 1858 with iolock:
1859 1859 super(TestResult, self).addSuccess(test)
1860 1860 self.successes.append(test)
1861 1861
1862 1862 def addError(self, test, err):
1863 1863 super(TestResult, self).addError(test, err)
1864 1864 if self._options.first:
1865 1865 self.stop()
1866 1866
1867 1867 # Polyfill.
1868 1868 def addSkip(self, test, reason):
1869 1869 self.skipped.append((test, reason))
1870 1870 with iolock:
1871 1871 if self.showAll:
1872 1872 self.stream.writeln('skipped %s' % reason)
1873 1873 else:
1874 1874 self.stream.write('s')
1875 1875 self.stream.flush()
1876 1876
1877 1877 def addIgnore(self, test, reason):
1878 1878 self.ignored.append((test, reason))
1879 1879 with iolock:
1880 1880 if self.showAll:
1881 1881 self.stream.writeln('ignored %s' % reason)
1882 1882 else:
1883 1883 if reason not in ('not retesting', "doesn't match keyword"):
1884 1884 self.stream.write('i')
1885 1885 else:
1886 1886 self.testsRun += 1
1887 1887 self.stream.flush()
1888 1888
1889 1889 def addOutputMismatch(self, test, ret, got, expected):
1890 1890 """Record a mismatch in test output for a particular test."""
1891 1891 if self.shouldStop or firsterror:
1892 1892 # don't print, some other test case already failed and
1893 1893 # printed, we're just stale and probably failed due to our
1894 1894 # temp dir getting cleaned up.
1895 1895 return
1896 1896
1897 1897 accepted = False
1898 1898 lines = []
1899 1899
1900 1900 with iolock:
1901 1901 if self._options.nodiff:
1902 1902 pass
1903 1903 elif self._options.view:
1904 1904 v = self._options.view
1905 1905 os.system(r"%s %s %s" %
1906 1906 (v, _strpath(test.refpath), _strpath(test.errpath)))
1907 1907 else:
1908 1908 servefail, lines = getdiff(expected, got,
1909 1909 test.refpath, test.errpath)
1910 1910 self.stream.write('\n')
1911 1911 for line in lines:
1912 1912 line = highlightdiff(line, self.color)
1913 1913 if PYTHON3:
1914 1914 self.stream.flush()
1915 1915 self.stream.buffer.write(line)
1916 1916 self.stream.buffer.flush()
1917 1917 else:
1918 1918 self.stream.write(line)
1919 1919 self.stream.flush()
1920 1920
1921 1921 if servefail:
1922 1922 raise test.failureException(
1923 1923 'server failed to start (HGPORT=%s)' % test._startport)
1924 1924
1925 1925 # handle interactive prompt without releasing iolock
1926 1926 if self._options.interactive:
1927 1927 if test.readrefout() != expected:
1928 1928 self.stream.write(
1929 1929 'Reference output has changed (run again to prompt '
1930 1930 'changes)')
1931 1931 else:
1932 1932 self.stream.write('Accept this change? [n] ')
1933 1933 self.stream.flush()
1934 1934 answer = sys.stdin.readline().strip()
1935 1935 if answer.lower() in ('y', 'yes'):
1936 1936 if test.path.endswith(b'.t'):
1937 1937 rename(test.errpath, test.path)
1938 1938 else:
1939 1939 rename(test.errpath, '%s.out' % test.path)
1940 1940 accepted = True
1941 1941 if not accepted:
1942 1942 self.faildata[test.name] = b''.join(lines)
1943 1943
1944 1944 return accepted
1945 1945
1946 1946 def startTest(self, test):
1947 1947 super(TestResult, self).startTest(test)
1948 1948
1949 1949 # os.times() returns the user and system CPU time consumed by this
1950 1950 # process and its children, along with the elapsed real time. On
1951 1951 # Windows only the process's own user and system times are filled
1952 1952 # in; the remaining fields are zero, so full timings need POSIX.
1953 1953 test.started = os.times()
1954 1954 if self._firststarttime is None: # thread racy but irrelevant
1955 1955 self._firststarttime = test.started[4]
1956 1956
1957 1957 def stopTest(self, test, interrupted=False):
1958 1958 super(TestResult, self).stopTest(test)
1959 1959
1960 1960 test.stopped = os.times()
1961 1961
1962 1962 starttime = test.started
1963 1963 endtime = test.stopped
1964 1964 origin = self._firststarttime
1965 1965 self.times.append((test.name,
1966 1966 endtime[2] - starttime[2], # user space CPU time
1967 1967 endtime[3] - starttime[3], # sys space CPU time
1968 1968 endtime[4] - starttime[4], # real time
1969 1969 starttime[4] - origin, # start date in run context
1970 1970 endtime[4] - origin, # end date in run context
1971 1971 ))
1972 1972
1973 1973 if interrupted:
1974 1974 with iolock:
1975 1975 self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
1976 1976 test.name, self.times[-1][3]))
1977 1977
1978 1978 def getTestResult():
1979 1979 """
1980 1980 Return the TestResult class to use, honoring $CUSTOM_TEST_RESULT
1981 1981 """
1982 1982 if "CUSTOM_TEST_RESULT" in os.environ:
1983 1983 testresultmodule = __import__(os.environ["CUSTOM_TEST_RESULT"])
1984 1984 return testresultmodule.TestResult
1985 1985 else:
1986 1986 return TestResult
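# Hypothetical usage sketch: with a module mytestresult.py on the
# import path defining a TestResult class (say, subclassing the one
# above), running
#
#   CUSTOM_TEST_RESULT=mytestresult ./run-tests.py test-foo.t
#
# makes the harness report results through that class instead.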
1987 1987
1988 1988 class TestSuite(unittest.TestSuite):
1989 1989 """Custom unittest TestSuite that knows how to execute Mercurial tests."""
1990 1990
1991 1991 def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
1992 1992 retest=False, keywords=None, loop=False, runs_per_test=1,
1993 1993 loadtest=None, showchannels=False,
1994 1994 *args, **kwargs):
1995 1995 """Create a new instance that can run tests with a configuration.
1996 1996
1997 1997 testdir specifies the directory where tests are executed from. This
1998 1998 is typically the ``tests`` directory from Mercurial's source
1999 1999 repository.
2000 2000
2001 2001 jobs specifies the number of jobs to run concurrently. Each test
2002 2002 executes on its own thread. Tests actually spawn new processes, so
2003 2003 state mutation should not be an issue.
2004 2004
2005 2005 If there is only one job, it will use the main thread.
2006 2006
2007 2007 whitelist and blacklist denote tests that have been whitelisted and
2008 2008 blacklisted, respectively. These arguments don't belong in TestSuite.
2009 2009 Instead, whitelist and blacklist should be handled by the thing that
2010 2010 populates the TestSuite with tests. They are present to preserve
2011 2011 backwards-compatible behavior, which reports skipped tests as part
2012 2012 of the results.
2013 2013
2014 2014 retest denotes whether to retest failed tests. This arguably belongs
2015 2015 outside of TestSuite.
2016 2016
2017 2017 keywords denotes key words that will be used to filter which tests
2018 2018 to execute. This arguably belongs outside of TestSuite.
2019 2019
2020 2020 loop denotes whether to loop over tests forever.
2021 2021 """
2022 2022 super(TestSuite, self).__init__(*args, **kwargs)
2023 2023
2024 2024 self._jobs = jobs
2025 2025 self._whitelist = whitelist
2026 2026 self._blacklist = blacklist
2027 2027 self._retest = retest
2028 2028 self._keywords = keywords
2029 2029 self._loop = loop
2030 2030 self._runs_per_test = runs_per_test
2031 2031 self._loadtest = loadtest
2032 2032 self._showchannels = showchannels
2033 2033
2034 2034 def run(self, result):
2035 2035 # We have a number of filters that need to be applied. We do this
2036 2036 # here instead of inside Test because it makes the running logic for
2037 2037 # Test simpler.
2038 2038 tests = []
2039 2039 num_tests = [0]
2040 2040 for test in self._tests:
2041 2041 def get():
2042 2042 num_tests[0] += 1
2043 2043 if getattr(test, 'should_reload', False):
2044 2044 return self._loadtest(test, num_tests[0])
2045 2045 return test
2046 2046 if not os.path.exists(test.path):
2047 2047 result.addSkip(test, "Doesn't exist")
2048 2048 continue
2049 2049
2050 2050 if not (self._whitelist and test.bname in self._whitelist):
2051 2051 if self._blacklist and test.bname in self._blacklist:
2052 2052 result.addSkip(test, 'blacklisted')
2053 2053 continue
2054 2054
2055 2055 if self._retest and not os.path.exists(test.errpath):
2056 2056 result.addIgnore(test, 'not retesting')
2057 2057 continue
2058 2058
2059 2059 if self._keywords:
2060 2060 with open(test.path, 'rb') as f:
2061 2061 t = f.read().lower() + test.bname.lower()
2062 2062 ignored = False
2063 2063 for k in self._keywords.lower().split():
2064 2064 if k not in t:
2065 2065 result.addIgnore(test, "doesn't match keyword")
2066 2066 ignored = True
2067 2067 break
2068 2068
2069 2069 if ignored:
2070 2070 continue
2071 2071 for _ in xrange(self._runs_per_test):
2072 2072 tests.append(get())
2073 2073
2074 2074 runtests = list(tests)
2075 2075 done = queue.Queue()
2076 2076 running = 0
2077 2077
2078 2078 channels = [""] * self._jobs
2079 2079
2080 2080 def job(test, result):
2081 2081 for n, v in enumerate(channels):
2082 2082 if not v:
2083 2083 channel = n
2084 2084 break
2085 2085 else:
2086 2086 raise ValueError('Could not find output channel')
2087 2087 channels[channel] = "=" + test.name[5:].split(".")[0]
2088 2088 try:
2089 2089 test(result)
2090 2090 done.put(None)
2091 2091 except KeyboardInterrupt:
2092 2092 pass
2093 2093 except: # re-raises
2094 2094 done.put(('!', test, 'run-test raised an error, see traceback'))
2095 2095 raise
2096 2096 finally:
2097 2097 try:
2098 2098 channels[channel] = ''
2099 2099 except IndexError:
2100 2100 pass
2101 2101
2102 2102 def stat():
2103 2103 count = 0
2104 2104 while channels:
2105 2105 d = '\n%03s ' % count
2106 2106 for n, v in enumerate(channels):
2107 2107 if v:
2108 2108 d += v[0]
2109 2109 channels[n] = v[1:] or '.'
2110 2110 else:
2111 2111 d += ' '
2112 2112 d += ' '
2113 2113 with iolock:
2114 2114 sys.stdout.write(d + ' ')
2115 2115 sys.stdout.flush()
2116 2116 for x in xrange(10):
2117 2117 if channels:
2118 2118 time.sleep(.1)
2119 2119 count += 1
2120 2120
2121 2121 stoppedearly = False
2122 2122
2123 2123 if self._showchannels:
2124 2124 statthread = threading.Thread(target=stat, name="stat")
2125 2125 statthread.start()
2126 2126
2127 2127 try:
2128 2128 while tests or running:
2129 2129 if not done.empty() or running == self._jobs or not tests:
2130 2130 try:
2131 2131 done.get(True, 1)
2132 2132 running -= 1
2133 2133 if result and result.shouldStop:
2134 2134 stoppedearly = True
2135 2135 break
2136 2136 except queue.Empty:
2137 2137 continue
2138 2138 if tests and running != self._jobs:
2139 2139 test = tests.pop(0)
2140 2140 if self._loop:
2141 2141 if getattr(test, 'should_reload', False):
2142 2142 num_tests[0] += 1
2143 2143 tests.append(
2144 2144 self._loadtest(test, num_tests[0]))
2145 2145 else:
2146 2146 tests.append(test)
2147 2147 if self._jobs == 1:
2148 2148 job(test, result)
2149 2149 else:
2150 2150 t = threading.Thread(target=job, name=test.name,
2151 2151 args=(test, result))
2152 2152 t.start()
2153 2153 running += 1
2154 2154
2155 2155 # If we stop early we still need to wait on started tests to
2156 2156 # finish. Otherwise, there is a race between the test completing
2157 2157 # and the test's cleanup code running. This could result in the
2158 2158 # test reporting incorrect results.
2159 2159 if stoppedearly:
2160 2160 while running:
2161 2161 try:
2162 2162 done.get(True, 1)
2163 2163 running -= 1
2164 2164 except queue.Empty:
2165 2165 continue
2166 2166 except KeyboardInterrupt:
2167 2167 for test in runtests:
2168 2168 test.abort()
2169 2169
2170 2170 channels = []
2171 2171
2172 2172 return result
2173 2173
2174 2174 # Save the most recent 5 wall-clock runtimes of each test to a
2175 2175 # human-readable text file named .testtimes. Tests are sorted
2176 2176 # alphabetically, while times for each test are listed from oldest to
2177 2177 # newest.
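# For example, a .testtimes file written by savetimes() below looks
# like this (illustrative timings; one line per test, oldest first):
#
#   test-commit.t 2.341 2.298 2.410
#   test-status.t 0.512 0.498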
2178 2178
2179 2179 def loadtimes(outputdir):
2180 2180 times = []
2181 2181 try:
2182 2182 with open(os.path.join(outputdir, b'.testtimes')) as fp:
2183 2183 for line in fp:
2184 2184 m = re.match('(.*?) ([0-9. ]+)', line)
2185 2185 times.append((m.group(1),
2186 2186 [float(t) for t in m.group(2).split()]))
2187 2187 except IOError as err:
2188 2188 if err.errno != errno.ENOENT:
2189 2189 raise
2190 2190 return times
2191 2191
2192 2192 def savetimes(outputdir, result):
2193 2193 saved = dict(loadtimes(outputdir))
2194 2194 maxruns = 5
2195 2195 skipped = set([str(t[0]) for t in result.skipped])
2196 2196 for tdata in result.times:
2197 2197 test, real = tdata[0], tdata[3]
2198 2198 if test not in skipped:
2199 2199 ts = saved.setdefault(test, [])
2200 2200 ts.append(real)
2201 2201 ts[:] = ts[-maxruns:]
2202 2202
2203 2203 fd, tmpname = tempfile.mkstemp(prefix=b'.testtimes',
2204 2204 dir=outputdir, text=True)
2205 2205 with os.fdopen(fd, 'w') as fp:
2206 2206 for name, ts in sorted(saved.items()):
2207 2207 fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts])))
2208 2208 timepath = os.path.join(outputdir, b'.testtimes')
2209 2209 try:
2210 2210 os.unlink(timepath)
2211 2211 except OSError:
2212 2212 pass
2213 2213 try:
2214 2214 os.rename(tmpname, timepath)
2215 2215 except OSError:
2216 2216 pass
2217 2217
2218 2218 class TextTestRunner(unittest.TextTestRunner):
2219 2219 """Custom unittest test runner that uses appropriate settings."""
2220 2220
2221 2221 def __init__(self, runner, *args, **kwargs):
2222 2222 super(TextTestRunner, self).__init__(*args, **kwargs)
2223 2223
2224 2224 self._runner = runner
2225 2225
2226 2226 self._result = getTestResult()(self._runner.options, self.stream,
2227 2227 self.descriptions, self.verbosity)
2228 2228
2229 2229 def listtests(self, test):
2230 2230 test = sorted(test, key=lambda t: t.name)
2231 2231
2232 2232 self._result.onStart(test)
2233 2233
2234 2234 for t in test:
2235 2235 print(t.name)
2236 2236 self._result.addSuccess(t)
2237 2237
2238 2238 if self._runner.options.xunit:
2239 2239 with open(self._runner.options.xunit, "wb") as xuf:
2240 2240 self._writexunit(self._result, xuf)
2241 2241
2242 2242 if self._runner.options.json:
2243 2243 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2244 2244 with open(jsonpath, 'w') as fp:
2245 2245 self._writejson(self._result, fp)
2246 2246
2247 2247 return self._result
2248 2248
2249 2249 def run(self, test):
2250 2250 self._result.onStart(test)
2251 2251 test(self._result)
2252 2252
2253 2253 failed = len(self._result.failures)
2254 2254 skipped = len(self._result.skipped)
2255 2255 ignored = len(self._result.ignored)
2256 2256
2257 2257 with iolock:
2258 2258 self.stream.writeln('')
2259 2259
2260 2260 if not self._runner.options.noskips:
2261 for test, msg in self._result.skipped:
2261 for test, msg in sorted(self._result.skipped,
2262 key=lambda s: s[0].name):
2262 2263 formatted = 'Skipped %s: %s\n' % (test.name, msg)
2263 2264 msg = highlightmsg(formatted, self._result.color)
2264 2265 self.stream.write(msg)
2265 for test, msg in self._result.failures:
2266 for test, msg in sorted(self._result.failures,
2267 key=lambda f: f[0].name):
2266 2268 formatted = 'Failed %s: %s\n' % (test.name, msg)
2267 2269 self.stream.write(highlightmsg(formatted, self._result.color))
2268 for test, msg in self._result.errors:
2270 for test, msg in sorted(self._result.errors,
2271 key=lambda e: e[0].name):
2269 2272 self.stream.writeln('Errored %s: %s' % (test.name, msg))
2270 2273
2271 2274 if self._runner.options.xunit:
2272 2275 with open(self._runner.options.xunit, "wb") as xuf:
2273 2276 self._writexunit(self._result, xuf)
2274 2277
2275 2278 if self._runner.options.json:
2276 2279 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2277 2280 with open(jsonpath, 'w') as fp:
2278 2281 self._writejson(self._result, fp)
2279 2282
2280 2283 self._runner._checkhglib('Tested')
2281 2284
2282 2285 savetimes(self._runner._outputdir, self._result)
2283 2286
2284 2287 if failed and self._runner.options.known_good_rev:
2285 2288 self._bisecttests(t for t, m in self._result.failures)
2286 2289 self.stream.writeln(
2287 2290 '# Ran %d tests, %d skipped, %d failed.'
2288 2291 % (self._result.testsRun, skipped + ignored, failed))
2289 2292 if failed:
2290 2293 self.stream.writeln('python hash seed: %s' %
2291 2294 os.environ['PYTHONHASHSEED'])
2292 2295 if self._runner.options.time:
2293 2296 self.printtimes(self._result.times)
2294 2297
2295 2298 if self._runner.options.exceptions:
2296 2299 exceptions = aggregateexceptions(
2297 2300 os.path.join(self._runner._outputdir, b'exceptions'))
2298 2301
2299 2302 self.stream.writeln('Exceptions Report:')
2300 2303 self.stream.writeln('%d total from %d frames' %
2301 2304 (exceptions['total'],
2302 2305 len(exceptions['exceptioncounts'])))
2303 2306 combined = exceptions['combined']
2304 2307 for key in sorted(combined, key=combined.get, reverse=True):
2305 2308 frame, line, exc = key
2306 2309 totalcount, testcount, leastcount, leasttest = combined[key]
2307 2310
2308 2311 self.stream.writeln('%d (%d tests)\t%s: %s (%s - %d total)'
2309 2312 % (totalcount,
2310 2313 testcount,
2311 2314 frame, exc,
2312 2315 leasttest, leastcount))
2313 2316
2314 2317 self.stream.flush()
2315 2318
2316 2319 return self._result
2317 2320
2318 2321 def _bisecttests(self, tests):
2319 2322 bisectcmd = ['hg', 'bisect']
2320 2323 bisectrepo = self._runner.options.bisect_repo
2321 2324 if bisectrepo:
2322 2325 bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
2323 2326 def pread(args):
2324 2327 env = os.environ.copy()
2325 2328 env['HGPLAIN'] = '1'
2326 2329 p = subprocess.Popen(args, stderr=subprocess.STDOUT,
2327 2330 stdout=subprocess.PIPE, env=env)
2328 2331 data = p.stdout.read()
2329 2332 p.wait()
2330 2333 return data
2331 2334 for test in tests:
2332 2335 pread(bisectcmd + ['--reset'])
2333 2336 pread(bisectcmd + ['--bad', '.'])
2334 2337 pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
2335 2338 # TODO: we probably need to forward more options
2336 2339 # that alter hg's behavior inside the tests.
2337 2340 opts = ''
2338 2341 withhg = self._runner.options.with_hg
2339 2342 if withhg:
2340 2343 opts += ' --with-hg=%s ' % shellquote(_strpath(withhg))
2341 2344 rtc = '%s %s %s %s' % (sys.executable, sys.argv[0], opts,
2342 2345 test)
2343 2346 data = pread(bisectcmd + ['--command', rtc])
2344 2347 m = re.search(
2345 2348 (br'\nThe first (?P<goodbad>bad|good) revision '
2346 2349 br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
2347 2350 br'summary: +(?P<summary>[^\n]+)\n'),
2348 2351 data, (re.MULTILINE | re.DOTALL))
2349 2352 if m is None:
2350 2353 self.stream.writeln(
2351 2354 'Failed to identify failure point for %s' % test)
2352 2355 continue
2353 2356 dat = m.groupdict()
2354 2357 verb = 'broken' if dat['goodbad'] == b'bad' else 'fixed'
2355 2358 self.stream.writeln(
2356 2359 '%s %s by %s (%s)' % (
2357 2360 test, verb, dat['node'].decode('ascii'),
2358 2361 dat['summary'].decode('utf8', 'ignore')))
2359 2362
2360 2363 def printtimes(self, times):
2361 2364 # iolock held by run
2362 2365 self.stream.writeln('# Producing time report')
2363 2366 times.sort(key=lambda t: (t[3]))
2364 2367 cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
2365 2368 self.stream.writeln('%-7s %-7s %-7s %-7s %-7s %s' %
2366 2369 ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
2367 2370 for tdata in times:
2368 2371 test = tdata[0]
2369 2372 cuser, csys, real, start, end = tdata[1:6]
2370 2373 self.stream.writeln(cols % (start, end, cuser, csys, real, test))
2371 2374
2372 2375 @staticmethod
2373 2376 def _writexunit(result, outf):
2374 2377 # See http://llg.cubic.org/docs/junit/ for a reference.
2375 2378 timesd = dict((t[0], t[3]) for t in result.times)
2376 2379 doc = minidom.Document()
2377 2380 s = doc.createElement('testsuite')
2378 2381 s.setAttribute('name', 'run-tests')
2379 2382 s.setAttribute('tests', str(result.testsRun))
2380 2383 s.setAttribute('errors', "0") # TODO
2381 2384 s.setAttribute('failures', str(len(result.failures)))
2382 2385 s.setAttribute('skipped', str(len(result.skipped) +
2383 2386 len(result.ignored)))
2384 2387 doc.appendChild(s)
2385 2388 for tc in result.successes:
2386 2389 t = doc.createElement('testcase')
2387 2390 t.setAttribute('name', tc.name)
2388 2391 tctime = timesd.get(tc.name)
2389 2392 if tctime is not None:
2390 2393 t.setAttribute('time', '%.3f' % tctime)
2391 2394 s.appendChild(t)
2392 2395 for tc, err in sorted(result.faildata.items()):
2393 2396 t = doc.createElement('testcase')
2394 2397 t.setAttribute('name', tc)
2395 2398 tctime = timesd.get(tc)
2396 2399 if tctime is not None:
2397 2400 t.setAttribute('time', '%.3f' % tctime)
2398 2401 # createCDATASection expects a unicode or it will
2399 2402 # convert using default conversion rules, which will
2400 2403 # fail if string isn't ASCII.
2401 2404 err = cdatasafe(err).decode('utf-8', 'replace')
2402 2405 cd = doc.createCDATASection(err)
2403 2406 # Use 'failure' here instead of 'error' to match errors = 0,
2404 2407 # failures = len(result.failures) in the testsuite element.
2405 2408 failelem = doc.createElement('failure')
2406 2409 failelem.setAttribute('message', 'output changed')
2407 2410 failelem.setAttribute('type', 'output-mismatch')
2408 2411 failelem.appendChild(cd)
2409 2412 t.appendChild(failelem)
2410 2413 s.appendChild(t)
2411 2414 for tc, message in result.skipped:
2412 2415 # According to the schema, 'skipped' has no attributes. So store
2413 2416 # the skip message as a text node instead.
2414 2417 t = doc.createElement('testcase')
2415 2418 t.setAttribute('name', tc.name)
2416 2419 binmessage = message.encode('utf-8')
2417 2420 message = cdatasafe(binmessage).decode('utf-8', 'replace')
2418 2421 cd = doc.createCDATASection(message)
2419 2422 skipelem = doc.createElement('skipped')
2420 2423 skipelem.appendChild(cd)
2421 2424 t.appendChild(skipelem)
2422 2425 s.appendChild(t)
2423 2426 outf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
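# The document written above looks roughly like this (illustrative
# names and timings):
#
#   <testsuite name="run-tests" tests="3" errors="0" failures="1" skipped="1">
#     <testcase name="test-a.t" time="0.512"/>
#     <testcase name="test-b.t">
#       <failure message="output changed" type="output-mismatch">
#         <![CDATA[--- expected  +++ got ...]]>
#       </failure>
#     </testcase>
#     <testcase name="test-c.t">
#       <skipped><![CDATA[missing feature x]]></skipped>
#     </testcase>
#   </testsuite>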
2424 2427
2425 2428 @staticmethod
2426 2429 def _writejson(result, outf):
2427 2430 timesd = {}
2428 2431 for tdata in result.times:
2429 2432 test = tdata[0]
2430 2433 timesd[test] = tdata[1:]
2431 2434
2432 2435 outcome = {}
2433 2436 groups = [('success', ((tc, None)
2434 2437 for tc in result.successes)),
2435 2438 ('failure', result.failures),
2436 2439 ('skip', result.skipped)]
2437 2440 for res, testcases in groups:
2438 2441 for tc, __ in testcases:
2439 2442 if tc.name in timesd:
2440 2443 diff = result.faildata.get(tc.name, b'')
2441 2444 try:
2442 2445 diff = diff.decode('unicode_escape')
2443 2446 except UnicodeDecodeError as e:
2444 2447 diff = '%r decoding diff, sorry' % e
2445 2448 tres = {'result': res,
2446 2449 'time': ('%0.3f' % timesd[tc.name][2]),
2447 2450 'cuser': ('%0.3f' % timesd[tc.name][0]),
2448 2451 'csys': ('%0.3f' % timesd[tc.name][1]),
2449 2452 'start': ('%0.3f' % timesd[tc.name][3]),
2450 2453 'end': ('%0.3f' % timesd[tc.name][4]),
2451 2454 'diff': diff,
2452 2455 }
2453 2456 else:
2454 2457 # blacklisted test
2455 2458 tres = {'result': res}
2456 2459
2457 2460 outcome[tc.name] = tres
2458 2461 jsonout = json.dumps(outcome, sort_keys=True, indent=4,
2459 2462 separators=(',', ': '))
2460 2463 outf.writelines(("testreport =", jsonout))
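# The file written above is a JavaScript-style report, roughly
# (illustrative values):
#
#   testreport ={
#       "test-a.t": {
#           "result": "success",
#           "time": "0.512", "cuser": "0.330", "csys": "0.080",
#           "start": "0.000", "end": "0.512", "diff": ""
#       },
#       ...
#   }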
2461 2464
2462 2465 def sorttests(testdescs, previoustimes, shuffle=False):
2463 2466 """Do an in-place sort of tests."""
2464 2467 if shuffle:
2465 2468 random.shuffle(testdescs)
2466 2469 return
2467 2470
2468 2471 if previoustimes:
2469 2472 def sortkey(f):
2470 2473 f = f['path']
2471 2474 if f in previoustimes:
2472 2475 # Use most recent time as estimate
2473 2476 return -previoustimes[f][-1]
2474 2477 else:
2475 2478 # Default to a rather arbitrary value of 1 second for new tests
2476 2479 return -1.0
2477 2480 else:
2478 2481 # keywords for slow tests
2479 2482 slow = {b'svn': 10,
2480 2483 b'cvs': 10,
2481 2484 b'hghave': 10,
2482 2485 b'largefiles-update': 10,
2483 2486 b'run-tests': 10,
2484 2487 b'corruption': 10,
2485 2488 b'race': 10,
2486 2489 b'i18n': 10,
2487 2490 b'check': 100,
2488 2491 b'gendoc': 100,
2489 2492 b'contrib-perf': 200,
2490 2493 }
2491 2494 perf = {}
2492 2495
2493 2496 def sortkey(f):
2494 2497 # run largest tests first, as they tend to take the longest
2495 2498 f = f['path']
2496 2499 try:
2497 2500 return perf[f]
2498 2501 except KeyError:
2499 2502 try:
2500 2503 val = -os.stat(f).st_size
2501 2504 except OSError as e:
2502 2505 if e.errno != errno.ENOENT:
2503 2506 raise
2504 2507 perf[f] = -1e9 # file does not exist, tell early
2505 2508 return -1e9
2506 2509 for kw, mul in slow.items():
2507 2510 if kw in f:
2508 2511 val *= mul
2509 2512 if f.endswith(b'.py'):
2510 2513 val /= 10.0
2511 2514 perf[f] = val / 1000.0
2512 2515 return perf[f]
2513 2516
2514 2517 testdescs.sort(key=sortkey)
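# Worked example of the size-based branch above: a 4000-byte
# test-contrib-perf.t gets key -(4000 * 200) / 1000.0 = -800.0 and so
# sorts ahead of a plain 4000-byte .t file (key -4.0); a .py file of
# the same size is treated as 10x cheaper (key -0.4).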
2515 2518
2516 2519 class TestRunner(object):
2517 2520 """Holds context for executing tests.
2518 2521
2519 2522 Tests rely on a lot of state. This object holds it for them.
2520 2523 """
2521 2524
2522 2525 # Programs required to run tests.
2523 2526 REQUIREDTOOLS = [
2524 2527 b'diff',
2525 2528 b'grep',
2526 2529 b'unzip',
2527 2530 b'gunzip',
2528 2531 b'bunzip2',
2529 2532 b'sed',
2530 2533 ]
2531 2534
2532 2535 # Maps file extensions to test class.
2533 2536 TESTTYPES = [
2534 2537 (b'.py', PythonTest),
2535 2538 (b'.t', TTest),
2536 2539 ]
2537 2540
2538 2541 def __init__(self):
2539 2542 self.options = None
2540 2543 self._hgroot = None
2541 2544 self._testdir = None
2542 2545 self._outputdir = None
2543 2546 self._hgtmp = None
2544 2547 self._installdir = None
2545 2548 self._bindir = None
2546 2549 self._tmpbinddir = None
2547 2550 self._pythondir = None
2548 2551 self._coveragefile = None
2549 2552 self._createdfiles = []
2550 2553 self._hgcommand = None
2551 2554 self._hgpath = None
2552 2555 self._portoffset = 0
2553 2556 self._ports = {}
2554 2557
2555 2558 def run(self, args, parser=None):
2556 2559 """Run the test suite."""
2557 2560 oldmask = os.umask(0o22)
2558 2561 try:
2559 2562 parser = parser or getparser()
2560 2563 options = parseargs(args, parser)
2561 2564 tests = [_bytespath(a) for a in options.tests]
2562 2565 if options.test_list is not None:
2563 2566 for listfile in options.test_list:
2564 2567 with open(listfile, 'rb') as f:
2565 2568 tests.extend(t for t in f.read().splitlines() if t)
2566 2569 self.options = options
2567 2570
2568 2571 self._checktools()
2569 2572 testdescs = self.findtests(tests)
2570 2573 if options.profile_runner:
2571 2574 import statprof
2572 2575 statprof.start()
2573 2576 result = self._run(testdescs)
2574 2577 if options.profile_runner:
2575 2578 statprof.stop()
2576 2579 statprof.display()
2577 2580 return result
2578 2581
2579 2582 finally:
2580 2583 os.umask(oldmask)
2581 2584
2582 2585 def _run(self, testdescs):
2583 2586 testdir = getcwdb()
2584 2587 self._testdir = osenvironb[b'TESTDIR'] = getcwdb()
2585 2588 # assume all tests in same folder for now
2586 2589 if testdescs:
2587 2590 pathname = os.path.dirname(testdescs[0]['path'])
2588 2591 if pathname:
2589 2592 testdir = os.path.join(testdir, pathname)
2590 2593 self._testdir = osenvironb[b'TESTDIR'] = testdir
2591 2594 if self.options.outputdir:
2592 2595 self._outputdir = canonpath(_bytespath(self.options.outputdir))
2593 2596 else:
2594 2597 self._outputdir = getcwdb()
2595 2598 if testdescs and pathname:
2596 2599 self._outputdir = os.path.join(self._outputdir, pathname)
2597 2600 previoustimes = {}
2598 2601 if self.options.order_by_runtime:
2599 2602 previoustimes = dict(loadtimes(self._outputdir))
2600 2603 sorttests(testdescs, previoustimes, shuffle=self.options.random)
2601 2604
2602 2605 if 'PYTHONHASHSEED' not in os.environ:
2603 2606 # use a random python hash seed all the time
2604 2607 # we do the randomness ourselves so we know what seed is used
2605 2608 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
2606 2609
2607 2610 if self.options.tmpdir:
2608 2611 self.options.keep_tmpdir = True
2609 2612 tmpdir = _bytespath(self.options.tmpdir)
2610 2613 if os.path.exists(tmpdir):
2611 2614 # Meaning of tmpdir has changed since 1.3: we used to create
2612 2615 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
2613 2616 # tmpdir already exists.
2614 2617 print("error: temp dir %r already exists" % tmpdir)
2615 2618 return 1
2616 2619
2617 2620 os.makedirs(tmpdir)
2618 2621 else:
2619 2622 d = None
2620 2623 if os.name == 'nt':
2621 2624 # without this, we get the default temp dir location, but
2622 2625 # in all lowercase, which causes trouble with paths (issue3490)
2623 2626 d = osenvironb.get(b'TMP', None)
2624 2627 tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)
2625 2628
2626 2629 self._hgtmp = osenvironb[b'HGTMP'] = (
2627 2630 os.path.realpath(tmpdir))
2628 2631
2629 2632 if self.options.with_hg:
2630 2633 self._installdir = None
2631 2634 whg = self.options.with_hg
2632 2635 self._bindir = os.path.dirname(os.path.realpath(whg))
2633 2636 assert isinstance(self._bindir, bytes)
2634 2637 self._hgcommand = os.path.basename(whg)
2635 2638 self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
2636 2639 os.makedirs(self._tmpbindir)
2637 2640
2638 2641 normbin = os.path.normpath(os.path.abspath(whg))
2639 2642 normbin = normbin.replace(os.sep.encode('ascii'), b'/')
2640 2643
2641 2644 # Other Python scripts in the test harness need to
2642 2645 # `import mercurial`. If `hg` is a Python script, we assume
2643 2646 # the Mercurial modules are relative to its path and tell the tests
2644 2647 # to load Python modules from its directory.
2645 2648 with open(whg, 'rb') as fh:
2646 2649 initial = fh.read(1024)
2647 2650
2648 2651 if re.match(b'#!.*python', initial):
2649 2652 self._pythondir = self._bindir
2650 2653 # If it looks like our in-repo Rust binary, use the source root.
2651 2654 # This is a bit hacky. But rhg is still not supported outside the
2652 2655 # source directory. So until it is, do the simple thing.
2653 2656 elif re.search(b'/rust/target/[^/]+/hg', normbin):
2654 2657 self._pythondir = os.path.dirname(self._testdir)
2655 2658 # Fall back to the legacy behavior.
2656 2659 else:
2657 2660 self._pythondir = self._bindir
2658 2661
2659 2662 else:
2660 2663 self._installdir = os.path.join(self._hgtmp, b"install")
2661 2664 self._bindir = os.path.join(self._installdir, b"bin")
2662 2665 self._hgcommand = b'hg'
2663 2666 self._tmpbindir = self._bindir
2664 2667 self._pythondir = os.path.join(self._installdir, b"lib", b"python")
2665 2668
2666 2669 # Force the use of hg.exe instead of relying on MSYS to recognize hg is
2667 2670 # a python script and feed it to python.exe. Legacy stdio is force
2668 2671 # enabled by hg.exe, and this is a more realistic way to launch hg
2669 2672 # anyway.
2670 2673 if os.name == 'nt' and not self._hgcommand.endswith(b'.exe'):
2671 2674 self._hgcommand += b'.exe'
2672 2675
2673 2676 # set CHGHG, then replace "hg" command by "chg"
2674 2677 chgbindir = self._bindir
2675 2678 if self.options.chg or self.options.with_chg:
2676 2679 osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
2677 2680 else:
2678 2681 osenvironb.pop(b'CHGHG', None) # drop flag for hghave
2679 2682 if self.options.chg:
2680 2683 self._hgcommand = b'chg'
2681 2684 elif self.options.with_chg:
2682 2685 chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
2683 2686 self._hgcommand = os.path.basename(self.options.with_chg)
2684 2687
2685 2688 osenvironb[b"BINDIR"] = self._bindir
2686 2689 osenvironb[b"PYTHON"] = PYTHON
2687 2690
2688 2691 fileb = _bytespath(__file__)
2689 2692 runtestdir = os.path.abspath(os.path.dirname(fileb))
2690 2693 osenvironb[b'RUNTESTDIR'] = runtestdir
2691 2694 if PYTHON3:
2692 2695 sepb = _bytespath(os.pathsep)
2693 2696 else:
2694 2697 sepb = os.pathsep
2695 2698 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
2696 2699 if os.path.islink(__file__):
2697 2700 # test helper will likely be at the end of the symlink
2698 2701 realfile = os.path.realpath(fileb)
2699 2702 realdir = os.path.abspath(os.path.dirname(realfile))
2700 2703 path.insert(2, realdir)
2701 2704 if chgbindir != self._bindir:
2702 2705 path.insert(1, chgbindir)
2703 2706 if self._testdir != runtestdir:
2704 2707 path = [self._testdir] + path
2705 2708 if self._tmpbindir != self._bindir:
2706 2709 path = [self._tmpbindir] + path
2707 2710 osenvironb[b"PATH"] = sepb.join(path)
2708 2711
2709 2712 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
2710 2713 # can run .../tests/run-tests.py test-foo where test-foo
2711 2714 # adds an extension to HGRC. Also include the run-tests.py directory to
2712 2715 # import modules like heredoctest.
2713 2716 pypath = [self._pythondir, self._testdir, runtestdir]
2714 2717 # We have to augment PYTHONPATH, rather than simply replacing
2715 2718 # it, in case external libraries are only available via current
2716 2719 # PYTHONPATH. (In particular, the Subversion bindings on OS X
2717 2720 # are in /opt/subversion.)
2718 2721 oldpypath = osenvironb.get(IMPL_PATH)
2719 2722 if oldpypath:
2720 2723 pypath.append(oldpypath)
2721 2724 osenvironb[IMPL_PATH] = sepb.join(pypath)
2722 2725
2723 2726 if self.options.pure:
2724 2727 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
2725 2728 os.environ["HGMODULEPOLICY"] = "py"
2726 2729
2727 2730 if self.options.allow_slow_tests:
2728 2731 os.environ["HGTEST_SLOW"] = "slow"
2729 2732 elif 'HGTEST_SLOW' in os.environ:
2730 2733 del os.environ['HGTEST_SLOW']
2731 2734
2732 2735 self._coveragefile = os.path.join(self._testdir, b'.coverage')
2733 2736
2734 2737 if self.options.exceptions:
2735 2738 exceptionsdir = os.path.join(self._outputdir, b'exceptions')
2736 2739 try:
2737 2740 os.makedirs(exceptionsdir)
2738 2741 except OSError as e:
2739 2742 if e.errno != errno.EEXIST:
2740 2743 raise
2741 2744
2742 2745 # Remove all existing exception reports.
2743 2746 for f in os.listdir(exceptionsdir):
2744 2747 os.unlink(os.path.join(exceptionsdir, f))
2745 2748
2746 2749 osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir
2747 2750 logexceptions = os.path.join(self._testdir, b'logexceptions.py')
2748 2751 self.options.extra_config_opt.append(
2749 2752 'extensions.logexceptions=%s' % logexceptions.decode('utf-8'))
2750 2753
2751 2754 vlog("# Using TESTDIR", self._testdir)
2752 2755 vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
2753 2756 vlog("# Using HGTMP", self._hgtmp)
2754 2757 vlog("# Using PATH", os.environ["PATH"])
2755 2758 vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
2756 2759 vlog("# Writing to directory", self._outputdir)
2757 2760
2758 2761 try:
2759 2762 return self._runtests(testdescs) or 0
2760 2763 finally:
2761 2764 time.sleep(.1)
2762 2765 self._cleanup()
2763 2766
2764 2767 def findtests(self, args):
2765 2768 """Finds possible test files from arguments.
2766 2769
2767 2770 If you wish to inject custom tests into the test harness, this would
2768 2771 be a good function to monkeypatch or override in a derived class.
2769 2772 """
2770 2773 if not args:
2771 2774 if self.options.changed:
2772 2775 proc = Popen4('hg st --rev "%s" -man0 .' %
2773 2776 self.options.changed, None, 0)
2774 2777 stdout, stderr = proc.communicate()
2775 2778 args = stdout.strip(b'\0').split(b'\0')
2776 2779 else:
2777 2780 args = os.listdir(b'.')
2778 2781
2779 2782 expanded_args = []
2780 2783 for arg in args:
2781 2784 if os.path.isdir(arg):
2782 2785 if not arg.endswith(b'/'):
2783 2786 arg += b'/'
2784 2787 expanded_args.extend([arg + a for a in os.listdir(arg)])
2785 2788 else:
2786 2789 expanded_args.append(arg)
2787 2790 args = expanded_args
2788 2791
2789 2792 testcasepattern = re.compile(
2790 2793 br'([\w-]+\.(?:t|py))(?:#([a-zA-Z0-9_\-\.#]+))')
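# e.g. b'test-foo.t#case1#case2' parses into t_basename b'test-foo.t'
# and casestr b'case1#case2'; the case string is split on b'#' below.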
2791 2794 tests = []
2792 2795 for t in args:
2793 2796 case = []
2794 2797
2795 2798 if not (os.path.basename(t).startswith(b'test-')
2796 2799 and (t.endswith(b'.py') or t.endswith(b'.t'))):
2797 2800
2798 2801 m = testcasepattern.match(os.path.basename(t))
2799 2802 if m is not None:
2800 2803 t_basename, casestr = m.groups()
2801 2804 t = os.path.join(os.path.dirname(t), t_basename)
2802 2805 if casestr:
2803 2806 case = casestr.split(b'#')
2804 2807 else:
2805 2808 continue
2806 2809
2807 2810 if t.endswith(b'.t'):
2808 2811 # .t file may contain multiple test cases
2809 2812 casedimensions = parsettestcases(t)
2810 2813 if casedimensions:
2811 2814 cases = []
2812 2815 def addcases(case, casedimensions):
2813 2816 if not casedimensions:
2814 2817 cases.append(case)
2815 2818 else:
2816 2819 for c in casedimensions[0]:
2817 2820 addcases(case + [c], casedimensions[1:])
2818 2821 addcases([], casedimensions)
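# e.g. with casedimensions [[b'a', b'b'], [b'x', b'y']] this expands to
# cases [[b'a', b'x'], [b'a', b'y'], [b'b', b'x'], [b'b', b'y']] -- the
# cartesian product of the case dimensions declared in the .t file.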
2819 2822 if case and case in cases:
2820 2823 cases = [case]
2821 2824 elif case:
2822 2825 # Ignore invalid cases
2823 2826 cases = []
2824 2827 else:
2825 2828 pass
2826 2829 tests += [{'path': t, 'case': c} for c in sorted(cases)]
2827 2830 else:
2828 2831 tests.append({'path': t})
2829 2832 else:
2830 2833 tests.append({'path': t})
2831 2834 return tests
2832 2835
2833 2836 def _runtests(self, testdescs):
2834 2837 def _reloadtest(test, i):
2835 2838 # convert a test back to its description dict
2836 2839 desc = {'path': test.path}
2837 2840 case = getattr(test, '_case', [])
2838 2841 if case:
2839 2842 desc['case'] = case
2840 2843 return self._gettest(desc, i)
2841 2844
2842 2845 try:
2843 2846 if self.options.restart:
2844 2847 orig = list(testdescs)
2845 2848 while testdescs:
2846 2849 desc = testdescs[0]
2847 2850 # desc['path'] is a relative path
2848 2851 if 'case' in desc:
2849 2852 casestr = b'#'.join(desc['case'])
2850 2853 errpath = b'%s#%s.err' % (desc['path'], casestr)
2851 2854 else:
2852 2855 errpath = b'%s.err' % desc['path']
2853 2856 errpath = os.path.join(self._outputdir, errpath)
2854 2857 if os.path.exists(errpath):
2855 2858 break
2856 2859 testdescs.pop(0)
2857 2860 if not testdescs:
2858 2861 print("running all tests")
2859 2862 testdescs = orig
2860 2863
2861 2864 tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]
2862 2865 num_tests = len(tests) * self.options.runs_per_test
2863 2866
2864 2867 jobs = min(num_tests, self.options.jobs)
2865 2868
2866 2869 failed = False
2867 2870 kws = self.options.keywords
2868 2871 if kws is not None and PYTHON3:
2869 2872 kws = kws.encode('utf-8')
2870 2873
2871 2874 suite = TestSuite(self._testdir,
2872 2875 jobs=jobs,
2873 2876 whitelist=self.options.whitelisted,
2874 2877 blacklist=self.options.blacklist,
2875 2878 retest=self.options.retest,
2876 2879 keywords=kws,
2877 2880 loop=self.options.loop,
2878 2881 runs_per_test=self.options.runs_per_test,
2879 2882 showchannels=self.options.showchannels,
2880 2883 tests=tests, loadtest=_reloadtest)
2881 2884 verbosity = 1
2882 2885 if self.options.list_tests:
2883 2886 verbosity = 0
2884 2887 elif self.options.verbose:
2885 2888 verbosity = 2
2886 2889 runner = TextTestRunner(self, verbosity=verbosity)
2887 2890
2888 2891 if self.options.list_tests:
2889 2892 result = runner.listtests(suite)
2890 2893 else:
2891 2894 if self._installdir:
2892 2895 self._installhg()
2893 2896 self._checkhglib("Testing")
2894 2897 else:
2895 2898 self._usecorrectpython()
2896 2899 if self.options.chg:
2897 2900 assert self._installdir
2898 2901 self._installchg()
2899 2902
2900 2903 log('running %d tests using %d parallel processes' % (
2901 2904 num_tests, jobs))
2902 2905
2903 2906 result = runner.run(suite)
2904 2907
2905 2908 if result.failures:
2906 2909 failed = True
2907 2910
2908 2911 result.onEnd()
2909 2912
2910 2913 if self.options.anycoverage:
2911 2914 self._outputcoverage()
2912 2915 except KeyboardInterrupt:
2913 2916 failed = True
2914 2917 print("\ninterrupted!")
2915 2918
2916 2919 if failed:
2917 2920 return 1
2918 2921
2919 2922 def _getport(self, count):
2920 2923 port = self._ports.get(count) # do we have a cached entry?
2921 2924 if port is None:
2922 2925 portneeded = 3
2923 2926 # after 100 tries we just give up and let the test report failure
2924 2927 for tries in xrange(100):
2925 2928 allfree = True
2926 2929 port = self.options.port + self._portoffset
2927 2930 for idx in xrange(portneeded):
2928 2931 if not checkportisavailable(port + idx):
2929 2932 allfree = False
2930 2933 break
2931 2934 self._portoffset += portneeded
2932 2935 if allfree:
2933 2936 break
2934 2937 self._ports[count] = port
2935 2938 return port
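# Each test is offered a contiguous block of three ports: with base
# port P (self.options.port), the first caller typically gets P, the
# next P+3, and so on; blocks where checkportisavailable() finds a
# port in use are skipped, and after 100 attempts the last candidate
# is returned and the test is left to report the failure.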
2936 2939
2937 2940 def _gettest(self, testdesc, count):
2938 2941 """Obtain a Test by looking at its filename.
2939 2942
2940 2943 Returns a Test instance. The Test may not be runnable if it doesn't
2941 2944 map to a known type.
2942 2945 """
2943 2946 path = testdesc['path']
2944 2947 lctest = path.lower()
2945 2948 testcls = Test
2946 2949
2947 2950 for ext, cls in self.TESTTYPES:
2948 2951 if lctest.endswith(ext):
2949 2952 testcls = cls
2950 2953 break
2951 2954
2952 2955 refpath = os.path.join(getcwdb(), path)
2953 2956 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
2954 2957
2955 2958 # extra keyword parameters. 'case' is used by .t tests
2956 2959 kwds = dict((k, testdesc[k]) for k in ['case'] if k in testdesc)
2957 2960
2958 2961 t = testcls(refpath, self._outputdir, tmpdir,
2959 2962 keeptmpdir=self.options.keep_tmpdir,
2960 2963 debug=self.options.debug,
2961 2964 first=self.options.first,
2962 2965 timeout=self.options.timeout,
2963 2966 startport=self._getport(count),
2964 2967 extraconfigopts=self.options.extra_config_opt,
2965 2968 py3warnings=self.options.py3_warnings,
2966 2969 shell=self.options.shell,
2967 2970 hgcommand=self._hgcommand,
2968 2971 usechg=bool(self.options.with_chg or self.options.chg),
2969 2972 useipv6=useipv6, **kwds)
2970 2973 t.should_reload = True
2971 2974 return t
2972 2975
2973 2976 def _cleanup(self):
2974 2977 """Clean up state from this test invocation."""
2975 2978 if self.options.keep_tmpdir:
2976 2979 return
2977 2980
2978 2981 vlog("# Cleaning up HGTMP", self._hgtmp)
2979 2982 shutil.rmtree(self._hgtmp, True)
2980 2983 for f in self._createdfiles:
2981 2984 try:
2982 2985 os.remove(f)
2983 2986 except OSError:
2984 2987 pass
2985 2988
2986 2989 def _usecorrectpython(self):
2987 2990 """Configure the environment to use the appropriate Python in tests."""
2988 2991 # Tests must use the same interpreter as us or bad things will happen.
2989 2992 pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
2990 2993
2991 2994 # os.symlink() is a thing with py3 on Windows, but it requires
2992 2995 # Administrator rights.
2993 2996 if getattr(os, 'symlink', None) and os.name != 'nt':
2994 2997 vlog("# Making python executable in test path a symlink to '%s'" %
2995 2998 sys.executable)
2996 2999 mypython = os.path.join(self._tmpbindir, pyexename)
2997 3000 try:
2998 3001 if os.readlink(mypython) == sys.executable:
2999 3002 return
3000 3003 os.unlink(mypython)
3001 3004 except OSError as err:
3002 3005 if err.errno != errno.ENOENT:
3003 3006 raise
3004 3007 if self._findprogram(pyexename) != sys.executable:
3005 3008 try:
3006 3009 os.symlink(sys.executable, mypython)
3007 3010 self._createdfiles.append(mypython)
3008 3011 except OSError as err:
3009 3012 # child processes may race, which is harmless
3010 3013 if err.errno != errno.EEXIST:
3011 3014 raise
3012 3015 else:
3013 3016 exedir, exename = os.path.split(sys.executable)
3014 3017 vlog("# Modifying search path to find %s as %s in '%s'" %
3015 3018 (exename, pyexename, exedir))
3016 3019 path = os.environ['PATH'].split(os.pathsep)
3017 3020 while exedir in path:
3018 3021 path.remove(exedir)
3019 3022 os.environ['PATH'] = os.pathsep.join([exedir] + path)
3020 3023 if not self._findprogram(pyexename):
3021 3024 print("WARNING: Cannot find %s in search path" % pyexename)
3022 3025
3023 3026 def _installhg(self):
3024 3027 """Install hg into the test environment.
3025 3028
3026 3029 This will also configure hg with the appropriate testing settings.
3027 3030 """
3028 3031 vlog("# Performing temporary installation of HG")
3029 3032 installerrs = os.path.join(self._hgtmp, b"install.err")
3030 3033 compiler = ''
3031 3034 if self.options.compiler:
3032 3035 compiler = '--compiler ' + self.options.compiler
3033 3036 if self.options.pure:
3034 3037 pure = b"--pure"
3035 3038 else:
3036 3039 pure = b""
3037 3040
3038 3041 # Run installer in hg root
3039 3042 script = os.path.realpath(sys.argv[0])
3040 3043 exe = sys.executable
3041 3044 if PYTHON3:
3042 3045 compiler = _bytespath(compiler)
3043 3046 script = _bytespath(script)
3044 3047 exe = _bytespath(exe)
3045 3048 hgroot = os.path.dirname(os.path.dirname(script))
3046 3049 self._hgroot = hgroot
3047 3050 os.chdir(hgroot)
3048 3051 nohome = b'--home=""'
3049 3052 if os.name == 'nt':
3050 3053 # The --home="" trick works only on OSes where os.sep == '/'
3051 3054 # because of a distutils convert_path() fast-path. Avoid it at
3052 3055 # least on Windows for now, deal with .pydistutils.cfg bugs
3053 3056 # when they happen.
3054 3057 nohome = b''
3055 3058 cmd = (b'"%(exe)s" setup.py %(pure)s clean --all'
3056 3059 b' build %(compiler)s --build-base="%(base)s"'
3057 3060 b' install --force --prefix="%(prefix)s"'
3058 3061 b' --install-lib="%(libdir)s"'
3059 3062 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
3060 3063 % {b'exe': exe, b'pure': pure,
3061 3064 b'compiler': compiler,
3062 3065 b'base': os.path.join(self._hgtmp, b"build"),
3063 3066 b'prefix': self._installdir, b'libdir': self._pythondir,
3064 3067 b'bindir': self._bindir,
3065 3068 b'nohome': nohome, b'logfile': installerrs})
3066 3069
3067 3070 # setuptools requires install directories to exist.
3068 3071 def makedirs(p):
3069 3072 try:
3070 3073 os.makedirs(p)
3071 3074 except OSError as e:
3072 3075 if e.errno != errno.EEXIST:
3073 3076 raise
3074 3077 makedirs(self._pythondir)
3075 3078 makedirs(self._bindir)
3076 3079
3077 3080 vlog("# Running", cmd)
3078 3081 if subprocess.call(_strpath(cmd), shell=True) == 0:
3079 3082 if not self.options.verbose:
3080 3083 try:
3081 3084 os.remove(installerrs)
3082 3085 except OSError as e:
3083 3086 if e.errno != errno.ENOENT:
3084 3087 raise
3085 3088 else:
3086 3089 with open(installerrs, 'rb') as f:
3087 3090 for line in f:
3088 3091 if PYTHON3:
3089 3092 sys.stdout.buffer.write(line)
3090 3093 else:
3091 3094 sys.stdout.write(line)
3092 3095 sys.exit(1)
3093 3096 os.chdir(self._testdir)
3094 3097
3095 3098 self._usecorrectpython()
3096 3099
3097 3100 if self.options.py3_warnings and not self.options.anycoverage:
3098 3101 vlog("# Updating hg command to enable Py3k Warnings switch")
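# The installed script's first line is its shebang; appending ' -3'
# makes the Python 2 interpreter emit Py3k compatibility warnings.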
3099 3102 with open(os.path.join(self._bindir, 'hg'), 'rb') as f:
3100 3103 lines = [line.rstrip() for line in f]
3101 3104 lines[0] += ' -3'
3102 3105 with open(os.path.join(self._bindir, 'hg'), 'wb') as f:
3103 3106 for line in lines:
3104 3107 f.write(line + '\n')
3105 3108
3106 3109 hgbat = os.path.join(self._bindir, b'hg.bat')
3107 3110 if os.path.isfile(hgbat):
3108 3111 # hg.bat expects to be put in bin/scripts while run-tests.py
3109 3112 # installation layout puts it in bin/ directly. Fix it
3110 3113 with open(hgbat, 'rb') as f:
3111 3114 data = f.read()
3112 3115 if b'"%~dp0..\python" "%~dp0hg" %*' in data:
3113 3116 data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
3114 3117 b'"%~dp0python" "%~dp0hg" %*')
3115 3118 with open(hgbat, 'wb') as f:
3116 3119 f.write(data)
3117 3120 else:
3118 3121 print('WARNING: cannot fix hg.bat reference to python.exe')
3119 3122
3120 3123 if self.options.anycoverage:
3121 3124 custom = os.path.join(self._testdir, 'sitecustomize.py')
3122 3125 target = os.path.join(self._pythondir, 'sitecustomize.py')
3123 3126 vlog('# Installing coverage trigger to %s' % target)
3124 3127 shutil.copyfile(custom, target)
3125 3128 rc = os.path.join(self._testdir, '.coveragerc')
3126 3129 vlog('# Installing coverage rc to %s' % rc)
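# coverage.py reads COVERAGE_PROCESS_START at interpreter startup (via the
# sitecustomize hook installed above) to begin measuring in child processes.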
3127 3130 os.environ['COVERAGE_PROCESS_START'] = rc
3128 3131 covdir = os.path.join(self._installdir, '..', 'coverage')
3129 3132 try:
3130 3133 os.mkdir(covdir)
3131 3134 except OSError as e:
3132 3135 if e.errno != errno.EEXIST:
3133 3136 raise
3134 3137
3135 3138 os.environ['COVERAGE_DIR'] = covdir
3136 3139
3137 3140 def _checkhglib(self, verb):
3138 3141 """Ensure that the 'mercurial' package imported by python is
3139 3142 the one we expect it to be. If not, print a warning to stderr."""
3140 3143 if ((self._bindir == self._pythondir) and
3141 3144 (self._bindir != self._tmpbindir)):
3142 3145 # The pythondir has been inferred from --with-hg flag.
3143 3146 # We cannot expect anything sensible here.
3144 3147 return
3145 3148 expecthg = os.path.join(self._pythondir, b'mercurial')
3146 3149 actualhg = self._gethgpath()
3147 3150 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
3148 3151 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
3149 3152 ' (expected %s)\n'
3150 3153 % (verb, actualhg, expecthg))
3151 3154 def _gethgpath(self):
3152 3155 """Return the path to the mercurial package that is actually found by
3153 3156 the current Python interpreter."""
3154 3157 if self._hgpath is not None:
3155 3158 return self._hgpath
3156 3159
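# Ask a child interpreter where it really imports mercurial from, rather
# than guessing from our own sys.path.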
3157 3160 cmd = b'"%s" -c "import mercurial; print (mercurial.__path__[0])"'
3158 3161 cmd = cmd % PYTHON
3159 3162 if PYTHON3:
3160 3163 cmd = _strpath(cmd)
3161 3164
3162 3165 p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
3163 3166 out, err = p.communicate()
3164 3167
3165 3168 self._hgpath = out.strip()
3166 3169
3167 3170 return self._hgpath
3168 3171
3169 3172 def _installchg(self):
3170 3173 """Install chg into the test environment"""
3171 3174 vlog('# Performing temporary installation of CHG')
3172 3175 assert os.path.dirname(self._bindir) == self._installdir
3173 3176 assert self._hgroot, 'must be called after _installhg()'
3174 3177 cmd = (b'"%(make)s" clean install PREFIX="%(prefix)s"'
3175 3178 % {b'make': 'make', # TODO: switch by option or environment?
3176 3179 b'prefix': self._installdir})
3177 3180 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
3178 3181 vlog("# Running", cmd)
3179 3182 proc = subprocess.Popen(cmd, shell=True, cwd=cwd,
3180 3183 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
3181 3184 stderr=subprocess.STDOUT)
3182 3185 out, _err = proc.communicate()
3183 3186 if proc.returncode != 0:
3184 3187 if PYTHON3:
3185 3188 sys.stdout.buffer.write(out)
3186 3189 else:
3187 3190 sys.stdout.write(out)
3188 3191 sys.exit(1)
3189 3192
3190 3193 def _outputcoverage(self):
3191 3194 """Produce code coverage output."""
3192 3195 import coverage
3193 3196 coverage = coverage.coverage
3194 3197
3195 3198 vlog('# Producing coverage report')
3196 3199 # chdir is the easiest way to get short, relative paths in the
3197 3200 # output.
3198 3201 os.chdir(self._hgroot)
3199 3202 covdir = os.path.join(self._installdir, '..', 'coverage')
3200 3203 cov = coverage(data_file=os.path.join(covdir, 'cov'))
3201 3204
3202 3205 # Map install directory paths back to source directory.
3203 3206 cov.config.paths['srcdir'] = ['.', self._pythondir]
3204 3207
3205 3208 cov.combine()
3206 3209
3207 3210 omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
3208 3211 cov.report(ignore_errors=True, omit=omit)
3209 3212
3210 3213 if self.options.htmlcov:
3211 3214 htmldir = os.path.join(self._outputdir, 'htmlcov')
3212 3215 cov.html_report(directory=htmldir, omit=omit)
3213 3216 if self.options.annotate:
3214 3217 adir = os.path.join(self._outputdir, 'annotated')
3215 3218 if not os.path.isdir(adir):
3216 3219 os.mkdir(adir)
3217 3220 cov.annotate(directory=adir, omit=omit)
3218 3221
3219 3222 def _findprogram(self, program):
3220 3223 """Search PATH for an executable program"""
3221 3224 dpb = _bytespath(os.defpath)
3222 3225 sepb = _bytespath(os.pathsep)
3223 3226 for p in osenvironb.get(b'PATH', dpb).split(sepb):
3224 3227 name = os.path.join(p, program)
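# os.access() cannot usefully check the executable bit on Windows, so
# the first PATH candidate is accepted there without further checks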
3225 3228 if os.name == 'nt' or os.access(name, os.X_OK):
3226 3229 return name
3227 3230 return None
3228 3231
3229 3232 def _checktools(self):
3230 3233 """Ensure tools required to run tests are present."""
3231 3234 for p in self.REQUIREDTOOLS:
3232 3235 if os.name == 'nt' and not p.endswith(b'.exe'):
3233 3236 p += b'.exe'
3234 3237 found = self._findprogram(p)
3235 3238 if found:
3236 3239 vlog("# Found prerequisite", p, "at", found)
3237 3240 else:
3238 3241 print("WARNING: Did not find prerequisite tool: %s " %
3239 3242 p.decode("utf-8"))
3240 3243
3241 3244 def aggregateexceptions(path):
3242 3245 exceptioncounts = collections.Counter()
3243 3246 testsbyfailure = collections.defaultdict(set)
3244 3247 failuresbytest = collections.defaultdict(set)
3245 3248
3246 3249 for f in os.listdir(path):
3247 3250 with open(os.path.join(path, f), 'rb') as fh:
3248 3251 data = fh.read().split(b'\0')
3249 3252 if len(data) != 5:
3250 3253 continue
3251 3254
3252 3255 exc, mainframe, hgframe, hgline, testname = data
3253 3256 exc = exc.decode('utf-8')
3254 3257 mainframe = mainframe.decode('utf-8')
3255 3258 hgframe = hgframe.decode('utf-8')
3256 3259 hgline = hgline.decode('utf-8')
3257 3260 testname = testname.decode('utf-8')
3258 3261
3259 3262 key = (hgframe, hgline, exc)
3260 3263 exceptioncounts[key] += 1
3261 3264 testsbyfailure[key].add(testname)
3262 3265 failuresbytest[testname].add(key)
3263 3266
3264 3267 # For each failure, find the test with the fewest total failures.
3265 3268 leastfailing = {}
3266 3269 for key, tests in testsbyfailure.items():
3267 3270 fewesttest = None
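# start with a sentinel larger than any plausible failure count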
3268 3271 fewestcount = 99999999
3269 3272 for test in sorted(tests):
3270 3273 if len(failuresbytest[test]) < fewestcount:
3271 3274 fewesttest = test
3272 3275 fewestcount = len(failuresbytest[test])
3273 3276
3274 3277 leastfailing[key] = (fewestcount, fewesttest)
3275 3278
3276 3279 # Create a combined counter so we can sort by total occurrences and
3277 3280 # impacted tests.
3278 3281 combined = {}
3279 3282 for key in exceptioncounts:
3280 3283 combined[key] = (exceptioncounts[key],
3281 3284 len(testsbyfailure[key]),
3282 3285 leastfailing[key][0],
3283 3286 leastfailing[key][1])
3284 3287
3285 3288 return {
3286 3289 'exceptioncounts': exceptioncounts,
3287 3290 'total': sum(exceptioncounts.values()),
3288 3291 'combined': combined,
3289 3292 'leastfailing': leastfailing,
3290 3293 'byfailure': testsbyfailure,
3291 3294 'bytest': failuresbytest,
3292 3295 }
3293 3296
3294 3297 if __name__ == '__main__':
3295 3298 runner = TestRunner()
3296 3299
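# On Windows, put stdio into binary mode so test output bytes are not
# mangled by CRLF translation.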
3297 3300 try:
3298 3301 import msvcrt
3299 3302 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
3300 3303 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
3301 3304 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
3302 3305 except ImportError:
3303 3306 pass
3304 3307
3305 3308 sys.exit(runner.run(sys.argv[1:]))
@@ -1,1938 +1,1938
1 1 This file tests the behavior of run-tests.py itself.
2 2
3 3 Avoid interference from actual test env:
4 4
5 5 $ . "$TESTDIR/helper-runtests.sh"
6 6
7 7 Smoke test with install
8 8 ============
9 9 $ "$PYTHON" $TESTDIR/run-tests.py $HGTEST_RUN_TESTS_PURE -l
10 10 running 0 tests using 0 parallel processes
11 11
12 12 # Ran 0 tests, 0 skipped, 0 failed.
13 13
14 14 Define a helper to avoid the install step
15 15 =============
16 16 $ rt()
17 17 > {
18 18 > "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` -j1 "$@"
19 19 > }
20 20
21 21 error paths
22 22
23 23 #if symlink
24 24 $ ln -s `which true` hg
25 25 $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
26 26 warning: --with-hg should specify an hg script
27 27 running 0 tests using 0 parallel processes
28 28
29 29 # Ran 0 tests, 0 skipped, 0 failed.
30 30 $ rm hg
31 31 #endif
32 32
33 33 #if execbit
34 34 $ touch hg
35 35 $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
36 36 usage: run-tests.py [options] [tests]
37 37 run-tests.py: error: --with-hg must specify an executable hg script
38 38 [2]
39 39 $ rm hg
40 40 #endif
41 41
42 42 Features for testing optional lines
43 43 ===================================
44 44
45 45 $ cat > hghaveaddon.py <<EOF
46 46 > import hghave
47 47 > @hghave.check("custom", "custom hghave feature")
48 48 > def has_custom():
49 49 > return True
50 50 > @hghave.check("missing", "missing hghave feature")
51 51 > def has_missing():
52 52 > return False
53 53 > EOF
54 54
55 55 an empty test
56 56 =======================
57 57
58 58 $ touch test-empty.t
59 59 $ rt
60 60 running 1 tests using 1 parallel processes
61 61 .
62 62 # Ran 1 tests, 0 skipped, 0 failed.
63 63 $ rm test-empty.t
64 64
65 65 a successful test
66 66 =======================
67 67
68 68 $ cat > test-success.t << EOF
69 69 > $ echo babar
70 70 > babar
71 71 > $ echo xyzzy
72 72 > dont_print (?)
73 73 > nothing[42]line (re) (?)
74 74 > never*happens (glob) (?)
75 75 > more_nothing (?)
76 76 > xyzzy
77 77 > nor this (?)
78 78 > $ printf 'abc\ndef\nxyz\n'
79 79 > 123 (?)
80 80 > abc
81 81 > def (?)
82 82 > 456 (?)
83 83 > xyz
84 84 > $ printf 'zyx\nwvu\ntsr\n'
85 85 > abc (?)
86 86 > zyx (custom !)
87 87 > wvu
88 88 > no_print (no-custom !)
89 89 > tsr (no-missing !)
90 90 > missing (missing !)
91 91 > EOF
92 92
93 93 $ rt
94 94 running 1 tests using 1 parallel processes
95 95 .
96 96 # Ran 1 tests, 0 skipped, 0 failed.
97 97
98 98 failing test
99 99 ==================
100 100
101 101 test churn with globs
102 102 $ cat > test-failure.t <<EOF
103 103 > $ echo "bar-baz"; echo "bar-bad"; echo foo
104 104 > bar*bad (glob)
105 105 > bar*baz (glob)
106 106 > | fo (re)
107 107 > EOF
108 108 $ rt test-failure.t
109 109 running 1 tests using 1 parallel processes
110 110
111 111 --- $TESTTMP/test-failure.t
112 112 +++ $TESTTMP/test-failure.t.err
113 113 @@ -1,4 +1,4 @@
114 114 $ echo "bar-baz"; echo "bar-bad"; echo foo
115 115 + bar*baz (glob)
116 116 bar*bad (glob)
117 117 - bar*baz (glob)
118 118 - | fo (re)
119 119 + foo
120 120
121 121 ERROR: test-failure.t output changed
122 122 !
123 123 Failed test-failure.t: output changed
124 124 # Ran 1 tests, 0 skipped, 1 failed.
125 125 python hash seed: * (glob)
126 126 [1]
127 127
128 128 test how multiple globs get matched with lines in output
129 129 $ cat > test-failure-globs.t <<EOF
130 130 > $ echo "context"; echo "context"; \
131 131 > echo "key: 1"; echo "value: not a"; \
132 132 > echo "key: 2"; echo "value: not b"; \
133 133 > echo "key: 3"; echo "value: c"; \
134 134 > echo "key: 4"; echo "value: d"
135 135 > context
136 136 > context
137 137 > key: 1
138 138 > value: a
139 139 > key: 2
140 140 > value: b
141 141 > key: 3
142 142 > value: * (glob)
143 143 > key: 4
144 144 > value: * (glob)
145 145 > EOF
146 146 $ rt test-failure-globs.t
147 147 running 1 tests using 1 parallel processes
148 148
149 149 --- $TESTTMP/test-failure-globs.t
150 150 +++ $TESTTMP/test-failure-globs.t.err
151 151 @@ -2,9 +2,9 @@
152 152 context
153 153 context
154 154 key: 1
155 155 - value: a
156 156 + value: not a
157 157 key: 2
158 158 - value: b
159 159 + value: not b
160 160 key: 3
161 161 value: * (glob)
162 162 key: 4
163 163
164 164 ERROR: test-failure-globs.t output changed
165 165 !
166 166 Failed test-failure-globs.t: output changed
167 167 # Ran 1 tests, 0 skipped, 1 failed.
168 168 python hash seed: * (glob)
169 169 [1]
170 170 $ rm test-failure-globs.t
171 171
172 172 test diff colorisation
173 173
174 174 #if no-windows pygments
175 175 $ rt test-failure.t --color always
176 176 running 1 tests using 1 parallel processes
177 177
178 178 \x1b[38;5;124m--- $TESTTMP/test-failure.t\x1b[39m (esc)
179 179 \x1b[38;5;34m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc)
180 180 \x1b[38;5;90;01m@@ -1,4 +1,4 @@\x1b[39;00m (esc)
181 181 $ echo "bar-baz"; echo "bar-bad"; echo foo
182 182 \x1b[38;5;34m+ bar*baz (glob)\x1b[39m (esc)
183 183 bar*bad (glob)
184 184 \x1b[38;5;124m- bar*baz (glob)\x1b[39m (esc)
185 185 \x1b[38;5;124m- | fo (re)\x1b[39m (esc)
186 186 \x1b[38;5;34m+ foo\x1b[39m (esc)
187 187
188 188 \x1b[38;5;88mERROR: \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m output changed\x1b[39m (esc)
189 189 !
190 190 \x1b[38;5;88mFailed \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m: output changed\x1b[39m (esc)
191 191 # Ran 1 tests, 0 skipped, 1 failed.
192 192 python hash seed: * (glob)
193 193 [1]
194 194
195 195 $ rt test-failure.t 2> tmp.log
196 196 running 1 tests using 1 parallel processes
197 197 [1]
198 198 $ cat tmp.log
199 199
200 200 --- $TESTTMP/test-failure.t
201 201 +++ $TESTTMP/test-failure.t.err
202 202 @@ -1,4 +1,4 @@
203 203 $ echo "bar-baz"; echo "bar-bad"; echo foo
204 204 + bar*baz (glob)
205 205 bar*bad (glob)
206 206 - bar*baz (glob)
207 207 - | fo (re)
208 208 + foo
209 209
210 210 ERROR: test-failure.t output changed
211 211 !
212 212 Failed test-failure.t: output changed
213 213 # Ran 1 tests, 0 skipped, 1 failed.
214 214 python hash seed: * (glob)
215 215 #endif
216 216
217 217 $ cat > test-failure.t << EOF
218 218 > $ true
219 219 > should go away (true !)
220 220 > $ true
221 221 > should stay (false !)
222 222 >
223 223 > Should remove first line, not second or third
224 224 > $ echo 'testing'
225 225 > baz*foo (glob) (true !)
226 226 > foobar*foo (glob) (false !)
227 227 > te*ting (glob) (true !)
228 228 >
229 229 > Should keep first two lines, remove third and last
230 230 > $ echo 'testing'
231 231 > test.ng (re) (true !)
232 232 > foo.ar (re) (false !)
233 233 > b.r (re) (true !)
234 234 > missing (?)
235 235 > awol (true !)
236 236 >
237 237 > The "missing" line should stay, even though awol is dropped
238 238 > $ echo 'testing'
239 239 > test.ng (re) (true !)
240 240 > foo.ar (?)
241 241 > awol
242 242 > missing (?)
243 243 > EOF
244 244 $ rt test-failure.t
245 245 running 1 tests using 1 parallel processes
246 246
247 247 --- $TESTTMP/test-failure.t
248 248 +++ $TESTTMP/test-failure.t.err
249 249 @@ -1,11 +1,9 @@
250 250 $ true
251 251 - should go away (true !)
252 252 $ true
253 253 should stay (false !)
254 254
255 255 Should remove first line, not second or third
256 256 $ echo 'testing'
257 257 - baz*foo (glob) (true !)
258 258 foobar*foo (glob) (false !)
259 259 te*ting (glob) (true !)
260 260
261 261 foo.ar (re) (false !)
262 262 missing (?)
263 263 @@ -13,13 +11,10 @@
264 264 $ echo 'testing'
265 265 test.ng (re) (true !)
266 266 foo.ar (re) (false !)
267 267 - b.r (re) (true !)
268 268 missing (?)
269 269 - awol (true !)
270 270
271 271 The "missing" line should stay, even though awol is dropped
272 272 $ echo 'testing'
273 273 test.ng (re) (true !)
274 274 foo.ar (?)
275 275 - awol
276 276 missing (?)
277 277
278 278 ERROR: test-failure.t output changed
279 279 !
280 280 Failed test-failure.t: output changed
281 281 # Ran 1 tests, 0 skipped, 1 failed.
282 282 python hash seed: * (glob)
283 283 [1]
284 284
285 285 basic failing test
286 286 $ cat > test-failure.t << EOF
287 287 > $ echo babar
288 288 > rataxes
289 289 > This is a noop statement so that
290 290 > this test is still more bytes than success.
291 291 > pad pad pad pad............................................................
292 292 > pad pad pad pad............................................................
293 293 > pad pad pad pad............................................................
294 294 > pad pad pad pad............................................................
295 295 > pad pad pad pad............................................................
296 296 > pad pad pad pad............................................................
297 297 > EOF
298 298
299 299 >>> fh = open('test-failure-unicode.t', 'wb')
300 300 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
301 301 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
302 302
303 303 $ rt
304 304 running 3 tests using 1 parallel processes
305 305
306 306 --- $TESTTMP/test-failure.t
307 307 +++ $TESTTMP/test-failure.t.err
308 308 @@ -1,5 +1,5 @@
309 309 $ echo babar
310 310 - rataxes
311 311 + babar
312 312 This is a noop statement so that
313 313 this test is still more bytes than success.
314 314 pad pad pad pad............................................................
315 315
316 316 ERROR: test-failure.t output changed
317 317 !.
318 318 --- $TESTTMP/test-failure-unicode.t
319 319 +++ $TESTTMP/test-failure-unicode.t.err
320 320 @@ -1,2 +1,2 @@
321 321 $ echo babar\xce\xb1 (esc)
322 322 - l\xce\xb5\xce\xb5t (esc)
323 323 + babar\xce\xb1 (esc)
324 324
325 325 ERROR: test-failure-unicode.t output changed
326 326 !
327 Failed test-failure-unicode.t: output changed
327 328 Failed test-failure.t: output changed
328 Failed test-failure-unicode.t: output changed
329 329 # Ran 3 tests, 0 skipped, 2 failed.
330 330 python hash seed: * (glob)
331 331 [1]
332 332
333 333 test --outputdir
334 334 $ mkdir output
335 335 $ rt --outputdir output
336 336 running 3 tests using 1 parallel processes
337 337
338 338 --- $TESTTMP/test-failure.t
339 339 +++ $TESTTMP/output/test-failure.t.err
340 340 @@ -1,5 +1,5 @@
341 341 $ echo babar
342 342 - rataxes
343 343 + babar
344 344 This is a noop statement so that
345 345 this test is still more bytes than success.
346 346 pad pad pad pad............................................................
347 347
348 348 ERROR: test-failure.t output changed
349 349 !.
350 350 --- $TESTTMP/test-failure-unicode.t
351 351 +++ $TESTTMP/output/test-failure-unicode.t.err
352 352 @@ -1,2 +1,2 @@
353 353 $ echo babar\xce\xb1 (esc)
354 354 - l\xce\xb5\xce\xb5t (esc)
355 355 + babar\xce\xb1 (esc)
356 356
357 357 ERROR: test-failure-unicode.t output changed
358 358 !
359 Failed test-failure-unicode.t: output changed
359 360 Failed test-failure.t: output changed
360 Failed test-failure-unicode.t: output changed
361 361 # Ran 3 tests, 0 skipped, 2 failed.
362 362 python hash seed: * (glob)
363 363 [1]
364 364 $ ls -a output
365 365 .
366 366 ..
367 367 .testtimes
368 368 test-failure-unicode.t.err
369 369 test-failure.t.err
370 370
371 371 test --xunit support
372 372 $ rt --xunit=xunit.xml
373 373 running 3 tests using 1 parallel processes
374 374
375 375 --- $TESTTMP/test-failure.t
376 376 +++ $TESTTMP/test-failure.t.err
377 377 @@ -1,5 +1,5 @@
378 378 $ echo babar
379 379 - rataxes
380 380 + babar
381 381 This is a noop statement so that
382 382 this test is still more bytes than success.
383 383 pad pad pad pad............................................................
384 384
385 385 ERROR: test-failure.t output changed
386 386 !.
387 387 --- $TESTTMP/test-failure-unicode.t
388 388 +++ $TESTTMP/test-failure-unicode.t.err
389 389 @@ -1,2 +1,2 @@
390 390 $ echo babar\xce\xb1 (esc)
391 391 - l\xce\xb5\xce\xb5t (esc)
392 392 + babar\xce\xb1 (esc)
393 393
394 394 ERROR: test-failure-unicode.t output changed
395 395 !
396 Failed test-failure-unicode.t: output changed
396 397 Failed test-failure.t: output changed
397 Failed test-failure-unicode.t: output changed
398 398 # Ran 3 tests, 0 skipped, 2 failed.
399 399 python hash seed: * (glob)
400 400 [1]
401 401 $ cat xunit.xml
402 402 <?xml version="1.0" encoding="utf-8"?>
403 403 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
404 404 <testcase name="test-success.t" time="*"/> (glob)
405 405 <testcase name="test-failure-unicode.t" time="*"> (glob)
406 406 <failure message="output changed" type="output-mismatch">
407 407 <![CDATA[--- $TESTTMP/test-failure-unicode.t
408 408 +++ $TESTTMP/test-failure-unicode.t.err
409 409 @@ -1,2 +1,2 @@
410 410 $ echo babar\xce\xb1 (esc)
411 411 - l\xce\xb5\xce\xb5t (esc)
412 412 + babar\xce\xb1 (esc)
413 413 ]]> </failure>
414 414 </testcase>
415 415 <testcase name="test-failure.t" time="*"> (glob)
416 416 <failure message="output changed" type="output-mismatch">
417 417 <![CDATA[--- $TESTTMP/test-failure.t
418 418 +++ $TESTTMP/test-failure.t.err
419 419 @@ -1,5 +1,5 @@
420 420 $ echo babar
421 421 - rataxes
422 422 + babar
423 423 This is a noop statement so that
424 424 this test is still more bytes than success.
425 425 pad pad pad pad............................................................
426 426 ]]> </failure>
427 427 </testcase>
428 428 </testsuite>
429 429
430 430 $ cat .testtimes
431 431 test-empty.t * (glob)
432 432 test-failure-globs.t * (glob)
433 433 test-failure-unicode.t * (glob)
434 434 test-failure.t * (glob)
435 435 test-success.t * (glob)
436 436
437 437 $ rt --list-tests
438 438 test-failure-unicode.t
439 439 test-failure.t
440 440 test-success.t
441 441
442 442 $ rt --list-tests --json
443 443 test-failure-unicode.t
444 444 test-failure.t
445 445 test-success.t
446 446 $ cat report.json
447 447 testreport ={
448 448 "test-failure-unicode.t": {
449 449 "result": "success"
450 450 },
451 451 "test-failure.t": {
452 452 "result": "success"
453 453 },
454 454 "test-success.t": {
455 455 "result": "success"
456 456 }
457 457 } (no-eol)
458 458
459 459 $ rt --list-tests --xunit=xunit.xml
460 460 test-failure-unicode.t
461 461 test-failure.t
462 462 test-success.t
463 463 $ cat xunit.xml
464 464 <?xml version="1.0" encoding="utf-8"?>
465 465 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
466 466 <testcase name="test-failure-unicode.t"/>
467 467 <testcase name="test-failure.t"/>
468 468 <testcase name="test-success.t"/>
469 469 </testsuite>
470 470
471 471 $ rt --list-tests test-failure* --json --xunit=xunit.xml --outputdir output
472 472 test-failure-unicode.t
473 473 test-failure.t
474 474 $ cat output/report.json
475 475 testreport ={
476 476 "test-failure-unicode.t": {
477 477 "result": "success"
478 478 },
479 479 "test-failure.t": {
480 480 "result": "success"
481 481 }
482 482 } (no-eol)
483 483 $ cat xunit.xml
484 484 <?xml version="1.0" encoding="utf-8"?>
485 485 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
486 486 <testcase name="test-failure-unicode.t"/>
487 487 <testcase name="test-failure.t"/>
488 488 </testsuite>
489 489
490 490 $ rm test-failure-unicode.t
491 491
492 492 test for --retest
493 493 ====================
494 494
495 495 $ rt --retest
496 496 running 2 tests using 1 parallel processes
497 497
498 498 --- $TESTTMP/test-failure.t
499 499 +++ $TESTTMP/test-failure.t.err
500 500 @@ -1,5 +1,5 @@
501 501 $ echo babar
502 502 - rataxes
503 503 + babar
504 504 This is a noop statement so that
505 505 this test is still more bytes than success.
506 506 pad pad pad pad............................................................
507 507
508 508 ERROR: test-failure.t output changed
509 509 !
510 510 Failed test-failure.t: output changed
511 511 # Ran 2 tests, 1 skipped, 1 failed.
512 512 python hash seed: * (glob)
513 513 [1]
514 514
515 515 --retest works with --outputdir
516 516 $ rm -r output
517 517 $ mkdir output
518 518 $ mv test-failure.t.err output
519 519 $ rt --retest --outputdir output
520 520 running 2 tests using 1 parallel processes
521 521
522 522 --- $TESTTMP/test-failure.t
523 523 +++ $TESTTMP/output/test-failure.t.err
524 524 @@ -1,5 +1,5 @@
525 525 $ echo babar
526 526 - rataxes
527 527 + babar
528 528 This is a noop statement so that
529 529 this test is still more bytes than success.
530 530 pad pad pad pad............................................................
531 531
532 532 ERROR: test-failure.t output changed
533 533 !
534 534 Failed test-failure.t: output changed
535 535 # Ran 2 tests, 1 skipped, 1 failed.
536 536 python hash seed: * (glob)
537 537 [1]
538 538
539 539 Selecting Tests To Run
540 540 ======================
541 541
542 542 successful
543 543
544 544 $ rt test-success.t
545 545 running 1 tests using 1 parallel processes
546 546 .
547 547 # Ran 1 tests, 0 skipped, 0 failed.
548 548
549 549 success w/ keyword
550 550 $ rt -k xyzzy
551 551 running 2 tests using 1 parallel processes
552 552 .
553 553 # Ran 2 tests, 1 skipped, 0 failed.
554 554
555 555 failed
556 556
557 557 $ rt test-failure.t
558 558 running 1 tests using 1 parallel processes
559 559
560 560 --- $TESTTMP/test-failure.t
561 561 +++ $TESTTMP/test-failure.t.err
562 562 @@ -1,5 +1,5 @@
563 563 $ echo babar
564 564 - rataxes
565 565 + babar
566 566 This is a noop statement so that
567 567 this test is still more bytes than success.
568 568 pad pad pad pad............................................................
569 569
570 570 ERROR: test-failure.t output changed
571 571 !
572 572 Failed test-failure.t: output changed
573 573 # Ran 1 tests, 0 skipped, 1 failed.
574 574 python hash seed: * (glob)
575 575 [1]
576 576
577 577 failure w/ keyword
578 578 $ rt -k rataxes
579 579 running 2 tests using 1 parallel processes
580 580
581 581 --- $TESTTMP/test-failure.t
582 582 +++ $TESTTMP/test-failure.t.err
583 583 @@ -1,5 +1,5 @@
584 584 $ echo babar
585 585 - rataxes
586 586 + babar
587 587 This is a noop statement so that
588 588 this test is still more bytes than success.
589 589 pad pad pad pad............................................................
590 590
591 591 ERROR: test-failure.t output changed
592 592 !
593 593 Failed test-failure.t: output changed
594 594 # Ran 2 tests, 1 skipped, 1 failed.
595 595 python hash seed: * (glob)
596 596 [1]
597 597
598 598 Verify that when a process fails to start we show a useful message
599 599 ==================================================================
600 600
601 601 $ cat > test-serve-fail.t <<EOF
602 602 > $ echo 'abort: child process failed to start blah'
603 603 > EOF
604 604 $ rt test-serve-fail.t
605 605 running 1 tests using 1 parallel processes
606 606
607 607 --- $TESTTMP/test-serve-fail.t
608 608 +++ $TESTTMP/test-serve-fail.t.err
609 609 @@ -1* +1,2 @@ (glob)
610 610 $ echo 'abort: child process failed to start blah'
611 611 + abort: child process failed to start blah
612 612
613 613 ERROR: test-serve-fail.t output changed
614 614 !
615 615 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
616 616 # Ran 1 tests, 0 skipped, 1 failed.
617 617 python hash seed: * (glob)
618 618 [1]
619 619 $ rm test-serve-fail.t
620 620
621 621 Verify that we can try other ports
622 622 ===================================
623 623
624 624 Extensions aren't inherited by the invoked run-tests.py. An extension
625 625 introducing a repository requirement could cause this to fail. So we force
626 626 HGRCPATH to get a clean environment.
627 627
628 628 $ HGRCPATH= hg init inuse
629 629 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
630 630 $ cat blocks.pid >> $DAEMON_PIDS
631 631 $ cat > test-serve-inuse.t <<EOF
632 632 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
633 633 > $ cat hg.pid >> \$DAEMON_PIDS
634 634 > EOF
635 635 $ rt test-serve-inuse.t
636 636 running 1 tests using 1 parallel processes
637 637 .
638 638 # Ran 1 tests, 0 skipped, 0 failed.
639 639 $ rm test-serve-inuse.t
640 640 $ killdaemons.py $DAEMON_PIDS
641 641
642 642 Running In Debug Mode
643 643 ======================
644 644
645 645 $ rt --debug 2>&1 | grep -v pwd
646 646 running 2 tests using 1 parallel processes
647 647 + alias hg=hg.exe (windows !)
648 648 + echo *SALT* 0 0 (glob)
649 649 *SALT* 0 0 (glob)
650 650 + echo babar
651 651 babar
652 652 + echo *SALT* 10 0 (glob)
653 653 *SALT* 10 0 (glob)
654 654 .+ alias hg=hg.exe (windows !)
655 655 *+ echo *SALT* 0 0 (glob)
656 656 *SALT* 0 0 (glob)
657 657 + echo babar
658 658 babar
659 659 + echo *SALT* 2 0 (glob)
660 660 *SALT* 2 0 (glob)
661 661 + echo xyzzy
662 662 xyzzy
663 663 + echo *SALT* 9 0 (glob)
664 664 *SALT* 9 0 (glob)
665 665 + printf *abc\ndef\nxyz\n* (glob)
666 666 abc
667 667 def
668 668 xyz
669 669 + echo *SALT* 15 0 (glob)
670 670 *SALT* 15 0 (glob)
671 671 + printf *zyx\nwvu\ntsr\n* (glob)
672 672 zyx
673 673 wvu
674 674 tsr
675 675 + echo *SALT* 22 0 (glob)
676 676 *SALT* 22 0 (glob)
677 677 .
678 678 # Ran 2 tests, 0 skipped, 0 failed.
679 679
680 680 Parallel runs
681 681 ==============
682 682
683 683 (duplicate the failing test to get predictable output)
684 684 $ cp test-failure.t test-failure-copy.t
685 685
686 686 $ rt --jobs 2 test-failure*.t -n
687 687 running 2 tests using 2 parallel processes
688 688 !!
689 689 Failed test-failure*.t: output changed (glob)
690 690 Failed test-failure*.t: output changed (glob)
691 691 # Ran 2 tests, 0 skipped, 2 failed.
692 692 python hash seed: * (glob)
693 693 [1]
694 694
695 695 failures in parallel with --first should only print one failure
696 696 $ rt --jobs 2 --first test-failure*.t
697 697 running 2 tests using 2 parallel processes
698 698
699 699 --- $TESTTMP/test-failure*.t (glob)
700 700 +++ $TESTTMP/test-failure*.t.err (glob)
701 701 @@ -1,5 +1,5 @@
702 702 $ echo babar
703 703 - rataxes
704 704 + babar
705 705 This is a noop statement so that
706 706 this test is still more bytes than success.
707 707 pad pad pad pad............................................................
708 708
709 709 Failed test-failure*.t: output changed (glob)
710 710 Failed test-failure*.t: output changed (glob)
711 711 # Ran 2 tests, 0 skipped, 2 failed.
712 712 python hash seed: * (glob)
713 713 [1]
714 714
715 715
716 716 (delete the duplicated test file)
717 717 $ rm test-failure-copy.t
718 718
719 719 multiple runs per test should be parallelized
720 720
721 721 $ rt --jobs 2 --runs-per-test 2 test-success.t
722 722 running 2 tests using 2 parallel processes
723 723 ..
724 724 # Ran 2 tests, 0 skipped, 0 failed.
725 725
726 726 Interactive run
727 727 ===============
728 728
729 729 (backup the failing test)
730 730 $ cp test-failure.t backup
731 731
732 732 Refuse the fix
733 733
734 734 $ echo 'n' | rt -i
735 735 running 2 tests using 1 parallel processes
736 736
737 737 --- $TESTTMP/test-failure.t
738 738 +++ $TESTTMP/test-failure.t.err
739 739 @@ -1,5 +1,5 @@
740 740 $ echo babar
741 741 - rataxes
742 742 + babar
743 743 This is a noop statement so that
744 744 this test is still more bytes than success.
745 745 pad pad pad pad............................................................
746 746 Accept this change? [n]
747 747 ERROR: test-failure.t output changed
748 748 !.
749 749 Failed test-failure.t: output changed
750 750 # Ran 2 tests, 0 skipped, 1 failed.
751 751 python hash seed: * (glob)
752 752 [1]
753 753
754 754 $ cat test-failure.t
755 755 $ echo babar
756 756 rataxes
757 757 This is a noop statement so that
758 758 this test is still more bytes than success.
759 759 pad pad pad pad............................................................
760 760 pad pad pad pad............................................................
761 761 pad pad pad pad............................................................
762 762 pad pad pad pad............................................................
763 763 pad pad pad pad............................................................
764 764 pad pad pad pad............................................................
765 765
766 766 Interactive with custom view
767 767
768 768 $ echo 'n' | rt -i --view echo
769 769 running 2 tests using 1 parallel processes
770 770 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
771 771 Accept this change? [n]* (glob)
772 772 ERROR: test-failure.t output changed
773 773 !.
774 774 Failed test-failure.t: output changed
775 775 # Ran 2 tests, 0 skipped, 1 failed.
776 776 python hash seed: * (glob)
777 777 [1]
778 778
779 779 View the fix
780 780
781 781 $ echo 'y' | rt --view echo
782 782 running 2 tests using 1 parallel processes
783 783 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
784 784
785 785 ERROR: test-failure.t output changed
786 786 !.
787 787 Failed test-failure.t: output changed
788 788 # Ran 2 tests, 0 skipped, 1 failed.
789 789 python hash seed: * (glob)
790 790 [1]
791 791
792 792 Accept the fix
793 793
794 794 $ cat >> test-failure.t <<EOF
795 795 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
796 796 > saved backup bundle to \$TESTTMP/foo.hg
797 797 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
798 798 > saved backup bundle to $TESTTMP\\foo.hg
799 799 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
800 800 > saved backup bundle to \$TESTTMP/*.hg (glob)
801 801 > EOF
802 802 $ echo 'y' | rt -i 2>&1
803 803 running 2 tests using 1 parallel processes
804 804
805 805 --- $TESTTMP/test-failure.t
806 806 +++ $TESTTMP/test-failure.t.err
807 807 @@ -1,5 +1,5 @@
808 808 $ echo babar
809 809 - rataxes
810 810 + babar
811 811 This is a noop statement so that
812 812 this test is still more bytes than success.
813 813 pad pad pad pad............................................................
814 814 @@ -11,6 +11,6 @@
815 815 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
816 816 saved backup bundle to $TESTTMP/foo.hg
817 817 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
818 818 - saved backup bundle to $TESTTMP\foo.hg
819 819 + saved backup bundle to $TESTTMP/foo.hg
820 820 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
821 821 saved backup bundle to $TESTTMP/*.hg (glob)
822 822 Accept this change? [n] ..
823 823 # Ran 2 tests, 0 skipped, 0 failed.
824 824
825 825 $ sed -e 's,(glob)$,&<,g' test-failure.t
826 826 $ echo babar
827 827 babar
828 828 This is a noop statement so that
829 829 this test is still more bytes than success.
830 830 pad pad pad pad............................................................
831 831 pad pad pad pad............................................................
832 832 pad pad pad pad............................................................
833 833 pad pad pad pad............................................................
834 834 pad pad pad pad............................................................
835 835 pad pad pad pad............................................................
836 836 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
837 837 saved backup bundle to $TESTTMP/foo.hg
838 838 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
839 839 saved backup bundle to $TESTTMP/foo.hg
840 840 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
841 841 saved backup bundle to $TESTTMP/*.hg (glob)<
842 842
843 843 Race condition - the test file is modified while the test is running
844 844
845 845 $ TESTRACEDIR=`pwd`
846 846 $ export TESTRACEDIR
847 847 $ cat > test-race.t <<EOF
848 848 > $ echo 1
849 849 > $ echo "# a new line" >> $TESTRACEDIR/test-race.t
850 850 > EOF
851 851
852 852 $ rt -i test-race.t
853 853 running 1 tests using 1 parallel processes
854 854
855 855 --- $TESTTMP/test-race.t
856 856 +++ $TESTTMP/test-race.t.err
857 857 @@ -1,2 +1,3 @@
858 858 $ echo 1
859 859 + 1
860 860 $ echo "# a new line" >> $TESTTMP/test-race.t
861 861 Reference output has changed (run again to prompt changes)
862 862 ERROR: test-race.t output changed
863 863 !
864 864 Failed test-race.t: output changed
865 865 # Ran 1 tests, 0 skipped, 1 failed.
866 866 python hash seed: * (glob)
867 867 [1]
868 868
869 869 $ rm test-race.t
870 870
871 871 When "#testcases" is used in .t files
872 872
873 873 $ cat >> test-cases.t <<EOF
874 874 > #testcases a b
875 875 > #if a
876 876 > $ echo 1
877 877 > #endif
878 878 > #if b
879 879 > $ echo 2
880 880 > #endif
881 881 > EOF
882 882
883 883 $ cat <<EOF | rt -i test-cases.t 2>&1
884 884 > y
885 885 > y
886 886 > EOF
887 887 running 2 tests using 1 parallel processes
888 888
889 889 --- $TESTTMP/test-cases.t
890 890 +++ $TESTTMP/test-cases.t#a.err
891 891 @@ -1,6 +1,7 @@
892 892 #testcases a b
893 893 #if a
894 894 $ echo 1
895 895 + 1
896 896 #endif
897 897 #if b
898 898 $ echo 2
899 899 Accept this change? [n] .
900 900 --- $TESTTMP/test-cases.t
901 901 +++ $TESTTMP/test-cases.t#b.err
902 902 @@ -5,4 +5,5 @@
903 903 #endif
904 904 #if b
905 905 $ echo 2
906 906 + 2
907 907 #endif
908 908 Accept this change? [n] .
909 909 # Ran 2 tests, 0 skipped, 0 failed.
910 910
911 911 $ cat test-cases.t
912 912 #testcases a b
913 913 #if a
914 914 $ echo 1
915 915 1
916 916 #endif
917 917 #if b
918 918 $ echo 2
919 919 2
920 920 #endif
921 921
922 922 $ cat >> test-cases.t <<'EOF'
923 923 > #if a
924 924 > $ NAME=A
925 925 > #else
926 926 > $ NAME=B
927 927 > #endif
928 928 > $ echo $NAME
929 929 > A (a !)
930 930 > B (b !)
931 931 > EOF
932 932 $ rt test-cases.t
933 933 running 2 tests using 1 parallel processes
934 934 ..
935 935 # Ran 2 tests, 0 skipped, 0 failed.
936 936
937 937 When using multiple dimensions of "#testcases" in .t files
938 938
939 939 $ cat > test-cases.t <<'EOF'
940 940 > #testcases a b
941 941 > #testcases c d
942 942 > #if a d
943 943 > $ echo $TESTCASE
944 944 > a#d
945 945 > #endif
946 946 > #if b c
947 947 > $ echo yes
948 948 > no
949 949 > #endif
950 950 > EOF
951 951 $ rt test-cases.t
952 952 running 4 tests using 1 parallel processes
953 953 ..
954 954 --- $TESTTMP/test-cases.t
955 955 +++ $TESTTMP/test-cases.t#b#c.err
956 956 @@ -6,5 +6,5 @@
957 957 #endif
958 958 #if b c
959 959 $ echo yes
960 960 - no
961 961 + yes
962 962 #endif
963 963
964 964 ERROR: test-cases.t#b#c output changed
965 965 !.
966 966 Failed test-cases.t#b#c: output changed
967 967 # Ran 4 tests, 0 skipped, 1 failed.
968 968 python hash seed: * (glob)
969 969 [1]
970 970
971 971 $ rm test-cases.t#b#c.err
972 972 $ rm test-cases.t
973 973
974 974 (reinstall)
975 975 $ mv backup test-failure.t
976 976
977 977 No Diff
978 978 ===============
979 979
980 980 $ rt --nodiff
981 981 running 2 tests using 1 parallel processes
982 982 !.
983 983 Failed test-failure.t: output changed
984 984 # Ran 2 tests, 0 skipped, 1 failed.
985 985 python hash seed: * (glob)
986 986 [1]
987 987
988 988 test --tmpdir support
989 989 $ rt --tmpdir=$TESTTMP/keep test-success.t
990 990 running 1 tests using 1 parallel processes
991 991
992 992 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t
993 993 Keeping threadtmp dir: $TESTTMP/keep/child1
994 994 .
995 995 # Ran 1 tests, 0 skipped, 0 failed.
996 996
997 997 timeouts
998 998 ========
999 999 $ cat > test-timeout.t <<EOF
1000 1000 > $ sleep 2
1001 1001 > $ echo pass
1002 1002 > pass
1003 1003 > EOF
1004 1004 > echo '#require slow' > test-slow-timeout.t
1005 1005 > cat test-timeout.t >> test-slow-timeout.t
1006 1006 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
1007 1007 running 2 tests using 1 parallel processes
1008 1008 st
1009 1009 Skipped test-slow-timeout.t: missing feature: allow slow tests (use --allow-slow-tests)
1010 1010 Failed test-timeout.t: timed out
1011 1011 # Ran 1 tests, 1 skipped, 1 failed.
1012 1012 python hash seed: * (glob)
1013 1013 [1]
1014 1014 $ rt --timeout=1 --slowtimeout=3 \
1015 1015 > test-timeout.t test-slow-timeout.t --allow-slow-tests
1016 1016 running 2 tests using 1 parallel processes
1017 1017 .t
1018 1018 Failed test-timeout.t: timed out
1019 1019 # Ran 2 tests, 0 skipped, 1 failed.
1020 1020 python hash seed: * (glob)
1021 1021 [1]
1022 1022 $ rm test-timeout.t test-slow-timeout.t
1023 1023
1024 1024 test for --time
1025 1025 ==================
1026 1026
1027 1027 $ rt test-success.t --time
1028 1028 running 1 tests using 1 parallel processes
1029 1029 .
1030 1030 # Ran 1 tests, 0 skipped, 0 failed.
1031 1031 # Producing time report
1032 1032 start end cuser csys real Test
1033 1033 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
1034 1034
1035 1035 test for --time with --jobs enabled
1036 1036 ====================================
1037 1037
1038 1038 $ rt test-success.t --time --jobs 2
1039 1039 running 1 tests using 1 parallel processes
1040 1040 .
1041 1041 # Ran 1 tests, 0 skipped, 0 failed.
1042 1042 # Producing time report
1043 1043 start end cuser csys real Test
1044 1044 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
1045 1045
1046 1046 Skips
1047 1047 ================
1048 1048 $ cat > test-skip.t <<EOF
1049 1049 > $ echo xyzzy
1050 1050 > #if true
1051 1051 > #require false
1052 1052 > #end
1053 1053 > EOF
1054 1054 $ cat > test-noskip.t <<EOF
1055 1055 > #if false
1056 1056 > #require false
1057 1057 > #endif
1058 1058 > EOF
1059 1059 $ rt --nodiff
1060 1060 running 4 tests using 1 parallel processes
1061 1061 !.s.
1062 1062 Skipped test-skip.t: missing feature: nail clipper
1063 1063 Failed test-failure.t: output changed
1064 1064 # Ran 3 tests, 1 skipped, 1 failed.
1065 1065 python hash seed: * (glob)
1066 1066 [1]
1067 1067
1068 1068 $ rm test-noskip.t
1069 1069 $ rt --keyword xyzzy
1070 1070 running 3 tests using 1 parallel processes
1071 1071 .s
1072 1072 Skipped test-skip.t: missing feature: nail clipper
1073 1073 # Ran 2 tests, 2 skipped, 0 failed.
1074 1074
1075 1075 Skips with xml
1076 1076 $ rt --keyword xyzzy \
1077 1077 > --xunit=xunit.xml
1078 1078 running 3 tests using 1 parallel processes
1079 1079 .s
1080 1080 Skipped test-skip.t: missing feature: nail clipper
1081 1081 # Ran 2 tests, 2 skipped, 0 failed.
1082 1082 $ cat xunit.xml
1083 1083 <?xml version="1.0" encoding="utf-8"?>
1084 1084 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
1085 1085 <testcase name="test-success.t" time="*"/> (glob)
1086 1086 <testcase name="test-skip.t">
1087 1087 <skipped>
1088 1088 <![CDATA[missing feature: nail clipper]]> </skipped>
1089 1089 </testcase>
1090 1090 </testsuite>
1091 1091
1092 1092 Tests skipped because they are missing or blacklisted don't count as executed:
1093 1093 $ echo test-failure.t > blacklist
1094 1094 $ rt --blacklist=blacklist --json\
1095 1095 > test-failure.t test-bogus.t
1096 1096 running 2 tests using 1 parallel processes
1097 1097 ss
1098 1098 Skipped test-bogus.t: Doesn't exist
1099 1099 Skipped test-failure.t: blacklisted
1100 1100 # Ran 0 tests, 2 skipped, 0 failed.
1101 1101 $ cat report.json
1102 1102 testreport ={
1103 1103 "test-bogus.t": {
1104 1104 "result": "skip"
1105 1105 },
1106 1106 "test-failure.t": {
1107 1107 "result": "skip"
1108 1108 }
1109 1109 } (no-eol)
1110 1110
1111 1111 Whitelist trumps blacklist
1112 1112 $ echo test-failure.t > whitelist
1113 1113 $ rt --blacklist=blacklist --whitelist=whitelist --json\
1114 1114 > test-failure.t test-bogus.t
1115 1115 running 2 tests using 1 parallel processes
1116 1116 s
1117 1117 --- $TESTTMP/test-failure.t
1118 1118 +++ $TESTTMP/test-failure.t.err
1119 1119 @@ -1,5 +1,5 @@
1120 1120 $ echo babar
1121 1121 - rataxes
1122 1122 + babar
1123 1123 This is a noop statement so that
1124 1124 this test is still more bytes than success.
1125 1125 pad pad pad pad............................................................
1126 1126
1127 1127 ERROR: test-failure.t output changed
1128 1128 !
1129 1129 Skipped test-bogus.t: Doesn't exist
1130 1130 Failed test-failure.t: output changed
1131 1131 # Ran 1 tests, 1 skipped, 1 failed.
1132 1132 python hash seed: * (glob)
1133 1133 [1]
1134 1134
1135 1135 Ensure that --test-list causes only the tests listed in that file to
1136 1136 be executed.
1137 1137 $ echo test-success.t >> onlytest
1138 1138 $ rt --test-list=onlytest
1139 1139 running 1 tests using 1 parallel processes
1140 1140 .
1141 1141 # Ran 1 tests, 0 skipped, 0 failed.
1142 1142 $ echo test-bogus.t >> anothertest
1143 1143 $ rt --test-list=onlytest --test-list=anothertest
1144 1144 running 2 tests using 1 parallel processes
1145 1145 s.
1146 1146 Skipped test-bogus.t: Doesn't exist
1147 1147 # Ran 1 tests, 1 skipped, 0 failed.
1148 1148 $ rm onlytest anothertest
1149 1149
1150 1150 test for --json
1151 1151 ==================
1152 1152
1153 1153 $ rt --json
1154 1154 running 3 tests using 1 parallel processes
1155 1155
1156 1156 --- $TESTTMP/test-failure.t
1157 1157 +++ $TESTTMP/test-failure.t.err
1158 1158 @@ -1,5 +1,5 @@
1159 1159 $ echo babar
1160 1160 - rataxes
1161 1161 + babar
1162 1162 This is a noop statement so that
1163 1163 this test is still more bytes than success.
1164 1164 pad pad pad pad............................................................
1165 1165
1166 1166 ERROR: test-failure.t output changed
1167 1167 !.s
1168 1168 Skipped test-skip.t: missing feature: nail clipper
1169 1169 Failed test-failure.t: output changed
1170 1170 # Ran 2 tests, 1 skipped, 1 failed.
1171 1171 python hash seed: * (glob)
1172 1172 [1]
1173 1173
1174 1174 $ cat report.json
1175 1175 testreport ={
1176 1176 "test-failure.t": [\{] (re)
1177 1177 "csys": "\s*[\d\.]{4,5}", ? (re)
1178 1178 "cuser": "\s*[\d\.]{4,5}", ? (re)
1179 1179 "diff": "---.+\+\+\+.+", ? (re)
1180 1180 "end": "\s*[\d\.]{4,5}", ? (re)
1181 1181 "result": "failure", ? (re)
1182 1182 "start": "\s*[\d\.]{4,5}", ? (re)
1183 1183 "time": "\s*[\d\.]{4,5}" (re)
1184 1184 }, ? (re)
1185 1185 "test-skip.t": {
1186 1186 "csys": "\s*[\d\.]{4,5}", ? (re)
1187 1187 "cuser": "\s*[\d\.]{4,5}", ? (re)
1188 1188 "diff": "", ? (re)
1189 1189 "end": "\s*[\d\.]{4,5}", ? (re)
1190 1190 "result": "skip", ? (re)
1191 1191 "start": "\s*[\d\.]{4,5}", ? (re)
1192 1192 "time": "\s*[\d\.]{4,5}" (re)
1193 1193 }, ? (re)
1194 1194 "test-success.t": [\{] (re)
1195 1195 "csys": "\s*[\d\.]{4,5}", ? (re)
1196 1196 "cuser": "\s*[\d\.]{4,5}", ? (re)
1197 1197 "diff": "", ? (re)
1198 1198 "end": "\s*[\d\.]{4,5}", ? (re)
1199 1199 "result": "success", ? (re)
1200 1200 "start": "\s*[\d\.]{4,5}", ? (re)
1201 1201 "time": "\s*[\d\.]{4,5}" (re)
1202 1202 }
1203 1203 } (no-eol)
1204 1204 --json with --outputdir
1205 1205
1206 1206 $ rm report.json
1207 1207 $ rm -r output
1208 1208 $ mkdir output
1209 1209 $ rt --json --outputdir output
1210 1210 running 3 tests using 1 parallel processes
1211 1211
1212 1212 --- $TESTTMP/test-failure.t
1213 1213 +++ $TESTTMP/output/test-failure.t.err
1214 1214 @@ -1,5 +1,5 @@
1215 1215 $ echo babar
1216 1216 - rataxes
1217 1217 + babar
1218 1218 This is a noop statement so that
1219 1219 this test is still more bytes than success.
1220 1220 pad pad pad pad............................................................
1221 1221
1222 1222 ERROR: test-failure.t output changed
1223 1223 !.s
1224 1224 Skipped test-skip.t: missing feature: nail clipper
1225 1225 Failed test-failure.t: output changed
1226 1226 # Ran 2 tests, 1 skipped, 1 failed.
1227 1227 python hash seed: * (glob)
1228 1228 [1]
1229 1229 $ f report.json
1230 1230 report.json: file not found
1231 1231 $ cat output/report.json
1232 1232 testreport ={
1233 1233 "test-failure.t": [\{] (re)
1234 1234 "csys": "\s*[\d\.]{4,5}", ? (re)
1235 1235 "cuser": "\s*[\d\.]{4,5}", ? (re)
1236 1236 "diff": "---.+\+\+\+.+", ? (re)
1237 1237 "end": "\s*[\d\.]{4,5}", ? (re)
1238 1238 "result": "failure", ? (re)
1239 1239 "start": "\s*[\d\.]{4,5}", ? (re)
1240 1240 "time": "\s*[\d\.]{4,5}" (re)
1241 1241 }, ? (re)
1242 1242 "test-skip.t": {
1243 1243 "csys": "\s*[\d\.]{4,5}", ? (re)
1244 1244 "cuser": "\s*[\d\.]{4,5}", ? (re)
1245 1245 "diff": "", ? (re)
1246 1246 "end": "\s*[\d\.]{4,5}", ? (re)
1247 1247 "result": "skip", ? (re)
1248 1248 "start": "\s*[\d\.]{4,5}", ? (re)
1249 1249 "time": "\s*[\d\.]{4,5}" (re)
1250 1250 }, ? (re)
1251 1251 "test-success.t": [\{] (re)
1252 1252 "csys": "\s*[\d\.]{4,5}", ? (re)
1253 1253 "cuser": "\s*[\d\.]{4,5}", ? (re)
1254 1254 "diff": "", ? (re)
1255 1255 "end": "\s*[\d\.]{4,5}", ? (re)
1256 1256 "result": "success", ? (re)
1257 1257 "start": "\s*[\d\.]{4,5}", ? (re)
1258 1258 "time": "\s*[\d\.]{4,5}" (re)
1259 1259 }
1260 1260 } (no-eol)
1261 1261 $ ls -a output
1262 1262 .
1263 1263 ..
1264 1264 .testtimes
1265 1265 report.json
1266 1266 test-failure.t.err
1267 1267
1268 1268 Test that failed tests accepted through interactive mode are properly reported:
1269 1269
1270 1270 $ cp test-failure.t backup
1271 1271 $ echo y | rt --json -i
1272 1272 running 3 tests using 1 parallel processes
1273 1273
1274 1274 --- $TESTTMP/test-failure.t
1275 1275 +++ $TESTTMP/test-failure.t.err
1276 1276 @@ -1,5 +1,5 @@
1277 1277 $ echo babar
1278 1278 - rataxes
1279 1279 + babar
1280 1280 This is a noop statement so that
1281 1281 this test is still more bytes than success.
1282 1282 pad pad pad pad............................................................
1283 1283 Accept this change? [n] ..s
1284 1284 Skipped test-skip.t: missing feature: nail clipper
1285 1285 # Ran 2 tests, 1 skipped, 0 failed.
1286 1286
1287 1287 $ cat report.json
1288 1288 testreport ={
1289 1289 "test-failure.t": [\{] (re)
1290 1290 "csys": "\s*[\d\.]{4,5}", ? (re)
1291 1291 "cuser": "\s*[\d\.]{4,5}", ? (re)
1292 1292 "diff": "", ? (re)
1293 1293 "end": "\s*[\d\.]{4,5}", ? (re)
1294 1294 "result": "success", ? (re)
1295 1295 "start": "\s*[\d\.]{4,5}", ? (re)
1296 1296 "time": "\s*[\d\.]{4,5}" (re)
1297 1297 }, ? (re)
1298 1298 "test-skip.t": {
1299 1299 "csys": "\s*[\d\.]{4,5}", ? (re)
1300 1300 "cuser": "\s*[\d\.]{4,5}", ? (re)
1301 1301 "diff": "", ? (re)
1302 1302 "end": "\s*[\d\.]{4,5}", ? (re)
1303 1303 "result": "skip", ? (re)
1304 1304 "start": "\s*[\d\.]{4,5}", ? (re)
1305 1305 "time": "\s*[\d\.]{4,5}" (re)
1306 1306 }, ? (re)
1307 1307 "test-success.t": [\{] (re)
1308 1308 "csys": "\s*[\d\.]{4,5}", ? (re)
1309 1309 "cuser": "\s*[\d\.]{4,5}", ? (re)
1310 1310 "diff": "", ? (re)
1311 1311 "end": "\s*[\d\.]{4,5}", ? (re)
1312 1312 "result": "success", ? (re)
1313 1313 "start": "\s*[\d\.]{4,5}", ? (re)
1314 1314 "time": "\s*[\d\.]{4,5}" (re)
1315 1315 }
1316 1316 } (no-eol)
1317 1317 $ mv backup test-failure.t
1318 1318
1319 1319 a backslash at the end of a line with glob matching is handled properly
1320 1320
1321 1321 $ cat > test-glob-backslash.t << EOF
1322 1322 > $ echo 'foo bar \\'
1323 1323 > foo * \ (glob)
1324 1324 > EOF
1325 1325
1326 1326 $ rt test-glob-backslash.t
1327 1327 running 1 tests using 1 parallel processes
1328 1328 .
1329 1329 # Ran 1 tests, 0 skipped, 0 failed.
1330 1330
1331 1331 $ rm -f test-glob-backslash.t
1332 1332
1333 1333 Test globbing of local IP addresses
1334 1334 $ echo 172.16.18.1
1335 1335 $LOCALIP (glob)
1336 1336 $ echo dead:beef::1
1337 1337 $LOCALIP (glob)
1338 1338
1339 1339 Add support for external test formatter
1340 1340 =======================================
1341 1341
1342 1342 $ CUSTOM_TEST_RESULT=basic_test_result "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` -j1 "$@" test-success.t test-failure.t
1343 1343 running 2 tests using 1 parallel processes
1344 1344
1345 1345 # Ran 2 tests, 0 skipped, 0 failed.
1346 1346 ON_START! <__main__.TestSuite tests=[<__main__.TTest testMethod=test-failure.t>, <__main__.TTest testMethod=test-success.t>]>
1347 1347 FAILURE! test-failure.t output changed
1348 1348 SUCCESS! test-success.t
1349 1349 ON_END!
1350 1350
1351 1351 Test reusability for third party tools
1352 1352 ======================================
1353 1353
1354 1354 $ mkdir "$TESTTMP"/anothertests
1355 1355 $ cd "$TESTTMP"/anothertests
1356 1356
1357 1357 test that `run-tests.py` can execute hghave, even if it is not run from
1358 1358 within the Mercurial source tree.
1359 1359
1360 1360 $ cat > test-hghave.t <<EOF
1361 1361 > #require true
1362 1362 > $ echo foo
1363 1363 > foo
1364 1364 > EOF
1365 1365 $ rt test-hghave.t
1366 1366 running 1 tests using 1 parallel processes
1367 1367 .
1368 1368 # Ran 1 tests, 0 skipped, 0 failed.
1369 1369
1370 1370 test that RUNTESTDIR refers to the directory in which the currently
1371 1371 running `run-tests.py` is placed.
1372 1372
1373 1373 $ cat > test-runtestdir.t <<EOF
1374 1374 > - $TESTDIR, in which test-run-tests.t is placed
1375 1375 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
1376 1376 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
1377 1377 >
1378 1378 > #if windows
1379 1379 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
1380 1380 > #else
1381 1381 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
1382 1382 > #endif
1383 1383 > If this prints a path, that means RUNTESTDIR didn't equal
1384 1384 > TESTDIR as it should have.
1385 1385 > $ test "\$RUNTESTDIR" = "$TESTDIR" || echo "\$RUNTESTDIR"
1386 1386 > This should print the start of check-code. If this passes but the
1387 1387 > previous check failed, that means we found a copy of check-code at whatever
1388 1388 > RUNTESTDIR ended up containing, even though it doesn't match TESTDIR.
1389 1389 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py | sed 's@.!.*python@#!USRBINENVPY@'
1390 1390 > #!USRBINENVPY
1391 1391 > #
1392 1392 > # check-code - a style and portability checker for Mercurial
1393 1393 > EOF
1394 1394 $ rt test-runtestdir.t
1395 1395 running 1 tests using 1 parallel processes
1396 1396 .
1397 1397 # Ran 1 tests, 0 skipped, 0 failed.
1398 1398
1399 1399 #if execbit
1400 1400
1401 1401 test that TESTDIR is included in PATH
1402 1402
1403 1403 $ cat > custom-command.sh <<EOF
1404 1404 > #!/bin/sh
1405 1405 > echo "hello world"
1406 1406 > EOF
1407 1407 $ chmod +x custom-command.sh
1408 1408 $ cat > test-testdir-path.t <<EOF
1409 1409 > $ custom-command.sh
1410 1410 > hello world
1411 1411 > EOF
1412 1412 $ rt test-testdir-path.t
1413 1413 running 1 tests using 1 parallel processes
1414 1414 .
1415 1415 # Ran 1 tests, 0 skipped, 0 failed.
1416 1416
1417 1417 #endif
1418 1418
1419 1419 test support for --allow-slow-tests
1420 1420 $ cat > test-very-slow-test.t <<EOF
1421 1421 > #require slow
1422 1422 > $ echo pass
1423 1423 > pass
1424 1424 > EOF
1425 1425 $ rt test-very-slow-test.t
1426 1426 running 1 tests using 1 parallel processes
1427 1427 s
1428 1428 Skipped test-very-slow-test.t: missing feature: allow slow tests (use --allow-slow-tests)
1429 1429 # Ran 0 tests, 1 skipped, 0 failed.
1430 1430 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
1431 1431 running 1 tests using 1 parallel processes
1432 1432 .
1433 1433 # Ran 1 tests, 0 skipped, 0 failed.
1434 1434
1435 1435 support for running a test outside the current directory
1436 1436 $ mkdir nonlocal
1437 1437 $ cat > nonlocal/test-is-not-here.t << EOF
1438 1438 > $ echo pass
1439 1439 > pass
1440 1440 > EOF
1441 1441 $ rt nonlocal/test-is-not-here.t
1442 1442 running 1 tests using 1 parallel processes
1443 1443 .
1444 1444 # Ran 1 tests, 0 skipped, 0 failed.
1445 1445
1446 1446 support for automatically discovering tests if an argument is a folder
1447 1447 $ mkdir tmp && cd tmp
1448 1448
1449 1449 $ cat > test-uno.t << EOF
1450 1450 > $ echo line
1451 1451 > line
1452 1452 > EOF
1453 1453
1454 1454 $ cp test-uno.t test-dos.t
1455 1455 $ cd ..
1456 1456 $ cp -R tmp tmpp
1457 1457 $ cp tmp/test-uno.t test-solo.t
1458 1458
1459 1459 $ rt tmp/ test-solo.t tmpp
1460 1460 running 5 tests using 1 parallel processes
1461 1461 .....
1462 1462 # Ran 5 tests, 0 skipped, 0 failed.
1463 1463 $ rm -rf tmp tmpp
1464 1464
support for running run-tests.py from another directory
  $ mkdir tmp && cd tmp

  $ cat > useful-file.sh << EOF
  > important command
  > EOF

  $ cat > test-folder.t << EOF
  >   $ cat \$TESTDIR/useful-file.sh
  >   important command
  > EOF

  $ cat > test-folder-fail.t << EOF
  >   $ cat \$TESTDIR/useful-file.sh
  >   important commando
  > EOF

  $ cd ..
  $ rt tmp/test-*.t
  running 2 tests using 1 parallel processes

  --- $TESTTMP/anothertests/tmp/test-folder-fail.t
  +++ $TESTTMP/anothertests/tmp/test-folder-fail.t.err
  @@ -1,2 +1,2 @@
     $ cat $TESTDIR/useful-file.sh
  -  important commando
  +  important command

  ERROR: test-folder-fail.t output changed
  !.
  Failed test-folder-fail.t: output changed
  # Ran 2 tests, 0 skipped, 1 failed.
  python hash seed: * (glob)
  [1]

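Both tests resolve \$TESTDIR to the tmp/ directory that contains them, even
though rt was started from the parent, so only the deliberate "commando" typo
fails. A one-line sketch of the assumed derivation:

    import os

    def testdir_for(test_path):
        # TESTDIR is the directory holding the .t file itself, independent
        # of where run-tests.py was started.
        return os.path.dirname(os.path.abspath(test_path))
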
support for bisecting failed tests automatically
  $ hg init bisect
  $ cd bisect
  $ cat >> test-bisect.t <<EOF
  >   $ echo pass
  >   pass
  > EOF
  $ hg add test-bisect.t
  $ hg ci -m 'good'
  $ cat >> test-bisect.t <<EOF
  >   $ echo pass
  >   fail
  > EOF
  $ hg ci -m 'bad'
  $ rt --known-good-rev=0 test-bisect.t
  running 1 tests using 1 parallel processes

  --- $TESTTMP/anothertests/bisect/test-bisect.t
  +++ $TESTTMP/anothertests/bisect/test-bisect.t.err
  @@ -1,4 +1,4 @@
     $ echo pass
     pass
     $ echo pass
  -  fail
  +  pass

  ERROR: test-bisect.t output changed
  !
  Failed test-bisect.t: output changed
  test-bisect.t broken by 72cbf122d116 (bad)
  # Ran 1 tests, 0 skipped, 1 failed.
  python hash seed: * (glob)
  [1]

  $ cd ..

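One plausible way to implement --known-good-rev is to hand the failing test
to hg's own bisect machinery, re-running it per revision until the first bad
changeset (72cbf122d116 above) is found. A hedged sketch of that flow, not
run-tests.py's exact code:

    import subprocess

    def bisect_failure(repo, good_rev, runner_cmd, test):
        # Mark the endpoints, then let hg drive the search by re-running
        # the test at each candidate revision.
        hg = ['hg', '-R', repo, 'bisect']
        subprocess.check_call(hg + ['--reset'])
        subprocess.check_call(hg + ['--good', good_rev])
        subprocess.check_call(hg + ['--bad', '.'])
        subprocess.call(hg + ['--command', '%s %s' % (runner_cmd, test)])
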
support bisecting a separate repo

  $ hg init bisect-dependent
  $ cd bisect-dependent
  $ cat > test-bisect-dependent.t <<EOF
  >   $ tail -1 \$TESTDIR/../bisect/test-bisect.t
  >   pass
  > EOF
  $ hg commit -Am dependent test-bisect-dependent.t

  $ rt --known-good-rev=0 test-bisect-dependent.t
  running 1 tests using 1 parallel processes

  --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
  +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
  @@ -1,2 +1,2 @@
     $ tail -1 $TESTDIR/../bisect/test-bisect.t
  -  pass
  +  fail

  ERROR: test-bisect-dependent.t output changed
  !
  Failed test-bisect-dependent.t: output changed
  Failed to identify failure point for test-bisect-dependent.t
  # Ran 1 tests, 0 skipped, 1 failed.
  python hash seed: * (glob)
  [1]

  $ rt --bisect-repo=../test-bisect test-bisect-dependent.t
  usage: run-tests.py [options] [tests]
  run-tests.py: error: --bisect-repo cannot be used without --known-good-rev
  [2]

  $ rt --known-good-rev=0 --bisect-repo=../bisect test-bisect-dependent.t
  running 1 tests using 1 parallel processes

  --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
  +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
  @@ -1,2 +1,2 @@
     $ tail -1 $TESTDIR/../bisect/test-bisect.t
  -  pass
  +  fail

  ERROR: test-bisect-dependent.t output changed
  !
  Failed test-bisect-dependent.t: output changed
  test-bisect-dependent.t broken by 72cbf122d116 (bad)
  # Ran 1 tests, 0 skipped, 1 failed.
  python hash seed: * (glob)
  [1]

  $ cd ..

Test that a broken #if statement doesn't break run-tests threading.
===================================================================
  $ mkdir broken
  $ cd broken
  $ cat > test-broken.t <<EOF
  > true
  > #if notarealhghavefeature
  >   $ false
  > #endif
  > EOF
  $ for f in 1 2 3 4 ; do
  > cat > test-works-$f.t <<EOF
  > This is test case $f
  >   $ sleep 1
  > EOF
  > done
  $ rt -j 2
  running 5 tests using 2 parallel processes
  ....
  # Ran 5 tests, 0 skipped, 0 failed.
  skipped: unknown feature: notarealhghavefeature

  $ cd ..
  $ rm -rf broken

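The point of the exercise: an unrecognized feature name inside #if must
surface as an orderly skip rather than an exception that could wedge one of
the two worker threads. A rough sketch of that guard (hypothetical names):

    from unittest import SkipTest

    def require_known_feature(feature, known_features):
        # A bogus feature name becomes a controlled skip, so the pool
        # keeps draining the remaining tests.
        if feature not in known_features:
            raise SkipTest('unknown feature: %s' % feature)
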
Test cases in .t files
======================
  $ mkdir cases
  $ cd cases
  $ cat > test-cases-abc.t <<'EOF'
  > #testcases A B C
  >   $ V=B
  > #if A
  >   $ V=A
  > #endif
  > #if C
  >   $ V=C
  > #endif
  >   $ echo $V | sed 's/A/C/'
  >   C
  > #if C
  >   $ [ $V = C ]
  > #endif
  > #if A
  >   $ [ $V = C ]
  >   [1]
  > #endif
  > #if no-C
  >   $ [ $V = C ]
  >   [1]
  > #endif
  >   $ [ $V = D ]
  >   [1]
  > EOF
  $ rt
  running 3 tests using 1 parallel processes
  .
  --- $TESTTMP/anothertests/cases/test-cases-abc.t
  +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
  @@ -7,7 +7,7 @@
     $ V=C
   #endif
     $ echo $V | sed 's/A/C/'
  -  C
  +  B
   #if C
     $ [ $V = C ]
   #endif

  ERROR: test-cases-abc.t#B output changed
  !.
  Failed test-cases-abc.t#B: output changed
  # Ran 3 tests, 0 skipped, 1 failed.
  python hash seed: * (glob)
  [1]

--restart works

  $ rt --restart
  running 2 tests using 1 parallel processes

  --- $TESTTMP/anothertests/cases/test-cases-abc.t
  +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
  @@ -7,7 +7,7 @@
     $ V=C
   #endif
     $ echo $V | sed 's/A/C/'
  -  C
  +  B
   #if C
     $ [ $V = C ]
   #endif

  ERROR: test-cases-abc.t#B output changed
  !.
  Failed test-cases-abc.t#B: output changed
  # Ran 2 tests, 0 skipped, 1 failed.
  python hash seed: * (glob)
  [1]

--restart works with outputdir

  $ mkdir output
  $ mv test-cases-abc.t#B.err output
  $ rt --restart --outputdir output
  running 2 tests using 1 parallel processes

  --- $TESTTMP/anothertests/cases/test-cases-abc.t
  +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t#B.err
  @@ -7,7 +7,7 @@
     $ V=C
   #endif
     $ echo $V | sed 's/A/C/'
  -  C
  +  B
   #if C
     $ [ $V = C ]
   #endif

  ERROR: test-cases-abc.t#B output changed
  !.
  Failed test-cases-abc.t#B: output changed
  # Ran 2 tests, 0 skipped, 1 failed.
  python hash seed: * (glob)
  [1]

Test TESTCASE variable

  $ cat > test-cases-ab.t <<'EOF'
  >   $ dostuff() {
  >   > echo "In case $TESTCASE"
  >   > }
  > #testcases A B
  > #if A
  >   $ dostuff
  >   In case A
  > #endif
  > #if B
  >   $ dostuff
  >   In case B
  > #endif
  > EOF
  $ rt test-cases-ab.t
  running 2 tests using 1 parallel processes
  ..
  # Ran 2 tests, 0 skipped, 0 failed.

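Each name on the #testcases line turns the file into a separate test run, and
the runner exports the active name as TESTCASE so the body can branch on it.
A sketch of the expansion (hypothetical helper):

    def expand_testcases(path, cases):
        # One job per declared case; '#' joins the file name and the
        # case label, matching names like test-cases-abc.t#B above.
        return [('%s#%s' % (path, case), {'TESTCASE': case})
                for case in cases]
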
Support running a specific test case

  $ rt "test-cases-abc.t#B"
  running 1 tests using 1 parallel processes

  --- $TESTTMP/anothertests/cases/test-cases-abc.t
  +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
  @@ -7,7 +7,7 @@
     $ V=C
   #endif
     $ echo $V | sed 's/A/C/'
  -  C
  +  B
   #if C
     $ [ $V = C ]
   #endif

  ERROR: test-cases-abc.t#B output changed
  !
  Failed test-cases-abc.t#B: output changed
  # Ran 1 tests, 0 skipped, 1 failed.
  python hash seed: * (glob)
  [1]

Support running multiple test cases in the same file

  $ rt test-cases-abc.t#B test-cases-abc.t#C
  running 2 tests using 1 parallel processes

  --- $TESTTMP/anothertests/cases/test-cases-abc.t
  +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
  @@ -7,7 +7,7 @@
     $ V=C
   #endif
     $ echo $V | sed 's/A/C/'
  -  C
  +  B
   #if C
     $ [ $V = C ]
   #endif

  ERROR: test-cases-abc.t#B output changed
  !.
  Failed test-cases-abc.t#B: output changed
  # Ran 2 tests, 0 skipped, 1 failed.
  python hash seed: * (glob)
  [1]

Support ignoring invalid test cases

  $ rt test-cases-abc.t#B test-cases-abc.t#D
  running 1 tests using 1 parallel processes

  --- $TESTTMP/anothertests/cases/test-cases-abc.t
  +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
  @@ -7,7 +7,7 @@
     $ V=C
   #endif
     $ echo $V | sed 's/A/C/'
  -  C
  +  B
   #if C
     $ [ $V = C ]
   #endif

  ERROR: test-cases-abc.t#B output changed
  !
  Failed test-cases-abc.t#B: output changed
  # Ran 1 tests, 0 skipped, 1 failed.
  python hash seed: * (glob)
  [1]

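Selecting a case on the command line is the file name plus '#' plus the case
label, and a label no #testcases line declares (like #D above) simply selects
nothing instead of erroring out. A sketch of the argument parsing
(hypothetical helper):

    def split_case_arg(arg):
        # 'test-cases-abc.t#B' -> ('test-cases-abc.t', 'B');
        # a bare file name selects every declared case.
        if '#' in arg:
            path, case = arg.split('#', 1)
            return path, case
        return arg, None
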
Support running test cases with complex names

  $ cat > test-cases-advanced-cases.t <<'EOF'
  > #testcases simple case-with-dashes casewith_-.chars
  >   $ echo $TESTCASE
  >   simple
  > EOF

  $ cat test-cases-advanced-cases.t
  #testcases simple case-with-dashes casewith_-.chars
    $ echo $TESTCASE
    simple

  $ rt test-cases-advanced-cases.t
  running 3 tests using 1 parallel processes

  --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
  +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
  @@ -1,3 +1,3 @@
   #testcases simple case-with-dashes casewith_-.chars
     $ echo $TESTCASE
  -  simple
  +  case-with-dashes

  ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
  !
  --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
  +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
  @@ -1,3 +1,3 @@
   #testcases simple case-with-dashes casewith_-.chars
     $ echo $TESTCASE
  -  simple
  +  casewith_-.chars

  ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
  !.
  Failed test-cases-advanced-cases.t#case-with-dashes: output changed
  Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
  # Ran 3 tests, 0 skipped, 2 failed.
  python hash seed: * (glob)
  [1]

  $ rt "test-cases-advanced-cases.t#case-with-dashes"
  running 1 tests using 1 parallel processes

  --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
  +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
  @@ -1,3 +1,3 @@
   #testcases simple case-with-dashes casewith_-.chars
     $ echo $TESTCASE
  -  simple
  +  case-with-dashes

  ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
  !
  Failed test-cases-advanced-cases.t#case-with-dashes: output changed
  # Ran 1 tests, 0 skipped, 1 failed.
  python hash seed: * (glob)
  [1]

  $ rt "test-cases-advanced-cases.t#casewith_-.chars"
  running 1 tests using 1 parallel processes

  --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
  +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
  @@ -1,3 +1,3 @@
   #testcases simple case-with-dashes casewith_-.chars
     $ echo $TESTCASE
  -  simple
  +  casewith_-.chars

  ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
  !
  Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
  # Ran 1 tests, 0 skipped, 1 failed.
  python hash seed: * (glob)
  [1]

Test automatic pattern replacement
==================================

  $ cat << EOF >> common-pattern.py
  > substitutions = [
  >     (br'foo-(.*)\\b',
  >      br'\$XXX=\\1\$'),
  >     (br'bar\\n',
  >      br'\$YYY$\\n'),
  > ]
  > EOF

  $ cat << EOF >> test-substitution.t
  >   $ echo foo-12
  >   \$XXX=12$
  >   $ echo foo-42
  >   \$XXX=42$
  >   $ echo bar prior
  >   bar prior
  >   $ echo lastbar
  >   last\$YYY$
  >   $ echo foo-bar foo-baz
  > EOF

  $ rt test-substitution.t
  running 1 tests using 1 parallel processes

  --- $TESTTMP/anothertests/cases/test-substitution.t
  +++ $TESTTMP/anothertests/cases/test-substitution.t.err
  @@ -7,3 +7,4 @@
     $ echo lastbar
     last$YYY$
     $ echo foo-bar foo-baz
  +  $XXX=bar foo-baz$

  ERROR: test-substitution.t output changed
  !
  Failed test-substitution.t: output changed
  # Ran 1 tests, 0 skipped, 1 failed.
  python hash seed: * (glob)
  [1]

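The substitutions list in common-pattern.py is a sequence of
(pattern, replacement) byte regexes, and each pair is applied to every line
of test output before it is compared; that is why the unexpected last line
already appears rewritten to $XXX=bar foo-baz$ in the diff. A minimal sketch
of the application step, assuming plain re.sub semantics:

    import re

    def apply_substitutions(line, substitutions):
        # substitutions: [(pattern, replacement), ...] as defined in
        # common-pattern.py above; e.g. b'foo-12' becomes b'$XXX=12$'.
        for pattern, replacement in substitutions:
            line = re.sub(pattern, replacement, line)
        return line
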
--extra-config-opt works

  $ cat << EOF >> test-config-opt.t
  >   $ hg init test-config-opt
  >   $ hg -R test-config-opt purge
  > EOF

  $ rt --extra-config-opt extensions.purge= test-config-opt.t
  running 1 tests using 1 parallel processes
  .
  # Ran 1 tests, 0 skipped, 0 failed.
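
The purge command only works here because --extra-config-opt enables the
extension for every hg invocation inside the test. A plausible sketch is to
fold each section.key=value option into the hgrc the runner generates for the
test environment (hypothetical helper):

    def write_extra_config(hgrc, opts):
        # Each opt looks like 'section.key=value'; emit it under its
        # [section] header in the generated hgrc.
        for opt in opts:
            section, rest = opt.split('.', 1)
            key, value = rest.split('=', 1)
            hgrc.write('[%s]\n%s = %s\n' % (section, key, value))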