testrunner: make `-j100 --runs-per-test=100 test-foo.t` use 100 jobs...
Martin von Zweigbergk
r41214:53327bfb default
#!/usr/bin/env python
#
# run-tests.py - Run a set of tests on Mercurial
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

# Modifying this script is tricky because it has many modes:
#   - serial (default) vs parallel (-jN, N > 1)
#   - no coverage (default) vs coverage (-c, -C, -s)
#   - temp install (default) vs specific hg script (--with-hg, --local)
#   - tests are a mix of shell scripts and Python scripts
#
# If you change this script, it is recommended that you ensure you
# haven't broken it by running it in various modes with a representative
# sample of test scripts. For example:
#
#  1) serial, no coverage, temp install:
#      ./run-tests.py test-s*
#  2) serial, no coverage, local hg:
#      ./run-tests.py --local test-s*
#  3) serial, coverage, temp install:
#      ./run-tests.py -c test-s*
#  4) serial, coverage, local hg:
#      ./run-tests.py -c --local test-s* # unsupported
#  5) parallel, no coverage, temp install:
#      ./run-tests.py -j2 test-s*
#  6) parallel, no coverage, local hg:
#      ./run-tests.py -j2 --local test-s*
#  7) parallel, coverage, temp install:
#      ./run-tests.py -j2 -c test-s* # currently broken
#  8) parallel, coverage, local install:
#      ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
#  9) parallel, custom tmp dir:
#      ./run-tests.py -j2 --tmpdir /tmp/myhgtests
#  10) parallel, pure, tests that call run-tests:
#      ./run-tests.py --pure `grep -l run-tests.py *.t`
#
# (You could use any subset of the tests: test-s* happens to match
# enough that it's worth doing parallel runs, few enough that it
# completes fairly quickly, includes both shell and Python scripts, and
# includes some scripts that run daemon processes.)

from __future__ import absolute_import, print_function

import argparse
import collections
import difflib
import distutils.version as version
import errno
import json
import multiprocessing
import os
import random
import re
import shutil
import signal
import socket
import subprocess
import sys
import sysconfig
import tempfile
import threading
import time
import unittest
import uuid
import xml.dom.minidom as minidom

try:
    import Queue as queue
except ImportError:
    import queue

try:
    import shlex
    shellquote = shlex.quote
except (ImportError, AttributeError):
    import pipes
    shellquote = pipes.quote

if os.environ.get('RTUNICODEPEDANTRY', False):
    try:
        reload(sys)
        sys.setdefaultencoding("undefined")
    except NameError:
        pass

processlock = threading.Lock()

pygmentspresent = False
# ANSI color is unsupported prior to Windows 10
if os.name != 'nt':
    try: # is pygments installed
        import pygments
        import pygments.lexers as lexers
        import pygments.lexer as lexer
        import pygments.formatters as formatters
        import pygments.token as token
        import pygments.style as style
        pygmentspresent = True
        difflexer = lexers.DiffLexer()
        terminal256formatter = formatters.Terminal256Formatter()
    except ImportError:
        pass

if pygmentspresent:
    class TestRunnerStyle(style.Style):
        default_style = ""
        skipped = token.string_to_tokentype("Token.Generic.Skipped")
        failed = token.string_to_tokentype("Token.Generic.Failed")
        skippedname = token.string_to_tokentype("Token.Generic.SName")
        failedname = token.string_to_tokentype("Token.Generic.FName")
        styles = {
            skipped: '#e5e5e5',
            skippedname: '#00ffff',
            failed: '#7f0000',
            failedname: '#ff0000',
        }

    class TestRunnerLexer(lexer.RegexLexer):
        testpattern = r'[\w-]+\.(t|py)(#[a-zA-Z0-9_\-\.]+)?'
        tokens = {
            'root': [
                (r'^Skipped', token.Generic.Skipped, 'skipped'),
                (r'^Failed ', token.Generic.Failed, 'failed'),
                (r'^ERROR: ', token.Generic.Failed, 'failed'),
            ],
            'skipped': [
                (testpattern, token.Generic.SName),
                (r':.*', token.Generic.Skipped),
            ],
            'failed': [
                (testpattern, token.Generic.FName),
                (r'(:| ).*', token.Generic.Failed),
            ]
        }

    runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
    runnerlexer = TestRunnerLexer()

origenviron = os.environ.copy()

if sys.version_info > (3, 5, 0):
    PYTHON3 = True
    xrange = range # we use xrange in one place, and we'd rather not use range
    def _bytespath(p):
        if p is None:
            return p
        return p.encode('utf-8')

    def _strpath(p):
        if p is None:
            return p
        return p.decode('utf-8')

    osenvironb = getattr(os, 'environb', None)
    if osenvironb is None:
        # Windows lacks os.environb, for instance. A proxy over the real thing
        # instead of a copy allows the environment to be updated via bytes on
        # all platforms.
        class environbytes(object):
            def __init__(self, strenv):
                self.__len__ = strenv.__len__
                self.clear = strenv.clear
                self._strenv = strenv
            def __getitem__(self, k):
                v = self._strenv.__getitem__(_strpath(k))
                return _bytespath(v)
            def __setitem__(self, k, v):
                self._strenv.__setitem__(_strpath(k), _strpath(v))
            def __delitem__(self, k):
                self._strenv.__delitem__(_strpath(k))
            def __contains__(self, k):
                return self._strenv.__contains__(_strpath(k))
            def __iter__(self):
                return iter([_bytespath(k) for k in iter(self._strenv)])
            def get(self, k, default=None):
                v = self._strenv.get(_strpath(k), _strpath(default))
                return _bytespath(v)
            def pop(self, k, default=None):
                v = self._strenv.pop(_strpath(k), _strpath(default))
                return _bytespath(v)

        osenvironb = environbytes(os.environ)
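        # A minimal usage sketch (the variable name is hypothetical): the
        # proxy accepts and returns bytes while delegating to the str-keyed
        # os.environ underneath:
        #
        #   osenvironb[b'HGTEST_DEMO'] = b'1'  # stored under str key
        #   osenvironb.get(b'HGTEST_DEMO')     # -> b'1'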

    getcwdb = getattr(os, 'getcwdb')
    if not getcwdb or os.name == 'nt':
        getcwdb = lambda: _bytespath(os.getcwd())

elif sys.version_info >= (3, 0, 0):
    print('%s is only supported on Python 3.5+ and 2.7, not %s' %
          (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
    sys.exit(70) # EX_SOFTWARE from `man 3 sysexits`
else:
    PYTHON3 = False

    # In python 2.x, path operations are generally done using
    # bytestrings by default, so we don't have to do any extra
    # fiddling there. We define the wrapper functions anyway just to
    # help keep code consistent between platforms.
    def _bytespath(p):
        return p

    _strpath = _bytespath
    osenvironb = os.environ
    getcwdb = os.getcwd

# For Windows support
wifexited = getattr(os, "WIFEXITED", lambda x: False)

# Whether to use IPv6
def checksocketfamily(name, port=20058):
    """return true if we can listen on localhost using family=name

    name should be either 'AF_INET', or 'AF_INET6'.
    port being used is okay - EADDRINUSE is considered as successful.
    """
    family = getattr(socket, name, None)
    if family is None:
        return False
    try:
        s = socket.socket(family, socket.SOCK_STREAM)
        s.bind(('localhost', port))
        s.close()
        return True
    except socket.error as exc:
        if exc.errno == errno.EADDRINUSE:
            return True
        elif exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
            return False
        else:
            raise
    else:
        return False

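# Usage sketch: checksocketfamily('AF_INET6') is True only where an
# AF_INET6 socket can actually be bound on localhost; note that EADDRINUSE
# still counts as success because it proves the address family works.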
# useipv6 will be set by parseargs
useipv6 = None

def checkportisavailable(port):
    """return true if a port seems free to bind on localhost"""
    if useipv6:
        family = socket.AF_INET6
    else:
        family = socket.AF_INET
    try:
        s = socket.socket(family, socket.SOCK_STREAM)
        s.bind(('localhost', port))
        s.close()
        return True
    except socket.error as exc:
        if exc.errno not in (errno.EADDRINUSE, errno.EADDRNOTAVAIL,
                             errno.EPROTONOSUPPORT):
            raise
        return False

closefds = os.name == 'posix'
def Popen4(cmd, wd, timeout, env=None):
    processlock.acquire()
    p = subprocess.Popen(_strpath(cmd), shell=True, bufsize=-1,
                         cwd=_strpath(wd), env=env,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    processlock.release()

    p.fromchild = p.stdout
    p.tochild = p.stdin
    p.childerr = p.stderr

    p.timeout = False
    if timeout:
        def t():
            start = time.time()
            while time.time() - start < timeout and p.returncode is None:
                time.sleep(.1)
            p.timeout = True
            if p.returncode is None:
                terminate(p)
        threading.Thread(target=t).start()

    return p

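# A usage sketch of Popen4 (the command here is hypothetical). It exposes
# Popen2-style aliases and arms a watchdog thread that terminates the
# child once the timeout elapses:
#
#   proc = Popen4(b'sleep 5', wd=b'.', timeout=1)
#   out = proc.fromchild.read()  # child is terminated after ~1s
#   assert proc.timeout          # the watchdog flags the overrun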
PYTHON = _bytespath(sys.executable.replace('\\', '/'))
IMPL_PATH = b'PYTHONPATH'
if 'java' in sys.platform:
    IMPL_PATH = b'JYTHONPATH'

defaults = {
    'jobs': ('HGTEST_JOBS', multiprocessing.cpu_count()),
    'timeout': ('HGTEST_TIMEOUT', 180),
    'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500),
    'port': ('HGTEST_PORT', 20059),
    'shell': ('HGTEST_SHELL', 'sh'),
}

def canonpath(path):
    return os.path.realpath(os.path.expanduser(path))

def parselistfiles(files, listtype, warn=True):
    entries = dict()
    for filename in files:
        try:
            path = os.path.expanduser(os.path.expandvars(filename))
            f = open(path, "rb")
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            if warn:
                print("warning: no such %s file: %s" % (listtype, filename))
            continue

        for line in f.readlines():
            line = line.split(b'#', 1)[0].strip()
            if line:
                entries[line] = filename

        f.close()
    return entries

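# List files are plain text, one test name per line, with '#' starting a
# comment. A sketch of a blacklist file that parselistfiles accepts:
#
#   # tests that are too flaky on this builder
#   test-convert.t
#   test-https.t  # needs working TLS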
def parsettestcases(path):
    """read a .t test file, return a list of lists of test case names

    If path does not exist, return an empty list.
    """
    cases = []
    try:
        with open(path, 'rb') as f:
            for l in f:
                if l.startswith(b'#testcases '):
                    cases.append(sorted(l[11:].split()))
    except IOError as ex:
        if ex.errno != errno.ENOENT:
            raise
    return cases

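# Sketch: a .t file may declare test-case variants on a '#testcases' line,
# e.g.
#
#   #testcases simple advanced
#
# for which parsettestcases returns [[b'advanced', b'simple']] (one sorted
# list per directive).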
def getparser():
    """Obtain the ArgumentParser used by the CLI."""
    parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]')

    selection = parser.add_argument_group('Test Selection')
    selection.add_argument('--allow-slow-tests', action='store_true',
        help='allow extremely slow tests')
    selection.add_argument("--blacklist", action="append",
        help="skip tests listed in the specified blacklist file")
    selection.add_argument("--changed",
        help="run tests that are changed in parent rev or working directory")
    selection.add_argument("-k", "--keywords",
        help="run tests matching keywords")
    selection.add_argument("-r", "--retest", action="store_true",
        help="retest failed tests")
    selection.add_argument("--test-list", action="append",
        help="read tests to run from the specified file")
    selection.add_argument("--whitelist", action="append",
        help="always run tests listed in the specified whitelist file")
    selection.add_argument('tests', metavar='TESTS', nargs='*',
        help='Tests to run')

    harness = parser.add_argument_group('Test Harness Behavior')
    harness.add_argument('--bisect-repo',
        metavar='bisect_repo',
        help=("Path of a repo to bisect. Use together with "
              "--known-good-rev"))
    harness.add_argument("-d", "--debug", action="store_true",
        help="debug mode: write output of test scripts to console"
             " rather than capturing and diffing it (disables timeout)")
    harness.add_argument("-f", "--first", action="store_true",
        help="exit on the first test failure")
    harness.add_argument("-i", "--interactive", action="store_true",
        help="prompt to accept changed output")
    harness.add_argument("-j", "--jobs", type=int,
        help="number of jobs to run in parallel"
             " (default: $%s or %d)" % defaults['jobs'])
    harness.add_argument("--keep-tmpdir", action="store_true",
        help="keep temporary directory after running tests")
    harness.add_argument('--known-good-rev',
        metavar="known_good_rev",
        help=("Automatically bisect any failures using this "
              "revision as a known-good revision."))
    harness.add_argument("--list-tests", action="store_true",
        help="list tests instead of running them")
    harness.add_argument("--loop", action="store_true",
        help="loop tests repeatedly")
    harness.add_argument('--random', action="store_true",
        help='run tests in random order')
    harness.add_argument('--order-by-runtime', action="store_true",
        help='run slowest tests first, according to .testtimes')
    harness.add_argument("-p", "--port", type=int,
        help="port on which servers should listen"
             " (default: $%s or %d)" % defaults['port'])
    harness.add_argument('--profile-runner', action='store_true',
        help='run statprof on run-tests')
    harness.add_argument("-R", "--restart", action="store_true",
        help="restart at last error")
    harness.add_argument("--runs-per-test", type=int, dest="runs_per_test",
        help="run each test N times (default=1)", default=1)
    harness.add_argument("--shell",
        help="shell to use (default: $%s or %s)" % defaults['shell'])
    harness.add_argument('--showchannels', action='store_true',
        help='show scheduling channels')
    harness.add_argument("--slowtimeout", type=int,
        help="kill errant slow tests after SLOWTIMEOUT seconds"
             " (default: $%s or %d)" % defaults['slowtimeout'])
    harness.add_argument("-t", "--timeout", type=int,
        help="kill errant tests after TIMEOUT seconds"
             " (default: $%s or %d)" % defaults['timeout'])
    harness.add_argument("--tmpdir",
        help="run tests in the given temporary directory"
             " (implies --keep-tmpdir)")
    harness.add_argument("-v", "--verbose", action="store_true",
        help="output verbose messages")

    hgconf = parser.add_argument_group('Mercurial Configuration')
    hgconf.add_argument("--chg", action="store_true",
        help="install and use chg wrapper in place of hg")
    hgconf.add_argument("--compiler",
        help="compiler to build with")
    hgconf.add_argument('--extra-config-opt', action="append", default=[],
        help='set the given config opt in the test hgrc')
    hgconf.add_argument("-l", "--local", action="store_true",
        help="shortcut for --with-hg=<testdir>/../hg, "
             "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set")
    hgconf.add_argument("--ipv6", action="store_true",
        help="prefer IPv6 to IPv4 for network related tests")
    hgconf.add_argument("--pure", action="store_true",
        help="use pure Python code instead of C extensions")
    hgconf.add_argument("-3", "--py3-warnings", action="store_true",
        help="enable Py3k warnings on Python 2.7+")
    hgconf.add_argument("--with-chg", metavar="CHG",
        help="use specified chg wrapper in place of hg")
    hgconf.add_argument("--with-hg",
        metavar="HG",
        help="test using specified hg script rather than a "
             "temporary installation")

    reporting = parser.add_argument_group('Results Reporting')
    reporting.add_argument("-C", "--annotate", action="store_true",
        help="output files annotated with coverage")
    reporting.add_argument("--color", choices=["always", "auto", "never"],
        default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
        help="colorisation: always|auto|never (default: auto)")
    reporting.add_argument("-c", "--cover", action="store_true",
        help="print a test coverage report")
    reporting.add_argument('--exceptions', action='store_true',
        help='log all exceptions and generate an exception report')
    reporting.add_argument("-H", "--htmlcov", action="store_true",
        help="create an HTML report of the coverage of the files")
    reporting.add_argument("--json", action="store_true",
        help="store test result data in 'report.json' file")
    reporting.add_argument("--outputdir",
        help="directory to write error logs to (default=test directory)")
    reporting.add_argument("-n", "--nodiff", action="store_true",
        help="skip showing test changes")
    reporting.add_argument("-S", "--noskips", action="store_true",
        help="don't report skipped tests verbosely")
    reporting.add_argument("--time", action="store_true",
        help="time how long each test takes")
    reporting.add_argument("--view",
        help="external diff viewer")
    reporting.add_argument("--xunit",
        help="record xunit results at specified path")

    for option, (envvar, default) in defaults.items():
        defaults[option] = type(default)(os.environ.get(envvar, default))
    parser.set_defaults(**defaults)

    return parser

def parseargs(args, parser):
    """Parse arguments with our ArgumentParser and validate results."""
    options = parser.parse_args(args)

    # jython is always pure
    if 'java' in sys.platform or '__pypy__' in sys.modules:
        options.pure = True

    if options.with_hg:
        options.with_hg = canonpath(_bytespath(options.with_hg))
        if not (os.path.isfile(options.with_hg) and
                os.access(options.with_hg, os.X_OK)):
            parser.error('--with-hg must specify an executable hg script')
        if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
            sys.stderr.write('warning: --with-hg should specify an hg script\n')
            sys.stderr.flush()
    if options.local:
        testdir = os.path.dirname(_bytespath(canonpath(sys.argv[0])))
        reporootdir = os.path.dirname(testdir)
        pathandattrs = [(b'hg', 'with_hg')]
        if options.chg:
            pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
        for relpath, attr in pathandattrs:
            binpath = os.path.join(reporootdir, relpath)
            if os.name != 'nt' and not os.access(binpath, os.X_OK):
                parser.error('--local specified, but %r not found or '
                             'not executable' % binpath)
            setattr(options, attr, binpath)

    if (options.chg or options.with_chg) and os.name == 'nt':
        parser.error('chg does not work on %s' % os.name)
    if options.with_chg:
        options.chg = False # no installation to temporary location
        options.with_chg = canonpath(_bytespath(options.with_chg))
        if not (os.path.isfile(options.with_chg) and
                os.access(options.with_chg, os.X_OK)):
            parser.error('--with-chg must specify a chg executable')
    if options.chg and options.with_hg:
        # chg shares installation location with hg
        parser.error('--chg does not work when --with-hg is specified '
                     '(use --with-chg instead)')

    if options.color == 'always' and not pygmentspresent:
        sys.stderr.write('warning: --color=always ignored because '
                         'pygments is not installed\n')

    if options.bisect_repo and not options.known_good_rev:
        parser.error("--bisect-repo cannot be used without --known-good-rev")

    global useipv6
    if options.ipv6:
        useipv6 = checksocketfamily('AF_INET6')
    else:
        # only use IPv6 if IPv4 is unavailable and IPv6 is available
        useipv6 = ((not checksocketfamily('AF_INET'))
                   and checksocketfamily('AF_INET6'))

    options.anycoverage = options.cover or options.annotate or options.htmlcov
    if options.anycoverage:
        try:
            import coverage
            covver = version.StrictVersion(coverage.__version__).version
            if covver < (3, 3):
                parser.error('coverage options require coverage 3.3 or later')
        except ImportError:
            parser.error('coverage options now require the coverage package')

    if options.anycoverage and options.local:
        # this needs some path mangling somewhere, I guess
        parser.error("sorry, coverage options do not work when --local "
                     "is specified")

    if options.anycoverage and options.with_hg:
        parser.error("sorry, coverage options do not work when --with-hg "
                     "is specified")

    global verbose
    if options.verbose:
        verbose = ''

    if options.tmpdir:
        options.tmpdir = canonpath(options.tmpdir)

    if options.jobs < 1:
        parser.error('--jobs must be positive')
    if options.interactive and options.debug:
        parser.error("-i/--interactive and -d/--debug are incompatible")
    if options.debug:
        if options.timeout != defaults['timeout']:
            sys.stderr.write(
                'warning: --timeout option ignored with --debug\n')
        if options.slowtimeout != defaults['slowtimeout']:
            sys.stderr.write(
                'warning: --slowtimeout option ignored with --debug\n')
        options.timeout = 0
        options.slowtimeout = 0
    if options.py3_warnings:
        if PYTHON3:
            parser.error(
                '--py3-warnings can only be used on Python 2.7')

    if options.blacklist:
        options.blacklist = parselistfiles(options.blacklist, 'blacklist')
    if options.whitelist:
        options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
    else:
        options.whitelisted = {}

    if options.showchannels:
        options.nodiff = True

    return options

def rename(src, dst):
    """Like os.rename(), trade atomicity and opened files friendliness
    for existing destination support.
    """
    shutil.copy(src, dst)
    os.remove(src)

def makecleanable(path):
    """Try to fix directory permission recursively so that the entire tree
    can be deleted"""
    for dirpath, dirnames, _filenames in os.walk(path, topdown=True):
        for d in dirnames:
            p = os.path.join(dirpath, d)
            try:
                os.chmod(p, os.stat(p).st_mode & 0o777 | 0o700) # chmod u+rwx
            except OSError:
                pass

_unified_diff = difflib.unified_diff
if PYTHON3:
    import functools
    _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)

def getdiff(expected, output, ref, err):
    servefail = False
    lines = []
    for line in _unified_diff(expected, output, ref, err):
        if line.startswith(b'+++') or line.startswith(b'---'):
            line = line.replace(b'\\', b'/')
            if line.endswith(b' \n'):
                line = line[:-2] + b'\n'
        lines.append(line)
        if not servefail and line.startswith(
                b'+ abort: child process failed to start'):
            servefail = True

    return servefail, lines

verbose = False
def vlog(*msg):
    """Log only when in verbose mode."""
    if verbose is False:
        return

    return log(*msg)

# Bytes that break XML even in a CDATA block: control characters 0-31
# sans \t, \n and \r
CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")

# Match feature conditionalized output lines in the form, capturing the feature
# list in group 2, and the preceding line output in group 1:
#
#   output..output (feature !)\n
optline = re.compile(b'(.*) \((.+?) !\)\n$')

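# For example, the expected-output line b'this is output (windows !)\n'
# matches optline with group 1 = b'this is output' and
# group 2 = b'windows'.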
def cdatasafe(data):
    """Make a string safe to include in a CDATA block.

    Certain control characters are illegal in a CDATA block, and
    there's no way to include a ]]> in a CDATA either. This function
    replaces illegal bytes with ? and adds a space between the ]] so
    that it won't break the CDATA block.
    """
    return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>')

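# Example: cdatasafe(b'ok\x07]]>') returns b'ok?] ]>' -- the BEL byte is
# replaced by '?' and the CDATA terminator is split so it can no longer
# close the block.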
def log(*msg):
    """Log something to stdout.

    Arguments are strings to print.
    """
    with iolock:
        if verbose:
            print(verbose, end=' ')
        for m in msg:
            print(m, end=' ')
        print()
        sys.stdout.flush()

def highlightdiff(line, color):
    if not color:
        return line
    assert pygmentspresent
    return pygments.highlight(line.decode('latin1'), difflexer,
                              terminal256formatter).encode('latin1')

def highlightmsg(msg, color):
    if not color:
        return msg
    assert pygmentspresent
    return pygments.highlight(msg, runnerlexer, runnerformatter)

def terminate(proc):
    """Terminate subprocess"""
    vlog('# Terminating process %d' % proc.pid)
    try:
        proc.terminate()
    except OSError:
        pass

def killdaemons(pidfile):
    import killdaemons as killmod
    return killmod.killdaemons(pidfile, tryhard=False, remove=True,
                               logfn=vlog)

class Test(unittest.TestCase):
    """Encapsulates a single, runnable test.

    While this class conforms to the unittest.TestCase API, it differs in that
    instances need to be instantiated manually. (Typically, unittest.TestCase
    classes are instantiated automatically by scanning modules.)
    """

    # Status code reserved for skipped tests (used by hghave).
    SKIPPED_STATUS = 80

    def __init__(self, path, outputdir, tmpdir, keeptmpdir=False,
                 debug=False,
                 first=False,
                 timeout=None,
                 startport=None, extraconfigopts=None,
                 py3warnings=False, shell=None, hgcommand=None,
                 slowtimeout=None, usechg=False,
                 useipv6=False):
        """Create a test from parameters.

        path is the full path to the file defining the test.

        tmpdir is the main temporary directory to use for this test.

        keeptmpdir determines whether to keep the test's temporary directory
        after execution. It defaults to removal (False).

        debug mode will make the test execute verbosely, with unfiltered
        output.

        timeout controls the maximum run time of the test. It is ignored when
        debug is True. See slowtimeout for tests with #require slow.

        slowtimeout overrides timeout if the test has #require slow.

        startport controls the starting port number to use for this test. Each
        test will reserve 3 port numbers for execution. It is the caller's
        responsibility to allocate a non-overlapping port range to Test
        instances.

        extraconfigopts is an iterable of extra hgrc config options. Values
        must have the form "key=value" (something understood by hgrc). Values
        of the form "foo.key=value" will result in "[foo] key=value".

        py3warnings enables Py3k warnings.

        shell is the shell to execute tests in.
        """
        if timeout is None:
            timeout = defaults['timeout']
        if startport is None:
            startport = defaults['port']
        if slowtimeout is None:
            slowtimeout = defaults['slowtimeout']
        self.path = path
        self.bname = os.path.basename(path)
        self.name = _strpath(self.bname)
        self._testdir = os.path.dirname(path)
        self._outputdir = outputdir
        self._tmpname = os.path.basename(path)
        self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)

        self._threadtmp = tmpdir
        self._keeptmpdir = keeptmpdir
        self._debug = debug
        self._first = first
        self._timeout = timeout
        self._slowtimeout = slowtimeout
        self._startport = startport
        self._extraconfigopts = extraconfigopts or []
        self._py3warnings = py3warnings
        self._shell = _bytespath(shell)
        self._hgcommand = hgcommand or b'hg'
        self._usechg = usechg
        self._useipv6 = useipv6

        self._aborted = False
        self._daemonpids = []
        self._finished = None
        self._ret = None
        self._out = None
        self._skipped = None
        self._testtmp = None
        self._chgsockdir = None

        self._refout = self.readrefout()

    def readrefout(self):
        """read reference output"""
        # If we're not in --debug mode and reference output file exists,
        # check test output against it.
        if self._debug:
            return None # to match "out is None"
        elif os.path.exists(self.refpath):
            with open(self.refpath, 'rb') as f:
                return f.read().splitlines(True)
        else:
            return []

    # needed to get base class __repr__ running
    @property
    def _testMethodName(self):
        return self.name

    def __str__(self):
        return self.name

    def shortDescription(self):
        return self.name

    def setUp(self):
        """Tasks to perform before run()."""
        self._finished = False
        self._ret = None
        self._out = None
        self._skipped = None

        try:
            os.mkdir(self._threadtmp)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise

        name = self._tmpname
        self._testtmp = os.path.join(self._threadtmp, name)
        os.mkdir(self._testtmp)

        # Remove any previous output files.
        if os.path.exists(self.errpath):
            try:
                os.remove(self.errpath)
            except OSError as e:
                # We might have raced another test to clean up a .err
                # file, so ignore ENOENT when removing a previous .err
                # file.
                if e.errno != errno.ENOENT:
                    raise

        if self._usechg:
            self._chgsockdir = os.path.join(self._threadtmp,
                                            b'%s.chgsock' % name)
            os.mkdir(self._chgsockdir)

    def run(self, result):
        """Run this test and report results against a TestResult instance."""
        # This function is extremely similar to unittest.TestCase.run(). Once
        # we require Python 2.7 (or at least its version of unittest), this
        # function can largely go away.
        self._result = result
        result.startTest(self)
        try:
            try:
                self.setUp()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                return

            success = False
            try:
                self.runTest()
            except KeyboardInterrupt:
                self._aborted = True
                raise
            except unittest.SkipTest as e:
                result.addSkip(self, str(e))
                # The base class will have already counted this as a
                # test we "ran", but we want to exclude skipped tests
                # from the count of tests run.
                result.testsRun -= 1
            except self.failureException as e:
                # This differs from unittest in that we don't capture
                # the stack trace. This is for historical reasons and
                # this decision could be revisited in the future,
                # especially for PythonTest instances.
                if result.addFailure(self, str(e)):
                    success = True
            except Exception:
                result.addError(self, sys.exc_info())
            else:
                success = True

            try:
                self.tearDown()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                success = False

            if success:
                result.addSuccess(self)
        finally:
            result.stopTest(self, interrupted=self._aborted)

    def runTest(self):
        """Run this test instance.

        This will return a tuple describing the result of the test.
        """
        env = self._getenv()
        self._genrestoreenv(env)
        self._daemonpids.append(env['DAEMON_PIDS'])
        self._createhgrc(env['HGRCPATH'])

        vlog('# Test', self.name)

        ret, out = self._run(env)
        self._finished = True
        self._ret = ret
        self._out = out

        def describe(ret):
            if ret < 0:
                return 'killed by signal: %d' % -ret
            return 'returned error code %d' % ret

        self._skipped = False

        if ret == self.SKIPPED_STATUS:
            if out is None: # Debug mode, nothing to parse.
                missing = ['unknown']
                failed = None
            else:
                missing, failed = TTest.parsehghaveoutput(out)

            if not missing:
                missing = ['skipped']

            if failed:
                self.fail('hg have failed checking for %s' % failed[-1])
            else:
                self._skipped = True
                raise unittest.SkipTest(missing[-1])
        elif ret == 'timeout':
            self.fail('timed out')
        elif ret is False:
            self.fail('no result code from test')
        elif out != self._refout:
            # Diff generation may rely on written .err file.
            if (ret != 0 or out != self._refout) and not self._skipped \
                and not self._debug:
                with open(self.errpath, 'wb') as f:
                    for line in out:
                        f.write(line)

            # The result object handles diff calculation for us.
            with firstlock:
                if self._result.addOutputMismatch(self, ret, out, self._refout):
                    # change was accepted, skip failing
                    return
                if self._first:
                    global firsterror
                    firsterror = True

            if ret:
                msg = 'output changed and ' + describe(ret)
            else:
                msg = 'output changed'

            self.fail(msg)
        elif ret:
            self.fail(describe(ret))

    def tearDown(self):
        """Tasks to perform after run()."""
        for entry in self._daemonpids:
            killdaemons(entry)
        self._daemonpids = []

        if self._keeptmpdir:
            log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
                (self._testtmp.decode('utf-8'),
                 self._threadtmp.decode('utf-8')))
        else:
            try:
                shutil.rmtree(self._testtmp)
            except OSError:
                # unreadable directory may be left in $TESTTMP; fix permission
                # and try again
                makecleanable(self._testtmp)
                shutil.rmtree(self._testtmp, True)
            shutil.rmtree(self._threadtmp, True)

        if self._usechg:
            # chgservers will stop automatically after they find the socket
            # files are deleted
            shutil.rmtree(self._chgsockdir, True)

        if (self._ret != 0 or self._out != self._refout) and not self._skipped \
            and not self._debug and self._out:
            with open(self.errpath, 'wb') as f:
                for line in self._out:
                    f.write(line)

        vlog("# Ret was:", self._ret, '(%s)' % self.name)

    def _run(self, env):
        # This should be implemented in child classes to run tests.
        raise unittest.SkipTest('unknown test type')

    def abort(self):
        """Terminate execution of this test."""
        self._aborted = True

    def _portmap(self, i):
        offset = b'' if i == 0 else b'%d' % i
        return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
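    # Example (sketch): with the default startport of 20059, _portmap(1)
    # yields (br':20060\b', b':$HGPORT1'), so b'listening at :20060' in raw
    # output is normalized to b'listening at :$HGPORT1'.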

    def _getreplacements(self):
        """Obtain a mapping of text replacements to apply to test output.

        Test output needs to be normalized so it can be compared to expected
        output. This function defines how some of that normalization will
        occur.
        """
        r = [
            # This list should be parallel to defineport in _getenv
            self._portmap(0),
            self._portmap(1),
            self._portmap(2),
            (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
            (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
        ]
        r.append((self._escapepath(self._testtmp), b'$TESTTMP'))

        replacementfile = os.path.join(self._testdir, b'common-pattern.py')

        if os.path.exists(replacementfile):
            data = {}
            with open(replacementfile, mode='rb') as source:
                # the intermediate 'compile' step helps with debugging
                code = compile(source.read(), replacementfile, 'exec')
                exec(code, data)
            for value in data.get('substitutions', ()):
                if len(value) != 2:
                    msg = 'malformatted substitution in %s: %r'
                    msg %= (replacementfile, value)
                    raise ValueError(msg)
                r.append(value)
        return r

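    # A sketch of what a common-pattern.py file may provide (the pattern
    # below is illustrative, not taken from the real file):
    #
    #   substitutions = [
    #       (br'\d+ files updated', b'* files updated'),
    #   ]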
    def _escapepath(self, p):
        if os.name == 'nt':
            return (
                (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
                          c in b'/\\' and br'[/\\]' or
                          c.isdigit() and c or b'\\' + c
                          for c in [p[i:i + 1] for i in range(len(p))]))
            )
        else:
            return re.escape(p)

    def _localip(self):
        if self._useipv6:
            return b'::1'
        else:
            return b'127.0.0.1'

    def _genrestoreenv(self, testenv):
        """Generate a script that can be used by tests to restore the original
        environment."""
        # Put the restoreenv script inside self._threadtmp
        scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
        testenv['HGTEST_RESTOREENV'] = _strpath(scriptpath)

        # Only restore environment variable names that the shell allows
        # us to export.
        name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')

        # Do not restore these variables; otherwise tests would fail.
        reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}

        with open(scriptpath, 'w') as envf:
            for name, value in origenviron.items():
                if not name_regex.match(name):
                    # Skip environment variables with unusual names not
                    # allowed by most shells.
                    continue
                if name in reqnames:
                    continue
                envf.write('%s=%s\n' % (name, shellquote(value)))

            for name in testenv:
                if name in origenviron or name in reqnames:
                    continue
                envf.write('unset %s\n' % (name,))

    def _getenv(self):
        """Obtain environment variables to use during test execution."""
        def defineport(i):
            offset = '' if i == 0 else '%s' % i
            env["HGPORT%s" % offset] = '%s' % (self._startport + i)
        env = os.environ.copy()
        env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
        env['HGEMITWARNINGS'] = '1'
        env['TESTTMP'] = _strpath(self._testtmp)
        env['TESTNAME'] = self.name
        env['HOME'] = _strpath(self._testtmp)
        # This number should match portneeded in _getport
        for port in xrange(3):
            # This list should be parallel to _portmap in _getreplacements
            defineport(port)
        env["HGRCPATH"] = _strpath(os.path.join(self._threadtmp, b'.hgrc'))
        env["DAEMON_PIDS"] = _strpath(os.path.join(self._threadtmp,
                                                   b'daemon.pids'))
        env["HGEDITOR"] = ('"' + sys.executable + '"'
                           + ' -c "import sys; sys.exit(0)"')
        env["HGUSER"] = "test"
        env["HGENCODING"] = "ascii"
        env["HGENCODINGMODE"] = "strict"
        env["HGHOSTNAME"] = "test-hostname"
        env['HGIPV6'] = str(int(self._useipv6))
        # See contrib/catapipe.py for how to use this functionality.
        if 'HGTESTCATAPULTSERVERPIPE' not in env:
            # If we don't have HGTESTCATAPULTSERVERPIPE explicitly set, pull
            # the non-test one in as a default, otherwise set to devnull
            env['HGTESTCATAPULTSERVERPIPE'] = \
                env.get('HGCATAPULTSERVERPIPE', os.devnull)

        extraextensions = []
        for opt in self._extraconfigopts:
            section, key = opt.encode('utf-8').split(b'.', 1)
            if section != 'extensions':
                continue
            name = key.split(b'=', 1)[0]
            extraextensions.append(name)

        if extraextensions:
            env['HGTESTEXTRAEXTENSIONS'] = b' '.join(extraextensions)

        # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
        # IP addresses.
        env['LOCALIP'] = _strpath(self._localip())

        # This has the same effect as Py_LegacyWindowsStdioFlag in
        # exewrapper.c, but this is needed for testing python instances
        # like dummyssh, dummysmtpd.py, and dumbhttp.py.
        if PYTHON3 and os.name == 'nt':
            env['PYTHONLEGACYWINDOWSSTDIO'] = '1'

        # Reset some environment variables to well-known values so that
        # the tests produce repeatable output.
        env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
        env['TZ'] = 'GMT'
        env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
        env['COLUMNS'] = '80'
        env['TERM'] = 'xterm'

        dropped = [
            'CDPATH',
            'CHGDEBUG',
            'EDITOR',
            'GREP_OPTIONS',
            'HG',
            'HGMERGE',
            'HGPLAIN',
            'HGPLAINEXCEPT',
            'HGPROF',
            'http_proxy',
            'no_proxy',
            'NO_PROXY',
            'PAGER',
            'VISUAL',
        ]

        for k in dropped:
            if k in env:
                del env[k]

        # unset env related to hooks
        for k in list(env):
            if k.startswith('HG_'):
                del env[k]

        if self._usechg:
            env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')

        return env

    def _createhgrc(self, path):
        """Create an hgrc file for this test."""
        with open(path, 'wb') as hgrc:
            hgrc.write(b'[ui]\n')
            hgrc.write(b'slash = True\n')
            hgrc.write(b'interactive = False\n')
            hgrc.write(b'merge = internal:merge\n')
            hgrc.write(b'mergemarkers = detailed\n')
            hgrc.write(b'promptecho = True\n')
            hgrc.write(b'[defaults]\n')
            hgrc.write(b'[devel]\n')
            hgrc.write(b'all-warnings = true\n')
            hgrc.write(b'default-date = 0 0\n')
            hgrc.write(b'[largefiles]\n')
            hgrc.write(b'usercache = %s\n' %
                       (os.path.join(self._testtmp, b'.cache/largefiles')))
            hgrc.write(b'[lfs]\n')
            hgrc.write(b'usercache = %s\n' %
                       (os.path.join(self._testtmp, b'.cache/lfs')))
            hgrc.write(b'[web]\n')
            hgrc.write(b'address = localhost\n')
            hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii'))
            hgrc.write(b'server-header = testing stub value\n')

            for opt in self._extraconfigopts:
                section, key = opt.encode('utf-8').split(b'.', 1)
                assert b'=' in key, ('extra config opt %s must '
                                     'have an = for assignment' % opt)
                hgrc.write(b'[%s]\n%s\n' % (section, key))

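    # Sketch of the extraconfigopts mapping described in __init__: passing
    # --extra-config-opt web.name=example appends
    #
    #   [web]
    #   name=example
    #
    # to the generated hgrc.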
    def fail(self, msg):
        # unittest differentiates between errored and failed.
        # Failed is denoted by AssertionError (by default at least).
        raise AssertionError(msg)

    def _runcommand(self, cmd, env, normalizenewlines=False):
        """Run command in a sub-process, capturing the output (stdout and
        stderr).

        Return a tuple (exitcode, output). output is None in debug mode.
        """
        if self._debug:
            proc = subprocess.Popen(_strpath(cmd), shell=True,
                                    cwd=_strpath(self._testtmp),
                                    env=env)
            ret = proc.wait()
            return (ret, None)

        proc = Popen4(cmd, self._testtmp, self._timeout, env)
        def cleanup():
            terminate(proc)
            ret = proc.wait()
            if ret == 0:
                ret = signal.SIGTERM << 8
            killdaemons(env['DAEMON_PIDS'])
            return ret

        output = b''
        proc.tochild.close()

        try:
            output = proc.fromchild.read()
        except KeyboardInterrupt:
            vlog('# Handling keyboard interrupt')
            cleanup()
            raise

        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)

        if proc.timeout:
            ret = 'timeout'

        if ret:
            killdaemons(env['DAEMON_PIDS'])

        for s, r in self._getreplacements():
            output = re.sub(s, r, output)

        if normalizenewlines:
            output = output.replace(b'\r\n', b'\n')

        return ret, output.splitlines(True)

class PythonTest(Test):
    """A Python-based test."""

    @property
    def refpath(self):
        return os.path.join(self._testdir, b'%s.out' % self.bname)

    def _run(self, env):
        py3switch = self._py3warnings and b' -3' or b''
        # Quote the python(3) executable for Windows
        cmd = b'"%s"%s "%s"' % (PYTHON, py3switch, self.path)
        vlog("# Running", cmd)
        normalizenewlines = os.name == 'nt'
        result = self._runcommand(cmd, env,
                                  normalizenewlines=normalizenewlines)
        if self._aborted:
            raise KeyboardInterrupt()

        return result

# Some glob patterns apply only in some circumstances, so the script
# might want to remove (glob) annotations that otherwise should be
# retained.
checkcodeglobpats = [
    # On Windows it looks like \ doesn't require a (glob), but we know
    # better.
    re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
    re.compile(br'^moving \S+/.*[^)]$'),
    re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
    # Not all platforms have 127.0.0.1 as loopback (though most do),
    # so we always glob that too.
    re.compile(br'.*\$LOCALIP.*$'),
]

bchr = chr
if PYTHON3:
    bchr = lambda x: bytes([x])

class TTest(Test):
    """A "t test" is a test backed by a .t file."""

    SKIPPED_PREFIX = b'skipped: '
    FAILED_PREFIX = b'hghave check failed: '
    NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search

    ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
    ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
    ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})

    def __init__(self, path, *args, **kwds):
        # accept an extra "case" parameter
        case = kwds.pop('case', [])
        self._case = case
        self._allcases = {x for y in parsettestcases(path) for x in y}
        super(TTest, self).__init__(path, *args, **kwds)
        if case:
            casepath = b'#'.join(case)
            self.name = '%s#%s' % (self.name, _strpath(casepath))
            self.errpath = b'%s#%s.err' % (self.errpath[:-4], casepath)
            self._tmpname += b'-%s' % casepath
        self._have = {}

    @property
    def refpath(self):
        return os.path.join(self._testdir, self.bname)

    def _run(self, env):
        with open(self.path, 'rb') as f:
            lines = f.readlines()

        # .t file is both reference output and the test input, keep reference
        # output updated with the test input. This avoids some race
        # conditions where the reference output does not match the actual test.
        if self._refout is not None:
            self._refout = lines

        salt, script, after, expected = self._parsetest(lines)

        # Write out the generated script.
        fname = b'%s.sh' % self._testtmp
        with open(fname, 'wb') as f:
            for l in script:
                f.write(l)

        cmd = b'%s "%s"' % (self._shell, fname)
        vlog("# Running", cmd)

        exitcode, output = self._runcommand(cmd, env)

        if self._aborted:
            raise KeyboardInterrupt()

        # Do not merge output if skipped. Return hghave message instead.
        # Similarly, with --debug, output is None.
        if exitcode == self.SKIPPED_STATUS or output is None:
            return exitcode, output

        return self._processoutput(exitcode, output, salt, after, expected)

    def _hghave(self, reqs):
        allreqs = b' '.join(reqs)
        if allreqs in self._have:
            return self._have.get(allreqs)

        # TODO do something smarter when all other uses of hghave are gone.
        runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
        tdir = runtestdir.replace(b'\\', b'/')
        proc = Popen4(b'%s -c "%s/hghave %s"' %
                      (self._shell, tdir, allreqs),
                      self._testtmp, 0, self._getenv())
        stdout, stderr = proc.communicate()
        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)
        if ret == 2:
            print(stdout.decode('utf-8'))
            sys.exit(1)

        if ret != 0:
            self._have[allreqs] = (False, stdout)
            return False, stdout

        if b'slow' in reqs:
            self._timeout = self._slowtimeout

        self._have[allreqs] = (True, None)
        return True, None

    def _iftest(self, args):
        # implements "#if"
        reqs = []
        for arg in args:
            if arg.startswith(b'no-') and arg[3:] in self._allcases:
                if arg[3:] in self._case:
                    return False
            elif arg in self._allcases:
                if arg not in self._case:
                    return False
            else:
                reqs.append(arg)
        return self._hghave(reqs)[0]

    def _parsetest(self, lines):
        # We generate a shell script which outputs unique markers to line
        # up script results with our source. These markers include input
        # line number and the last return code.
        salt = b"SALT%d" % time.time()
        def addsalt(line, inpython):
            if inpython:
                script.append(b'%s %d 0\n' % (salt, line))
            else:
                script.append(b'echo %s %d $?\n' % (salt, line))
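        # Sketch: a command taken from .t line 7 contributes
        # b'echo SALT<timestamp> 7 $?\n' to the generated script, letting
        # _processoutput split the captured output back into per-command
        # chunks and recover each command's exit status.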
1408 1408 activetrace = []
1409 1409 session = str(uuid.uuid4())
1410 1410 if PYTHON3:
1411 1411 session = session.encode('ascii')
1412 1412 hgcatapult = os.getenv('HGTESTCATAPULTSERVERPIPE')
1413 1413 def toggletrace(cmd=None):
1414 1414 if not hgcatapult or hgcatapult == os.devnull:
1415 1415 return
1416 1416
1417 1417 if activetrace:
1418 1418 script.append(
1419 1419 b'echo END %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n' % (
1420 1420 session, activetrace[0]))
1421 1421 if cmd is None:
1422 1422 return
1423 1423
1424 1424 if isinstance(cmd, str):
1425 1425 quoted = shellquote(cmd.strip())
1426 1426 else:
1427 1427 quoted = shellquote(cmd.strip().decode('utf8')).encode('utf8')
1428 1428 quoted = quoted.replace(b'\\', b'\\\\')
1429 1429 script.append(
1430 1430 b'echo START %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n' % (
1431 1431 session, quoted))
1432 1432 activetrace[0:] = [quoted]
1433 1433
1434 1434 script = []
1435 1435
1436 1436 # After we run the shell script, we re-unify the script output
1437 1437 # with non-active parts of the source, with synchronization by our
1438 1438 # SALT line number markers. The after table contains the non-active
1439 1439 # components, ordered by line number.
1440 1440 after = {}
1441 1441
1442 1442 # Expected shell script output.
1443 1443 expected = {}
1444 1444
1445 1445 pos = prepos = -1
1446 1446
1447 1447 # True or False when in a true or false conditional section
1448 1448 skipping = None
1449 1449
1450 1450 # We keep track of whether or not we're in a Python block so we
1451 1451 # can generate the surrounding doctest magic.
1452 1452 inpython = False
1453 1453
1454 1454 if self._debug:
1455 1455 script.append(b'set -x\n')
1456 1456 if self._hgcommand != b'hg':
1457 1457 script.append(b'alias hg="%s"\n' % self._hgcommand)
1458 1458 if os.getenv('MSYSTEM'):
1459 1459 script.append(b'alias pwd="pwd -W"\n')
1460 1460
1461 1461 if hgcatapult and hgcatapult != os.devnull:
1462 1462 # Kludge: use a while loop to keep the pipe from getting
1463 1463 # closed by our echo commands. The still-running file gets
1464 1464 # reaped at the end of the script, which causes the while
1465 1465 # loop to exit and closes the pipe. Sigh.
1466 1466 script.append(
1467 1467 b'rtendtracing() {\n'
1468 1468 b' echo END %(session)s %(name)s >> %(catapult)s\n'
1469 1469 b' rm -f "$TESTTMP/.still-running"\n'
1470 1470 b'}\n'
1471 1471 b'trap "rtendtracing" 0\n'
1472 1472 b'touch "$TESTTMP/.still-running"\n'
1473 1473 b'while [ -f "$TESTTMP/.still-running" ]; do sleep 1; done '
1474 1474 b'> %(catapult)s &\n'
1475 1475 b'HGCATAPULTSESSION=%(session)s ; export HGCATAPULTSESSION\n'
1476 1476 b'echo START %(session)s %(name)s >> %(catapult)s\n'
1477 1477 % {
1478 1478 'name': self.name,
1479 1479 'session': session,
1480 1480 'catapult': hgcatapult,
1481 1481 }
1482 1482 )
1483 1483
1484 1484 if self._case:
1485 1485 casestr = b'#'.join(self._case)
1486 1486 if isinstance(self._case, str):
1487 1487 quoted = shellquote(casestr)
1488 1488 else:
1489 1489 quoted = shellquote(casestr.decode('utf8')).encode('utf8')
1490 1490 script.append(b'TESTCASE=%s\n' % quoted)
1491 1491 script.append(b'export TESTCASE\n')
1492 1492
1493 1493 n = 0
1494 1494 for n, l in enumerate(lines):
1495 1495 if not l.endswith(b'\n'):
1496 1496 l += b'\n'
1497 1497 if l.startswith(b'#require'):
1498 1498 lsplit = l.split()
1499 1499 if len(lsplit) < 2 or lsplit[0] != b'#require':
1500 1500 after.setdefault(pos, []).append(' !!! invalid #require\n')
1501 1501 if not skipping:
1502 1502 haveresult, message = self._hghave(lsplit[1:])
1503 1503 if not haveresult:
1504 1504 script = [b'echo "%s"\nexit 80\n' % message]
1505 1505 break
1506 1506 after.setdefault(pos, []).append(l)
1507 1507 elif l.startswith(b'#if'):
1508 1508 lsplit = l.split()
1509 1509 if len(lsplit) < 2 or lsplit[0] != b'#if':
1510 1510 after.setdefault(pos, []).append(' !!! invalid #if\n')
1511 1511 if skipping is not None:
1512 1512 after.setdefault(pos, []).append(' !!! nested #if\n')
1513 1513 skipping = not self._iftest(lsplit[1:])
1514 1514 after.setdefault(pos, []).append(l)
1515 1515 elif l.startswith(b'#else'):
1516 1516 if skipping is None:
1517 1517 after.setdefault(pos, []).append(' !!! missing #if\n')
1518 1518 skipping = not skipping
1519 1519 after.setdefault(pos, []).append(l)
1520 1520 elif l.startswith(b'#endif'):
1521 1521 if skipping is None:
1522 1522 after.setdefault(pos, []).append(' !!! missing #if\n')
1523 1523 skipping = None
1524 1524 after.setdefault(pos, []).append(l)
1525 1525 elif skipping:
1526 1526 after.setdefault(pos, []).append(l)
1527 1527 elif l.startswith(b' >>> '): # python inlines
1528 1528 after.setdefault(pos, []).append(l)
1529 1529 prepos = pos
1530 1530 pos = n
1531 1531 if not inpython:
1532 1532 # We've just entered a Python block. Add the header.
1533 1533 inpython = True
1534 1534 addsalt(prepos, False) # Make sure we report the exit code.
1535 1535 script.append(b'"%s" -m heredoctest <<EOF\n' % PYTHON)
1536 1536 addsalt(n, True)
1537 1537 script.append(l[2:])
1538 1538 elif l.startswith(b' ... '): # python inlines
1539 1539 after.setdefault(prepos, []).append(l)
1540 1540 script.append(l[2:])
1541 1541 elif l.startswith(b' $ '): # commands
1542 1542 if inpython:
1543 1543 script.append(b'EOF\n')
1544 1544 inpython = False
1545 1545 after.setdefault(pos, []).append(l)
1546 1546 prepos = pos
1547 1547 pos = n
1548 1548 addsalt(n, False)
1549 1549 rawcmd = l[4:]
1550 1550 cmd = rawcmd.split()
1551 1551 toggletrace(rawcmd)
1552 1552 if len(cmd) == 2 and cmd[0] == b'cd':
1553 1553 rawcmd = b'cd %s || exit 1\n' % cmd[1]
1554 1554 script.append(rawcmd)
1555 1555 elif l.startswith(b' > '): # continuations
1556 1556 after.setdefault(prepos, []).append(l)
1557 1557 script.append(l[4:])
1558 1558 elif l.startswith(b' '): # results
1559 1559 # Queue up a list of expected results.
1560 1560 expected.setdefault(pos, []).append(l[2:])
1561 1561 else:
1562 1562 if inpython:
1563 1563 script.append(b'EOF\n')
1564 1564 inpython = False
1565 1565 # Non-command/result. Queue up for merged output.
1566 1566 after.setdefault(pos, []).append(l)
1567 1567
1568 1568 if inpython:
1569 1569 script.append(b'EOF\n')
1570 1570 if skipping is not None:
1571 1571 after.setdefault(pos, []).append(b' !!! missing #endif\n')
1572 1572 addsalt(n + 1, False)
1573 1573 # Need to end any current per-command trace
1574 1574 if activetrace:
1575 1575 toggletrace()
1576 1576 return salt, script, after, expected
1577 1577
1578 1578 def _processoutput(self, exitcode, output, salt, after, expected):
1579 1579 # Merge the script output back into a unified test.
1580 1580 warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
1581 1581 if exitcode != 0:
1582 1582 warnonly = 3
1583 1583
1584 1584 pos = -1
1585 1585 postout = []
1586 1586 for l in output:
1587 1587 lout, lcmd = l, None
1588 1588 if salt in l:
1589 1589 lout, lcmd = l.split(salt, 1)
1590 1590
1591 1591 while lout:
1592 1592 if not lout.endswith(b'\n'):
1593 1593 lout += b' (no-eol)\n'
1594 1594
1595 1595 # Find the expected output at the current position.
1596 1596 els = [None]
1597 1597 if expected.get(pos, None):
1598 1598 els = expected[pos]
1599 1599
1600 1600 optional = []
1601 1601 for i, el in enumerate(els):
1602 1602 r = False
1603 1603 if el:
1604 1604 r, exact = self.linematch(el, lout)
1605 1605 if isinstance(r, str):
1606 1606 if r == '-glob':
1607 1607 lout = ''.join(el.rsplit(' (glob)', 1))
1608 1608 r = '' # Warn only this line.
1609 1609 elif r == "retry":
1610 1610 postout.append(b' ' + el)
1611 1611 else:
1612 1612 log('\ninfo, unknown linematch result: %r\n' % r)
1613 1613 r = False
1614 1614 if r:
1615 1615 els.pop(i)
1616 1616 break
1617 1617 if el:
1618 1618 if el.endswith(b" (?)\n"):
1619 1619 optional.append(i)
1620 1620 else:
1621 1621 m = optline.match(el)
1622 1622 if m:
1623 1623 conditions = [
1624 1624 c for c in m.group(2).split(b' ')]
1625 1625
1626 1626 if not self._iftest(conditions):
1627 1627 optional.append(i)
1628 1628 if exact:
1629 1629 # Don't allow the line to be matched against a later
1630 1630 # line in the output
1631 1631 els.pop(i)
1632 1632 break
1633 1633
1634 1634 if r:
1635 1635 if r == "retry":
1636 1636 continue
1637 1637 # clean up any optional leftovers
1638 1638 for i in optional:
1639 1639 postout.append(b' ' + els[i])
1640 1640 for i in reversed(optional):
1641 1641 del els[i]
1642 1642 postout.append(b' ' + el)
1643 1643 else:
1644 1644 if self.NEEDESCAPE(lout):
1645 1645 lout = TTest._stringescape(b'%s (esc)\n' %
1646 1646 lout.rstrip(b'\n'))
1647 1647 postout.append(b' ' + lout) # Let diff deal with it.
1648 1648 if r != '': # If line failed.
1649 1649 warnonly = 3 # for sure not
1650 1650 elif warnonly == 1: # Is "not yet" and line is warn only.
1651 1651 warnonly = 2 # Yes do warn.
1652 1652 break
1653 1653 else:
1654 1654 # clean up any optional leftovers
1655 1655 while expected.get(pos, None):
1656 1656 el = expected[pos].pop(0)
1657 1657 if el:
1658 1658 if not el.endswith(b" (?)\n"):
1659 1659 m = optline.match(el)
1660 1660 if m:
1661 1661 conditions = [c for c in m.group(2).split(b' ')]
1662 1662
1663 1663 if self._iftest(conditions):
1664 1664 # Don't append as optional line
1665 1665 continue
1666 1666 else:
1667 1667 continue
1668 1668 postout.append(b' ' + el)
1669 1669
1670 1670 if lcmd:
1671 1671 # Add on last return code.
1672 1672 ret = int(lcmd.split()[1])
1673 1673 if ret != 0:
1674 1674 postout.append(b' [%d]\n' % ret)
1675 1675 if pos in after:
1676 1676 # Merge in non-active test bits.
1677 1677 postout += after.pop(pos)
1678 1678 pos = int(lcmd.split()[0])
1679 1679
1680 1680 if pos in after:
1681 1681 postout += after.pop(pos)
1682 1682
1683 1683 if warnonly == 2:
1684 1684 exitcode = False # Set exitcode to warned.
1685 1685
1686 1686 return exitcode, postout
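# Illustrative parse (hypothetical marker): an output line such as
# b'<salt> 42 0\n' splits into lout=b'' and lcmd=b' 42 0', from which
# the exit status (0, omitted from postout because it is zero) and the
# next source position (42) are recovered.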
1687 1687
1688 1688 @staticmethod
1689 1689 def rematch(el, l):
1690 1690 try:
1691 1691 el = b'(?:' + el + b')'
1692 1692 # use \Z to ensure that the regex matches to the end of the string
1693 1693 if os.name == 'nt':
1694 1694 return re.match(el + br'\r?\n\Z', l)
1695 1695 return re.match(el + br'\n\Z', l)
1696 1696 except re.error:
1697 1697 # el is an invalid regex
1698 1698 return False
1699 1699
1700 1700 @staticmethod
1701 1701 def globmatch(el, l):
1702 1702 # The only supported special characters are * and ?, plus /, which also
1703 1703 # matches \ on Windows. Escaping of these characters is supported.
1704 1704 if el + b'\n' == l:
1705 1705 if os.altsep:
1706 1706 # matching on "/" is not needed for this line
1707 1707 for pat in checkcodeglobpats:
1708 1708 if pat.match(el):
1709 1709 return True
1710 1710 return b'-glob'
1711 1711 return True
1712 1712 el = el.replace(b'$LOCALIP', b'*')
1713 1713 i, n = 0, len(el)
1714 1714 res = b''
1715 1715 while i < n:
1716 1716 c = el[i:i + 1]
1717 1717 i += 1
1718 1718 if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
1719 1719 res += el[i - 1:i + 1]
1720 1720 i += 1
1721 1721 elif c == b'*':
1722 1722 res += b'.*'
1723 1723 elif c == b'?':
1724 1724 res += b'.'
1725 1725 elif c == b'/' and os.altsep:
1726 1726 res += b'[/\\\\]'
1727 1727 else:
1728 1728 res += re.escape(c)
1729 1729 return TTest.rematch(res, l)
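# Illustrative translation (hypothetical pattern): in b'file-*.txt', the
# '*' becomes '.*' and a '?' would become '.', while '/' additionally
# matches '\\' when os.altsep is set; all other bytes go through
# re.escape() before rematch() anchors and applies the result.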
1730 1730
1731 1731 def linematch(self, el, l):
1732 1732 if el == l: # perfect match (fast)
1733 1733 return True, True
1734 1734 retry = False
1735 1735 if el.endswith(b" (?)\n"):
1736 1736 retry = "retry"
1737 1737 el = el[:-5] + b"\n"
1738 1738 else:
1739 1739 m = optline.match(el)
1740 1740 if m:
1741 1741 conditions = [c for c in m.group(2).split(b' ')]
1742 1742
1743 1743 el = m.group(1) + b"\n"
1744 1744 if not self._iftest(conditions):
1745 1745 retry = "retry" # Not required by listed features
1746 1746
1747 1747 if el.endswith(b" (esc)\n"):
1748 1748 if PYTHON3:
1749 1749 el = el[:-7].decode('unicode_escape') + '\n'
1750 1750 el = el.encode('utf-8')
1751 1751 else:
1752 1752 el = el[:-7].decode('string-escape') + '\n'
1753 1753 if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
1754 1754 return True, True
1755 1755 if el.endswith(b" (re)\n"):
1756 1756 return (TTest.rematch(el[:-6], l) or retry), False
1757 1757 if el.endswith(b" (glob)\n"):
1758 1758 # ignore '(glob)' added to l by 'replacements'
1759 1759 if l.endswith(b" (glob)\n"):
1760 1760 l = l[:-8] + b"\n"
1761 1761 return (TTest.globmatch(el[:-8], l) or retry), False
1762 1762 if os.altsep:
1763 1763 _l = l.replace(b'\\', b'/')
1764 1764 if el == _l or os.name == 'nt' and el[:-1] + b'\r\n' == _l:
1765 1765 return True, True
1766 1766 return retry, True
1767 1767
1768 1768 @staticmethod
1769 1769 def parsehghaveoutput(lines):
1770 1770 '''Parse hghave log lines.
1771 1771
1772 1772 Return tuple of lists (missing, failed):
1773 1773 * the missing/unknown features
1774 1774 * the features for which existence check failed'''
1775 1775 missing = []
1776 1776 failed = []
1777 1777 for line in lines:
1778 1778 if line.startswith(TTest.SKIPPED_PREFIX):
1779 1779 line = line.splitlines()[0]
1780 1780 missing.append(line[len(TTest.SKIPPED_PREFIX):].decode('utf-8'))
1781 1781 elif line.startswith(TTest.FAILED_PREFIX):
1782 1782 line = line.splitlines()[0]
1783 1783 failed.append(line[len(TTest.FAILED_PREFIX):].decode('utf-8'))
1784 1784
1785 1785 return missing, failed
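# Illustrative input (prefix values assumed, e.g. a log line like
# b'skipped: missing feature: serve'): parsing it would append
# 'missing feature: serve' to the returned missing list.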
1786 1786
1787 1787 @staticmethod
1788 1788 def _escapef(m):
1789 1789 return TTest.ESCAPEMAP[m.group(0)]
1790 1790
1791 1791 @staticmethod
1792 1792 def _stringescape(s):
1793 1793 return TTest.ESCAPESUB(TTest._escapef, s)
1794 1794
1795 1795 iolock = threading.RLock()
1796 1796 firstlock = threading.RLock()
1797 1797 firsterror = False
1798 1798
1799 1799 class TestResult(unittest._TextTestResult):
1800 1800 """Holds results when executing via unittest."""
1801 1801 # Don't worry too much about accessing the non-public _TextTestResult.
1802 1802 # It is relatively common in Python testing tools.
1803 1803 def __init__(self, options, *args, **kwargs):
1804 1804 super(TestResult, self).__init__(*args, **kwargs)
1805 1805
1806 1806 self._options = options
1807 1807
1808 1808 # unittest.TestResult didn't have skipped until 2.7. We need to
1809 1809 # polyfill it.
1810 1810 self.skipped = []
1811 1811
1812 1812 # We have a custom "ignored" result that isn't present in any Python
1813 1813 # unittest implementation. It is very similar to skipped. It may make
1814 1814 # sense to map it into skip some day.
1815 1815 self.ignored = []
1816 1816
1817 1817 self.times = []
1818 1818 self._firststarttime = None
1819 1819 # Data stored for the benefit of generating xunit reports.
1820 1820 self.successes = []
1821 1821 self.faildata = {}
1822 1822
1823 1823 if options.color == 'auto':
1824 1824 self.color = pygmentspresent and self.stream.isatty()
1825 1825 elif options.color == 'never':
1826 1826 self.color = False
1827 1827 else: # 'always', for testing purposes
1828 1828 self.color = pygmentspresent
1829 1829
1830 1830 def onStart(self, test):
1831 1831 """ Can be overriden by custom TestResult
1832 1832 """
1833 1833
1834 1834 def onEnd(self):
1835 1835 """ Can be overriden by custom TestResult
1836 1836 """
1837 1837
1838 1838 def addFailure(self, test, reason):
1839 1839 self.failures.append((test, reason))
1840 1840
1841 1841 if self._options.first:
1842 1842 self.stop()
1843 1843 else:
1844 1844 with iolock:
1845 1845 if reason == "timed out":
1846 1846 self.stream.write('t')
1847 1847 else:
1848 1848 if not self._options.nodiff:
1849 1849 self.stream.write('\n')
1850 1850 # Exclude the '\n' from highlighting to lex correctly
1851 1851 formatted = 'ERROR: %s output changed\n' % test
1852 1852 self.stream.write(highlightmsg(formatted, self.color))
1853 1853 self.stream.write('!')
1854 1854
1855 1855 self.stream.flush()
1856 1856
1857 1857 def addSuccess(self, test):
1858 1858 with iolock:
1859 1859 super(TestResult, self).addSuccess(test)
1860 1860 self.successes.append(test)
1861 1861
1862 1862 def addError(self, test, err):
1863 1863 super(TestResult, self).addError(test, err)
1864 1864 if self._options.first:
1865 1865 self.stop()
1866 1866
1867 1867 # Polyfill.
1868 1868 def addSkip(self, test, reason):
1869 1869 self.skipped.append((test, reason))
1870 1870 with iolock:
1871 1871 if self.showAll:
1872 1872 self.stream.writeln('skipped %s' % reason)
1873 1873 else:
1874 1874 self.stream.write('s')
1875 1875 self.stream.flush()
1876 1876
1877 1877 def addIgnore(self, test, reason):
1878 1878 self.ignored.append((test, reason))
1879 1879 with iolock:
1880 1880 if self.showAll:
1881 1881 self.stream.writeln('ignored %s' % reason)
1882 1882 else:
1883 1883 if reason not in ('not retesting', "doesn't match keyword"):
1884 1884 self.stream.write('i')
1885 1885 else:
1886 1886 self.testsRun += 1
1887 1887 self.stream.flush()
1888 1888
1889 1889 def addOutputMismatch(self, test, ret, got, expected):
1890 1890 """Record a mismatch in test output for a particular test."""
1891 1891 if self.shouldStop or firsterror:
1892 1892 # don't print anything: some other test case already failed and
1893 1893 # printed its diff; we're just stale and probably failed due to
1894 1894 # our temp dir getting cleaned up.
1895 1895 return
1896 1896
1897 1897 accepted = False
1898 1898 lines = []
1899 1899
1900 1900 with iolock:
1901 1901 if self._options.nodiff:
1902 1902 pass
1903 1903 elif self._options.view:
1904 1904 v = self._options.view
1905 1905 os.system(r"%s %s %s" %
1906 1906 (v, _strpath(test.refpath), _strpath(test.errpath)))
1907 1907 else:
1908 1908 servefail, lines = getdiff(expected, got,
1909 1909 test.refpath, test.errpath)
1910 1910 self.stream.write('\n')
1911 1911 for line in lines:
1912 1912 line = highlightdiff(line, self.color)
1913 1913 if PYTHON3:
1914 1914 self.stream.flush()
1915 1915 self.stream.buffer.write(line)
1916 1916 self.stream.buffer.flush()
1917 1917 else:
1918 1918 self.stream.write(line)
1919 1919 self.stream.flush()
1920 1920
1921 1921 if servefail:
1922 1922 raise test.failureException(
1923 1923 'server failed to start (HGPORT=%s)' % test._startport)
1924 1924
1925 1925 # handle interactive prompt without releasing iolock
1926 1926 if self._options.interactive:
1927 1927 if test.readrefout() != expected:
1928 1928 self.stream.write(
1929 1929 'Reference output has changed (run again to prompt '
1930 1930 'changes)')
1931 1931 else:
1932 1932 self.stream.write('Accept this change? [n] ')
1933 1933 self.stream.flush()
1934 1934 answer = sys.stdin.readline().strip()
1935 1935 if answer.lower() in ('y', 'yes'):
1936 1936 if test.path.endswith(b'.t'):
1937 1937 rename(test.errpath, test.path)
1938 1938 else:
1939 1939 rename(test.errpath, '%s.out' % test.path)
1940 1940 accepted = True
1941 1941 if not accepted:
1942 1942 self.faildata[test.name] = b''.join(lines)
1943 1943
1944 1944 return accepted
1945 1945
1946 1946 def startTest(self, test):
1947 1947 super(TestResult, self).startTest(test)
1948 1948
1949 1949 # os.times() returns the user and system CPU time consumed by the
1950 1950 # process and its children, along with the real elapsed time. Its
1951 1951 # main limitation is that the children and elapsed fields are only
1952 1952 # populated on Unix-like systems; on Windows they are zero.
1953 1953 test.started = os.times()
1954 1954 if self._firststarttime is None: # thread racy but irrelevant
1955 1955 self._firststarttime = test.started[4]
1956 1956
1957 1957 def stopTest(self, test, interrupted=False):
1958 1958 super(TestResult, self).stopTest(test)
1959 1959
1960 1960 test.stopped = os.times()
1961 1961
1962 1962 starttime = test.started
1963 1963 endtime = test.stopped
1964 1964 origin = self._firststarttime
1965 1965 self.times.append((test.name,
1966 1966 endtime[2] - starttime[2], # user space CPU time
1967 1967 endtime[3] - starttime[3], # sys space CPU time
1968 1968 endtime[4] - starttime[4], # real time
1969 1969 starttime[4] - origin, # start date in run context
1970 1970 endtime[4] - origin, # end date in run context
1971 1971 ))
1972 1972
1973 1973 if interrupted:
1974 1974 with iolock:
1975 1975 self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
1976 1976 test.name, self.times[-1][3]))
1977 1977
1978 1978 def getTestResult():
1979 1979 """
1980 1980 Returns the relevant test result
1981 1981 """
1982 1982 if "CUSTOM_TEST_RESULT" in os.environ:
1983 1983 testresultmodule = __import__(os.environ["CUSTOM_TEST_RESULT"])
1984 1984 return testresultmodule.TestResult
1985 1985 else:
1986 1986 return TestResult
1987 1987
1988 1988 class TestSuite(unittest.TestSuite):
1989 1989 """Custom unittest TestSuite that knows how to execute Mercurial tests."""
1990 1990
1991 1991 def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
1992 1992 retest=False, keywords=None, loop=False, runs_per_test=1,
1993 1993 loadtest=None, showchannels=False,
1994 1994 *args, **kwargs):
1995 1995 """Create a new instance that can run tests with a configuration.
1996 1996
1997 1997 testdir specifies the directory where tests are executed from. This
1998 1998 is typically the ``tests`` directory from Mercurial's source
1999 1999 repository.
2000 2000
2001 2001 jobs specifies the number of jobs to run concurrently. Each test
2002 2002 executes on its own thread. Tests actually spawn new processes, so
2003 2003 state mutation should not be an issue.
2004 2004
2005 2005 If there is only one job, it will use the main thread.
2006 2006
2007 2007 whitelist and blacklist denote tests that have been whitelisted and
2008 2008 blacklisted, respectively. These arguments don't belong in TestSuite.
2009 2009 Instead, whitelist and blacklist should be handled by the thing that
2010 2010 populates the TestSuite with tests. They are present to preserve
2011 2011 backwards compatible behavior which reports skipped tests as part
2012 2012 of the results.
2013 2013
2014 2014 retest denotes whether to retest failed tests. This arguably belongs
2015 2015 outside of TestSuite.
2016 2016
2017 2017 keywords denotes key words that will be used to filter which tests
2018 2018 to execute. This arguably belongs outside of TestSuite.
2019 2019
2020 2020 loop denotes whether to loop over tests forever.
2021 2021 """
2022 2022 super(TestSuite, self).__init__(*args, **kwargs)
2023 2023
2024 2024 self._jobs = jobs
2025 2025 self._whitelist = whitelist
2026 2026 self._blacklist = blacklist
2027 2027 self._retest = retest
2028 2028 self._keywords = keywords
2029 2029 self._loop = loop
2030 2030 self._runs_per_test = runs_per_test
2031 2031 self._loadtest = loadtest
2032 2032 self._showchannels = showchannels
2033 2033
2034 2034 def run(self, result):
2035 2035 # We have a number of filters that need to be applied. We do this
2036 2036 # here instead of inside Test because it makes the running logic for
2037 2037 # Test simpler.
2038 2038 tests = []
2039 2039 num_tests = [0]
2040 2040 for test in self._tests:
2041 2041 def get():
2042 2042 num_tests[0] += 1
2043 2043 if getattr(test, 'should_reload', False):
2044 2044 return self._loadtest(test, num_tests[0])
2045 2045 return test
2046 2046 if not os.path.exists(test.path):
2047 2047 result.addSkip(test, "Doesn't exist")
2048 2048 continue
2049 2049
2050 2050 if not (self._whitelist and test.bname in self._whitelist):
2051 2051 if self._blacklist and test.bname in self._blacklist:
2052 2052 result.addSkip(test, 'blacklisted')
2053 2053 continue
2054 2054
2055 2055 if self._retest and not os.path.exists(test.errpath):
2056 2056 result.addIgnore(test, 'not retesting')
2057 2057 continue
2058 2058
2059 2059 if self._keywords:
2060 2060 with open(test.path, 'rb') as f:
2061 2061 t = f.read().lower() + test.bname.lower()
2062 2062 ignored = False
2063 2063 for k in self._keywords.lower().split():
2064 2064 if k not in t:
2065 2065 result.addIgnore(test, "doesn't match keyword")
2066 2066 ignored = True
2067 2067 break
2068 2068
2069 2069 if ignored:
2070 2070 continue
2071 2071 for _ in xrange(self._runs_per_test):
2072 2072 tests.append(get())
2073 2073
2074 2074 runtests = list(tests)
2075 2075 done = queue.Queue()
2076 2076 running = 0
2077 2077
2078 2078 channels = [""] * self._jobs
2079 2079
2080 2080 def job(test, result):
2081 2081 for n, v in enumerate(channels):
2082 2082 if not v:
2083 2083 channel = n
2084 2084 break
2085 2085 else:
2086 2086 raise ValueError('Could not find output channel')
2087 2087 channels[channel] = "=" + test.name[5:].split(".")[0]
2088 2088 try:
2089 2089 test(result)
2090 2090 done.put(None)
2091 2091 except KeyboardInterrupt:
2092 2092 pass
2093 2093 except: # re-raises
2094 2094 done.put(('!', test, 'run-test raised an error, see traceback'))
2095 2095 raise
2096 2096 finally:
2097 2097 try:
2098 2098 channels[channel] = ''
2099 2099 except IndexError:
2100 2100 pass
2101 2101
2102 2102 def stat():
2103 2103 count = 0
2104 2104 while channels:
2105 2105 d = '\n%03s ' % count
2106 2106 for n, v in enumerate(channels):
2107 2107 if v:
2108 2108 d += v[0]
2109 2109 channels[n] = v[1:] or '.'
2110 2110 else:
2111 2111 d += ' '
2112 2112 d += ' '
2113 2113 with iolock:
2114 2114 sys.stdout.write(d + ' ')
2115 2115 sys.stdout.flush()
2116 2116 for x in xrange(10):
2117 2117 if channels:
2118 2118 time.sleep(.1)
2119 2119 count += 1
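# Illustrative --showchannels rendering (hypothetical names): each tick
# emits the counter plus one character per channel, so successive lines
# spell '=<testname>' vertically down each column, print '.' once a
# name is exhausted, and leave a blank for idle channels.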
2120 2120
2121 2121 stoppedearly = False
2122 2122
2123 2123 if self._showchannels:
2124 2124 statthread = threading.Thread(target=stat, name="stat")
2125 2125 statthread.start()
2126 2126
2127 2127 try:
2128 2128 while tests or running:
2129 2129 if not done.empty() or running == self._jobs or not tests:
2130 2130 try:
2131 2131 done.get(True, 1)
2132 2132 running -= 1
2133 2133 if result and result.shouldStop:
2134 2134 stoppedearly = True
2135 2135 break
2136 2136 except queue.Empty:
2137 2137 continue
2138 2138 if tests and not running == self._jobs:
2139 2139 test = tests.pop(0)
2140 2140 if self._loop:
2141 2141 if getattr(test, 'should_reload', False):
2142 2142 num_tests[0] += 1
2143 2143 tests.append(
2144 2144 self._loadtest(test, num_tests[0]))
2145 2145 else:
2146 2146 tests.append(test)
2147 2147 if self._jobs == 1:
2148 2148 job(test, result)
2149 2149 else:
2150 2150 t = threading.Thread(target=job, name=test.name,
2151 2151 args=(test, result))
2152 2152 t.start()
2153 2153 running += 1
2154 2154
2155 2155 # If we stop early we still need to wait on started tests to
2156 2156 # finish. Otherwise, there is a race between the test completing
2157 2157 # and the test's cleanup code running. This could result in the
2158 2158 # test reporting incorrect results.
2159 2159 if stoppedearly:
2160 2160 while running:
2161 2161 try:
2162 2162 done.get(True, 1)
2163 2163 running -= 1
2164 2164 except queue.Empty:
2165 2165 continue
2166 2166 except KeyboardInterrupt:
2167 2167 for test in runtests:
2168 2168 test.abort()
2169 2169
2170 2170 channels = []
2171 2171
2172 2172 return result
2173 2173
2174 2174 # Save the most recent 5 wall-clock runtimes of each test to a
2175 2175 # human-readable text file named .testtimes. Tests are sorted
2176 2176 # alphabetically, while times for each test are listed from oldest to
2177 2177 # newest.
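# An illustrative .testtimes entry (hypothetical name and timings):
#   test-merge-tools.t 2.341 2.198 2.502
# i.e. the test name followed by up to five space-separated runtimes in
# seconds, matching the format written by savetimes() below.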
2178 2178
2179 2179 def loadtimes(outputdir):
2180 2180 times = []
2181 2181 try:
2182 2182 with open(os.path.join(outputdir, b'.testtimes')) as fp:
2183 2183 for line in fp:
2184 2184 m = re.match('(.*?) ([0-9. ]+)', line)
2185 2185 times.append((m.group(1),
2186 2186 [float(t) for t in m.group(2).split()]))
2187 2187 except IOError as err:
2188 2188 if err.errno != errno.ENOENT:
2189 2189 raise
2190 2190 return times
2191 2191
2192 2192 def savetimes(outputdir, result):
2193 2193 saved = dict(loadtimes(outputdir))
2194 2194 maxruns = 5
2195 2195 skipped = set([str(t[0]) for t in result.skipped])
2196 2196 for tdata in result.times:
2197 2197 test, real = tdata[0], tdata[3]
2198 2198 if test not in skipped:
2199 2199 ts = saved.setdefault(test, [])
2200 2200 ts.append(real)
2201 2201 ts[:] = ts[-maxruns:]
2202 2202
2203 2203 fd, tmpname = tempfile.mkstemp(prefix=b'.testtimes',
2204 2204 dir=outputdir, text=True)
2205 2205 with os.fdopen(fd, 'w') as fp:
2206 2206 for name, ts in sorted(saved.items()):
2207 2207 fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts])))
2208 2208 timepath = os.path.join(outputdir, b'.testtimes')
2209 2209 try:
2210 2210 os.unlink(timepath)
2211 2211 except OSError:
2212 2212 pass
2213 2213 try:
2214 2214 os.rename(tmpname, timepath)
2215 2215 except OSError:
2216 2216 pass
2217 2217
2218 2218 class TextTestRunner(unittest.TextTestRunner):
2219 2219 """Custom unittest test runner that uses appropriate settings."""
2220 2220
2221 2221 def __init__(self, runner, *args, **kwargs):
2222 2222 super(TextTestRunner, self).__init__(*args, **kwargs)
2223 2223
2224 2224 self._runner = runner
2225 2225
2226 2226 self._result = getTestResult()(self._runner.options, self.stream,
2227 2227 self.descriptions, self.verbosity)
2228 2228
2229 2229 def listtests(self, test):
2230 2230 test = sorted(test, key=lambda t: t.name)
2231 2231
2232 2232 self._result.onStart(test)
2233 2233
2234 2234 for t in test:
2235 2235 print(t.name)
2236 2236 self._result.addSuccess(t)
2237 2237
2238 2238 if self._runner.options.xunit:
2239 2239 with open(self._runner.options.xunit, "wb") as xuf:
2240 2240 self._writexunit(self._result, xuf)
2241 2241
2242 2242 if self._runner.options.json:
2243 2243 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2244 2244 with open(jsonpath, 'w') as fp:
2245 2245 self._writejson(self._result, fp)
2246 2246
2247 2247 return self._result
2248 2248
2249 2249 def run(self, test):
2250 2250 self._result.onStart(test)
2251 2251 test(self._result)
2252 2252
2253 2253 failed = len(self._result.failures)
2254 2254 skipped = len(self._result.skipped)
2255 2255 ignored = len(self._result.ignored)
2256 2256
2257 2257 with iolock:
2258 2258 self.stream.writeln('')
2259 2259
2260 2260 if not self._runner.options.noskips:
2261 2261 for test, msg in self._result.skipped:
2262 2262 formatted = 'Skipped %s: %s\n' % (test.name, msg)
2263 2263 msg = highlightmsg(formatted, self._result.color)
2264 2264 self.stream.write(msg)
2265 2265 for test, msg in self._result.failures:
2266 2266 formatted = 'Failed %s: %s\n' % (test.name, msg)
2267 2267 self.stream.write(highlightmsg(formatted, self._result.color))
2268 2268 for test, msg in self._result.errors:
2269 2269 self.stream.writeln('Errored %s: %s' % (test.name, msg))
2270 2270
2271 2271 if self._runner.options.xunit:
2272 2272 with open(self._runner.options.xunit, "wb") as xuf:
2273 2273 self._writexunit(self._result, xuf)
2274 2274
2275 2275 if self._runner.options.json:
2276 2276 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2277 2277 with open(jsonpath, 'w') as fp:
2278 2278 self._writejson(self._result, fp)
2279 2279
2280 2280 self._runner._checkhglib('Tested')
2281 2281
2282 2282 savetimes(self._runner._outputdir, self._result)
2283 2283
2284 2284 if failed and self._runner.options.known_good_rev:
2285 2285 self._bisecttests(t for t, m in self._result.failures)
2286 2286 self.stream.writeln(
2287 2287 '# Ran %d tests, %d skipped, %d failed.'
2288 2288 % (self._result.testsRun, skipped + ignored, failed))
2289 2289 if failed:
2290 2290 self.stream.writeln('python hash seed: %s' %
2291 2291 os.environ['PYTHONHASHSEED'])
2292 2292 if self._runner.options.time:
2293 2293 self.printtimes(self._result.times)
2294 2294
2295 2295 if self._runner.options.exceptions:
2296 2296 exceptions = aggregateexceptions(
2297 2297 os.path.join(self._runner._outputdir, b'exceptions'))
2298 2298
2299 2299 self.stream.writeln('Exceptions Report:')
2300 2300 self.stream.writeln('%d total from %d frames' %
2301 2301 (exceptions['total'],
2302 2302 len(exceptions['exceptioncounts'])))
2303 2303 combined = exceptions['combined']
2304 2304 for key in sorted(combined, key=combined.get, reverse=True):
2305 2305 frame, line, exc = key
2306 2306 totalcount, testcount, leastcount, leasttest = combined[key]
2307 2307
2308 2308 self.stream.writeln('%d (%d tests)\t%s: %s (%s - %d total)'
2309 2309 % (totalcount,
2310 2310 testcount,
2311 2311 frame, exc,
2312 2312 leasttest, leastcount))
2313 2313
2314 2314 self.stream.flush()
2315 2315
2316 2316 return self._result
2317 2317
2318 2318 def _bisecttests(self, tests):
2319 2319 bisectcmd = ['hg', 'bisect']
2320 2320 bisectrepo = self._runner.options.bisect_repo
2321 2321 if bisectrepo:
2322 2322 bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
2323 2323 def pread(args):
2324 2324 env = os.environ.copy()
2325 2325 env['HGPLAIN'] = '1'
2326 2326 p = subprocess.Popen(args, stderr=subprocess.STDOUT,
2327 2327 stdout=subprocess.PIPE, env=env)
2328 2328 data = p.stdout.read()
2329 2329 p.wait()
2330 2330 return data
2331 2331 for test in tests:
2332 2332 pread(bisectcmd + ['--reset'])
2333 2333 pread(bisectcmd + ['--bad', '.'])
2334 2334 pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
2335 2335 # TODO: we probably need to forward more options
2336 2336 # that alter hg's behavior inside the tests.
2337 2337 opts = ''
2338 2338 withhg = self._runner.options.with_hg
2339 2339 if withhg:
2340 2340 opts += ' --with-hg=%s ' % shellquote(_strpath(withhg))
2341 2341 rtc = '%s %s %s %s' % (sys.executable, sys.argv[0], opts,
2342 2342 test)
2343 2343 data = pread(bisectcmd + ['--command', rtc])
2344 2344 m = re.search(
2345 2345 (br'\nThe first (?P<goodbad>bad|good) revision '
2346 2346 br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
2347 2347 br'summary: +(?P<summary>[^\n]+)\n'),
2348 2348 data, (re.MULTILINE | re.DOTALL))
2349 2349 if m is None:
2350 2350 self.stream.writeln(
2351 2351 'Failed to identify failure point for %s' % test)
2352 2352 continue
2353 2353 dat = m.groupdict()
2354 2354 verb = 'broken' if dat['goodbad'] == b'bad' else 'fixed'
2355 2355 self.stream.writeln(
2356 2356 '%s %s by %s (%s)' % (
2357 2357 test, verb, dat['node'].decode('ascii'),
2358 2358 dat['summary'].decode('utf8', 'ignore')))
2359 2359
2360 2360 def printtimes(self, times):
2361 2361 # iolock held by run
2362 2362 self.stream.writeln('# Producing time report')
2363 2363 times.sort(key=lambda t: (t[3]))
2364 2364 cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
2365 2365 self.stream.writeln('%-7s %-7s %-7s %-7s %-7s %s' %
2366 2366 ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
2367 2367 for tdata in times:
2368 2368 test = tdata[0]
2369 2369 cuser, csys, real, start, end = tdata[1:6]
2370 2370 self.stream.writeln(cols % (start, end, cuser, csys, real, test))
2371 2371
2372 2372 @staticmethod
2373 2373 def _writexunit(result, outf):
2374 2374 # See http://llg.cubic.org/docs/junit/ for a reference.
2375 2375 timesd = dict((t[0], t[3]) for t in result.times)
2376 2376 doc = minidom.Document()
2377 2377 s = doc.createElement('testsuite')
2378 2378 s.setAttribute('name', 'run-tests')
2379 2379 s.setAttribute('tests', str(result.testsRun))
2380 2380 s.setAttribute('errors', "0") # TODO
2381 2381 s.setAttribute('failures', str(len(result.failures)))
2382 2382 s.setAttribute('skipped', str(len(result.skipped) +
2383 2383 len(result.ignored)))
2384 2384 doc.appendChild(s)
2385 2385 for tc in result.successes:
2386 2386 t = doc.createElement('testcase')
2387 2387 t.setAttribute('name', tc.name)
2388 2388 tctime = timesd.get(tc.name)
2389 2389 if tctime is not None:
2390 2390 t.setAttribute('time', '%.3f' % tctime)
2391 2391 s.appendChild(t)
2392 2392 for tc, err in sorted(result.faildata.items()):
2393 2393 t = doc.createElement('testcase')
2394 2394 t.setAttribute('name', tc)
2395 2395 tctime = timesd.get(tc)
2396 2396 if tctime is not None:
2397 2397 t.setAttribute('time', '%.3f' % tctime)
2398 2398 # createCDATASection expects a unicode or it will
2399 2399 # convert using default conversion rules, which will
2400 2400 # fail if string isn't ASCII.
2401 2401 err = cdatasafe(err).decode('utf-8', 'replace')
2402 2402 cd = doc.createCDATASection(err)
2403 2403 # Use 'failure' here instead of 'error' to match errors = 0,
2404 2404 # failures = len(result.failures) in the testsuite element.
2405 2405 failelem = doc.createElement('failure')
2406 2406 failelem.setAttribute('message', 'output changed')
2407 2407 failelem.setAttribute('type', 'output-mismatch')
2408 2408 failelem.appendChild(cd)
2409 2409 t.appendChild(failelem)
2410 2410 s.appendChild(t)
2411 2411 for tc, message in result.skipped:
2412 2412 # According to the schema, 'skipped' has no attributes. So store
2413 2413 # the skip message as a text node instead.
2414 2414 t = doc.createElement('testcase')
2415 2415 t.setAttribute('name', tc.name)
2416 2416 binmessage = message.encode('utf-8')
2417 2417 message = cdatasafe(binmessage).decode('utf-8', 'replace')
2418 2418 cd = doc.createCDATASection(message)
2419 2419 skipelem = doc.createElement('skipped')
2420 2420 skipelem.appendChild(cd)
2421 2421 t.appendChild(skipelem)
2422 2422 s.appendChild(t)
2423 2423 outf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
2424 2424
2425 2425 @staticmethod
2426 2426 def _writejson(result, outf):
2427 2427 timesd = {}
2428 2428 for tdata in result.times:
2429 2429 test = tdata[0]
2430 2430 timesd[test] = tdata[1:]
2431 2431
2432 2432 outcome = {}
2433 2433 groups = [('success', ((tc, None)
2434 2434 for tc in result.successes)),
2435 2435 ('failure', result.failures),
2436 2436 ('skip', result.skipped)]
2437 2437 for res, testcases in groups:
2438 2438 for tc, __ in testcases:
2439 2439 if tc.name in timesd:
2440 2440 diff = result.faildata.get(tc.name, b'')
2441 2441 try:
2442 2442 diff = diff.decode('unicode_escape')
2443 2443 except UnicodeDecodeError as e:
2444 2444 diff = '%r decoding diff, sorry' % e
2445 2445 tres = {'result': res,
2446 2446 'time': ('%0.3f' % timesd[tc.name][2]),
2447 2447 'cuser': ('%0.3f' % timesd[tc.name][0]),
2448 2448 'csys': ('%0.3f' % timesd[tc.name][1]),
2449 2449 'start': ('%0.3f' % timesd[tc.name][3]),
2450 2450 'end': ('%0.3f' % timesd[tc.name][4]),
2451 2451 'diff': diff,
2452 2452 }
2453 2453 else:
2454 2454 # blacklisted test
2455 2455 tres = {'result': res}
2456 2456
2457 2457 outcome[tc.name] = tres
2458 2458 jsonout = json.dumps(outcome, sort_keys=True, indent=4,
2459 2459 separators=(',', ': '))
2460 2460 outf.writelines(("testreport =", jsonout))
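# Illustrative report.json fragment (hypothetical test and numbers):
#   testreport ={
#       "test-example.t": {
#           "csys": "0.010", "cuser": "0.120", "diff": "",
#           "end": "1.500", "result": "success",
#           "start": "0.200", "time": "1.300"
#       }
#   }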
2461 2461
2462 2462 def sorttests(testdescs, previoustimes, shuffle=False):
2463 2463 """Do an in-place sort of tests."""
2464 2464 if shuffle:
2465 2465 random.shuffle(testdescs)
2466 2466 return
2467 2467
2468 2468 if previoustimes:
2469 2469 def sortkey(f):
2470 2470 f = f['path']
2471 2471 if f in previoustimes:
2472 2472 # Use most recent time as estimate
2473 2473 return -previoustimes[f][-1]
2474 2474 else:
2475 2475 # Default to a rather arbitrary value of 1 second for new tests
2476 2476 return -1.0
2477 2477 else:
2478 2478 # keywords for slow tests
2479 2479 slow = {b'svn': 10,
2480 2480 b'cvs': 10,
2481 2481 b'hghave': 10,
2482 2482 b'largefiles-update': 10,
2483 2483 b'run-tests': 10,
2484 2484 b'corruption': 10,
2485 2485 b'race': 10,
2486 2486 b'i18n': 10,
2487 2487 b'check': 100,
2488 2488 b'gendoc': 100,
2489 2489 b'contrib-perf': 200,
2490 2490 }
2491 2491 perf = {}
2492 2492
2493 2493 def sortkey(f):
2494 2494 # run largest tests first, as they tend to take the longest
2495 2495 f = f['path']
2496 2496 try:
2497 2497 return perf[f]
2498 2498 except KeyError:
2499 2499 try:
2500 2500 val = -os.stat(f).st_size
2501 2501 except OSError as e:
2502 2502 if e.errno != errno.ENOENT:
2503 2503 raise
2504 2504 perf[f] = -1e9 # file does not exist, tell early
2505 2505 return -1e9
2506 2506 for kw, mul in slow.items():
2507 2507 if kw in f:
2508 2508 val *= mul
2509 2509 if f.endswith(b'.py'):
2510 2510 val /= 10.0
2511 2511 perf[f] = val / 1000.0
2512 2512 return perf[f]
2513 2513
2514 2514 testdescs.sort(key=sortkey)
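# Worked example of the size-based fallback (hypothetical sizes): a
# 20,000-byte test-largefiles-update.t scores -20000 * 10 / 1000 = -200.0,
# a plain 20,000-byte .t scores -20.0, and a 20,000-byte .py test scores
# -2.0, so the keyword-weighted (presumed slower) test is scheduled first.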
2515 2515
2516 2516 class TestRunner(object):
2517 2517 """Holds context for executing tests.
2518 2518
2519 2519 Tests rely on a lot of state. This object holds it for them.
2520 2520 """
2521 2521
2522 2522 # Programs required to run tests.
2523 2523 REQUIREDTOOLS = [
2524 2524 b'diff',
2525 2525 b'grep',
2526 2526 b'unzip',
2527 2527 b'gunzip',
2528 2528 b'bunzip2',
2529 2529 b'sed',
2530 2530 ]
2531 2531
2532 2532 # Maps file extensions to test class.
2533 2533 TESTTYPES = [
2534 2534 (b'.py', PythonTest),
2535 2535 (b'.t', TTest),
2536 2536 ]
2537 2537
2538 2538 def __init__(self):
2539 2539 self.options = None
2540 2540 self._hgroot = None
2541 2541 self._testdir = None
2542 2542 self._outputdir = None
2543 2543 self._hgtmp = None
2544 2544 self._installdir = None
2545 2545 self._bindir = None
2546 2546 self._tmpbinddir = None
2547 2547 self._pythondir = None
2548 2548 self._coveragefile = None
2549 2549 self._createdfiles = []
2550 2550 self._hgcommand = None
2551 2551 self._hgpath = None
2552 2552 self._portoffset = 0
2553 2553 self._ports = {}
2554 2554
2555 2555 def run(self, args, parser=None):
2556 2556 """Run the test suite."""
2557 2557 oldmask = os.umask(0o22)
2558 2558 try:
2559 2559 parser = parser or getparser()
2560 2560 options = parseargs(args, parser)
2561 2561 tests = [_bytespath(a) for a in options.tests]
2562 2562 if options.test_list is not None:
2563 2563 for listfile in options.test_list:
2564 2564 with open(listfile, 'rb') as f:
2565 2565 tests.extend(t for t in f.read().splitlines() if t)
2566 2566 self.options = options
2567 2567
2568 2568 self._checktools()
2569 2569 testdescs = self.findtests(tests)
2570 2570 if options.profile_runner:
2571 2571 import statprof
2572 2572 statprof.start()
2573 2573 result = self._run(testdescs)
2574 2574 if options.profile_runner:
2575 2575 statprof.stop()
2576 2576 statprof.display()
2577 2577 return result
2578 2578
2579 2579 finally:
2580 2580 os.umask(oldmask)
2581 2581
2582 2582 def _run(self, testdescs):
2583 2583 testdir = getcwdb()
2584 2584 self._testdir = osenvironb[b'TESTDIR'] = getcwdb()
2585 2585 # assume all tests in same folder for now
2586 2586 if testdescs:
2587 2587 pathname = os.path.dirname(testdescs[0]['path'])
2588 2588 if pathname:
2589 2589 testdir = os.path.join(testdir, pathname)
2590 2590 self._testdir = osenvironb[b'TESTDIR'] = testdir
2591 2591 if self.options.outputdir:
2592 2592 self._outputdir = canonpath(_bytespath(self.options.outputdir))
2593 2593 else:
2594 2594 self._outputdir = getcwdb()
2595 2595 if testdescs and pathname:
2596 2596 self._outputdir = os.path.join(self._outputdir, pathname)
2597 2597 previoustimes = {}
2598 2598 if self.options.order_by_runtime:
2599 2599 previoustimes = dict(loadtimes(self._outputdir))
2600 2600 sorttests(testdescs, previoustimes, shuffle=self.options.random)
2601 2601
2602 2602 if 'PYTHONHASHSEED' not in os.environ:
2603 2603 # use a random python hash seed all the time
2604 2604 # we do the randomness ourself to know what seed is used
2605 2605 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
2606 2606
2607 2607 if self.options.tmpdir:
2608 2608 self.options.keep_tmpdir = True
2609 2609 tmpdir = _bytespath(self.options.tmpdir)
2610 2610 if os.path.exists(tmpdir):
2611 2611 # Meaning of tmpdir has changed since 1.3: we used to create
2612 2612 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
2613 2613 # tmpdir already exists.
2614 2614 print("error: temp dir %r already exists" % tmpdir)
2615 2615 return 1
2616 2616
2617 2617 os.makedirs(tmpdir)
2618 2618 else:
2619 2619 d = None
2620 2620 if os.name == 'nt':
2621 2621 # without this, we get the default temp dir location, but
2622 2622 # in all lowercase, which causes trouble with paths (issue3490)
2623 2623 d = osenvironb.get(b'TMP', None)
2624 2624 tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)
2625 2625
2626 2626 self._hgtmp = osenvironb[b'HGTMP'] = (
2627 2627 os.path.realpath(tmpdir))
2628 2628
2629 2629 if self.options.with_hg:
2630 2630 self._installdir = None
2631 2631 whg = self.options.with_hg
2632 2632 self._bindir = os.path.dirname(os.path.realpath(whg))
2633 2633 assert isinstance(self._bindir, bytes)
2634 2634 self._hgcommand = os.path.basename(whg)
2635 2635 self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
2636 2636 os.makedirs(self._tmpbindir)
2637 2637
2638 2638 normbin = os.path.normpath(os.path.abspath(whg))
2639 2639 normbin = normbin.replace(os.sep.encode('ascii'), b'/')
2640 2640
2641 2641 # Other Python scripts in the test harness need to
2642 2642 # `import mercurial`. If `hg` is a Python script, we assume
2643 2643 # the Mercurial modules are relative to its path and tell the tests
2644 2644 # to load Python modules from its directory.
2645 2645 with open(whg, 'rb') as fh:
2646 2646 initial = fh.read(1024)
2647 2647
2648 2648 if re.match(b'#!.*python', initial):
2649 2649 self._pythondir = self._bindir
2650 2650 # If it looks like our in-repo Rust binary, use the source root.
2651 2651 # This is a bit hacky. But rhg is still not supported outside the
2652 2652 # source directory. So until it is, do the simple thing.
2653 2653 elif re.search(b'/rust/target/[^/]+/hg', normbin):
2654 2654 self._pythondir = os.path.dirname(self._testdir)
2655 2655 # Fall back to the legacy behavior.
2656 2656 else:
2657 2657 self._pythondir = self._bindir
2658 2658
2659 2659 else:
2660 2660 self._installdir = os.path.join(self._hgtmp, b"install")
2661 2661 self._bindir = os.path.join(self._installdir, b"bin")
2662 2662 self._hgcommand = b'hg'
2663 2663 self._tmpbindir = self._bindir
2664 2664 self._pythondir = os.path.join(self._installdir, b"lib", b"python")
2665 2665
2666 2666 # Force the use of hg.exe instead of relying on MSYS to recognize hg is
2667 2667 # a python script and feed it to python.exe. Legacy stdio is
2668 2668 # force-enabled by hg.exe, and this is a more realistic way to launch hg
2669 2669 # anyway.
2670 2670 if os.name == 'nt' and not self._hgcommand.endswith(b'.exe'):
2671 2671 self._hgcommand += b'.exe'
2672 2672
2673 2673 # set CHGHG, then replace "hg" command by "chg"
2674 2674 chgbindir = self._bindir
2675 2675 if self.options.chg or self.options.with_chg:
2676 2676 osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
2677 2677 else:
2678 2678 osenvironb.pop(b'CHGHG', None) # drop flag for hghave
2679 2679 if self.options.chg:
2680 2680 self._hgcommand = b'chg'
2681 2681 elif self.options.with_chg:
2682 2682 chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
2683 2683 self._hgcommand = os.path.basename(self.options.with_chg)
2684 2684
2685 2685 osenvironb[b"BINDIR"] = self._bindir
2686 2686 osenvironb[b"PYTHON"] = PYTHON
2687 2687
2688 2688 fileb = _bytespath(__file__)
2689 2689 runtestdir = os.path.abspath(os.path.dirname(fileb))
2690 2690 osenvironb[b'RUNTESTDIR'] = runtestdir
2691 2691 if PYTHON3:
2692 2692 sepb = _bytespath(os.pathsep)
2693 2693 else:
2694 2694 sepb = os.pathsep
2695 2695 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
2696 2696 if os.path.islink(__file__):
2697 2697 # test helper will likely be at the end of the symlink
2698 2698 realfile = os.path.realpath(fileb)
2699 2699 realdir = os.path.abspath(os.path.dirname(realfile))
2700 2700 path.insert(2, realdir)
2701 2701 if chgbindir != self._bindir:
2702 2702 path.insert(1, chgbindir)
2703 2703 if self._testdir != runtestdir:
2704 2704 path = [self._testdir] + path
2705 2705 if self._tmpbindir != self._bindir:
2706 2706 path = [self._tmpbindir] + path
2707 2707 osenvironb[b"PATH"] = sepb.join(path)
2708 2708
2709 2709 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
2710 2710 # can run .../tests/run-tests.py test-foo where test-foo
2711 2711 # adds an extension to HGRC. Also include run-test.py directory to
2712 2712 # import modules like heredoctest.
2713 2713 pypath = [self._pythondir, self._testdir, runtestdir]
2714 2714 # We have to augment PYTHONPATH, rather than simply replacing
2715 2715 # it, in case external libraries are only available via current
2716 2716 # PYTHONPATH. (In particular, the Subversion bindings on OS X
2717 2717 # are in /opt/subversion.)
2718 2718 oldpypath = osenvironb.get(IMPL_PATH)
2719 2719 if oldpypath:
2720 2720 pypath.append(oldpypath)
2721 2721 osenvironb[IMPL_PATH] = sepb.join(pypath)
2722 2722
2723 2723 if self.options.pure:
2724 2724 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
2725 2725 os.environ["HGMODULEPOLICY"] = "py"
2726 2726
2727 2727 if self.options.allow_slow_tests:
2728 2728 os.environ["HGTEST_SLOW"] = "slow"
2729 2729 elif 'HGTEST_SLOW' in os.environ:
2730 2730 del os.environ['HGTEST_SLOW']
2731 2731
2732 2732 self._coveragefile = os.path.join(self._testdir, b'.coverage')
2733 2733
2734 2734 if self.options.exceptions:
2735 2735 exceptionsdir = os.path.join(self._outputdir, b'exceptions')
2736 2736 try:
2737 2737 os.makedirs(exceptionsdir)
2738 2738 except OSError as e:
2739 2739 if e.errno != errno.EEXIST:
2740 2740 raise
2741 2741
2742 2742 # Remove all existing exception reports.
2743 2743 for f in os.listdir(exceptionsdir):
2744 2744 os.unlink(os.path.join(exceptionsdir, f))
2745 2745
2746 2746 osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir
2747 2747 logexceptions = os.path.join(self._testdir, b'logexceptions.py')
2748 2748 self.options.extra_config_opt.append(
2749 2749 'extensions.logexceptions=%s' % logexceptions.decode('utf-8'))
2750 2750
2751 2751 vlog("# Using TESTDIR", self._testdir)
2752 2752 vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
2753 2753 vlog("# Using HGTMP", self._hgtmp)
2754 2754 vlog("# Using PATH", os.environ["PATH"])
2755 2755 vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
2756 2756 vlog("# Writing to directory", self._outputdir)
2757 2757
2758 2758 try:
2759 2759 return self._runtests(testdescs) or 0
2760 2760 finally:
2761 2761 time.sleep(.1)
2762 2762 self._cleanup()
2763 2763
2764 2764 def findtests(self, args):
2765 2765 """Finds possible test files from arguments.
2766 2766
2767 2767 If you wish to inject custom tests into the test harness, this would
2768 2768 be a good function to monkeypatch or override in a derived class.
2769 2769 """
2770 2770 if not args:
2771 2771 if self.options.changed:
2772 2772 proc = Popen4('hg st --rev "%s" -man0 .' %
2773 2773 self.options.changed, None, 0)
2774 2774 stdout, stderr = proc.communicate()
2775 2775 args = stdout.strip(b'\0').split(b'\0')
2776 2776 else:
2777 2777 args = os.listdir(b'.')
2778 2778
2779 2779 expanded_args = []
2780 2780 for arg in args:
2781 2781 if os.path.isdir(arg):
2782 2782 if not arg.endswith(b'/'):
2783 2783 arg += b'/'
2784 2784 expanded_args.extend([arg + a for a in os.listdir(arg)])
2785 2785 else:
2786 2786 expanded_args.append(arg)
2787 2787 args = expanded_args
2788 2788
2789 2789 testcasepattern = re.compile(
2790 2790 br'([\w-]+\.(?:t|py))(?:#([a-zA-Z0-9_\-\.#]+))')
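# Illustrative matches (hypothetical names): b'test-foo.t#case1' yields
# groups (b'test-foo.t', b'case1'), and b'test-bar.t#a#b' yields
# (b'test-bar.t', b'a#b'), whose case string is split on b'#' below.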
2791 2791 tests = []
2792 2792 for t in args:
2793 2793 case = []
2794 2794
2795 2795 if not (os.path.basename(t).startswith(b'test-')
2796 2796 and (t.endswith(b'.py') or t.endswith(b'.t'))):
2797 2797
2798 2798 m = testcasepattern.match(os.path.basename(t))
2799 2799 if m is not None:
2800 2800 t_basename, casestr = m.groups()
2801 2801 t = os.path.join(os.path.dirname(t), t_basename)
2802 2802 if casestr:
2803 2803 case = casestr.split(b'#')
2804 2804 else:
2805 2805 continue
2806 2806
2807 2807 if t.endswith(b'.t'):
2808 2808 # .t file may contain multiple test cases
2809 2809 casedimensions = parsettestcases(t)
2810 2810 if casedimensions:
2811 2811 cases = []
2812 2812 def addcases(case, casedimensions):
2813 2813 if not casedimensions:
2814 2814 cases.append(case)
2815 2815 else:
2816 2816 for c in casedimensions[0]:
2817 2817 addcases(case + [c], casedimensions[1:])
2818 2818 addcases([], casedimensions)
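# Illustrative expansion (hypothetical dimensions): for casedimensions
# [[b'a', b'b'], [b'x', b'y']], addcases yields the cartesian product
# [a, x], [a, y], [b, x], [b, y] as the candidate cases.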
2819 2819 if case and case in cases:
2820 2820 cases = [case]
2821 2821 elif case:
2822 2822 # Ignore invalid cases
2823 2823 cases = []
2824 2824 else:
2825 2825 pass
2826 2826 tests += [{'path': t, 'case': c} for c in sorted(cases)]
2827 2827 else:
2828 2828 tests.append({'path': t})
2829 2829 else:
2830 2830 tests.append({'path': t})
2831 2831 return tests
2832 2832
2833 2833 def _runtests(self, testdescs):
2834 2834 def _reloadtest(test, i):
2835 2835 # convert a test back to its description dict
2836 2836 desc = {'path': test.path}
2837 2837 case = getattr(test, '_case', [])
2838 2838 if case:
2839 2839 desc['case'] = case
2840 2840 return self._gettest(desc, i)
2841 2841
2842 2842 try:
2843 2843 if self.options.restart:
2844 2844 orig = list(testdescs)
2845 2845 while testdescs:
2846 2846 desc = testdescs[0]
2847 2847 # desc['path'] is a relative path
2848 2848 if 'case' in desc:
2849 2849 casestr = b'#'.join(desc['case'])
2850 2850 errpath = b'%s#%s.err' % (desc['path'], casestr)
2851 2851 else:
2852 2852 errpath = b'%s.err' % desc['path']
2853 2853 errpath = os.path.join(self._outputdir, errpath)
2854 2854 if os.path.exists(errpath):
2855 2855 break
2856 2856 testdescs.pop(0)
2857 2857 if not testdescs:
2858 2858 print("running all tests")
2859 2859 testdescs = orig
2860 2860
2861 2861 tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]
2862
2863 jobs = min(len(tests), self.options.jobs)
2862 num_tests = len(tests) * self.options.runs_per_test
2863
2864 jobs = min(num_tests, self.options.jobs)
2864 2865
2865 2866 failed = False
2866 2867 kws = self.options.keywords
2867 2868 if kws is not None and PYTHON3:
2868 2869 kws = kws.encode('utf-8')
2869 2870
2870 2871 suite = TestSuite(self._testdir,
2871 2872 jobs=jobs,
2872 2873 whitelist=self.options.whitelisted,
2873 2874 blacklist=self.options.blacklist,
2874 2875 retest=self.options.retest,
2875 2876 keywords=kws,
2876 2877 loop=self.options.loop,
2877 2878 runs_per_test=self.options.runs_per_test,
2878 2879 showchannels=self.options.showchannels,
2879 2880 tests=tests, loadtest=_reloadtest)
2880 2881 verbosity = 1
2881 2882 if self.options.list_tests:
2882 2883 verbosity = 0
2883 2884 elif self.options.verbose:
2884 2885 verbosity = 2
2885 2886 runner = TextTestRunner(self, verbosity=verbosity)
2886 2887
2887 2888 if self.options.list_tests:
2888 2889 result = runner.listtests(suite)
2889 2890 else:
2890 2891 if self._installdir:
2891 2892 self._installhg()
2892 2893 self._checkhglib("Testing")
2893 2894 else:
2894 2895 self._usecorrectpython()
2895 2896 if self.options.chg:
2896 2897 assert self._installdir
2897 2898 self._installchg()
2898 2899
2899 2900 log('running %d tests using %d parallel processes' % (
2900 len(tests), jobs))
2901 num_tests, jobs))
2901 2902
2902 2903 result = runner.run(suite)
2903 2904
2904 2905 if result.failures:
2905 2906 failed = True
2906 2907
2907 2908 result.onEnd()
2908 2909
2909 2910 if self.options.anycoverage:
2910 2911 self._outputcoverage()
2911 2912 except KeyboardInterrupt:
2912 2913 failed = True
2913 2914 print("\ninterrupted!")
2914 2915
2915 2916 if failed:
2916 2917 return 1
2917 2918
2918 2919 def _getport(self, count):
2919 2920 port = self._ports.get(count) # do we have a cached entry?
2920 2921 if port is None:
2921 2922 portneeded = 3
2922 2923 # above 100 tries we just give up and let the test report failure
2923 2924 for tries in xrange(100):
2924 2925 allfree = True
2925 2926 port = self.options.port + self._portoffset
2926 2927 for idx in xrange(portneeded):
2927 2928 if not checkportisavailable(port + idx):
2928 2929 allfree = False
2929 2930 break
2930 2931 self._portoffset += portneeded
2931 2932 if allfree:
2932 2933 break
2933 2934 self._ports[count] = port
2934 2935 return port
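# Illustrative allocation (hypothetical base port 20000): each count
# reserves a block of three consecutive ports, so count 0 typically
# gets 20000 and the next caller 20003, skipping any block in which a
# port is already taken.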
2935 2936
2936 2937 def _gettest(self, testdesc, count):
2937 2938 """Obtain a Test by looking at its filename.
2938 2939
2939 2940 Returns a Test instance. The Test may not be runnable if it doesn't
2940 2941 map to a known type.
2941 2942 """
2942 2943 path = testdesc['path']
2943 2944 lctest = path.lower()
2944 2945 testcls = Test
2945 2946
2946 2947 for ext, cls in self.TESTTYPES:
2947 2948 if lctest.endswith(ext):
2948 2949 testcls = cls
2949 2950 break
2950 2951
2951 2952 refpath = os.path.join(getcwdb(), path)
2952 2953 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
2953 2954
2954 2955 # extra keyword parameters. 'case' is used by .t tests
2955 2956 kwds = dict((k, testdesc[k]) for k in ['case'] if k in testdesc)
2956 2957
2957 2958 t = testcls(refpath, self._outputdir, tmpdir,
2958 2959 keeptmpdir=self.options.keep_tmpdir,
2959 2960 debug=self.options.debug,
2960 2961 first=self.options.first,
2961 2962 timeout=self.options.timeout,
2962 2963 startport=self._getport(count),
2963 2964 extraconfigopts=self.options.extra_config_opt,
2964 2965 py3warnings=self.options.py3_warnings,
2965 2966 shell=self.options.shell,
2966 2967 hgcommand=self._hgcommand,
2967 2968 usechg=bool(self.options.with_chg or self.options.chg),
2968 2969 useipv6=useipv6, **kwds)
2969 2970 t.should_reload = True
2970 2971 return t
2971 2972
2972 2973 def _cleanup(self):
2973 2974 """Clean up state from this test invocation."""
2974 2975 if self.options.keep_tmpdir:
2975 2976 return
2976 2977
2977 2978 vlog("# Cleaning up HGTMP", self._hgtmp)
2978 2979 shutil.rmtree(self._hgtmp, True)
2979 2980 for f in self._createdfiles:
2980 2981 try:
2981 2982 os.remove(f)
2982 2983 except OSError:
2983 2984 pass
2984 2985
2985 2986 def _usecorrectpython(self):
2986 2987 """Configure the environment to use the appropriate Python in tests."""
2987 2988 # Tests must use the same interpreter as us or bad things will happen.
2988 2989 pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
2989 2990
2990 2991 # os.symlink() is a thing with py3 on Windows, but it requires
2991 2992 # Administrator rights.
2992 2993 if getattr(os, 'symlink', None) and os.name != 'nt':
2993 2994 vlog("# Making python executable in test path a symlink to '%s'" %
2994 2995 sys.executable)
2995 2996 mypython = os.path.join(self._tmpbindir, pyexename)
2996 2997 try:
2997 2998 if os.readlink(mypython) == sys.executable:
2998 2999 return
2999 3000 os.unlink(mypython)
3000 3001 except OSError as err:
3001 3002 if err.errno != errno.ENOENT:
3002 3003 raise
3003 3004 if self._findprogram(pyexename) != sys.executable:
3004 3005 try:
3005 3006 os.symlink(sys.executable, mypython)
3006 3007 self._createdfiles.append(mypython)
3007 3008 except OSError as err:
3008 3009 # child processes may race, which is harmless
3009 3010 if err.errno != errno.EEXIST:
3010 3011 raise
3011 3012 else:
3012 3013 exedir, exename = os.path.split(sys.executable)
3013 3014 vlog("# Modifying search path to find %s as %s in '%s'" %
3014 3015 (exename, pyexename, exedir))
3015 3016 path = os.environ['PATH'].split(os.pathsep)
3016 3017 while exedir in path:
3017 3018 path.remove(exedir)
3018 3019 os.environ['PATH'] = os.pathsep.join([exedir] + path)
3019 3020 if not self._findprogram(pyexename):
3020 3021 print("WARNING: Cannot find %s in search path" % pyexename)
3021 3022
3022 3023 def _installhg(self):
3023 3024 """Install hg into the test environment.
3024 3025
3025 3026 This will also configure hg with the appropriate testing settings.
3026 3027 """
3027 3028 vlog("# Performing temporary installation of HG")
3028 3029 installerrs = os.path.join(self._hgtmp, b"install.err")
3029 3030 compiler = ''
3030 3031 if self.options.compiler:
3031 3032 compiler = '--compiler ' + self.options.compiler
3032 3033 if self.options.pure:
3033 3034 pure = b"--pure"
3034 3035 else:
3035 3036 pure = b""
3036 3037
3037 3038 # Run installer in hg root
3038 3039 script = os.path.realpath(sys.argv[0])
3039 3040 exe = sys.executable
3040 3041 if PYTHON3:
3041 3042 compiler = _bytespath(compiler)
3042 3043 script = _bytespath(script)
3043 3044 exe = _bytespath(exe)
3044 3045 hgroot = os.path.dirname(os.path.dirname(script))
3045 3046 self._hgroot = hgroot
3046 3047 os.chdir(hgroot)
3047 3048 nohome = b'--home=""'
3048 3049 if os.name == 'nt':
3049 3050 # The --home="" trick works only on OS where os.sep == '/'
3050 3051 # because of a distutils convert_path() fast-path. Avoid it at
3051 3052 # least on Windows for now, deal with .pydistutils.cfg bugs
3052 3053 # when they happen.
3053 3054 nohome = b''
3054 3055 cmd = (b'"%(exe)s" setup.py %(pure)s clean --all'
3055 3056 b' build %(compiler)s --build-base="%(base)s"'
3056 3057 b' install --force --prefix="%(prefix)s"'
3057 3058 b' --install-lib="%(libdir)s"'
3058 3059 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
3059 3060 % {b'exe': exe, b'pure': pure,
3060 3061 b'compiler': compiler,
3061 3062 b'base': os.path.join(self._hgtmp, b"build"),
3062 3063 b'prefix': self._installdir, b'libdir': self._pythondir,
3063 3064 b'bindir': self._bindir,
3064 3065 b'nohome': nohome, b'logfile': installerrs})
3065 3066
3066 3067 # setuptools requires install directories to exist.
3067 3068 def makedirs(p):
3068 3069 try:
3069 3070 os.makedirs(p)
3070 3071 except OSError as e:
3071 3072 if e.errno != errno.EEXIST:
3072 3073 raise
3073 3074 makedirs(self._pythondir)
3074 3075 makedirs(self._bindir)
3075 3076
3076 3077 vlog("# Running", cmd)
3077 3078 if subprocess.call(_strpath(cmd), shell=True) == 0:
3078 3079 if not self.options.verbose:
3079 3080 try:
3080 3081 os.remove(installerrs)
3081 3082 except OSError as e:
3082 3083 if e.errno != errno.ENOENT:
3083 3084 raise
3084 3085 else:
3085 3086 with open(installerrs, 'rb') as f:
3086 3087 for line in f:
3087 3088 if PYTHON3:
3088 3089 sys.stdout.buffer.write(line)
3089 3090 else:
3090 3091 sys.stdout.write(line)
3091 3092 sys.exit(1)
3092 3093 os.chdir(self._testdir)
3093 3094
3094 3095 self._usecorrectpython()
3095 3096
3096 3097 if self.options.py3_warnings and not self.options.anycoverage:
3097 3098 vlog("# Updating hg command to enable Py3k Warnings switch")
3098 3099 with open(os.path.join(self._bindir, 'hg'), 'rb') as f:
3099 3100 lines = [line.rstrip() for line in f]
3100 3101 lines[0] += ' -3'
3101 3102 with open(os.path.join(self._bindir, 'hg'), 'wb') as f:
3102 3103 for line in lines:
3103 3104 f.write(line + '\n')
3104 3105
3105 3106 hgbat = os.path.join(self._bindir, b'hg.bat')
3106 3107 if os.path.isfile(hgbat):
3107 3108 # hg.bat expects to be put in bin/scripts, while the run-tests.py
3108 3109 # installation layout puts it in bin/ directly. Fix it.
3109 3110 with open(hgbat, 'rb') as f:
3110 3111 data = f.read()
3111 3112 if b'"%~dp0..\python" "%~dp0hg" %*' in data:
3112 3113 data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
3113 3114 b'"%~dp0python" "%~dp0hg" %*')
3114 3115 with open(hgbat, 'wb') as f:
3115 3116 f.write(data)
3116 3117 else:
3117 3118 print('WARNING: cannot fix hg.bat reference to python.exe')
3118 3119
3119 3120 if self.options.anycoverage:
3120 3121 custom = os.path.join(self._testdir, 'sitecustomize.py')
3121 3122 target = os.path.join(self._pythondir, 'sitecustomize.py')
3122 3123 vlog('# Installing coverage trigger to %s' % target)
3123 3124 shutil.copyfile(custom, target)
3124 3125 rc = os.path.join(self._testdir, '.coveragerc')
3125 3126 vlog('# Installing coverage rc to %s' % rc)
3126 3127 os.environ['COVERAGE_PROCESS_START'] = rc
3127 3128 covdir = os.path.join(self._installdir, '..', 'coverage')
3128 3129 try:
3129 3130 os.mkdir(covdir)
3130 3131 except OSError as e:
3131 3132 if e.errno != errno.EEXIST:
3132 3133 raise
3133 3134
3134 3135 os.environ['COVERAGE_DIR'] = covdir
3135 3136
3136 3137 def _checkhglib(self, verb):
3137 3138 """Ensure that the 'mercurial' package imported by python is
3138 3139 the one we expect it to be. If not, print a warning to stderr."""
3139 3140 if ((self._bindir == self._pythondir) and
3140 3141 (self._bindir != self._tmpbindir)):
3141 3142 # The pythondir has been inferred from --with-hg flag.
3142 3143 # We cannot expect anything sensible here.
3143 3144 return
3144 3145 expecthg = os.path.join(self._pythondir, b'mercurial')
3145 3146 actualhg = self._gethgpath()
3146 3147 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
3147 3148 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
3148 3149 ' (expected %s)\n'
3149 3150 % (verb, actualhg, expecthg))
3150 3151 def _gethgpath(self):
3151 3152 """Return the path to the mercurial package that is actually found by
3152 3153 the current Python interpreter."""
3153 3154 if self._hgpath is not None:
3154 3155 return self._hgpath
3155 3156
3156 3157 cmd = b'"%s" -c "import mercurial; print (mercurial.__path__[0])"'
3157 3158 cmd = cmd % PYTHON
3158 3159 if PYTHON3:
3159 3160 cmd = _strpath(cmd)
3160 3161
3161 3162 p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
3162 3163 out, err = p.communicate()
3163 3164
3164 3165 self._hgpath = out.strip()
3165 3166
3166 3167 return self._hgpath
3167 3168
3168 3169 def _installchg(self):
3169 3170 """Install chg into the test environment"""
3170 3171 vlog('# Performing temporary installation of CHG')
3171 3172 assert os.path.dirname(self._bindir) == self._installdir
3172 3173 assert self._hgroot, 'must be called after _installhg()'
3173 3174 cmd = (b'"%(make)s" clean install PREFIX="%(prefix)s"'
3174 3175 % {b'make': 'make', # TODO: switch by option or environment?
3175 3176 b'prefix': self._installdir})
3176 3177 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
3177 3178 vlog("# Running", cmd)
3178 3179 proc = subprocess.Popen(cmd, shell=True, cwd=cwd,
3179 3180 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
3180 3181 stderr=subprocess.STDOUT)
3181 3182 out, _err = proc.communicate()
3182 3183 if proc.returncode != 0:
3183 3184 if PYTHON3:
3184 3185 sys.stdout.buffer.write(out)
3185 3186 else:
3186 3187 sys.stdout.write(out)
3187 3188 sys.exit(1)
3188 3189
3189 3190 def _outputcoverage(self):
3190 3191 """Produce code coverage output."""
3191 3192 import coverage
3192 3193 coverage = coverage.coverage
3193 3194
3194 3195 vlog('# Producing coverage report')
3195 3196 # chdir is the easiest way to get short, relative paths in the
3196 3197 # output.
3197 3198 os.chdir(self._hgroot)
3198 3199 covdir = os.path.join(self._installdir, '..', 'coverage')
3199 3200 cov = coverage(data_file=os.path.join(covdir, 'cov'))
3200 3201
3201 3202 # Map install directory paths back to source directory.
3202 3203 cov.config.paths['srcdir'] = ['.', self._pythondir]
3203 3204
3204 3205 cov.combine()
3205 3206
3206 3207 omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
3207 3208 cov.report(ignore_errors=True, omit=omit)
3208 3209
3209 3210 if self.options.htmlcov:
3210 3211 htmldir = os.path.join(self._outputdir, 'htmlcov')
3211 3212 cov.html_report(directory=htmldir, omit=omit)
3212 3213 if self.options.annotate:
3213 3214 adir = os.path.join(self._outputdir, 'annotated')
3214 3215 if not os.path.isdir(adir):
3215 3216 os.mkdir(adir)
3216 3217 cov.annotate(directory=adir, omit=omit)
3217 3218
3218 3219 def _findprogram(self, program):
3219 3220 """Search PATH for a executable program"""
3220 3221 dpb = _bytespath(os.defpath)
3221 3222 sepb = _bytespath(os.pathsep)
3222 3223 for p in osenvironb.get(b'PATH', dpb).split(sepb):
3223 3224 name = os.path.join(p, program)
3224 3225 if os.name == 'nt' or os.access(name, os.X_OK):
3225 3226 return name
3226 3227 return None
3227 3228
3228 3229 def _checktools(self):
3229 3230 """Ensure tools required to run tests are present."""
3230 3231 for p in self.REQUIREDTOOLS:
3231 3232 if os.name == 'nt' and not p.endswith(b'.exe'):
3232 3233 p += b'.exe'
3233 3234 found = self._findprogram(p)
3234 3235 if found:
3235 3236 vlog("# Found prerequisite", p, "at", found)
3236 3237 else:
3237 3238 print("WARNING: Did not find prerequisite tool: %s " %
3238 3239 p.decode("utf-8"))
3239 3240
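# aggregateexceptions() below consumes one file per exception record, each
# holding five NUL-separated fields in the order (exc, mainframe, hgframe,
# hgline, testname). A minimal sketch of a conforming writer, assuming a
# hypothetical _writeexceptionrecord helper and uuid-based file names (both
# illustrative, not part of run-tests.py):
#
#     def _writeexceptionrecord(dirpath, exc, mainframe, hgframe, hgline,
#                               testname):
#         record = b'\0'.join([exc, mainframe, hgframe, hgline, testname])
#         with open(os.path.join(dirpath, uuid.uuid4().hex), 'wb') as fh:
#             fh.write(record)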
3240 3241 def aggregateexceptions(path):
3241 3242 exceptioncounts = collections.Counter()
3242 3243 testsbyfailure = collections.defaultdict(set)
3243 3244 failuresbytest = collections.defaultdict(set)
3244 3245
3245 3246 for f in os.listdir(path):
3246 3247 with open(os.path.join(path, f), 'rb') as fh:
3247 3248 data = fh.read().split(b'\0')
3248 3249 if len(data) != 5:
3249 3250 continue
3250 3251
3251 3252 exc, mainframe, hgframe, hgline, testname = data
3252 3253 exc = exc.decode('utf-8')
3253 3254 mainframe = mainframe.decode('utf-8')
3254 3255 hgframe = hgframe.decode('utf-8')
3255 3256 hgline = hgline.decode('utf-8')
3256 3257 testname = testname.decode('utf-8')
3257 3258
3258 3259 key = (hgframe, hgline, exc)
3259 3260 exceptioncounts[key] += 1
3260 3261 testsbyfailure[key].add(testname)
3261 3262 failuresbytest[testname].add(key)
3262 3263
3263 3264 # Find the test with the fewest failures for each failure.
3264 3265 leastfailing = {}
3265 3266 for key, tests in testsbyfailure.items():
3266 3267 fewesttest = None
3267 3268 fewestcount = 99999999
3268 3269 for test in sorted(tests):
3269 3270 if len(failuresbytest[test]) < fewestcount:
3270 3271 fewesttest = test
3271 3272 fewestcount = len(failuresbytest[test])
3272 3273
3273 3274 leastfailing[key] = (fewestcount, fewesttest)
3274 3275
3275 3276 # Create a combined counter so we can sort by total occurrences and
3276 3277 # impacted tests.
3277 3278 combined = {}
3278 3279 for key in exceptioncounts:
3279 3280 combined[key] = (exceptioncounts[key],
3280 3281 len(testsbyfailure[key]),
3281 3282 leastfailing[key][0],
3282 3283 leastfailing[key][1])
3283 3284
3284 3285 return {
3285 3286 'exceptioncounts': exceptioncounts,
3286 3287 'total': sum(exceptioncounts.values()),
3287 3288 'combined': combined,
3288 3289 'leastfailing': leastfailing,
3289 3290 'byfailure': testsbyfailure,
3290 3291 'bytest': failuresbytest,
3291 3292 }
3292 3293
3293 3294 if __name__ == '__main__':
3294 3295 runner = TestRunner()
3295 3296
3296 3297 try:
3297 3298 import msvcrt
3298 3299 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
3299 3300 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
3300 3301 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
3301 3302 except ImportError:
3302 3303 pass
3303 3304
3304 3305 sys.exit(runner.run(sys.argv[1:]))
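Since the `__main__` block above just forwards `sys.argv[1:]` to
`TestRunner.run()`, third-party tooling can call the runner with a hand-built
argument list of the same CLI flags. A minimal sketch (the hg script path and
test name are illustrative placeholders):

    runner = TestRunner()
    # Equivalent to: ./run-tests.py --with-hg=/path/to/hg -j1 test-example.t
    ret = runner.run(['--with-hg=/path/to/hg', '-j1', 'test-example.t'])
    sys.exit(ret)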
@@ -1,1932 +1,1938 b''
1 1 This file tests the behavior of run-tests.py itself.
2 2
3 3 Avoid interference from actual test env:
4 4
5 5 $ . "$TESTDIR/helper-runtests.sh"
6 6
7 7 Smoke test with install
8 8 ============
9 9 $ "$PYTHON" $TESTDIR/run-tests.py $HGTEST_RUN_TESTS_PURE -l
10 10 running 0 tests using 0 parallel processes
11 11
12 12 # Ran 0 tests, 0 skipped, 0 failed.
13 13
14 14 Define a helper to avoid the install step
15 15 =============
16 16 $ rt()
17 17 > {
18 18 > "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` -j1 "$@"
19 19 > }
20 20
21 21 error paths
22 22
23 23 #if symlink
24 24 $ ln -s `which true` hg
25 25 $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
26 26 warning: --with-hg should specify an hg script
27 27 running 0 tests using 0 parallel processes
28 28
29 29 # Ran 0 tests, 0 skipped, 0 failed.
30 30 $ rm hg
31 31 #endif
32 32
33 33 #if execbit
34 34 $ touch hg
35 35 $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
36 36 usage: run-tests.py [options] [tests]
37 37 run-tests.py: error: --with-hg must specify an executable hg script
38 38 [2]
39 39 $ rm hg
40 40 #endif
41 41
42 42 Features for testing optional lines
43 43 ===================================
44 44
45 45 $ cat > hghaveaddon.py <<EOF
46 46 > import hghave
47 47 > @hghave.check("custom", "custom hghave feature")
48 48 > def has_custom():
49 49 > return True
50 50 > @hghave.check("missing", "missing hghave feature")
51 51 > def has_missing():
52 52 > return False
53 53 > EOF
54 54
55 55 an empty test
56 56 =======================
57 57
58 58 $ touch test-empty.t
59 59 $ rt
60 60 running 1 tests using 1 parallel processes
61 61 .
62 62 # Ran 1 tests, 0 skipped, 0 failed.
63 63 $ rm test-empty.t
64 64
65 65 a successful test
66 66 =======================
67 67
68 68 $ cat > test-success.t << EOF
69 69 > $ echo babar
70 70 > babar
71 71 > $ echo xyzzy
72 72 > dont_print (?)
73 73 > nothing[42]line (re) (?)
74 74 > never*happens (glob) (?)
75 75 > more_nothing (?)
76 76 > xyzzy
77 77 > nor this (?)
78 78 > $ printf 'abc\ndef\nxyz\n'
79 79 > 123 (?)
80 80 > abc
81 81 > def (?)
82 82 > 456 (?)
83 83 > xyz
84 84 > $ printf 'zyx\nwvu\ntsr\n'
85 85 > abc (?)
86 86 > zyx (custom !)
87 87 > wvu
88 88 > no_print (no-custom !)
89 89 > tsr (no-missing !)
90 90 > missing (missing !)
91 91 > EOF
92 92
93 93 $ rt
94 94 running 1 tests using 1 parallel processes
95 95 .
96 96 # Ran 1 tests, 0 skipped, 0 failed.
97 97
98 98 failing test
99 99 ==================
100 100
101 101 test churn with globs
102 102 $ cat > test-failure.t <<EOF
103 103 > $ echo "bar-baz"; echo "bar-bad"; echo foo
104 104 > bar*bad (glob)
105 105 > bar*baz (glob)
106 106 > | fo (re)
107 107 > EOF
108 108 $ rt test-failure.t
109 109 running 1 tests using 1 parallel processes
110 110
111 111 --- $TESTTMP/test-failure.t
112 112 +++ $TESTTMP/test-failure.t.err
113 113 @@ -1,4 +1,4 @@
114 114 $ echo "bar-baz"; echo "bar-bad"; echo foo
115 115 + bar*baz (glob)
116 116 bar*bad (glob)
117 117 - bar*baz (glob)
118 118 - | fo (re)
119 119 + foo
120 120
121 121 ERROR: test-failure.t output changed
122 122 !
123 123 Failed test-failure.t: output changed
124 124 # Ran 1 tests, 0 skipped, 1 failed.
125 125 python hash seed: * (glob)
126 126 [1]
127 127
128 128 test how multiple globs gets matched with lines in output
129 129 $ cat > test-failure-globs.t <<EOF
130 130 > $ echo "context"; echo "context"; \
131 131 > echo "key: 1"; echo "value: not a"; \
132 132 > echo "key: 2"; echo "value: not b"; \
133 133 > echo "key: 3"; echo "value: c"; \
134 134 > echo "key: 4"; echo "value: d"
135 135 > context
136 136 > context
137 137 > key: 1
138 138 > value: a
139 139 > key: 2
140 140 > value: b
141 141 > key: 3
142 142 > value: * (glob)
143 143 > key: 4
144 144 > value: * (glob)
145 145 > EOF
146 146 $ rt test-failure-globs.t
147 147 running 1 tests using 1 parallel processes
148 148
149 149 --- $TESTTMP/test-failure-globs.t
150 150 +++ $TESTTMP/test-failure-globs.t.err
151 151 @@ -2,9 +2,9 @@
152 152 context
153 153 context
154 154 key: 1
155 155 - value: a
156 156 + value: not a
157 157 key: 2
158 158 - value: b
159 159 + value: not b
160 160 key: 3
161 161 value: * (glob)
162 162 key: 4
163 163
164 164 ERROR: test-failure-globs.t output changed
165 165 !
166 166 Failed test-failure-globs.t: output changed
167 167 # Ran 1 tests, 0 skipped, 1 failed.
168 168 python hash seed: * (glob)
169 169 [1]
170 170 $ rm test-failure-globs.t
171 171
172 172 test diff colorisation
173 173
174 174 #if no-windows pygments
175 175 $ rt test-failure.t --color always
176 176 running 1 tests using 1 parallel processes
177 177
178 178 \x1b[38;5;124m--- $TESTTMP/test-failure.t\x1b[39m (esc)
179 179 \x1b[38;5;34m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc)
180 180 \x1b[38;5;90;01m@@ -1,4 +1,4 @@\x1b[39;00m (esc)
181 181 $ echo "bar-baz"; echo "bar-bad"; echo foo
182 182 \x1b[38;5;34m+ bar*baz (glob)\x1b[39m (esc)
183 183 bar*bad (glob)
184 184 \x1b[38;5;124m- bar*baz (glob)\x1b[39m (esc)
185 185 \x1b[38;5;124m- | fo (re)\x1b[39m (esc)
186 186 \x1b[38;5;34m+ foo\x1b[39m (esc)
187 187
188 188 \x1b[38;5;88mERROR: \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m output changed\x1b[39m (esc)
189 189 !
190 190 \x1b[38;5;88mFailed \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m: output changed\x1b[39m (esc)
191 191 # Ran 1 tests, 0 skipped, 1 failed.
192 192 python hash seed: * (glob)
193 193 [1]
194 194
195 195 $ rt test-failure.t 2> tmp.log
196 196 running 1 tests using 1 parallel processes
197 197 [1]
198 198 $ cat tmp.log
199 199
200 200 --- $TESTTMP/test-failure.t
201 201 +++ $TESTTMP/test-failure.t.err
202 202 @@ -1,4 +1,4 @@
203 203 $ echo "bar-baz"; echo "bar-bad"; echo foo
204 204 + bar*baz (glob)
205 205 bar*bad (glob)
206 206 - bar*baz (glob)
207 207 - | fo (re)
208 208 + foo
209 209
210 210 ERROR: test-failure.t output changed
211 211 !
212 212 Failed test-failure.t: output changed
213 213 # Ran 1 tests, 0 skipped, 1 failed.
214 214 python hash seed: * (glob)
215 215 #endif
216 216
217 217 $ cat > test-failure.t << EOF
218 218 > $ true
219 219 > should go away (true !)
220 220 > $ true
221 221 > should stay (false !)
222 222 >
223 223 > Should remove first line, not second or third
224 224 > $ echo 'testing'
225 225 > baz*foo (glob) (true !)
226 226 > foobar*foo (glob) (false !)
227 227 > te*ting (glob) (true !)
228 228 >
229 229 > Should keep first two lines, remove third and last
230 230 > $ echo 'testing'
231 231 > test.ng (re) (true !)
232 232 > foo.ar (re) (false !)
233 233 > b.r (re) (true !)
234 234 > missing (?)
235 235 > awol (true !)
236 236 >
237 237 > The "missing" line should stay, even though awol is dropped
238 238 > $ echo 'testing'
239 239 > test.ng (re) (true !)
240 240 > foo.ar (?)
241 241 > awol
242 242 > missing (?)
243 243 > EOF
244 244 $ rt test-failure.t
245 245 running 1 tests using 1 parallel processes
246 246
247 247 --- $TESTTMP/test-failure.t
248 248 +++ $TESTTMP/test-failure.t.err
249 249 @@ -1,11 +1,9 @@
250 250 $ true
251 251 - should go away (true !)
252 252 $ true
253 253 should stay (false !)
254 254
255 255 Should remove first line, not second or third
256 256 $ echo 'testing'
257 257 - baz*foo (glob) (true !)
258 258 foobar*foo (glob) (false !)
259 259 te*ting (glob) (true !)
260 260
261 261 foo.ar (re) (false !)
262 262 missing (?)
263 263 @@ -13,13 +11,10 @@
264 264 $ echo 'testing'
265 265 test.ng (re) (true !)
266 266 foo.ar (re) (false !)
267 267 - b.r (re) (true !)
268 268 missing (?)
269 269 - awol (true !)
270 270
271 271 The "missing" line should stay, even though awol is dropped
272 272 $ echo 'testing'
273 273 test.ng (re) (true !)
274 274 foo.ar (?)
275 275 - awol
276 276 missing (?)
277 277
278 278 ERROR: test-failure.t output changed
279 279 !
280 280 Failed test-failure.t: output changed
281 281 # Ran 1 tests, 0 skipped, 1 failed.
282 282 python hash seed: * (glob)
283 283 [1]
284 284
285 285 basic failing test
286 286 $ cat > test-failure.t << EOF
287 287 > $ echo babar
288 288 > rataxes
289 289 > This is a noop statement so that
290 290 > this test is still more bytes than success.
291 291 > pad pad pad pad............................................................
292 292 > pad pad pad pad............................................................
293 293 > pad pad pad pad............................................................
294 294 > pad pad pad pad............................................................
295 295 > pad pad pad pad............................................................
296 296 > pad pad pad pad............................................................
297 297 > EOF
298 298
299 299 >>> fh = open('test-failure-unicode.t', 'wb')
300 300 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
301 301 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
302 302
303 303 $ rt
304 304 running 3 tests using 1 parallel processes
305 305
306 306 --- $TESTTMP/test-failure.t
307 307 +++ $TESTTMP/test-failure.t.err
308 308 @@ -1,5 +1,5 @@
309 309 $ echo babar
310 310 - rataxes
311 311 + babar
312 312 This is a noop statement so that
313 313 this test is still more bytes than success.
314 314 pad pad pad pad............................................................
315 315
316 316 ERROR: test-failure.t output changed
317 317 !.
318 318 --- $TESTTMP/test-failure-unicode.t
319 319 +++ $TESTTMP/test-failure-unicode.t.err
320 320 @@ -1,2 +1,2 @@
321 321 $ echo babar\xce\xb1 (esc)
322 322 - l\xce\xb5\xce\xb5t (esc)
323 323 + babar\xce\xb1 (esc)
324 324
325 325 ERROR: test-failure-unicode.t output changed
326 326 !
327 327 Failed test-failure.t: output changed
328 328 Failed test-failure-unicode.t: output changed
329 329 # Ran 3 tests, 0 skipped, 2 failed.
330 330 python hash seed: * (glob)
331 331 [1]
332 332
333 333 test --outputdir
334 334 $ mkdir output
335 335 $ rt --outputdir output
336 336 running 3 tests using 1 parallel processes
337 337
338 338 --- $TESTTMP/test-failure.t
339 339 +++ $TESTTMP/output/test-failure.t.err
340 340 @@ -1,5 +1,5 @@
341 341 $ echo babar
342 342 - rataxes
343 343 + babar
344 344 This is a noop statement so that
345 345 this test is still more bytes than success.
346 346 pad pad pad pad............................................................
347 347
348 348 ERROR: test-failure.t output changed
349 349 !.
350 350 --- $TESTTMP/test-failure-unicode.t
351 351 +++ $TESTTMP/output/test-failure-unicode.t.err
352 352 @@ -1,2 +1,2 @@
353 353 $ echo babar\xce\xb1 (esc)
354 354 - l\xce\xb5\xce\xb5t (esc)
355 355 + babar\xce\xb1 (esc)
356 356
357 357 ERROR: test-failure-unicode.t output changed
358 358 !
359 359 Failed test-failure.t: output changed
360 360 Failed test-failure-unicode.t: output changed
361 361 # Ran 3 tests, 0 skipped, 2 failed.
362 362 python hash seed: * (glob)
363 363 [1]
364 364 $ ls -a output
365 365 .
366 366 ..
367 367 .testtimes
368 368 test-failure-unicode.t.err
369 369 test-failure.t.err
370 370
371 371 test --xunit support
372 372 $ rt --xunit=xunit.xml
373 373 running 3 tests using 1 parallel processes
374 374
375 375 --- $TESTTMP/test-failure.t
376 376 +++ $TESTTMP/test-failure.t.err
377 377 @@ -1,5 +1,5 @@
378 378 $ echo babar
379 379 - rataxes
380 380 + babar
381 381 This is a noop statement so that
382 382 this test is still more bytes than success.
383 383 pad pad pad pad............................................................
384 384
385 385 ERROR: test-failure.t output changed
386 386 !.
387 387 --- $TESTTMP/test-failure-unicode.t
388 388 +++ $TESTTMP/test-failure-unicode.t.err
389 389 @@ -1,2 +1,2 @@
390 390 $ echo babar\xce\xb1 (esc)
391 391 - l\xce\xb5\xce\xb5t (esc)
392 392 + babar\xce\xb1 (esc)
393 393
394 394 ERROR: test-failure-unicode.t output changed
395 395 !
396 396 Failed test-failure.t: output changed
397 397 Failed test-failure-unicode.t: output changed
398 398 # Ran 3 tests, 0 skipped, 2 failed.
399 399 python hash seed: * (glob)
400 400 [1]
401 401 $ cat xunit.xml
402 402 <?xml version="1.0" encoding="utf-8"?>
403 403 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
404 404 <testcase name="test-success.t" time="*"/> (glob)
405 405 <testcase name="test-failure-unicode.t" time="*"> (glob)
406 406 <failure message="output changed" type="output-mismatch">
407 407 <![CDATA[--- $TESTTMP/test-failure-unicode.t
408 408 +++ $TESTTMP/test-failure-unicode.t.err
409 409 @@ -1,2 +1,2 @@
410 410 $ echo babar\xce\xb1 (esc)
411 411 - l\xce\xb5\xce\xb5t (esc)
412 412 + babar\xce\xb1 (esc)
413 413 ]]> </failure>
414 414 </testcase>
415 415 <testcase name="test-failure.t" time="*"> (glob)
416 416 <failure message="output changed" type="output-mismatch">
417 417 <![CDATA[--- $TESTTMP/test-failure.t
418 418 +++ $TESTTMP/test-failure.t.err
419 419 @@ -1,5 +1,5 @@
420 420 $ echo babar
421 421 - rataxes
422 422 + babar
423 423 This is a noop statement so that
424 424 this test is still more bytes than success.
425 425 pad pad pad pad............................................................
426 426 ]]> </failure>
427 427 </testcase>
428 428 </testsuite>
429 429
430 430 $ cat .testtimes
431 431 test-empty.t * (glob)
432 432 test-failure-globs.t * (glob)
433 433 test-failure-unicode.t * (glob)
434 434 test-failure.t * (glob)
435 435 test-success.t * (glob)
436 436
437 437 $ rt --list-tests
438 438 test-failure-unicode.t
439 439 test-failure.t
440 440 test-success.t
441 441
442 442 $ rt --list-tests --json
443 443 test-failure-unicode.t
444 444 test-failure.t
445 445 test-success.t
446 446 $ cat report.json
447 447 testreport ={
448 448 "test-failure-unicode.t": {
449 449 "result": "success"
450 450 },
451 451 "test-failure.t": {
452 452 "result": "success"
453 453 },
454 454 "test-success.t": {
455 455 "result": "success"
456 456 }
457 457 } (no-eol)
458 458
459 459 $ rt --list-tests --xunit=xunit.xml
460 460 test-failure-unicode.t
461 461 test-failure.t
462 462 test-success.t
463 463 $ cat xunit.xml
464 464 <?xml version="1.0" encoding="utf-8"?>
465 465 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
466 466 <testcase name="test-failure-unicode.t"/>
467 467 <testcase name="test-failure.t"/>
468 468 <testcase name="test-success.t"/>
469 469 </testsuite>
470 470
471 471 $ rt --list-tests test-failure* --json --xunit=xunit.xml --outputdir output
472 472 test-failure-unicode.t
473 473 test-failure.t
474 474 $ cat output/report.json
475 475 testreport ={
476 476 "test-failure-unicode.t": {
477 477 "result": "success"
478 478 },
479 479 "test-failure.t": {
480 480 "result": "success"
481 481 }
482 482 } (no-eol)
483 483 $ cat xunit.xml
484 484 <?xml version="1.0" encoding="utf-8"?>
485 485 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
486 486 <testcase name="test-failure-unicode.t"/>
487 487 <testcase name="test-failure.t"/>
488 488 </testsuite>
489 489
490 490 $ rm test-failure-unicode.t
491 491
492 492 test for --retest
493 493 ====================
494 494
495 495 $ rt --retest
496 496 running 2 tests using 1 parallel processes
497 497
498 498 --- $TESTTMP/test-failure.t
499 499 +++ $TESTTMP/test-failure.t.err
500 500 @@ -1,5 +1,5 @@
501 501 $ echo babar
502 502 - rataxes
503 503 + babar
504 504 This is a noop statement so that
505 505 this test is still more bytes than success.
506 506 pad pad pad pad............................................................
507 507
508 508 ERROR: test-failure.t output changed
509 509 !
510 510 Failed test-failure.t: output changed
511 511 # Ran 2 tests, 1 skipped, 1 failed.
512 512 python hash seed: * (glob)
513 513 [1]
514 514
515 515 --retest works with --outputdir
516 516 $ rm -r output
517 517 $ mkdir output
518 518 $ mv test-failure.t.err output
519 519 $ rt --retest --outputdir output
520 520 running 2 tests using 1 parallel processes
521 521
522 522 --- $TESTTMP/test-failure.t
523 523 +++ $TESTTMP/output/test-failure.t.err
524 524 @@ -1,5 +1,5 @@
525 525 $ echo babar
526 526 - rataxes
527 527 + babar
528 528 This is a noop statement so that
529 529 this test is still more bytes than success.
530 530 pad pad pad pad............................................................
531 531
532 532 ERROR: test-failure.t output changed
533 533 !
534 534 Failed test-failure.t: output changed
535 535 # Ran 2 tests, 1 skipped, 1 failed.
536 536 python hash seed: * (glob)
537 537 [1]
538 538
539 539 Selecting Tests To Run
540 540 ======================
541 541
542 542 successful
543 543
544 544 $ rt test-success.t
545 545 running 1 tests using 1 parallel processes
546 546 .
547 547 # Ran 1 tests, 0 skipped, 0 failed.
548 548
549 549 success w/ keyword
550 550 $ rt -k xyzzy
551 551 running 2 tests using 1 parallel processes
552 552 .
553 553 # Ran 2 tests, 1 skipped, 0 failed.
554 554
555 555 failed
556 556
557 557 $ rt test-failure.t
558 558 running 1 tests using 1 parallel processes
559 559
560 560 --- $TESTTMP/test-failure.t
561 561 +++ $TESTTMP/test-failure.t.err
562 562 @@ -1,5 +1,5 @@
563 563 $ echo babar
564 564 - rataxes
565 565 + babar
566 566 This is a noop statement so that
567 567 this test is still more bytes than success.
568 568 pad pad pad pad............................................................
569 569
570 570 ERROR: test-failure.t output changed
571 571 !
572 572 Failed test-failure.t: output changed
573 573 # Ran 1 tests, 0 skipped, 1 failed.
574 574 python hash seed: * (glob)
575 575 [1]
576 576
577 577 failure w/ keyword
578 578 $ rt -k rataxes
579 579 running 2 tests using 1 parallel processes
580 580
581 581 --- $TESTTMP/test-failure.t
582 582 +++ $TESTTMP/test-failure.t.err
583 583 @@ -1,5 +1,5 @@
584 584 $ echo babar
585 585 - rataxes
586 586 + babar
587 587 This is a noop statement so that
588 588 this test is still more bytes than success.
589 589 pad pad pad pad............................................................
590 590
591 591 ERROR: test-failure.t output changed
592 592 !
593 593 Failed test-failure.t: output changed
594 594 # Ran 2 tests, 1 skipped, 1 failed.
595 595 python hash seed: * (glob)
596 596 [1]
597 597
598 598 Verify that when a process fails to start we show a useful message
599 599 ==================================================================
600 600
601 601 $ cat > test-serve-fail.t <<EOF
602 602 > $ echo 'abort: child process failed to start blah'
603 603 > EOF
604 604 $ rt test-serve-fail.t
605 605 running 1 tests using 1 parallel processes
606 606
607 607 --- $TESTTMP/test-serve-fail.t
608 608 +++ $TESTTMP/test-serve-fail.t.err
609 609 @@ -1* +1,2 @@ (glob)
610 610 $ echo 'abort: child process failed to start blah'
611 611 + abort: child process failed to start blah
612 612
613 613 ERROR: test-serve-fail.t output changed
614 614 !
615 615 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
616 616 # Ran 1 tests, 0 skipped, 1 failed.
617 617 python hash seed: * (glob)
618 618 [1]
619 619 $ rm test-serve-fail.t
620 620
621 621 Verify that we can try other ports
622 622 ===================================
623 623
624 624 Extensions aren't inherited by the invoked run-tests.py. An extension
625 625 introducing a repository requirement could cause this to fail. So we force
626 626 HGRCPATH to get a clean environment.
627 627
628 628 $ HGRCPATH= hg init inuse
629 629 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
630 630 $ cat blocks.pid >> $DAEMON_PIDS
631 631 $ cat > test-serve-inuse.t <<EOF
632 632 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
633 633 > $ cat hg.pid >> \$DAEMON_PIDS
634 634 > EOF
635 635 $ rt test-serve-inuse.t
636 636 running 1 tests using 1 parallel processes
637 637 .
638 638 # Ran 1 tests, 0 skipped, 0 failed.
639 639 $ rm test-serve-inuse.t
640 640 $ killdaemons.py $DAEMON_PIDS
641 641
642 642 Running In Debug Mode
643 643 ======================
644 644
645 645 $ rt --debug 2>&1 | grep -v pwd
646 646 running 2 tests using 1 parallel processes
647 647 + alias hg=hg.exe (windows !)
648 648 + echo *SALT* 0 0 (glob)
649 649 *SALT* 0 0 (glob)
650 650 + echo babar
651 651 babar
652 652 + echo *SALT* 10 0 (glob)
653 653 *SALT* 10 0 (glob)
654 654 .+ alias hg=hg.exe (windows !)
655 655 *+ echo *SALT* 0 0 (glob)
656 656 *SALT* 0 0 (glob)
657 657 + echo babar
658 658 babar
659 659 + echo *SALT* 2 0 (glob)
660 660 *SALT* 2 0 (glob)
661 661 + echo xyzzy
662 662 xyzzy
663 663 + echo *SALT* 9 0 (glob)
664 664 *SALT* 9 0 (glob)
665 665 + printf *abc\ndef\nxyz\n* (glob)
666 666 abc
667 667 def
668 668 xyz
669 669 + echo *SALT* 15 0 (glob)
670 670 *SALT* 15 0 (glob)
671 671 + printf *zyx\nwvu\ntsr\n* (glob)
672 672 zyx
673 673 wvu
674 674 tsr
675 675 + echo *SALT* 22 0 (glob)
676 676 *SALT* 22 0 (glob)
677 677 .
678 678 # Ran 2 tests, 0 skipped, 0 failed.
679 679
680 680 Parallel runs
681 681 ==============
682 682
683 683 (duplicate the failing test to get predictable output)
684 684 $ cp test-failure.t test-failure-copy.t
685 685
686 686 $ rt --jobs 2 test-failure*.t -n
687 687 running 2 tests using 2 parallel processes
688 688 !!
689 689 Failed test-failure*.t: output changed (glob)
690 690 Failed test-failure*.t: output changed (glob)
691 691 # Ran 2 tests, 0 skipped, 2 failed.
692 692 python hash seed: * (glob)
693 693 [1]
694 694
695 695 failures in parallel with --first should only print one failure
696 696 $ rt --jobs 2 --first test-failure*.t
697 697 running 2 tests using 2 parallel processes
698 698
699 699 --- $TESTTMP/test-failure*.t (glob)
700 700 +++ $TESTTMP/test-failure*.t.err (glob)
701 701 @@ -1,5 +1,5 @@
702 702 $ echo babar
703 703 - rataxes
704 704 + babar
705 705 This is a noop statement so that
706 706 this test is still more bytes than success.
707 707 pad pad pad pad............................................................
708 708
709 709 Failed test-failure*.t: output changed (glob)
710 710 Failed test-failure*.t: output changed (glob)
711 711 # Ran 2 tests, 0 skipped, 2 failed.
712 712 python hash seed: * (glob)
713 713 [1]
714 714
715 715
716 716 (delete the duplicated test file)
717 717 $ rm test-failure-copy.t
718 718
719 multiple runs per test should be parallelized
720
721 $ rt --jobs 2 --runs-per-test 2 test-success.t
722 running 2 tests using 2 parallel processes
723 ..
724 # Ran 2 tests, 0 skipped, 0 failed.
719 725
720 726 Interactive run
721 727 ===============
722 728
723 729 (backup the failing test)
724 730 $ cp test-failure.t backup
725 731
726 732 Refuse the fix
727 733
728 734 $ echo 'n' | rt -i
729 735 running 2 tests using 1 parallel processes
730 736
731 737 --- $TESTTMP/test-failure.t
732 738 +++ $TESTTMP/test-failure.t.err
733 739 @@ -1,5 +1,5 @@
734 740 $ echo babar
735 741 - rataxes
736 742 + babar
737 743 This is a noop statement so that
738 744 this test is still more bytes than success.
739 745 pad pad pad pad............................................................
740 746 Accept this change? [n]
741 747 ERROR: test-failure.t output changed
742 748 !.
743 749 Failed test-failure.t: output changed
744 750 # Ran 2 tests, 0 skipped, 1 failed.
745 751 python hash seed: * (glob)
746 752 [1]
747 753
748 754 $ cat test-failure.t
749 755 $ echo babar
750 756 rataxes
751 757 This is a noop statement so that
752 758 this test is still more bytes than success.
753 759 pad pad pad pad............................................................
754 760 pad pad pad pad............................................................
755 761 pad pad pad pad............................................................
756 762 pad pad pad pad............................................................
757 763 pad pad pad pad............................................................
758 764 pad pad pad pad............................................................
759 765
760 766 Interactive with custom view
761 767
762 768 $ echo 'n' | rt -i --view echo
763 769 running 2 tests using 1 parallel processes
764 770 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
765 771 Accept this change? [n]* (glob)
766 772 ERROR: test-failure.t output changed
767 773 !.
768 774 Failed test-failure.t: output changed
769 775 # Ran 2 tests, 0 skipped, 1 failed.
770 776 python hash seed: * (glob)
771 777 [1]
772 778
773 779 View the fix
774 780
775 781 $ echo 'y' | rt --view echo
776 782 running 2 tests using 1 parallel processes
777 783 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
778 784
779 785 ERROR: test-failure.t output changed
780 786 !.
781 787 Failed test-failure.t: output changed
782 788 # Ran 2 tests, 0 skipped, 1 failed.
783 789 python hash seed: * (glob)
784 790 [1]
785 791
786 792 Accept the fix
787 793
788 794 $ cat >> test-failure.t <<EOF
789 795 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
790 796 > saved backup bundle to \$TESTTMP/foo.hg
791 797 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
792 798 > saved backup bundle to $TESTTMP\\foo.hg
793 799 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
794 800 > saved backup bundle to \$TESTTMP/*.hg (glob)
795 801 > EOF
796 802 $ echo 'y' | rt -i 2>&1
797 803 running 2 tests using 1 parallel processes
798 804
799 805 --- $TESTTMP/test-failure.t
800 806 +++ $TESTTMP/test-failure.t.err
801 807 @@ -1,5 +1,5 @@
802 808 $ echo babar
803 809 - rataxes
804 810 + babar
805 811 This is a noop statement so that
806 812 this test is still more bytes than success.
807 813 pad pad pad pad............................................................
808 814 @@ -11,6 +11,6 @@
809 815 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
810 816 saved backup bundle to $TESTTMP/foo.hg
811 817 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
812 818 - saved backup bundle to $TESTTMP\foo.hg
813 819 + saved backup bundle to $TESTTMP/foo.hg
814 820 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
815 821 saved backup bundle to $TESTTMP/*.hg (glob)
816 822 Accept this change? [n] ..
817 823 # Ran 2 tests, 0 skipped, 0 failed.
818 824
819 825 $ sed -e 's,(glob)$,&<,g' test-failure.t
820 826 $ echo babar
821 827 babar
822 828 This is a noop statement so that
823 829 this test is still more bytes than success.
824 830 pad pad pad pad............................................................
825 831 pad pad pad pad............................................................
826 832 pad pad pad pad............................................................
827 833 pad pad pad pad............................................................
828 834 pad pad pad pad............................................................
829 835 pad pad pad pad............................................................
830 836 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
831 837 saved backup bundle to $TESTTMP/foo.hg
832 838 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
833 839 saved backup bundle to $TESTTMP/foo.hg
834 840 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
835 841 saved backup bundle to $TESTTMP/*.hg (glob)<
836 842
837 843 Race condition - test file was modified while the test is running
838 844
839 845 $ TESTRACEDIR=`pwd`
840 846 $ export TESTRACEDIR
841 847 $ cat > test-race.t <<EOF
842 848 > $ echo 1
843 849 > $ echo "# a new line" >> $TESTRACEDIR/test-race.t
844 850 > EOF
845 851
846 852 $ rt -i test-race.t
847 853 running 1 tests using 1 parallel processes
848 854
849 855 --- $TESTTMP/test-race.t
850 856 +++ $TESTTMP/test-race.t.err
851 857 @@ -1,2 +1,3 @@
852 858 $ echo 1
853 859 + 1
854 860 $ echo "# a new line" >> $TESTTMP/test-race.t
855 861 Reference output has changed (run again to prompt changes)
856 862 ERROR: test-race.t output changed
857 863 !
858 864 Failed test-race.t: output changed
859 865 # Ran 1 tests, 0 skipped, 1 failed.
860 866 python hash seed: * (glob)
861 867 [1]
862 868
863 869 $ rm test-race.t
864 870
865 871 When "#testcases" is used in .t files
866 872
867 873 $ cat >> test-cases.t <<EOF
868 874 > #testcases a b
869 875 > #if a
870 876 > $ echo 1
871 877 > #endif
872 878 > #if b
873 879 > $ echo 2
874 880 > #endif
875 881 > EOF
876 882
877 883 $ cat <<EOF | rt -i test-cases.t 2>&1
878 884 > y
879 885 > y
880 886 > EOF
881 887 running 2 tests using 1 parallel processes
882 888
883 889 --- $TESTTMP/test-cases.t
884 890 +++ $TESTTMP/test-cases.t#a.err
885 891 @@ -1,6 +1,7 @@
886 892 #testcases a b
887 893 #if a
888 894 $ echo 1
889 895 + 1
890 896 #endif
891 897 #if b
892 898 $ echo 2
893 899 Accept this change? [n] .
894 900 --- $TESTTMP/test-cases.t
895 901 +++ $TESTTMP/test-cases.t#b.err
896 902 @@ -5,4 +5,5 @@
897 903 #endif
898 904 #if b
899 905 $ echo 2
900 906 + 2
901 907 #endif
902 908 Accept this change? [n] .
903 909 # Ran 2 tests, 0 skipped, 0 failed.
904 910
905 911 $ cat test-cases.t
906 912 #testcases a b
907 913 #if a
908 914 $ echo 1
909 915 1
910 916 #endif
911 917 #if b
912 918 $ echo 2
913 919 2
914 920 #endif
915 921
916 922 $ cat >> test-cases.t <<'EOF'
917 923 > #if a
918 924 > $ NAME=A
919 925 > #else
920 926 > $ NAME=B
921 927 > #endif
922 928 > $ echo $NAME
923 929 > A (a !)
924 930 > B (b !)
925 931 > EOF
926 932 $ rt test-cases.t
927 933 running 2 tests using 1 parallel processes
928 934 ..
929 935 # Ran 2 tests, 0 skipped, 0 failed.
930 936
931 937 When using multiple dimensions of "#testcases" in .t files
932 938
933 939 $ cat > test-cases.t <<'EOF'
934 940 > #testcases a b
935 941 > #testcases c d
936 942 > #if a d
937 943 > $ echo $TESTCASE
938 944 > a#d
939 945 > #endif
940 946 > #if b c
941 947 > $ echo yes
942 948 > no
943 949 > #endif
944 950 > EOF
945 951 $ rt test-cases.t
946 952 running 4 tests using 1 parallel processes
947 953 ..
948 954 --- $TESTTMP/test-cases.t
949 955 +++ $TESTTMP/test-cases.t#b#c.err
950 956 @@ -6,5 +6,5 @@
951 957 #endif
952 958 #if b c
953 959 $ echo yes
954 960 - no
955 961 + yes
956 962 #endif
957 963
958 964 ERROR: test-cases.t#b#c output changed
959 965 !.
960 966 Failed test-cases.t#b#c: output changed
961 967 # Ran 4 tests, 0 skipped, 1 failed.
962 968 python hash seed: * (glob)
963 969 [1]
964 970
965 971 $ rm test-cases.t#b#c.err
966 972 $ rm test-cases.t
967 973
968 974 (reinstall)
969 975 $ mv backup test-failure.t
970 976
971 977 No Diff
972 978 ===============
973 979
974 980 $ rt --nodiff
975 981 running 2 tests using 1 parallel processes
976 982 !.
977 983 Failed test-failure.t: output changed
978 984 # Ran 2 tests, 0 skipped, 1 failed.
979 985 python hash seed: * (glob)
980 986 [1]
981 987
982 988 test --tmpdir support
983 989 $ rt --tmpdir=$TESTTMP/keep test-success.t
984 990 running 1 tests using 1 parallel processes
985 991
986 992 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t
987 993 Keeping threadtmp dir: $TESTTMP/keep/child1
988 994 .
989 995 # Ran 1 tests, 0 skipped, 0 failed.
990 996
991 997 timeouts
992 998 ========
993 999 $ cat > test-timeout.t <<EOF
994 1000 > $ sleep 2
995 1001 > $ echo pass
996 1002 > pass
997 1003 > EOF
998 1004 > echo '#require slow' > test-slow-timeout.t
999 1005 > cat test-timeout.t >> test-slow-timeout.t
1000 1006 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
1001 1007 running 2 tests using 1 parallel processes
1002 1008 st
1003 1009 Skipped test-slow-timeout.t: missing feature: allow slow tests (use --allow-slow-tests)
1004 1010 Failed test-timeout.t: timed out
1005 1011 # Ran 1 tests, 1 skipped, 1 failed.
1006 1012 python hash seed: * (glob)
1007 1013 [1]
1008 1014 $ rt --timeout=1 --slowtimeout=3 \
1009 1015 > test-timeout.t test-slow-timeout.t --allow-slow-tests
1010 1016 running 2 tests using 1 parallel processes
1011 1017 .t
1012 1018 Failed test-timeout.t: timed out
1013 1019 # Ran 2 tests, 0 skipped, 1 failed.
1014 1020 python hash seed: * (glob)
1015 1021 [1]
1016 1022 $ rm test-timeout.t test-slow-timeout.t
1017 1023
1018 1024 test for --time
1019 1025 ==================
1020 1026
1021 1027 $ rt test-success.t --time
1022 1028 running 1 tests using 1 parallel processes
1023 1029 .
1024 1030 # Ran 1 tests, 0 skipped, 0 failed.
1025 1031 # Producing time report
1026 1032 start end cuser csys real Test
1027 1033 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
1028 1034
1029 1035 test for --time with --job enabled
1030 1036 ====================================
1031 1037
1032 1038 $ rt test-success.t --time --jobs 2
1033 1039 running 1 tests using 1 parallel processes
1034 1040 .
1035 1041 # Ran 1 tests, 0 skipped, 0 failed.
1036 1042 # Producing time report
1037 1043 start end cuser csys real Test
1038 1044 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
1039 1045
1040 1046 Skips
1041 1047 ================
1042 1048 $ cat > test-skip.t <<EOF
1043 1049 > $ echo xyzzy
1044 1050 > #if true
1045 1051 > #require false
1046 1052 > #end
1047 1053 > EOF
1048 1054 $ cat > test-noskip.t <<EOF
1049 1055 > #if false
1050 1056 > #require false
1051 1057 > #endif
1052 1058 > EOF
1053 1059 $ rt --nodiff
1054 1060 running 4 tests using 1 parallel processes
1055 1061 !.s.
1056 1062 Skipped test-skip.t: missing feature: nail clipper
1057 1063 Failed test-failure.t: output changed
1058 1064 # Ran 3 tests, 1 skipped, 1 failed.
1059 1065 python hash seed: * (glob)
1060 1066 [1]
1061 1067
1062 1068 $ rm test-noskip.t
1063 1069 $ rt --keyword xyzzy
1064 1070 running 3 tests using 1 parallel processes
1065 1071 .s
1066 1072 Skipped test-skip.t: missing feature: nail clipper
1067 1073 # Ran 2 tests, 2 skipped, 0 failed.
1068 1074
1069 1075 Skips with xml
1070 1076 $ rt --keyword xyzzy \
1071 1077 > --xunit=xunit.xml
1072 1078 running 3 tests using 1 parallel processes
1073 1079 .s
1074 1080 Skipped test-skip.t: missing feature: nail clipper
1075 1081 # Ran 2 tests, 2 skipped, 0 failed.
1076 1082 $ cat xunit.xml
1077 1083 <?xml version="1.0" encoding="utf-8"?>
1078 1084 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
1079 1085 <testcase name="test-success.t" time="*"/> (glob)
1080 1086 <testcase name="test-skip.t">
1081 1087 <skipped>
1082 1088 <![CDATA[missing feature: nail clipper]]> </skipped>
1083 1089 </testcase>
1084 1090 </testsuite>
1085 1091
1086 1092 Missing skips or blacklisted skips don't count as executed:
1087 1093 $ echo test-failure.t > blacklist
1088 1094 $ rt --blacklist=blacklist --json\
1089 1095 > test-failure.t test-bogus.t
1090 1096 running 2 tests using 1 parallel processes
1091 1097 ss
1092 1098 Skipped test-bogus.t: Doesn't exist
1093 1099 Skipped test-failure.t: blacklisted
1094 1100 # Ran 0 tests, 2 skipped, 0 failed.
1095 1101 $ cat report.json
1096 1102 testreport ={
1097 1103 "test-bogus.t": {
1098 1104 "result": "skip"
1099 1105 },
1100 1106 "test-failure.t": {
1101 1107 "result": "skip"
1102 1108 }
1103 1109 } (no-eol)
1104 1110
1105 1111 Whitelist trumps blacklist
1106 1112 $ echo test-failure.t > whitelist
1107 1113 $ rt --blacklist=blacklist --whitelist=whitelist --json\
1108 1114 > test-failure.t test-bogus.t
1109 1115 running 2 tests using 1 parallel processes
1110 1116 s
1111 1117 --- $TESTTMP/test-failure.t
1112 1118 +++ $TESTTMP/test-failure.t.err
1113 1119 @@ -1,5 +1,5 @@
1114 1120 $ echo babar
1115 1121 - rataxes
1116 1122 + babar
1117 1123 This is a noop statement so that
1118 1124 this test is still more bytes than success.
1119 1125 pad pad pad pad............................................................
1120 1126
1121 1127 ERROR: test-failure.t output changed
1122 1128 !
1123 1129 Skipped test-bogus.t: Doesn't exist
1124 1130 Failed test-failure.t: output changed
1125 1131 # Ran 1 tests, 1 skipped, 1 failed.
1126 1132 python hash seed: * (glob)
1127 1133 [1]
1128 1134
1129 1135 Ensure that --test-list causes only the tests listed in that file to
1130 1136 be executed.
1131 1137 $ echo test-success.t >> onlytest
1132 1138 $ rt --test-list=onlytest
1133 1139 running 1 tests using 1 parallel processes
1134 1140 .
1135 1141 # Ran 1 tests, 0 skipped, 0 failed.
1136 1142 $ echo test-bogus.t >> anothertest
1137 1143 $ rt --test-list=onlytest --test-list=anothertest
1138 1144 running 2 tests using 1 parallel processes
1139 1145 s.
1140 1146 Skipped test-bogus.t: Doesn't exist
1141 1147 # Ran 1 tests, 1 skipped, 0 failed.
1142 1148 $ rm onlytest anothertest
1143 1149
1144 1150 test for --json
1145 1151 ==================
1146 1152
1147 1153 $ rt --json
1148 1154 running 3 tests using 1 parallel processes
1149 1155
1150 1156 --- $TESTTMP/test-failure.t
1151 1157 +++ $TESTTMP/test-failure.t.err
1152 1158 @@ -1,5 +1,5 @@
1153 1159 $ echo babar
1154 1160 - rataxes
1155 1161 + babar
1156 1162 This is a noop statement so that
1157 1163 this test is still more bytes than success.
1158 1164 pad pad pad pad............................................................
1159 1165
1160 1166 ERROR: test-failure.t output changed
1161 1167 !.s
1162 1168 Skipped test-skip.t: missing feature: nail clipper
1163 1169 Failed test-failure.t: output changed
1164 1170 # Ran 2 tests, 1 skipped, 1 failed.
1165 1171 python hash seed: * (glob)
1166 1172 [1]
1167 1173
1168 1174 $ cat report.json
1169 1175 testreport ={
1170 1176 "test-failure.t": [\{] (re)
1171 1177 "csys": "\s*[\d\.]{4,5}", ? (re)
1172 1178 "cuser": "\s*[\d\.]{4,5}", ? (re)
1173 1179 "diff": "---.+\+\+\+.+", ? (re)
1174 1180 "end": "\s*[\d\.]{4,5}", ? (re)
1175 1181 "result": "failure", ? (re)
1176 1182 "start": "\s*[\d\.]{4,5}", ? (re)
1177 1183 "time": "\s*[\d\.]{4,5}" (re)
1178 1184 }, ? (re)
1179 1185 "test-skip.t": {
1180 1186 "csys": "\s*[\d\.]{4,5}", ? (re)
1181 1187 "cuser": "\s*[\d\.]{4,5}", ? (re)
1182 1188 "diff": "", ? (re)
1183 1189 "end": "\s*[\d\.]{4,5}", ? (re)
1184 1190 "result": "skip", ? (re)
1185 1191 "start": "\s*[\d\.]{4,5}", ? (re)
1186 1192 "time": "\s*[\d\.]{4,5}" (re)
1187 1193 }, ? (re)
1188 1194 "test-success.t": [\{] (re)
1189 1195 "csys": "\s*[\d\.]{4,5}", ? (re)
1190 1196 "cuser": "\s*[\d\.]{4,5}", ? (re)
1191 1197 "diff": "", ? (re)
1192 1198 "end": "\s*[\d\.]{4,5}", ? (re)
1193 1199 "result": "success", ? (re)
1194 1200 "start": "\s*[\d\.]{4,5}", ? (re)
1195 1201 "time": "\s*[\d\.]{4,5}" (re)
1196 1202 }
1197 1203 } (no-eol)
1198 1204 --json with --outputdir
1199 1205
1200 1206 $ rm report.json
1201 1207 $ rm -r output
1202 1208 $ mkdir output
1203 1209 $ rt --json --outputdir output
1204 1210 running 3 tests using 1 parallel processes
1205 1211
1206 1212 --- $TESTTMP/test-failure.t
1207 1213 +++ $TESTTMP/output/test-failure.t.err
1208 1214 @@ -1,5 +1,5 @@
1209 1215 $ echo babar
1210 1216 - rataxes
1211 1217 + babar
1212 1218 This is a noop statement so that
1213 1219 this test is still more bytes than success.
1214 1220 pad pad pad pad............................................................
1215 1221
1216 1222 ERROR: test-failure.t output changed
1217 1223 !.s
1218 1224 Skipped test-skip.t: missing feature: nail clipper
1219 1225 Failed test-failure.t: output changed
1220 1226 # Ran 2 tests, 1 skipped, 1 failed.
1221 1227 python hash seed: * (glob)
1222 1228 [1]
1223 1229 $ f report.json
1224 1230 report.json: file not found
1225 1231 $ cat output/report.json
1226 1232 testreport ={
1227 1233 "test-failure.t": [\{] (re)
1228 1234 "csys": "\s*[\d\.]{4,5}", ? (re)
1229 1235 "cuser": "\s*[\d\.]{4,5}", ? (re)
1230 1236 "diff": "---.+\+\+\+.+", ? (re)
1231 1237 "end": "\s*[\d\.]{4,5}", ? (re)
1232 1238 "result": "failure", ? (re)
1233 1239 "start": "\s*[\d\.]{4,5}", ? (re)
1234 1240 "time": "\s*[\d\.]{4,5}" (re)
1235 1241 }, ? (re)
1236 1242 "test-skip.t": {
1237 1243 "csys": "\s*[\d\.]{4,5}", ? (re)
1238 1244 "cuser": "\s*[\d\.]{4,5}", ? (re)
1239 1245 "diff": "", ? (re)
1240 1246 "end": "\s*[\d\.]{4,5}", ? (re)
1241 1247 "result": "skip", ? (re)
1242 1248 "start": "\s*[\d\.]{4,5}", ? (re)
1243 1249 "time": "\s*[\d\.]{4,5}" (re)
1244 1250 }, ? (re)
1245 1251 "test-success.t": [\{] (re)
1246 1252 "csys": "\s*[\d\.]{4,5}", ? (re)
1247 1253 "cuser": "\s*[\d\.]{4,5}", ? (re)
1248 1254 "diff": "", ? (re)
1249 1255 "end": "\s*[\d\.]{4,5}", ? (re)
1250 1256 "result": "success", ? (re)
1251 1257 "start": "\s*[\d\.]{4,5}", ? (re)
1252 1258 "time": "\s*[\d\.]{4,5}" (re)
1253 1259 }
1254 1260 } (no-eol)
1255 1261 $ ls -a output
1256 1262 .
1257 1263 ..
1258 1264 .testtimes
1259 1265 report.json
1260 1266 test-failure.t.err
1261 1267
1262 1268 Test that failed tests accepted in interactive mode are properly reported:
1263 1269
1264 1270 $ cp test-failure.t backup
1265 1271 $ echo y | rt --json -i
1266 1272 running 3 tests using 1 parallel processes
1267 1273
1268 1274 --- $TESTTMP/test-failure.t
1269 1275 +++ $TESTTMP/test-failure.t.err
1270 1276 @@ -1,5 +1,5 @@
1271 1277 $ echo babar
1272 1278 - rataxes
1273 1279 + babar
1274 1280 This is a noop statement so that
1275 1281 this test is still more bytes than success.
1276 1282 pad pad pad pad............................................................
1277 1283 Accept this change? [n] ..s
1278 1284 Skipped test-skip.t: missing feature: nail clipper
1279 1285 # Ran 2 tests, 1 skipped, 0 failed.
1280 1286
1281 1287 $ cat report.json
1282 1288 testreport ={
1283 1289 "test-failure.t": [\{] (re)
1284 1290 "csys": "\s*[\d\.]{4,5}", ? (re)
1285 1291 "cuser": "\s*[\d\.]{4,5}", ? (re)
1286 1292 "diff": "", ? (re)
1287 1293 "end": "\s*[\d\.]{4,5}", ? (re)
1288 1294 "result": "success", ? (re)
1289 1295 "start": "\s*[\d\.]{4,5}", ? (re)
1290 1296 "time": "\s*[\d\.]{4,5}" (re)
1291 1297 }, ? (re)
1292 1298 "test-skip.t": {
1293 1299 "csys": "\s*[\d\.]{4,5}", ? (re)
1294 1300 "cuser": "\s*[\d\.]{4,5}", ? (re)
1295 1301 "diff": "", ? (re)
1296 1302 "end": "\s*[\d\.]{4,5}", ? (re)
1297 1303 "result": "skip", ? (re)
1298 1304 "start": "\s*[\d\.]{4,5}", ? (re)
1299 1305 "time": "\s*[\d\.]{4,5}" (re)
1300 1306 }, ? (re)
1301 1307 "test-success.t": [\{] (re)
1302 1308 "csys": "\s*[\d\.]{4,5}", ? (re)
1303 1309 "cuser": "\s*[\d\.]{4,5}", ? (re)
1304 1310 "diff": "", ? (re)
1305 1311 "end": "\s*[\d\.]{4,5}", ? (re)
1306 1312 "result": "success", ? (re)
1307 1313 "start": "\s*[\d\.]{4,5}", ? (re)
1308 1314 "time": "\s*[\d\.]{4,5}" (re)
1309 1315 }
1310 1316 } (no-eol)
1311 1317 $ mv backup test-failure.t
1312 1318
1313 1319 backslash on end of line with glob matching is handled properly
1314 1320
1315 1321 $ cat > test-glob-backslash.t << EOF
1316 1322 > $ echo 'foo bar \\'
1317 1323 > foo * \ (glob)
1318 1324 > EOF
1319 1325
1320 1326 $ rt test-glob-backslash.t
1321 1327 running 1 tests using 1 parallel processes
1322 1328 .
1323 1329 # Ran 1 tests, 0 skipped, 0 failed.
1324 1330
1325 1331 $ rm -f test-glob-backslash.t
1326 1332
1327 1333 Test globbing of local IP addresses
1328 1334 $ echo 172.16.18.1
1329 1335 $LOCALIP (glob)
1330 1336 $ echo dead:beef::1
1331 1337 $LOCALIP (glob)
1332 1338
1333 1339 Add support for external test formatter
1334 1340 =======================================
1335 1341
1336 1342 $ CUSTOM_TEST_RESULT=basic_test_result "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` -j1 "$@" test-success.t test-failure.t
1337 1343 running 2 tests using 1 parallel processes
1338 1344
1339 1345 # Ran 2 tests, 0 skipped, 0 failed.
1340 1346 ON_START! <__main__.TestSuite tests=[<__main__.TTest testMethod=test-failure.t>, <__main__.TTest testMethod=test-success.t>]>
1341 1347 FAILURE! test-failure.t output changed
1342 1348 SUCCESS! test-success.t
1343 1349 ON_END!
1344 1350
1345 1351 Test reusability for third party tools
1346 1352 ======================================
1347 1353
1348 1354 $ mkdir "$TESTTMP"/anothertests
1349 1355 $ cd "$TESTTMP"/anothertests
1350 1356
1351 1357 test that `run-tests.py` can execute hghave, even if it runs not in
1352 1358 Mercurial source tree.
1353 1359
1354 1360 $ cat > test-hghave.t <<EOF
1355 1361 > #require true
1356 1362 > $ echo foo
1357 1363 > foo
1358 1364 > EOF
1359 1365 $ rt test-hghave.t
1360 1366 running 1 tests using 1 parallel processes
1361 1367 .
1362 1368 # Ran 1 tests, 0 skipped, 0 failed.
1363 1369
1364 1370 test that RUNTESTDIR refers to the directory in which the currently
1365 1371 running `run-tests.py` is placed.
1366 1372
1367 1373 $ cat > test-runtestdir.t <<EOF
1368 1374 > - $TESTDIR, in which test-run-tests.t is placed
1369 1375 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
1370 1376 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
1371 1377 >
1372 1378 > #if windows
1373 1379 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
1374 1380 > #else
1375 1381 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
1376 1382 > #endif
1377 1383 > If this prints a path, that means RUNTESTDIR didn't equal
1378 1384 > TESTDIR as it should have.
1379 1385 > $ test "\$RUNTESTDIR" = "$TESTDIR" || echo "\$RUNTESTDIR"
1380 1386 > This should print the start of check-code. If this passes but the
1381 1387 > previous check failed, that means we found a copy of check-code at whatever
1382 1388 > RUNTESTDIR ended up containing, even though it doesn't match TESTDIR.
1383 1389 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py | sed 's@.!.*python@#!USRBINENVPY@'
1384 1390 > #!USRBINENVPY
1385 1391 > #
1386 1392 > # check-code - a style and portability checker for Mercurial
1387 1393 > EOF
1388 1394 $ rt test-runtestdir.t
1389 1395 running 1 tests using 1 parallel processes
1390 1396 .
1391 1397 # Ran 1 tests, 0 skipped, 0 failed.
1392 1398
1393 1399 #if execbit
1394 1400
1395 1401 test that TESTDIR is included in PATH
1396 1402
1397 1403 $ cat > custom-command.sh <<EOF
1398 1404 > #!/bin/sh
1399 1405 > echo "hello world"
1400 1406 > EOF
1401 1407 $ chmod +x custom-command.sh
1402 1408 $ cat > test-testdir-path.t <<EOF
1403 1409 > $ custom-command.sh
1404 1410 > hello world
1405 1411 > EOF
1406 1412 $ rt test-testdir-path.t
1407 1413 running 1 tests using 1 parallel processes
1408 1414 .
1409 1415 # Ran 1 tests, 0 skipped, 0 failed.
1410 1416
1411 1417 #endif
1412 1418
1413 1419 test support for --allow-slow-tests
1414 1420 $ cat > test-very-slow-test.t <<EOF
1415 1421 > #require slow
1416 1422 > $ echo pass
1417 1423 > pass
1418 1424 > EOF
1419 1425 $ rt test-very-slow-test.t
1420 1426 running 1 tests using 1 parallel processes
1421 1427 s
1422 1428 Skipped test-very-slow-test.t: missing feature: allow slow tests (use --allow-slow-tests)
1423 1429 # Ran 0 tests, 1 skipped, 0 failed.
1424 1430 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
1425 1431 running 1 tests using 1 parallel processes
1426 1432 .
1427 1433 # Ran 1 tests, 0 skipped, 0 failed.
1428 1434
1429 1435 support for running a test outside the current directory
1430 1436 $ mkdir nonlocal
1431 1437 $ cat > nonlocal/test-is-not-here.t << EOF
1432 1438 > $ echo pass
1433 1439 > pass
1434 1440 > EOF
1435 1441 $ rt nonlocal/test-is-not-here.t
1436 1442 running 1 tests using 1 parallel processes
1437 1443 .
1438 1444 # Ran 1 tests, 0 skipped, 0 failed.
1439 1445
1440 1446 support for automatically discovering tests if the arg is a folder
1441 1447 $ mkdir tmp && cd tmp
1442 1448
1443 1449 $ cat > test-uno.t << EOF
1444 1450 > $ echo line
1445 1451 > line
1446 1452 > EOF
1447 1453
1448 1454 $ cp test-uno.t test-dos.t
1449 1455 $ cd ..
1450 1456 $ cp -R tmp tmpp
1451 1457 $ cp tmp/test-uno.t test-solo.t
1452 1458
1453 1459 $ rt tmp/ test-solo.t tmpp
1454 1460 running 5 tests using 1 parallel processes
1455 1461 .....
1456 1462 # Ran 5 tests, 0 skipped, 0 failed.
1457 1463 $ rm -rf tmp tmpp
1458 1464
1459 1465 support for running run-tests.py from another directory
1460 1466 $ mkdir tmp && cd tmp
1461 1467
1462 1468 $ cat > useful-file.sh << EOF
1463 1469 > important command
1464 1470 > EOF
1465 1471
1466 1472 $ cat > test-folder.t << EOF
1467 1473 > $ cat \$TESTDIR/useful-file.sh
1468 1474 > important command
1469 1475 > EOF
1470 1476
1471 1477 $ cat > test-folder-fail.t << EOF
1472 1478 > $ cat \$TESTDIR/useful-file.sh
1473 1479 > important commando
1474 1480 > EOF
1475 1481
1476 1482 $ cd ..
1477 1483 $ rt tmp/test-*.t
1478 1484 running 2 tests using 1 parallel processes
1479 1485
1480 1486 --- $TESTTMP/anothertests/tmp/test-folder-fail.t
1481 1487 +++ $TESTTMP/anothertests/tmp/test-folder-fail.t.err
1482 1488 @@ -1,2 +1,2 @@
1483 1489 $ cat $TESTDIR/useful-file.sh
1484 1490 - important commando
1485 1491 + important command
1486 1492
1487 1493 ERROR: test-folder-fail.t output changed
1488 1494 !.
1489 1495 Failed test-folder-fail.t: output changed
1490 1496 # Ran 2 tests, 0 skipped, 1 failed.
1491 1497 python hash seed: * (glob)
1492 1498 [1]
1493 1499
1494 1500 support for bisecting failed tests automatically
1495 1501 $ hg init bisect
1496 1502 $ cd bisect
1497 1503 $ cat >> test-bisect.t <<EOF
1498 1504 > $ echo pass
1499 1505 > pass
1500 1506 > EOF
1501 1507 $ hg add test-bisect.t
1502 1508 $ hg ci -m 'good'
1503 1509 $ cat >> test-bisect.t <<EOF
1504 1510 > $ echo pass
1505 1511 > fail
1506 1512 > EOF
1507 1513 $ hg ci -m 'bad'
1508 1514 $ rt --known-good-rev=0 test-bisect.t
1509 1515 running 1 tests using 1 parallel processes
1510 1516
1511 1517 --- $TESTTMP/anothertests/bisect/test-bisect.t
1512 1518 +++ $TESTTMP/anothertests/bisect/test-bisect.t.err
1513 1519 @@ -1,4 +1,4 @@
1514 1520 $ echo pass
1515 1521 pass
1516 1522 $ echo pass
1517 1523 - fail
1518 1524 + pass
1519 1525
1520 1526 ERROR: test-bisect.t output changed
1521 1527 !
1522 1528 Failed test-bisect.t: output changed
1523 1529 test-bisect.t broken by 72cbf122d116 (bad)
1524 1530 # Ran 1 tests, 0 skipped, 1 failed.
1525 1531 python hash seed: * (glob)
1526 1532 [1]
1527 1533
1528 1534 $ cd ..
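
The `broken by 72cbf122d116 (bad)` line is produced by driving `hg bisect` over the repository that holds the failing test: the --known-good-rev is marked good, the current revision bad, and the test is re-run at each candidate revision hg proposes. A hedged sketch of that loop, where test_passes is a hypothetical callback that re-runs the failing test at the current checkout:

    import subprocess

    def bisect_failure(repo, good_rev, test_passes):
        # Mark the endpoints, then answer each candidate revision that
        # 'hg bisect' checks out with a good/bad verdict from the test.
        def hg(*args):
            return subprocess.check_output(('hg', '-R', repo) + args)

        hg('bisect', '--reset')
        hg('bisect', '--good', good_rev)
        output = hg('bisect', '--bad', '.')
        while b'Testing changeset' in output:
            verdict = '--good' if test_passes() else '--bad'
            output = hg('bisect', verdict)
        return output  # the final message names the first bad changeset

When no revision in the bisected history explains the failure (as with the dependent repo below before --bisect-repo points at the right one), the runner instead reports "Failed to identify failure point".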
1529 1535
1530 1536 support bisecting a separate repo
1531 1537
1532 1538 $ hg init bisect-dependent
1533 1539 $ cd bisect-dependent
1534 1540 $ cat > test-bisect-dependent.t <<EOF
1535 1541 > $ tail -1 \$TESTDIR/../bisect/test-bisect.t
1536 1542 > pass
1537 1543 > EOF
1538 1544 $ hg commit -Am dependent test-bisect-dependent.t
1539 1545
1540 1546 $ rt --known-good-rev=0 test-bisect-dependent.t
1541 1547 running 1 tests using 1 parallel processes
1542 1548
1543 1549 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1544 1550 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1545 1551 @@ -1,2 +1,2 @@
1546 1552 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1547 1553 - pass
1548 1554 + fail
1549 1555
1550 1556 ERROR: test-bisect-dependent.t output changed
1551 1557 !
1552 1558 Failed test-bisect-dependent.t: output changed
1553 1559 Failed to identify failure point for test-bisect-dependent.t
1554 1560 # Ran 1 tests, 0 skipped, 1 failed.
1555 1561 python hash seed: * (glob)
1556 1562 [1]
1557 1563
1558 1564 $ rt --bisect-repo=../test-bisect test-bisect-dependent.t
1559 1565 usage: run-tests.py [options] [tests]
1560 1566 run-tests.py: error: --bisect-repo cannot be used without --known-good-rev
1561 1567 [2]
1562 1568
1563 1569 $ rt --known-good-rev=0 --bisect-repo=../bisect test-bisect-dependent.t
1564 1570 running 1 tests using 1 parallel processes
1565 1571
1566 1572 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1567 1573 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1568 1574 @@ -1,2 +1,2 @@
1569 1575 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1570 1576 - pass
1571 1577 + fail
1572 1578
1573 1579 ERROR: test-bisect-dependent.t output changed
1574 1580 !
1575 1581 Failed test-bisect-dependent.t: output changed
1576 1582 test-bisect-dependent.t broken by 72cbf122d116 (bad)
1577 1583 # Ran 1 tests, 0 skipped, 1 failed.
1578 1584 python hash seed: * (glob)
1579 1585 [1]
1580 1586
1581 1587 $ cd ..
1582 1588
1583 1589 Test that a broken #if statement doesn't break run-tests threading.
1584 1590 ===================================================================
1585 1591 $ mkdir broken
1586 1592 $ cd broken
1587 1593 $ cat > test-broken.t <<EOF
1588 1594 > true
1589 1595 > #if notarealhghavefeature
1590 1596 > $ false
1591 1597 > #endif
1592 1598 > EOF
1593 1599 $ for f in 1 2 3 4 ; do
1594 1600 > cat > test-works-$f.t <<EOF
1595 1601 > This is test case $f
1596 1602 > $ sleep 1
1597 1603 > EOF
1598 1604 > done
1599 1605 $ rt -j 2
1600 1606 running 5 tests using 2 parallel processes
1601 1607 ....
1602 1608 # Ran 5 tests, 0 skipped, 0 failed.
1603 1609 skipped: unknown feature: notarealhghavefeature
1604 1610
1605 1611 $ cd ..
1606 1612 $ rm -rf broken
1607 1613
1608 1614 Test cases in .t files
1609 1615 ======================
1610 1616 $ mkdir cases
1611 1617 $ cd cases
1612 1618 $ cat > test-cases-abc.t <<'EOF'
1613 1619 > #testcases A B C
1614 1620 > $ V=B
1615 1621 > #if A
1616 1622 > $ V=A
1617 1623 > #endif
1618 1624 > #if C
1619 1625 > $ V=C
1620 1626 > #endif
1621 1627 > $ echo $V | sed 's/A/C/'
1622 1628 > C
1623 1629 > #if C
1624 1630 > $ [ $V = C ]
1625 1631 > #endif
1626 1632 > #if A
1627 1633 > $ [ $V = C ]
1628 1634 > [1]
1629 1635 > #endif
1630 1636 > #if no-C
1631 1637 > $ [ $V = C ]
1632 1638 > [1]
1633 1639 > #endif
1634 1640 > $ [ $V = D ]
1635 1641 > [1]
1636 1642 > EOF
1637 1643 $ rt
1638 1644 running 3 tests using 1 parallel processes
1639 1645 .
1640 1646 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1641 1647 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1642 1648 @@ -7,7 +7,7 @@
1643 1649 $ V=C
1644 1650 #endif
1645 1651 $ echo $V | sed 's/A/C/'
1646 1652 - C
1647 1653 + B
1648 1654 #if C
1649 1655 $ [ $V = C ]
1650 1656 #endif
1651 1657
1652 1658 ERROR: test-cases-abc.t#B output changed
1653 1659 !.
1654 1660 Failed test-cases-abc.t#B: output changed
1655 1661 # Ran 3 tests, 0 skipped, 1 failed.
1656 1662 python hash seed: * (glob)
1657 1663 [1]
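
A `#testcases` line multiplies one .t file into one logical test per named case, which is why `rt` reports three tests here and only the B variant trips over the sed-based check. A simplified sketch of the expansion (function name illustrative):

    def expand_testcases(path, lines):
        # Yield one (test id, case name) pair per declared case; files
        # without a '#testcases' line are a single unnamed test.
        for line in lines:
            if line.startswith('#testcases'):
                return [(path + '#' + case, case)
                        for case in line.split()[1:]]
        return [(path, None)]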
1658 1664
1659 1665 --restart works
1660 1666
1661 1667 $ rt --restart
1662 1668 running 2 tests using 1 parallel processes
1663 1669
1664 1670 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1665 1671 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1666 1672 @@ -7,7 +7,7 @@
1667 1673 $ V=C
1668 1674 #endif
1669 1675 $ echo $V | sed 's/A/C/'
1670 1676 - C
1671 1677 + B
1672 1678 #if C
1673 1679 $ [ $V = C ]
1674 1680 #endif
1675 1681
1676 1682 ERROR: test-cases-abc.t#B output changed
1677 1683 !.
1678 1684 Failed test-cases-abc.t#B: output changed
1679 1685 # Ran 2 tests, 0 skipped, 1 failed.
1680 1686 python hash seed: * (glob)
1681 1687 [1]
1682 1688
1683 1689 --restart works with outputdir
1684 1690
1685 1691 $ mkdir output
1686 1692 $ mv test-cases-abc.t#B.err output
1687 1693 $ rt --restart --outputdir output
1688 1694 running 2 tests using 1 parallel processes
1689 1695
1690 1696 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1691 1697 +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t#B.err
1692 1698 @@ -7,7 +7,7 @@
1693 1699 $ V=C
1694 1700 #endif
1695 1701 $ echo $V | sed 's/A/C/'
1696 1702 - C
1697 1703 + B
1698 1704 #if C
1699 1705 $ [ $V = C ]
1700 1706 #endif
1701 1707
1702 1708 ERROR: test-cases-abc.t#B output changed
1703 1709 !.
1704 1710 Failed test-cases-abc.t#B: output changed
1705 1711 # Ran 2 tests, 0 skipped, 1 failed.
1706 1712 python hash seed: * (glob)
1707 1713 [1]
1708 1714
1709 1715 Test TESTCASE variable
1710 1716
1711 1717 $ cat > test-cases-ab.t <<'EOF'
1712 1718 > $ dostuff() {
1713 1719 > > echo "In case $TESTCASE"
1714 1720 > > }
1715 1721 > #testcases A B
1716 1722 > #if A
1717 1723 > $ dostuff
1718 1724 > In case A
1719 1725 > #endif
1720 1726 > #if B
1721 1727 > $ dostuff
1722 1728 > In case B
1723 1729 > #endif
1724 1730 > EOF
1725 1731 $ rt test-cases-ab.t
1726 1732 running 2 tests using 1 parallel processes
1727 1733 ..
1728 1734 # Ran 2 tests, 0 skipped, 0 failed.
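
Each case variant runs with its case name exported as TESTCASE, which is what the dostuff helper prints; the same name also acts as a true `#if` feature while that variant runs. A tiny sketch of the environment half (illustrative):

    def case_environment(env, case):
        # Export the active case name so shell helpers inside the test
        # can branch on it, matching what the '#if' blocks select on.
        env = dict(env)
        env['TESTCASE'] = case
        return env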
1729 1735
1730 1736 Support running a specific test case
1731 1737
1732 1738 $ rt "test-cases-abc.t#B"
1733 1739 running 1 tests using 1 parallel processes
1734 1740
1735 1741 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1736 1742 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1737 1743 @@ -7,7 +7,7 @@
1738 1744 $ V=C
1739 1745 #endif
1740 1746 $ echo $V | sed 's/A/C/'
1741 1747 - C
1742 1748 + B
1743 1749 #if C
1744 1750 $ [ $V = C ]
1745 1751 #endif
1746 1752
1747 1753 ERROR: test-cases-abc.t#B output changed
1748 1754 !
1749 1755 Failed test-cases-abc.t#B: output changed
1750 1756 # Ran 1 tests, 0 skipped, 1 failed.
1751 1757 python hash seed: * (glob)
1752 1758 [1]
1753 1759
1754 1760 Support running multiple test cases in the same file
1755 1761
1756 1762 $ rt test-cases-abc.t#B test-cases-abc.t#C
1757 1763 running 2 tests using 1 parallel processes
1758 1764
1759 1765 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1760 1766 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1761 1767 @@ -7,7 +7,7 @@
1762 1768 $ V=C
1763 1769 #endif
1764 1770 $ echo $V | sed 's/A/C/'
1765 1771 - C
1766 1772 + B
1767 1773 #if C
1768 1774 $ [ $V = C ]
1769 1775 #endif
1770 1776
1771 1777 ERROR: test-cases-abc.t#B output changed
1772 1778 !.
1773 1779 Failed test-cases-abc.t#B: output changed
1774 1780 # Ran 2 tests, 0 skipped, 1 failed.
1775 1781 python hash seed: * (glob)
1776 1782 [1]
1777 1783
1778 1784 Support ignoring invalid test cases
1779 1785
1780 1786 $ rt test-cases-abc.t#B test-cases-abc.t#D
1781 1787 running 1 tests using 1 parallel processes
1782 1788
1783 1789 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1784 1790 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1785 1791 @@ -7,7 +7,7 @@
1786 1792 $ V=C
1787 1793 #endif
1788 1794 $ echo $V | sed 's/A/C/'
1789 1795 - C
1790 1796 + B
1791 1797 #if C
1792 1798 $ [ $V = C ]
1793 1799 #endif
1794 1800
1795 1801 ERROR: test-cases-abc.t#B output changed
1796 1802 !
1797 1803 Failed test-cases-abc.t#B: output changed
1798 1804 # Ran 1 tests, 0 skipped, 1 failed.
1799 1805 python hash seed: * (glob)
1800 1806 [1]
1801 1807
1802 1808 Support running complex test case names
1803 1809
1804 1810 $ cat > test-cases-advanced-cases.t <<'EOF'
1805 1811 > #testcases simple case-with-dashes casewith_-.chars
1806 1812 > $ echo $TESTCASE
1807 1813 > simple
1808 1814 > EOF
1809 1815
1810 1816 $ cat test-cases-advanced-cases.t
1811 1817 #testcases simple case-with-dashes casewith_-.chars
1812 1818 $ echo $TESTCASE
1813 1819 simple
1814 1820
1815 1821 $ rt test-cases-advanced-cases.t
1816 1822 running 3 tests using 1 parallel processes
1817 1823
1818 1824 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1819 1825 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
1820 1826 @@ -1,3 +1,3 @@
1821 1827 #testcases simple case-with-dashes casewith_-.chars
1822 1828 $ echo $TESTCASE
1823 1829 - simple
1824 1830 + case-with-dashes
1825 1831
1826 1832 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1827 1833 !
1828 1834 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1829 1835 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
1830 1836 @@ -1,3 +1,3 @@
1831 1837 #testcases simple case-with-dashes casewith_-.chars
1832 1838 $ echo $TESTCASE
1833 1839 - simple
1834 1840 + casewith_-.chars
1835 1841
1836 1842 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1837 1843 !.
1838 1844 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1839 1845 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1840 1846 # Ran 3 tests, 0 skipped, 2 failed.
1841 1847 python hash seed: * (glob)
1842 1848 [1]
1843 1849
1844 1850 $ rt "test-cases-advanced-cases.t#case-with-dashes"
1845 1851 running 1 tests using 1 parallel processes
1846 1852
1847 1853 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1848 1854 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
1849 1855 @@ -1,3 +1,3 @@
1850 1856 #testcases simple case-with-dashes casewith_-.chars
1851 1857 $ echo $TESTCASE
1852 1858 - simple
1853 1859 + case-with-dashes
1854 1860
1855 1861 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1856 1862 !
1857 1863 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1858 1864 # Ran 1 tests, 0 skipped, 1 failed.
1859 1865 python hash seed: * (glob)
1860 1866 [1]
1861 1867
1862 1868 $ rt "test-cases-advanced-cases.t#casewith_-.chars"
1863 1869 running 1 tests using 1 parallel processes
1864 1870
1865 1871 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1866 1872 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
1867 1873 @@ -1,3 +1,3 @@
1868 1874 #testcases simple case-with-dashes casewith_-.chars
1869 1875 $ echo $TESTCASE
1870 1876 - simple
1871 1877 + casewith_-.chars
1872 1878
1873 1879 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1874 1880 !
1875 1881 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1876 1882 # Ran 1 tests, 0 skipped, 1 failed.
1877 1883 python hash seed: * (glob)
1878 1884 [1]
1879 1885
1880 1886 Test automatic pattern replacement
1881 1887 ==================================
1882 1888
1883 1889 $ cat << EOF >> common-pattern.py
1884 1890 > substitutions = [
1885 1891 > (br'foo-(.*)\\b',
1886 1892 > br'\$XXX=\\1\$'),
1887 1893 > (br'bar\\n',
1888 1894 > br'\$YYY$\\n'),
1889 1895 > ]
1890 1896 > EOF
1891 1897
1892 1898 $ cat << EOF >> test-substitution.t
1893 1899 > $ echo foo-12
1894 1900 > \$XXX=12$
1895 1901 > $ echo foo-42
1896 1902 > \$XXX=42$
1897 1903 > $ echo bar prior
1898 1904 > bar prior
1899 1905 > $ echo lastbar
1900 1906 > last\$YYY$
1901 1907 > $ echo foo-bar foo-baz
1902 1908 > EOF
1903 1909
1904 1910 $ rt test-substitution.t
1905 1911 running 1 tests using 1 parallel processes
1906 1912
1907 1913 --- $TESTTMP/anothertests/cases/test-substitution.t
1908 1914 +++ $TESTTMP/anothertests/cases/test-substitution.t.err
1909 1915 @@ -7,3 +7,4 @@
1910 1916 $ echo lastbar
1911 1917 last$YYY$
1912 1918 $ echo foo-bar foo-baz
1913 1919 + $XXX=bar foo-baz$
1914 1920
1915 1921 ERROR: test-substitution.t output changed
1916 1922 !
1917 1923 Failed test-substitution.t: output changed
1918 1924 # Ran 1 tests, 0 skipped, 1 failed.
1919 1925 python hash seed: * (glob)
1920 1926 [1]
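
The unexpected `$XXX=bar foo-baz$` line shows what the feature does: every (pattern, replacement) pair loaded from common-pattern.py is applied to each line of actual output before it is compared with the expected output. In essence:

    import re

    def apply_substitutions(line, substitutions):
        # 'substitutions' is a list of (pattern, replacement) byte
        # pairs, as defined in common-pattern.py above.
        for pattern, replacement in substitutions:
            line = re.sub(pattern, replacement, line)
        return line

So `echo foo-12` really prints `foo-12`, which the substitution rewrites to `$XXX=12$` before matching, and the output line the test forgot to expect comes back already rewritten.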
1921 1927
1922 1928 --extra-config-opt works
1923 1929
1924 1930 $ cat << EOF >> test-config-opt.t
1925 1931 > $ hg init test-config-opt
1926 1932 > $ hg -R test-config-opt purge
1927 1933 > EOF
1928 1934
1929 1935 $ rt --extra-config-opt extensions.purge= test-config-opt.t
1930 1936 running 1 tests using 1 parallel processes
1931 1937 .
1932 1938 # Ran 1 tests, 0 skipped, 0 failed.
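
The purge extension is available inside the test because each extra option is written into the per-test hgrc that every hg process reads via HGRCPATH. A minimal sketch of that injection (file handling simplified, names illustrative):

    def write_extra_config(hgrcpath, options):
        # Append each 'section.key=value' option to the generated hgrc
        # so all hg commands in the test see the extra configuration.
        with open(hgrcpath, 'a') as fp:
            for opt in options:
                key, value = opt.split('=', 1)
                section, name = key.split('.', 1)
                fp.write('[%s]\n%s = %s\n' % (section, name, value))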