run-tests: make "| foo (re)" not match everything...
Martin von Zweigbergk
r35156:6d5718e3 default
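The change below wraps each "(re)" pattern in a non-capturing group before the trailing \n\Z anchor is appended (see rematch() around new line 1530). Without the group, a top-level "|" in the pattern turns the anchor into just another alternation branch, so an expression like "| foo (re)" matches any output line. A minimal sketch of the old and new behaviour; the sample line and pattern values are illustrative only, and the variable name el mirrors the one used in rematch():

    import re

    line = b'anything at all\n'
    el = b'| foo'  # pattern taken from a "| foo (re)" output line

    # Old behaviour: the empty branch to the left of '|' matches at the start
    # of any line, and the \n\Z anchor only applies to the right-hand branch.
    assert re.match(el + br'\n\Z', line) is not None

    # New behaviour: grouping forces the anchor to apply to the whole pattern,
    # so the arbitrary line no longer matches.
    assert re.match(b'(?:' + el + b')' + br'\n\Z', line) is None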
@@ -1,2960 +1,2961 b''
1 1 #!/usr/bin/env python
2 2 #
3 3 # run-tests.py - Run a set of tests on Mercurial
4 4 #
5 5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 # Modifying this script is tricky because it has many modes:
11 11 # - serial (default) vs parallel (-jN, N > 1)
12 12 # - no coverage (default) vs coverage (-c, -C, -s)
13 13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 14 # - tests are a mix of shell scripts and Python scripts
15 15 #
16 16 # If you change this script, it is recommended that you ensure you
17 17 # haven't broken it by running it in various modes with a representative
18 18 # sample of test scripts. For example:
19 19 #
20 20 # 1) serial, no coverage, temp install:
21 21 # ./run-tests.py test-s*
22 22 # 2) serial, no coverage, local hg:
23 23 # ./run-tests.py --local test-s*
24 24 # 3) serial, coverage, temp install:
25 25 # ./run-tests.py -c test-s*
26 26 # 4) serial, coverage, local hg:
27 27 # ./run-tests.py -c --local test-s* # unsupported
28 28 # 5) parallel, no coverage, temp install:
29 29 # ./run-tests.py -j2 test-s*
30 30 # 6) parallel, no coverage, local hg:
31 31 # ./run-tests.py -j2 --local test-s*
32 32 # 7) parallel, coverage, temp install:
33 33 # ./run-tests.py -j2 -c test-s* # currently broken
34 34 # 8) parallel, coverage, local install:
35 35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 36 # 9) parallel, custom tmp dir:
37 37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 38 # 10) parallel, pure, tests that call run-tests:
39 39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 40 #
41 41 # (You could use any subset of the tests: test-s* happens to match
42 42 # enough that it's worth doing parallel runs, few enough that it
43 43 # completes fairly quickly, includes both shell and Python scripts, and
44 44 # includes some scripts that run daemon processes.)
45 45
46 46 from __future__ import absolute_import, print_function
47 47
48 48 import difflib
49 49 import distutils.version as version
50 50 import errno
51 51 import json
52 52 import optparse
53 53 import os
54 54 import random
55 55 import re
56 56 import shutil
57 57 import signal
58 58 import socket
59 59 import subprocess
60 60 import sys
61 61 import sysconfig
62 62 import tempfile
63 63 import threading
64 64 import time
65 65 import unittest
66 66 import xml.dom.minidom as minidom
67 67
68 68 try:
69 69 import Queue as queue
70 70 except ImportError:
71 71 import queue
72 72
73 73 try:
74 74 import shlex
75 75 shellquote = shlex.quote
76 76 except (ImportError, AttributeError):
77 77 import pipes
78 78 shellquote = pipes.quote
79 79
80 80 if os.environ.get('RTUNICODEPEDANTRY', False):
81 81 try:
82 82 reload(sys)
83 83 sys.setdefaultencoding("undefined")
84 84 except NameError:
85 85 pass
86 86
87 87 origenviron = os.environ.copy()
88 88 osenvironb = getattr(os, 'environb', os.environ)
89 89 processlock = threading.Lock()
90 90
91 91 pygmentspresent = False
92 92 # ANSI color is unsupported prior to Windows 10
93 93 if os.name != 'nt':
94 94 try: # is pygments installed
95 95 import pygments
96 96 import pygments.lexers as lexers
97 97 import pygments.lexer as lexer
98 98 import pygments.formatters as formatters
99 99 import pygments.token as token
100 100 import pygments.style as style
101 101 pygmentspresent = True
102 102 difflexer = lexers.DiffLexer()
103 103 terminal256formatter = formatters.Terminal256Formatter()
104 104 except ImportError:
105 105 pass
106 106
107 107 if pygmentspresent:
108 108 class TestRunnerStyle(style.Style):
109 109 default_style = ""
110 110 skipped = token.string_to_tokentype("Token.Generic.Skipped")
111 111 failed = token.string_to_tokentype("Token.Generic.Failed")
112 112 skippedname = token.string_to_tokentype("Token.Generic.SName")
113 113 failedname = token.string_to_tokentype("Token.Generic.FName")
114 114 styles = {
115 115 skipped: '#e5e5e5',
116 116 skippedname: '#00ffff',
117 117 failed: '#7f0000',
118 118 failedname: '#ff0000',
119 119 }
120 120
121 121 class TestRunnerLexer(lexer.RegexLexer):
122 122 tokens = {
123 123 'root': [
124 124 (r'^Skipped', token.Generic.Skipped, 'skipped'),
125 125 (r'^Failed ', token.Generic.Failed, 'failed'),
126 126 (r'^ERROR: ', token.Generic.Failed, 'failed'),
127 127 ],
128 128 'skipped': [
129 129 (r'[\w-]+\.(t|py)', token.Generic.SName),
130 130 (r':.*', token.Generic.Skipped),
131 131 ],
132 132 'failed': [
133 133 (r'[\w-]+\.(t|py)', token.Generic.FName),
134 134 (r'(:| ).*', token.Generic.Failed),
135 135 ]
136 136 }
137 137
138 138 runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
139 139 runnerlexer = TestRunnerLexer()
140 140
141 141 if sys.version_info > (3, 5, 0):
142 142 PYTHON3 = True
143 143 xrange = range # we use xrange in one place, and we'd rather not use range
144 144 def _bytespath(p):
145 145 if p is None:
146 146 return p
147 147 return p.encode('utf-8')
148 148
149 149 def _strpath(p):
150 150 if p is None:
151 151 return p
152 152 return p.decode('utf-8')
153 153
154 154 elif sys.version_info >= (3, 0, 0):
155 155 print('%s is only supported on Python 3.5+ and 2.7, not %s' %
156 156 (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
157 157 sys.exit(70) # EX_SOFTWARE from `man 3 sysexits`
158 158 else:
159 159 PYTHON3 = False
160 160
161 161 # In python 2.x, path operations are generally done using
162 162 # bytestrings by default, so we don't have to do any extra
163 163 # fiddling there. We define the wrapper functions anyway just to
164 164 # help keep code consistent between platforms.
165 165 def _bytespath(p):
166 166 return p
167 167
168 168 _strpath = _bytespath
169 169
170 170 # For Windows support
171 171 wifexited = getattr(os, "WIFEXITED", lambda x: False)
172 172
173 173 # Whether to use IPv6
174 174 def checksocketfamily(name, port=20058):
175 175 """return true if we can listen on localhost using family=name
176 176
177 177 name should be either 'AF_INET', or 'AF_INET6'.
178 178 the port already being in use is okay - EADDRINUSE is considered successful.
179 179 """
180 180 family = getattr(socket, name, None)
181 181 if family is None:
182 182 return False
183 183 try:
184 184 s = socket.socket(family, socket.SOCK_STREAM)
185 185 s.bind(('localhost', port))
186 186 s.close()
187 187 return True
188 188 except socket.error as exc:
189 189 if exc.errno == errno.EADDRINUSE:
190 190 return True
191 191 elif exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
192 192 return False
193 193 else:
194 194 raise
195 195 else:
196 196 return False
197 197
198 198 # useipv6 will be set by parseargs
199 199 useipv6 = None
200 200
201 201 def checkportisavailable(port):
202 202 """return true if a port seems free to bind on localhost"""
203 203 if useipv6:
204 204 family = socket.AF_INET6
205 205 else:
206 206 family = socket.AF_INET
207 207 try:
208 208 s = socket.socket(family, socket.SOCK_STREAM)
209 209 s.bind(('localhost', port))
210 210 s.close()
211 211 return True
212 212 except socket.error as exc:
213 213 if exc.errno not in (errno.EADDRINUSE, errno.EADDRNOTAVAIL,
214 214 errno.EPROTONOSUPPORT):
215 215 raise
216 216 return False
217 217
218 218 closefds = os.name == 'posix'
219 219 def Popen4(cmd, wd, timeout, env=None):
220 220 processlock.acquire()
221 221 p = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=wd, env=env,
222 222 close_fds=closefds,
223 223 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
224 224 stderr=subprocess.STDOUT)
225 225 processlock.release()
226 226
227 227 p.fromchild = p.stdout
228 228 p.tochild = p.stdin
229 229 p.childerr = p.stderr
230 230
231 231 p.timeout = False
232 232 if timeout:
233 233 def t():
234 234 start = time.time()
235 235 while time.time() - start < timeout and p.returncode is None:
236 236 time.sleep(.1)
237 237 p.timeout = True
238 238 if p.returncode is None:
239 239 terminate(p)
240 240 threading.Thread(target=t).start()
241 241
242 242 return p
243 243
244 244 PYTHON = _bytespath(sys.executable.replace('\\', '/'))
245 245 IMPL_PATH = b'PYTHONPATH'
246 246 if 'java' in sys.platform:
247 247 IMPL_PATH = b'JYTHONPATH'
248 248
249 249 defaults = {
250 250 'jobs': ('HGTEST_JOBS', 1),
251 251 'timeout': ('HGTEST_TIMEOUT', 180),
252 252 'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500),
253 253 'port': ('HGTEST_PORT', 20059),
254 254 'shell': ('HGTEST_SHELL', 'sh'),
255 255 }
256 256
257 257 def canonpath(path):
258 258 return os.path.realpath(os.path.expanduser(path))
259 259
260 260 def parselistfiles(files, listtype, warn=True):
261 261 entries = dict()
262 262 for filename in files:
263 263 try:
264 264 path = os.path.expanduser(os.path.expandvars(filename))
265 265 f = open(path, "rb")
266 266 except IOError as err:
267 267 if err.errno != errno.ENOENT:
268 268 raise
269 269 if warn:
270 270 print("warning: no such %s file: %s" % (listtype, filename))
271 271 continue
272 272
273 273 for line in f.readlines():
274 274 line = line.split(b'#', 1)[0].strip()
275 275 if line:
276 276 entries[line] = filename
277 277
278 278 f.close()
279 279 return entries
280 280
281 281 def parsettestcases(path):
282 282 """read a .t test file, return a set of test case names
283 283
284 284 If path does not exist, return an empty set.
285 285 """
286 286 cases = set()
287 287 try:
288 288 with open(path, 'rb') as f:
289 289 for l in f:
290 290 if l.startswith(b'#testcases '):
291 291 cases.update(l[11:].split())
292 292 except IOError as ex:
293 293 if ex.errno != errno.ENOENT:
294 294 raise
295 295 return cases
296 296
297 297 def getparser():
298 298 """Obtain the OptionParser used by the CLI."""
299 299 parser = optparse.OptionParser("%prog [options] [tests]")
300 300
301 301 # keep these sorted
302 302 parser.add_option("--blacklist", action="append",
303 303 help="skip tests listed in the specified blacklist file")
304 304 parser.add_option("--whitelist", action="append",
305 305 help="always run tests listed in the specified whitelist file")
306 306 parser.add_option("--test-list", action="append",
307 307 help="read tests to run from the specified file")
308 308 parser.add_option("--changed", type="string",
309 309 help="run tests that are changed in parent rev or working directory")
310 310 parser.add_option("-C", "--annotate", action="store_true",
311 311 help="output files annotated with coverage")
312 312 parser.add_option("-c", "--cover", action="store_true",
313 313 help="print a test coverage report")
314 314 parser.add_option("--color", choices=["always", "auto", "never"],
315 315 default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
316 316 help="colorisation: always|auto|never (default: auto)")
317 317 parser.add_option("-d", "--debug", action="store_true",
318 318 help="debug mode: write output of test scripts to console"
319 319 " rather than capturing and diffing it (disables timeout)")
320 320 parser.add_option("-f", "--first", action="store_true",
321 321 help="exit on the first test failure")
322 322 parser.add_option("-H", "--htmlcov", action="store_true",
323 323 help="create an HTML report of the coverage of the files")
324 324 parser.add_option("-i", "--interactive", action="store_true",
325 325 help="prompt to accept changed output")
326 326 parser.add_option("-j", "--jobs", type="int",
327 327 help="number of jobs to run in parallel"
328 328 " (default: $%s or %d)" % defaults['jobs'])
329 329 parser.add_option("--keep-tmpdir", action="store_true",
330 330 help="keep temporary directory after running tests")
331 331 parser.add_option("-k", "--keywords",
332 332 help="run tests matching keywords")
333 333 parser.add_option("--list-tests", action="store_true",
334 334 help="list tests instead of running them")
335 335 parser.add_option("-l", "--local", action="store_true",
336 336 help="shortcut for --with-hg=<testdir>/../hg, "
337 337 "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set")
338 338 parser.add_option("--loop", action="store_true",
339 339 help="loop tests repeatedly")
340 340 parser.add_option("--runs-per-test", type="int", dest="runs_per_test",
341 341 help="run each test N times (default=1)", default=1)
342 342 parser.add_option("-n", "--nodiff", action="store_true",
343 343 help="skip showing test changes")
344 344 parser.add_option("--outputdir", type="string",
345 345 help="directory to write error logs to (default=test directory)")
346 346 parser.add_option("-p", "--port", type="int",
347 347 help="port on which servers should listen"
348 348 " (default: $%s or %d)" % defaults['port'])
349 349 parser.add_option("--compiler", type="string",
350 350 help="compiler to build with")
351 351 parser.add_option("--pure", action="store_true",
352 352 help="use pure Python code instead of C extensions")
353 353 parser.add_option("-R", "--restart", action="store_true",
354 354 help="restart at last error")
355 355 parser.add_option("-r", "--retest", action="store_true",
356 356 help="retest failed tests")
357 357 parser.add_option("-S", "--noskips", action="store_true",
358 358 help="don't report skip tests verbosely")
359 359 parser.add_option("--shell", type="string",
360 360 help="shell to use (default: $%s or %s)" % defaults['shell'])
361 361 parser.add_option("-t", "--timeout", type="int",
362 362 help="kill errant tests after TIMEOUT seconds"
363 363 " (default: $%s or %d)" % defaults['timeout'])
364 364 parser.add_option("--slowtimeout", type="int",
365 365 help="kill errant slow tests after SLOWTIMEOUT seconds"
366 366 " (default: $%s or %d)" % defaults['slowtimeout'])
367 367 parser.add_option("--time", action="store_true",
368 368 help="time how long each test takes")
369 369 parser.add_option("--json", action="store_true",
370 370 help="store test result data in 'report.json' file")
371 371 parser.add_option("--tmpdir", type="string",
372 372 help="run tests in the given temporary directory"
373 373 " (implies --keep-tmpdir)")
374 374 parser.add_option("-v", "--verbose", action="store_true",
375 375 help="output verbose messages")
376 376 parser.add_option("--xunit", type="string",
377 377 help="record xunit results at specified path")
378 378 parser.add_option("--view", type="string",
379 379 help="external diff viewer")
380 380 parser.add_option("--with-hg", type="string",
381 381 metavar="HG",
382 382 help="test using specified hg script rather than a "
383 383 "temporary installation")
384 384 parser.add_option("--chg", action="store_true",
385 385 help="install and use chg wrapper in place of hg")
386 386 parser.add_option("--with-chg", metavar="CHG",
387 387 help="use specified chg wrapper in place of hg")
388 388 parser.add_option("--ipv6", action="store_true",
389 389 help="prefer IPv6 to IPv4 for network related tests")
390 390 parser.add_option("-3", "--py3k-warnings", action="store_true",
391 391 help="enable Py3k warnings on Python 2.7+")
392 392 # This option should be deleted once test-check-py3-compat.t and other
393 393 # Python 3 tests run with Python 3.
394 394 parser.add_option("--with-python3", metavar="PYTHON3",
395 395 help="Python 3 interpreter (if running under Python 2)"
396 396 " (TEMPORARY)")
397 397 parser.add_option('--extra-config-opt', action="append",
398 398 help='set the given config opt in the test hgrc')
399 399 parser.add_option('--random', action="store_true",
400 400 help='run tests in random order')
401 401 parser.add_option('--profile-runner', action='store_true',
402 402 help='run statprof on run-tests')
403 403 parser.add_option('--allow-slow-tests', action='store_true',
404 404 help='allow extremely slow tests')
405 405 parser.add_option('--showchannels', action='store_true',
406 406 help='show scheduling channels')
407 407 parser.add_option('--known-good-rev', type="string",
408 408 metavar="known_good_rev",
409 409 help=("Automatically bisect any failures using this "
410 410 "revision as a known-good revision."))
411 411 parser.add_option('--bisect-repo', type="string",
412 412 metavar='bisect_repo',
413 413 help=("Path of a repo to bisect. Use together with "
414 414 "--known-good-rev"))
415 415
416 416 for option, (envvar, default) in defaults.items():
417 417 defaults[option] = type(default)(os.environ.get(envvar, default))
418 418 parser.set_defaults(**defaults)
419 419
420 420 return parser
421 421
422 422 def parseargs(args, parser):
423 423 """Parse arguments with our OptionParser and validate results."""
424 424 (options, args) = parser.parse_args(args)
425 425
426 426 # jython is always pure
427 427 if 'java' in sys.platform or '__pypy__' in sys.modules:
428 428 options.pure = True
429 429
430 430 if options.with_hg:
431 431 options.with_hg = canonpath(_bytespath(options.with_hg))
432 432 if not (os.path.isfile(options.with_hg) and
433 433 os.access(options.with_hg, os.X_OK)):
434 434 parser.error('--with-hg must specify an executable hg script')
435 435 if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
436 436 sys.stderr.write('warning: --with-hg should specify an hg script\n')
437 437 if options.local:
438 438 testdir = os.path.dirname(_bytespath(canonpath(sys.argv[0])))
439 439 reporootdir = os.path.dirname(testdir)
440 440 pathandattrs = [(b'hg', 'with_hg')]
441 441 if options.chg:
442 442 pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
443 443 for relpath, attr in pathandattrs:
444 444 binpath = os.path.join(reporootdir, relpath)
445 445 if os.name != 'nt' and not os.access(binpath, os.X_OK):
446 446 parser.error('--local specified, but %r not found or '
447 447 'not executable' % binpath)
448 448 setattr(options, attr, binpath)
449 449
450 450 if (options.chg or options.with_chg) and os.name == 'nt':
451 451 parser.error('chg does not work on %s' % os.name)
452 452 if options.with_chg:
453 453 options.chg = False # no installation to temporary location
454 454 options.with_chg = canonpath(_bytespath(options.with_chg))
455 455 if not (os.path.isfile(options.with_chg) and
456 456 os.access(options.with_chg, os.X_OK)):
457 457 parser.error('--with-chg must specify a chg executable')
458 458 if options.chg and options.with_hg:
459 459 # chg shares installation location with hg
460 460 parser.error('--chg does not work when --with-hg is specified '
461 461 '(use --with-chg instead)')
462 462
463 463 if options.color == 'always' and not pygmentspresent:
464 464 sys.stderr.write('warning: --color=always ignored because '
465 465 'pygments is not installed\n')
466 466
467 467 if options.bisect_repo and not options.known_good_rev:
468 468 parser.error("--bisect-repo cannot be used without --known-good-rev")
469 469
470 470 global useipv6
471 471 if options.ipv6:
472 472 useipv6 = checksocketfamily('AF_INET6')
473 473 else:
474 474 # only use IPv6 if IPv4 is unavailable and IPv6 is available
475 475 useipv6 = ((not checksocketfamily('AF_INET'))
476 476 and checksocketfamily('AF_INET6'))
477 477
478 478 options.anycoverage = options.cover or options.annotate or options.htmlcov
479 479 if options.anycoverage:
480 480 try:
481 481 import coverage
482 482 covver = version.StrictVersion(coverage.__version__).version
483 483 if covver < (3, 3):
484 484 parser.error('coverage options require coverage 3.3 or later')
485 485 except ImportError:
486 486 parser.error('coverage options now require the coverage package')
487 487
488 488 if options.anycoverage and options.local:
489 489 # this needs some path mangling somewhere, I guess
490 490 parser.error("sorry, coverage options do not work when --local "
491 491 "is specified")
492 492
493 493 if options.anycoverage and options.with_hg:
494 494 parser.error("sorry, coverage options do not work when --with-hg "
495 495 "is specified")
496 496
497 497 global verbose
498 498 if options.verbose:
499 499 verbose = ''
500 500
501 501 if options.tmpdir:
502 502 options.tmpdir = canonpath(options.tmpdir)
503 503
504 504 if options.jobs < 1:
505 505 parser.error('--jobs must be positive')
506 506 if options.interactive and options.debug:
507 507 parser.error("-i/--interactive and -d/--debug are incompatible")
508 508 if options.debug:
509 509 if options.timeout != defaults['timeout']:
510 510 sys.stderr.write(
511 511 'warning: --timeout option ignored with --debug\n')
512 512 if options.slowtimeout != defaults['slowtimeout']:
513 513 sys.stderr.write(
514 514 'warning: --slowtimeout option ignored with --debug\n')
515 515 options.timeout = 0
516 516 options.slowtimeout = 0
517 517 if options.py3k_warnings:
518 518 if PYTHON3:
519 519 parser.error(
520 520 '--py3k-warnings can only be used on Python 2.7')
521 521 if options.with_python3:
522 522 if PYTHON3:
523 523 parser.error('--with-python3 cannot be used when executing with '
524 524 'Python 3')
525 525
526 526 options.with_python3 = canonpath(options.with_python3)
527 527 # Verify Python3 executable is acceptable.
528 528 proc = subprocess.Popen([options.with_python3, b'--version'],
529 529 stdout=subprocess.PIPE,
530 530 stderr=subprocess.STDOUT)
531 531 out, _err = proc.communicate()
532 532 ret = proc.wait()
533 533 if ret != 0:
534 534 parser.error('could not determine version of python 3')
535 535 if not out.startswith('Python '):
536 536 parser.error('unexpected output from python3 --version: %s' %
537 537 out)
538 538 vers = version.LooseVersion(out[len('Python '):])
539 539 if vers < version.LooseVersion('3.5.0'):
540 540 parser.error('--with-python3 version must be 3.5.0 or greater; '
541 541 'got %s' % out)
542 542
543 543 if options.blacklist:
544 544 options.blacklist = parselistfiles(options.blacklist, 'blacklist')
545 545 if options.whitelist:
546 546 options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
547 547 else:
548 548 options.whitelisted = {}
549 549
550 550 if options.showchannels:
551 551 options.nodiff = True
552 552
553 553 return (options, args)
554 554
555 555 def rename(src, dst):
556 556 """Like os.rename(), trade atomicity and opened files friendliness
557 557 for existing destination support.
558 558 """
559 559 shutil.copy(src, dst)
560 560 os.remove(src)
561 561
562 562 _unified_diff = difflib.unified_diff
563 563 if PYTHON3:
564 564 import functools
565 565 _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
566 566
567 567 def getdiff(expected, output, ref, err):
568 568 servefail = False
569 569 lines = []
570 570 for line in _unified_diff(expected, output, ref, err):
571 571 if line.startswith(b'+++') or line.startswith(b'---'):
572 572 line = line.replace(b'\\', b'/')
573 573 if line.endswith(b' \n'):
574 574 line = line[:-2] + b'\n'
575 575 lines.append(line)
576 576 if not servefail and line.startswith(
577 577 b'+ abort: child process failed to start'):
578 578 servefail = True
579 579
580 580 return servefail, lines
581 581
582 582 verbose = False
583 583 def vlog(*msg):
584 584 """Log only when in verbose mode."""
585 585 if verbose is False:
586 586 return
587 587
588 588 return log(*msg)
589 589
590 590 # Bytes that break XML even in a CDATA block: control characters 0-31
591 591 # sans \t, \n and \r
592 592 CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")
593 593
594 594 # Match feature conditionalized output lines in the form, capturing the feature
595 595 # list in group 2, and the preceding line output in group 1:
596 596 #
597 597 # output..output (feature !)\n
598 598 optline = re.compile(b'(.*) \((.+?) !\)\n$')
599 599
600 600 def cdatasafe(data):
601 601 """Make a string safe to include in a CDATA block.
602 602
603 603 Certain control characters are illegal in a CDATA block, and
604 604 there's no way to include a ]]> in a CDATA either. This function
605 605 replaces illegal bytes with ? and adds a space between the ]] so
606 606 that it won't break the CDATA block.
607 607 """
608 608 return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>')
609 609
610 610 def log(*msg):
611 611 """Log something to stdout.
612 612
613 613 Arguments are strings to print.
614 614 """
615 615 with iolock:
616 616 if verbose:
617 617 print(verbose, end=' ')
618 618 for m in msg:
619 619 print(m, end=' ')
620 620 print()
621 621 sys.stdout.flush()
622 622
623 623 def highlightdiff(line, color):
624 624 if not color:
625 625 return line
626 626 assert pygmentspresent
627 627 return pygments.highlight(line.decode('latin1'), difflexer,
628 628 terminal256formatter).encode('latin1')
629 629
630 630 def highlightmsg(msg, color):
631 631 if not color:
632 632 return msg
633 633 assert pygmentspresent
634 634 return pygments.highlight(msg, runnerlexer, runnerformatter)
635 635
636 636 def terminate(proc):
637 637 """Terminate subprocess"""
638 638 vlog('# Terminating process %d' % proc.pid)
639 639 try:
640 640 proc.terminate()
641 641 except OSError:
642 642 pass
643 643
644 644 def killdaemons(pidfile):
645 645 import killdaemons as killmod
646 646 return killmod.killdaemons(pidfile, tryhard=False, remove=True,
647 647 logfn=vlog)
648 648
649 649 class Test(unittest.TestCase):
650 650 """Encapsulates a single, runnable test.
651 651
652 652 While this class conforms to the unittest.TestCase API, it differs in that
653 653 instances need to be instantiated manually. (Typically, unittest.TestCase
654 654 classes are instantiated automatically by scanning modules.)
655 655 """
656 656
657 657 # Status code reserved for skipped tests (used by hghave).
658 658 SKIPPED_STATUS = 80
659 659
660 660 def __init__(self, path, outputdir, tmpdir, keeptmpdir=False,
661 661 debug=False,
662 662 timeout=None,
663 663 startport=None, extraconfigopts=None,
664 664 py3kwarnings=False, shell=None, hgcommand=None,
665 665 slowtimeout=None, usechg=False,
666 666 useipv6=False):
667 667 """Create a test from parameters.
668 668
669 669 path is the full path to the file defining the test.
670 670
671 671 tmpdir is the main temporary directory to use for this test.
672 672
673 673 keeptmpdir determines whether to keep the test's temporary directory
674 674 after execution. It defaults to removal (False).
675 675
676 676 debug mode will make the test execute verbosely, with unfiltered
677 677 output.
678 678
679 679 timeout controls the maximum run time of the test. It is ignored when
680 680 debug is True. See slowtimeout for tests with #require slow.
681 681
682 682 slowtimeout overrides timeout if the test has #require slow.
683 683
684 684 startport controls the starting port number to use for this test. Each
685 685 test will reserve 3 port numbers for execution. It is the caller's
686 686 responsibility to allocate a non-overlapping port range to Test
687 687 instances.
688 688
689 689 extraconfigopts is an iterable of extra hgrc config options. Values
690 690 must have the form "key=value" (something understood by hgrc). Values
691 691 of the form "foo.key=value" will result in "[foo] key=value".
692 692
693 693 py3kwarnings enables Py3k warnings.
694 694
695 695 shell is the shell to execute tests in.
696 696 """
697 697 if timeout is None:
698 698 timeout = defaults['timeout']
699 699 if startport is None:
700 700 startport = defaults['port']
701 701 if slowtimeout is None:
702 702 slowtimeout = defaults['slowtimeout']
703 703 self.path = path
704 704 self.bname = os.path.basename(path)
705 705 self.name = _strpath(self.bname)
706 706 self._testdir = os.path.dirname(path)
707 707 self._outputdir = outputdir
708 708 self._tmpname = os.path.basename(path)
709 709 self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)
710 710
711 711 self._threadtmp = tmpdir
712 712 self._keeptmpdir = keeptmpdir
713 713 self._debug = debug
714 714 self._timeout = timeout
715 715 self._slowtimeout = slowtimeout
716 716 self._startport = startport
717 717 self._extraconfigopts = extraconfigopts or []
718 718 self._py3kwarnings = py3kwarnings
719 719 self._shell = _bytespath(shell)
720 720 self._hgcommand = hgcommand or b'hg'
721 721 self._usechg = usechg
722 722 self._useipv6 = useipv6
723 723
724 724 self._aborted = False
725 725 self._daemonpids = []
726 726 self._finished = None
727 727 self._ret = None
728 728 self._out = None
729 729 self._skipped = None
730 730 self._testtmp = None
731 731 self._chgsockdir = None
732 732
733 733 self._refout = self.readrefout()
734 734
735 735 def readrefout(self):
736 736 """read reference output"""
737 737 # If we're not in --debug mode and reference output file exists,
738 738 # check test output against it.
739 739 if self._debug:
740 740 return None # to match "out is None"
741 741 elif os.path.exists(self.refpath):
742 742 with open(self.refpath, 'rb') as f:
743 743 return f.read().splitlines(True)
744 744 else:
745 745 return []
746 746
747 747 # needed to get base class __repr__ running
748 748 @property
749 749 def _testMethodName(self):
750 750 return self.name
751 751
752 752 def __str__(self):
753 753 return self.name
754 754
755 755 def shortDescription(self):
756 756 return self.name
757 757
758 758 def setUp(self):
759 759 """Tasks to perform before run()."""
760 760 self._finished = False
761 761 self._ret = None
762 762 self._out = None
763 763 self._skipped = None
764 764
765 765 try:
766 766 os.mkdir(self._threadtmp)
767 767 except OSError as e:
768 768 if e.errno != errno.EEXIST:
769 769 raise
770 770
771 771 name = self._tmpname
772 772 self._testtmp = os.path.join(self._threadtmp, name)
773 773 os.mkdir(self._testtmp)
774 774
775 775 # Remove any previous output files.
776 776 if os.path.exists(self.errpath):
777 777 try:
778 778 os.remove(self.errpath)
779 779 except OSError as e:
780 780 # We might have raced another test to clean up a .err
781 781 # file, so ignore ENOENT when removing a previous .err
782 782 # file.
783 783 if e.errno != errno.ENOENT:
784 784 raise
785 785
786 786 if self._usechg:
787 787 self._chgsockdir = os.path.join(self._threadtmp,
788 788 b'%s.chgsock' % name)
789 789 os.mkdir(self._chgsockdir)
790 790
791 791 def run(self, result):
792 792 """Run this test and report results against a TestResult instance."""
793 793 # This function is extremely similar to unittest.TestCase.run(). Once
794 794 # we require Python 2.7 (or at least its version of unittest), this
795 795 # function can largely go away.
796 796 self._result = result
797 797 result.startTest(self)
798 798 try:
799 799 try:
800 800 self.setUp()
801 801 except (KeyboardInterrupt, SystemExit):
802 802 self._aborted = True
803 803 raise
804 804 except Exception:
805 805 result.addError(self, sys.exc_info())
806 806 return
807 807
808 808 success = False
809 809 try:
810 810 self.runTest()
811 811 except KeyboardInterrupt:
812 812 self._aborted = True
813 813 raise
814 814 except unittest.SkipTest as e:
815 815 result.addSkip(self, str(e))
816 816 # The base class will have already counted this as a
817 817 # test we "ran", but we want to exclude skipped tests
818 818 # from those we count towards those run.
819 819 result.testsRun -= 1
820 820 except self.failureException as e:
821 821 # This differs from unittest in that we don't capture
822 822 # the stack trace. This is for historical reasons and
823 823 # this decision could be revisited in the future,
824 824 # especially for PythonTest instances.
825 825 if result.addFailure(self, str(e)):
826 826 success = True
827 827 except Exception:
828 828 result.addError(self, sys.exc_info())
829 829 else:
830 830 success = True
831 831
832 832 try:
833 833 self.tearDown()
834 834 except (KeyboardInterrupt, SystemExit):
835 835 self._aborted = True
836 836 raise
837 837 except Exception:
838 838 result.addError(self, sys.exc_info())
839 839 success = False
840 840
841 841 if success:
842 842 result.addSuccess(self)
843 843 finally:
844 844 result.stopTest(self, interrupted=self._aborted)
845 845
846 846 def runTest(self):
847 847 """Run this test instance.
848 848
849 849 This will return a tuple describing the result of the test.
850 850 """
851 851 env = self._getenv()
852 852 self._genrestoreenv(env)
853 853 self._daemonpids.append(env['DAEMON_PIDS'])
854 854 self._createhgrc(env['HGRCPATH'])
855 855
856 856 vlog('# Test', self.name)
857 857
858 858 ret, out = self._run(env)
859 859 self._finished = True
860 860 self._ret = ret
861 861 self._out = out
862 862
863 863 def describe(ret):
864 864 if ret < 0:
865 865 return 'killed by signal: %d' % -ret
866 866 return 'returned error code %d' % ret
867 867
868 868 self._skipped = False
869 869
870 870 if ret == self.SKIPPED_STATUS:
871 871 if out is None: # Debug mode, nothing to parse.
872 872 missing = ['unknown']
873 873 failed = None
874 874 else:
875 875 missing, failed = TTest.parsehghaveoutput(out)
876 876
877 877 if not missing:
878 878 missing = ['skipped']
879 879
880 880 if failed:
881 881 self.fail('hg have failed checking for %s' % failed[-1])
882 882 else:
883 883 self._skipped = True
884 884 raise unittest.SkipTest(missing[-1])
885 885 elif ret == 'timeout':
886 886 self.fail('timed out')
887 887 elif ret is False:
888 888 self.fail('no result code from test')
889 889 elif out != self._refout:
890 890 # Diff generation may rely on written .err file.
891 891 if (ret != 0 or out != self._refout) and not self._skipped \
892 892 and not self._debug:
893 893 f = open(self.errpath, 'wb')
894 894 for line in out:
895 895 f.write(line)
896 896 f.close()
897 897
898 898 # The result object handles diff calculation for us.
899 899 if self._result.addOutputMismatch(self, ret, out, self._refout):
900 900 # change was accepted, skip failing
901 901 return
902 902
903 903 if ret:
904 904 msg = 'output changed and ' + describe(ret)
905 905 else:
906 906 msg = 'output changed'
907 907
908 908 self.fail(msg)
909 909 elif ret:
910 910 self.fail(describe(ret))
911 911
912 912 def tearDown(self):
913 913 """Tasks to perform after run()."""
914 914 for entry in self._daemonpids:
915 915 killdaemons(entry)
916 916 self._daemonpids = []
917 917
918 918 if self._keeptmpdir:
919 919 log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
920 920 (self._testtmp.decode('utf-8'),
921 921 self._threadtmp.decode('utf-8')))
922 922 else:
923 923 shutil.rmtree(self._testtmp, True)
924 924 shutil.rmtree(self._threadtmp, True)
925 925
926 926 if self._usechg:
926 926 # chgservers will stop automatically once they notice that the socket
927 927 # files have been deleted
929 929 shutil.rmtree(self._chgsockdir, True)
930 930
931 931 if (self._ret != 0 or self._out != self._refout) and not self._skipped \
932 932 and not self._debug and self._out:
933 933 f = open(self.errpath, 'wb')
934 934 for line in self._out:
935 935 f.write(line)
936 936 f.close()
937 937
938 938 vlog("# Ret was:", self._ret, '(%s)' % self.name)
939 939
940 940 def _run(self, env):
941 941 # This should be implemented in child classes to run tests.
942 942 raise unittest.SkipTest('unknown test type')
943 943
944 944 def abort(self):
945 945 """Terminate execution of this test."""
946 946 self._aborted = True
947 947
948 948 def _portmap(self, i):
949 949 offset = b'' if i == 0 else b'%d' % i
950 950 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
951 951
952 952 def _getreplacements(self):
953 953 """Obtain a mapping of text replacements to apply to test output.
954 954
955 955 Test output needs to be normalized so it can be compared to expected
956 956 output. This function defines how some of that normalization will
957 957 occur.
958 958 """
959 959 r = [
960 960 # This list should be parallel to defineport in _getenv
961 961 self._portmap(0),
962 962 self._portmap(1),
963 963 self._portmap(2),
964 964 (br'(?m)^(saved backup bundle to .*\.hg)( \(glob\))?$',
965 965 br'\1 (glob)'),
966 966 (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
967 967 (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
968 968 ]
969 969 r.append((self._escapepath(self._testtmp), b'$TESTTMP'))
970 970
971 971 testdir = os.path.dirname(self.path)
972 972 replacementfile = os.path.join(testdir, b'common-pattern.py')
973 973
974 974 if os.path.exists(replacementfile):
975 975 data = {}
976 976 with open(replacementfile, mode='rb') as source:
977 977 # the intermediate 'compile' step helps with debugging
978 978 code = compile(source.read(), replacementfile, 'exec')
979 979 exec(code, data)
980 980 r.extend(data.get('substitutions', ()))
981 981 return r
982 982
983 983 def _escapepath(self, p):
984 984 if os.name == 'nt':
985 985 return (
986 986 (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
987 987 c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
988 988 for c in p))
989 989 )
990 990 else:
991 991 return re.escape(p)
992 992
993 993 def _localip(self):
994 994 if self._useipv6:
995 995 return b'::1'
996 996 else:
997 997 return b'127.0.0.1'
998 998
999 999 def _genrestoreenv(self, testenv):
1000 1000 """Generate a script that can be used by tests to restore the original
1001 1001 environment."""
1002 1002 # Put the restoreenv script inside self._threadtmp
1003 1003 scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
1004 1004 testenv['HGTEST_RESTOREENV'] = scriptpath
1005 1005
1006 1006 # Only restore environment variable names that the shell allows
1007 1007 # us to export.
1008 1008 name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')
1009 1009
1010 1010 # Do not restore these variables; otherwise tests would fail.
1011 1011 reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}
1012 1012
1013 1013 with open(scriptpath, 'w') as envf:
1014 1014 for name, value in origenviron.items():
1015 1015 if not name_regex.match(name):
1016 1016 # Skip environment variables with unusual names not
1017 1017 # allowed by most shells.
1018 1018 continue
1019 1019 if name in reqnames:
1020 1020 continue
1021 1021 envf.write('%s=%s\n' % (name, shellquote(value)))
1022 1022
1023 1023 for name in testenv:
1024 1024 if name in origenviron or name in reqnames:
1025 1025 continue
1026 1026 envf.write('unset %s\n' % (name,))
1027 1027
1028 1028 def _getenv(self):
1029 1029 """Obtain environment variables to use during test execution."""
1030 1030 def defineport(i):
1031 1031 offset = '' if i == 0 else '%s' % i
1032 1032 env["HGPORT%s" % offset] = '%s' % (self._startport + i)
1033 1033 env = os.environ.copy()
1034 1034 env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase')
1035 1035 env['HGEMITWARNINGS'] = '1'
1036 1036 env['TESTTMP'] = self._testtmp
1037 1037 env['HOME'] = self._testtmp
1038 1038 # This number should match portneeded in _getport
1039 1039 for port in xrange(3):
1040 1040 # This list should be parallel to _portmap in _getreplacements
1041 1041 defineport(port)
1042 1042 env["HGRCPATH"] = os.path.join(self._threadtmp, b'.hgrc')
1043 1043 env["DAEMON_PIDS"] = os.path.join(self._threadtmp, b'daemon.pids')
1044 1044 env["HGEDITOR"] = ('"' + sys.executable + '"'
1045 1045 + ' -c "import sys; sys.exit(0)"')
1046 1046 env["HGMERGE"] = "internal:merge"
1047 1047 env["HGUSER"] = "test"
1048 1048 env["HGENCODING"] = "ascii"
1049 1049 env["HGENCODINGMODE"] = "strict"
1050 1050 env['HGIPV6'] = str(int(self._useipv6))
1051 1051
1052 1052 # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
1053 1053 # IP addresses.
1054 1054 env['LOCALIP'] = self._localip()
1055 1055
1056 1056 # Reset some environment variables to well-known values so that
1057 1057 # the tests produce repeatable output.
1058 1058 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
1059 1059 env['TZ'] = 'GMT'
1060 1060 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
1061 1061 env['COLUMNS'] = '80'
1062 1062 env['TERM'] = 'xterm'
1063 1063
1064 1064 for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
1065 1065 'HGPLAIN HGPLAINEXCEPT EDITOR VISUAL PAGER ' +
1066 1066 'NO_PROXY CHGDEBUG').split():
1067 1067 if k in env:
1068 1068 del env[k]
1069 1069
1070 1070 # unset env related to hooks
1071 1071 for k in env.keys():
1072 1072 if k.startswith('HG_'):
1073 1073 del env[k]
1074 1074
1075 1075 if self._usechg:
1076 1076 env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
1077 1077
1078 1078 return env
1079 1079
1080 1080 def _createhgrc(self, path):
1081 1081 """Create an hgrc file for this test."""
1082 1082 hgrc = open(path, 'wb')
1083 1083 hgrc.write(b'[ui]\n')
1084 1084 hgrc.write(b'slash = True\n')
1085 1085 hgrc.write(b'interactive = False\n')
1086 1086 hgrc.write(b'mergemarkers = detailed\n')
1087 1087 hgrc.write(b'promptecho = True\n')
1088 1088 hgrc.write(b'[defaults]\n')
1089 1089 hgrc.write(b'[devel]\n')
1090 1090 hgrc.write(b'all-warnings = true\n')
1091 1091 hgrc.write(b'default-date = 0 0\n')
1092 1092 hgrc.write(b'[largefiles]\n')
1093 1093 hgrc.write(b'usercache = %s\n' %
1094 1094 (os.path.join(self._testtmp, b'.cache/largefiles')))
1095 1095 hgrc.write(b'[web]\n')
1096 1096 hgrc.write(b'address = localhost\n')
1097 1097 hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii'))
1098 1098
1099 1099 for opt in self._extraconfigopts:
1100 1100 section, key = opt.split('.', 1)
1101 1101 assert '=' in key, ('extra config opt %s must '
1102 1102 'have an = for assignment' % opt)
1103 1103 hgrc.write(b'[%s]\n%s\n' % (section, key))
1104 1104 hgrc.close()
1105 1105
1106 1106 def fail(self, msg):
1107 1107 # unittest differentiates between errored and failed.
1108 1108 # Failed is denoted by AssertionError (by default at least).
1109 1109 raise AssertionError(msg)
1110 1110
1111 1111 def _runcommand(self, cmd, env, normalizenewlines=False):
1112 1112 """Run command in a sub-process, capturing the output (stdout and
1113 1113 stderr).
1114 1114
1115 1115 Return a tuple (exitcode, output). output is None in debug mode.
1116 1116 """
1117 1117 if self._debug:
1118 1118 proc = subprocess.Popen(cmd, shell=True, cwd=self._testtmp,
1119 1119 env=env)
1120 1120 ret = proc.wait()
1121 1121 return (ret, None)
1122 1122
1123 1123 proc = Popen4(cmd, self._testtmp, self._timeout, env)
1124 1124 def cleanup():
1125 1125 terminate(proc)
1126 1126 ret = proc.wait()
1127 1127 if ret == 0:
1128 1128 ret = signal.SIGTERM << 8
1129 1129 killdaemons(env['DAEMON_PIDS'])
1130 1130 return ret
1131 1131
1132 1132 output = ''
1133 1133 proc.tochild.close()
1134 1134
1135 1135 try:
1136 1136 output = proc.fromchild.read()
1137 1137 except KeyboardInterrupt:
1138 1138 vlog('# Handling keyboard interrupt')
1139 1139 cleanup()
1140 1140 raise
1141 1141
1142 1142 ret = proc.wait()
1143 1143 if wifexited(ret):
1144 1144 ret = os.WEXITSTATUS(ret)
1145 1145
1146 1146 if proc.timeout:
1147 1147 ret = 'timeout'
1148 1148
1149 1149 if ret:
1150 1150 killdaemons(env['DAEMON_PIDS'])
1151 1151
1152 1152 for s, r in self._getreplacements():
1153 1153 output = re.sub(s, r, output)
1154 1154
1155 1155 if normalizenewlines:
1156 1156 output = output.replace('\r\n', '\n')
1157 1157
1158 1158 return ret, output.splitlines(True)
1159 1159
1160 1160 class PythonTest(Test):
1161 1161 """A Python-based test."""
1162 1162
1163 1163 @property
1164 1164 def refpath(self):
1165 1165 return os.path.join(self._testdir, b'%s.out' % self.bname)
1166 1166
1167 1167 def _run(self, env):
1168 1168 py3kswitch = self._py3kwarnings and b' -3' or b''
1169 1169 cmd = b'%s%s "%s"' % (PYTHON, py3kswitch, self.path)
1170 1170 vlog("# Running", cmd)
1171 1171 normalizenewlines = os.name == 'nt'
1172 1172 result = self._runcommand(cmd, env,
1173 1173 normalizenewlines=normalizenewlines)
1174 1174 if self._aborted:
1175 1175 raise KeyboardInterrupt()
1176 1176
1177 1177 return result
1178 1178
1179 1179 # Some glob patterns apply only in some circumstances, so the script
1180 1180 # might want to remove (glob) annotations that otherwise should be
1181 1181 # retained.
1182 1182 checkcodeglobpats = [
1183 1183 # On Windows it looks like \ doesn't require a (glob), but we know
1184 1184 # better.
1185 1185 re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
1186 1186 re.compile(br'^moving \S+/.*[^)]$'),
1187 1187 re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
1188 1188 # Not all platforms have 127.0.0.1 as loopback (though most do),
1189 1189 # so we always glob that too.
1190 1190 re.compile(br'.*\$LOCALIP.*$'),
1191 1191 ]
1192 1192
1193 1193 bchr = chr
1194 1194 if PYTHON3:
1195 1195 bchr = lambda x: bytes([x])
1196 1196
1197 1197 class TTest(Test):
1198 1198 """A "t test" is a test backed by a .t file."""
1199 1199
1200 1200 SKIPPED_PREFIX = b'skipped: '
1201 1201 FAILED_PREFIX = b'hghave check failed: '
1202 1202 NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search
1203 1203
1204 1204 ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
1205 1205 ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
1206 1206 ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
1207 1207
1208 1208 def __init__(self, path, *args, **kwds):
1209 1209 # accept an extra "case" parameter
1210 1210 case = None
1211 1211 if 'case' in kwds:
1212 1212 case = kwds.pop('case')
1213 1213 self._case = case
1214 1214 self._allcases = parsettestcases(path)
1215 1215 super(TTest, self).__init__(path, *args, **kwds)
1216 1216 if case:
1217 1217 self.name = '%s (case %s)' % (self.name, _strpath(case))
1218 1218 self.errpath = b'%s.%s.err' % (self.errpath[:-4], case)
1219 1219 self._tmpname += b'-%s' % case
1220 1220
1221 1221 @property
1222 1222 def refpath(self):
1223 1223 return os.path.join(self._testdir, self.bname)
1224 1224
1225 1225 def _run(self, env):
1226 1226 f = open(self.path, 'rb')
1227 1227 lines = f.readlines()
1228 1228 f.close()
1229 1229
1230 1230 # .t file is both reference output and the test input, keep reference
1231 1231 # output updated with the test input. This avoids some race
1232 1232 # conditions where the reference output does not match the actual test.
1233 1233 if self._refout is not None:
1234 1234 self._refout = lines
1235 1235
1236 1236 salt, script, after, expected = self._parsetest(lines)
1237 1237
1238 1238 # Write out the generated script.
1239 1239 fname = b'%s.sh' % self._testtmp
1240 1240 f = open(fname, 'wb')
1241 1241 for l in script:
1242 1242 f.write(l)
1243 1243 f.close()
1244 1244
1245 1245 cmd = b'%s "%s"' % (self._shell, fname)
1246 1246 vlog("# Running", cmd)
1247 1247
1248 1248 exitcode, output = self._runcommand(cmd, env)
1249 1249
1250 1250 if self._aborted:
1251 1251 raise KeyboardInterrupt()
1252 1252
1253 1253 # Do not merge output if skipped. Return hghave message instead.
1254 1254 # Similarly, with --debug, output is None.
1255 1255 if exitcode == self.SKIPPED_STATUS or output is None:
1256 1256 return exitcode, output
1257 1257
1258 1258 return self._processoutput(exitcode, output, salt, after, expected)
1259 1259
1260 1260 def _hghave(self, reqs):
1261 1261 # TODO do something smarter when all other uses of hghave are gone.
1262 1262 runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
1263 1263 tdir = runtestdir.replace(b'\\', b'/')
1264 1264 proc = Popen4(b'%s -c "%s/hghave %s"' %
1265 1265 (self._shell, tdir, b' '.join(reqs)),
1266 1266 self._testtmp, 0, self._getenv())
1267 1267 stdout, stderr = proc.communicate()
1268 1268 ret = proc.wait()
1269 1269 if wifexited(ret):
1270 1270 ret = os.WEXITSTATUS(ret)
1271 1271 if ret == 2:
1272 1272 print(stdout.decode('utf-8'))
1273 1273 sys.exit(1)
1274 1274
1275 1275 if ret != 0:
1276 1276 return False, stdout
1277 1277
1278 1278 if b'slow' in reqs:
1279 1279 self._timeout = self._slowtimeout
1280 1280 return True, None
1281 1281
1282 1282 def _iftest(self, args):
1283 1283 # implements "#if"
1284 1284 reqs = []
1285 1285 for arg in args:
1286 1286 if arg.startswith(b'no-') and arg[3:] in self._allcases:
1287 1287 if arg[3:] == self._case:
1288 1288 return False
1289 1289 elif arg in self._allcases:
1290 1290 if arg != self._case:
1291 1291 return False
1292 1292 else:
1293 1293 reqs.append(arg)
1294 1294 return self._hghave(reqs)[0]
1295 1295
1296 1296 def _parsetest(self, lines):
1297 1297 # We generate a shell script which outputs unique markers to line
1298 1298 # up script results with our source. These markers include input
1299 1299 # line number and the last return code.
1300 1300 salt = b"SALT%d" % time.time()
1301 1301 def addsalt(line, inpython):
1302 1302 if inpython:
1303 1303 script.append(b'%s %d 0\n' % (salt, line))
1304 1304 else:
1305 1305 script.append(b'echo %s %d $?\n' % (salt, line))
1306 1306
1307 1307 script = []
1308 1308
1309 1309 # After we run the shell script, we re-unify the script output
1310 1310 # with non-active parts of the source, with synchronization by our
1311 1311 # SALT line number markers. The after table contains the non-active
1312 1312 # components, ordered by line number.
1313 1313 after = {}
1314 1314
1315 1315 # Expected shell script output.
1316 1316 expected = {}
1317 1317
1318 1318 pos = prepos = -1
1319 1319
1320 1320 # True or False when in a true or false conditional section
1321 1321 skipping = None
1322 1322
1323 1323 # We keep track of whether or not we're in a Python block so we
1324 1324 # can generate the surrounding doctest magic.
1325 1325 inpython = False
1326 1326
1327 1327 if self._debug:
1328 1328 script.append(b'set -x\n')
1329 1329 if self._hgcommand != b'hg':
1330 1330 script.append(b'alias hg="%s"\n' % self._hgcommand)
1331 1331 if os.getenv('MSYSTEM'):
1332 1332 script.append(b'alias pwd="pwd -W"\n')
1333 1333
1334 1334 n = 0
1335 1335 for n, l in enumerate(lines):
1336 1336 if not l.endswith(b'\n'):
1337 1337 l += b'\n'
1338 1338 if l.startswith(b'#require'):
1339 1339 lsplit = l.split()
1340 1340 if len(lsplit) < 2 or lsplit[0] != b'#require':
1341 1341 after.setdefault(pos, []).append(' !!! invalid #require\n')
1342 1342 haveresult, message = self._hghave(lsplit[1:])
1343 1343 if not haveresult:
1344 1344 script = [b'echo "%s"\nexit 80\n' % message]
1345 1345 break
1346 1346 after.setdefault(pos, []).append(l)
1347 1347 elif l.startswith(b'#if'):
1348 1348 lsplit = l.split()
1349 1349 if len(lsplit) < 2 or lsplit[0] != b'#if':
1350 1350 after.setdefault(pos, []).append(' !!! invalid #if\n')
1351 1351 if skipping is not None:
1352 1352 after.setdefault(pos, []).append(' !!! nested #if\n')
1353 1353 skipping = not self._iftest(lsplit[1:])
1354 1354 after.setdefault(pos, []).append(l)
1355 1355 elif l.startswith(b'#else'):
1356 1356 if skipping is None:
1357 1357 after.setdefault(pos, []).append(' !!! missing #if\n')
1358 1358 skipping = not skipping
1359 1359 after.setdefault(pos, []).append(l)
1360 1360 elif l.startswith(b'#endif'):
1361 1361 if skipping is None:
1362 1362 after.setdefault(pos, []).append(' !!! missing #if\n')
1363 1363 skipping = None
1364 1364 after.setdefault(pos, []).append(l)
1365 1365 elif skipping:
1366 1366 after.setdefault(pos, []).append(l)
1367 1367 elif l.startswith(b' >>> '): # python inlines
1368 1368 after.setdefault(pos, []).append(l)
1369 1369 prepos = pos
1370 1370 pos = n
1371 1371 if not inpython:
1372 1372 # We've just entered a Python block. Add the header.
1373 1373 inpython = True
1374 1374 addsalt(prepos, False) # Make sure we report the exit code.
1375 1375 script.append(b'%s -m heredoctest <<EOF\n' % PYTHON)
1376 1376 addsalt(n, True)
1377 1377 script.append(l[2:])
1378 1378 elif l.startswith(b' ... '): # python inlines
1379 1379 after.setdefault(prepos, []).append(l)
1380 1380 script.append(l[2:])
1381 1381 elif l.startswith(b' $ '): # commands
1382 1382 if inpython:
1383 1383 script.append(b'EOF\n')
1384 1384 inpython = False
1385 1385 after.setdefault(pos, []).append(l)
1386 1386 prepos = pos
1387 1387 pos = n
1388 1388 addsalt(n, False)
1389 1389 cmd = l[4:].split()
1390 1390 if len(cmd) == 2 and cmd[0] == b'cd':
1391 1391 l = b' $ cd %s || exit 1\n' % cmd[1]
1392 1392 script.append(l[4:])
1393 1393 elif l.startswith(b' > '): # continuations
1394 1394 after.setdefault(prepos, []).append(l)
1395 1395 script.append(l[4:])
1396 1396 elif l.startswith(b' '): # results
1397 1397 # Queue up a list of expected results.
1398 1398 expected.setdefault(pos, []).append(l[2:])
1399 1399 else:
1400 1400 if inpython:
1401 1401 script.append(b'EOF\n')
1402 1402 inpython = False
1403 1403 # Non-command/result. Queue up for merged output.
1404 1404 after.setdefault(pos, []).append(l)
1405 1405
1406 1406 if inpython:
1407 1407 script.append(b'EOF\n')
1408 1408 if skipping is not None:
1409 1409 after.setdefault(pos, []).append(' !!! missing #endif\n')
1410 1410 addsalt(n + 1, False)
1411 1411
1412 1412 return salt, script, after, expected
1413 1413
1414 1414 def _processoutput(self, exitcode, output, salt, after, expected):
1415 1415 # Merge the script output back into a unified test.
1416 1416 warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
1417 1417 if exitcode != 0:
1418 1418 warnonly = 3
1419 1419
1420 1420 pos = -1
1421 1421 postout = []
1422 1422 for l in output:
1423 1423 lout, lcmd = l, None
1424 1424 if salt in l:
1425 1425 lout, lcmd = l.split(salt, 1)
1426 1426
1427 1427 while lout:
1428 1428 if not lout.endswith(b'\n'):
1429 1429 lout += b' (no-eol)\n'
1430 1430
1431 1431 # Find the expected output at the current position.
1432 1432 els = [None]
1433 1433 if expected.get(pos, None):
1434 1434 els = expected[pos]
1435 1435
1436 1436 i = 0
1437 1437 optional = []
1438 1438 while i < len(els):
1439 1439 el = els[i]
1440 1440
1441 1441 r = self.linematch(el, lout)
1442 1442 if isinstance(r, str):
1443 1443 if r == '+glob':
1444 1444 lout = el[:-1] + ' (glob)\n'
1445 1445 r = '' # Warn only this line.
1446 1446 elif r == '-glob':
1447 1447 lout = ''.join(el.rsplit(' (glob)', 1))
1448 1448 r = '' # Warn only this line.
1449 1449 elif r == "retry":
1450 1450 postout.append(b' ' + el)
1451 1451 els.pop(i)
1452 1452 break
1453 1453 else:
1454 1454 log('\ninfo, unknown linematch result: %r\n' % r)
1455 1455 r = False
1456 1456 if r:
1457 1457 els.pop(i)
1458 1458 break
1459 1459 if el:
1460 1460 if el.endswith(b" (?)\n"):
1461 1461 optional.append(i)
1462 1462 else:
1463 1463 m = optline.match(el)
1464 1464 if m:
1465 1465 conditions = [
1466 1466 c for c in m.group(2).split(b' ')]
1467 1467
1468 1468 if not self._iftest(conditions):
1469 1469 optional.append(i)
1470 1470
1471 1471 i += 1
1472 1472
1473 1473 if r:
1474 1474 if r == "retry":
1475 1475 continue
1476 1476 # clean up any optional leftovers
1477 1477 for i in optional:
1478 1478 postout.append(b' ' + els[i])
1479 1479 for i in reversed(optional):
1480 1480 del els[i]
1481 1481 postout.append(b' ' + el)
1482 1482 else:
1483 1483 if self.NEEDESCAPE(lout):
1484 1484 lout = TTest._stringescape(b'%s (esc)\n' %
1485 1485 lout.rstrip(b'\n'))
1486 1486 postout.append(b' ' + lout) # Let diff deal with it.
1487 1487 if r != '': # If line failed.
1488 1488 warnonly = 3 # for sure not
1489 1489 elif warnonly == 1: # Is "not yet" and line is warn only.
1490 1490 warnonly = 2 # Yes do warn.
1491 1491 break
1492 1492 else:
1493 1493 # clean up any optional leftovers
1494 1494 while expected.get(pos, None):
1495 1495 el = expected[pos].pop(0)
1496 1496 if el:
1497 1497 if not el.endswith(b" (?)\n"):
1498 1498 m = optline.match(el)
1499 1499 if m:
1500 1500 conditions = [c for c in m.group(2).split(b' ')]
1501 1501
1502 1502 if self._iftest(conditions):
1503 1503 # Don't append as optional line
1504 1504 continue
1505 1505 else:
1506 1506 continue
1507 1507 postout.append(b' ' + el)
1508 1508
1509 1509 if lcmd:
1510 1510 # Add on last return code.
1511 1511 ret = int(lcmd.split()[1])
1512 1512 if ret != 0:
1513 1513 postout.append(b' [%d]\n' % ret)
1514 1514 if pos in after:
1515 1515 # Merge in non-active test bits.
1516 1516 postout += after.pop(pos)
1517 1517 pos = int(lcmd.split()[0])
1518 1518
1519 1519 if pos in after:
1520 1520 postout += after.pop(pos)
1521 1521
1522 1522 if warnonly == 2:
1523 1523 exitcode = False # Set exitcode to warned.
1524 1524
1525 1525 return exitcode, postout
1526 1526
1527 1527 @staticmethod
1528 1528 def rematch(el, l):
1529 1529 try:
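# wrap el in a non-capturing group so that a top-level '|' in the pattern cannot bypass the \n\Z anchor appended below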
1530 el = b'(?:' + el + b')'
1530 1531 # use \Z to ensure that the regex matches to the end of the string
1531 1532 if os.name == 'nt':
1532 1533 return re.match(el + br'\r?\n\Z', l)
1533 1534 return re.match(el + br'\n\Z', l)
1534 1535 except re.error:
1535 1536 # el is an invalid regex
1536 1537 return False
1537 1538
1538 1539 @staticmethod
1539 1540 def globmatch(el, l):
1540 1541 # The only supported special characters are * and ? plus / which also
1541 1542 # matches \ on windows. Escaping of these characters is supported.
1542 1543 if el + b'\n' == l:
1543 1544 if os.altsep:
1544 1545 # matching on "/" is not needed for this line
1545 1546 for pat in checkcodeglobpats:
1546 1547 if pat.match(el):
1547 1548 return True
1548 1549 return b'-glob'
1549 1550 return True
1550 1551 el = el.replace(b'$LOCALIP', b'*')
1551 1552 i, n = 0, len(el)
1552 1553 res = b''
1553 1554 while i < n:
1554 1555 c = el[i:i + 1]
1555 1556 i += 1
1556 1557 if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
1557 1558 res += el[i - 1:i + 1]
1558 1559 i += 1
1559 1560 elif c == b'*':
1560 1561 res += b'.*'
1561 1562 elif c == b'?':
1562 1563 res += b'.'
1563 1564 elif c == b'/' and os.altsep:
1564 1565 res += b'[/\\\\]'
1565 1566 else:
1566 1567 res += re.escape(c)
1567 1568 return TTest.rematch(res, l)
1568 1569
1569 1570 def linematch(self, el, l):
1570 1571 retry = False
1571 1572 if el == l: # perfect match (fast)
1572 1573 return True
1573 1574 if el:
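 # Lines flagged with a trailing " (?)" or with an unmet
 # "(feature !)" condition are optional: report "retry" instead of
 # a hard mismatch when they do not match the output.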
1574 1575 if el.endswith(b" (?)\n"):
1575 1576 retry = "retry"
1576 1577 el = el[:-5] + b"\n"
1577 1578 else:
1578 1579 m = optline.match(el)
1579 1580 if m:
1580 1581 conditions = [c for c in m.group(2).split(b' ')]
1581 1582
1582 1583 el = m.group(1) + b"\n"
1583 1584 if not self._iftest(conditions):
1584 1585 retry = "retry" # Not required by listed features
1585 1586
1586 1587 if el.endswith(b" (esc)\n"):
1587 1588 if PYTHON3:
1588 1589 el = el[:-7].decode('unicode_escape') + '\n'
1589 1590 el = el.encode('utf-8')
1590 1591 else:
1591 1592 el = el[:-7].decode('string-escape') + '\n'
1592 1593 if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
1593 1594 return True
1594 1595 if el.endswith(b" (re)\n"):
1595 1596 return TTest.rematch(el[:-6], l) or retry
1596 1597 if el.endswith(b" (glob)\n"):
1597 1598 # ignore '(glob)' added to l by 'replacements'
1598 1599 if l.endswith(b" (glob)\n"):
1599 1600 l = l[:-8] + b"\n"
1600 1601 return TTest.globmatch(el[:-8], l) or retry
1601 1602 if os.altsep and l.replace(b'\\', b'/') == el:
1602 1603 return b'+glob'
1603 1604 return retry
1604 1605
1605 1606 @staticmethod
1606 1607 def parsehghaveoutput(lines):
1607 1608 '''Parse hghave log lines.
1608 1609
1609 1610 Return tuple of lists (missing, failed):
1610 1611 * the missing/unknown features
1611 1612 * the features for which existence check failed'''
1612 1613 missing = []
1613 1614 failed = []
1614 1615 for line in lines:
1615 1616 if line.startswith(TTest.SKIPPED_PREFIX):
1616 1617 line = line.splitlines()[0]
1617 1618 missing.append(line[len(TTest.SKIPPED_PREFIX):].decode('utf-8'))
1618 1619 elif line.startswith(TTest.FAILED_PREFIX):
1619 1620 line = line.splitlines()[0]
1620 1621 failed.append(line[len(TTest.FAILED_PREFIX):].decode('utf-8'))
1621 1622
1622 1623 return missing, failed
1623 1624
1624 1625 @staticmethod
1625 1626 def _escapef(m):
1626 1627 return TTest.ESCAPEMAP[m.group(0)]
1627 1628
1628 1629 @staticmethod
1629 1630 def _stringescape(s):
1630 1631 return TTest.ESCAPESUB(TTest._escapef, s)
1631 1632
1632 1633 iolock = threading.RLock()
1633 1634
1634 1635 class TestResult(unittest._TextTestResult):
1635 1636 """Holds results when executing via unittest."""
1636 1637 # Don't worry too much about accessing the non-public _TextTestResult.
1637 1638 # It is relatively common in Python testing tools.
1638 1639 def __init__(self, options, *args, **kwargs):
1639 1640 super(TestResult, self).__init__(*args, **kwargs)
1640 1641
1641 1642 self._options = options
1642 1643
1643 1644 # unittest.TestResult didn't have skipped until 2.7. We need to
1644 1645 # polyfill it.
1645 1646 self.skipped = []
1646 1647
1647 1648 # We have a custom "ignored" result that isn't present in any Python
1648 1649 # unittest implementation. It is very similar to skipped. It may make
1649 1650 # sense to map it into skip some day.
1650 1651 self.ignored = []
1651 1652
1652 1653 self.times = []
1653 1654 self._firststarttime = None
1654 1655 # Data stored for the benefit of generating xunit reports.
1655 1656 self.successes = []
1656 1657 self.faildata = {}
1657 1658
1658 1659 if options.color == 'auto':
1659 1660 self.color = pygmentspresent and self.stream.isatty()
1660 1661 elif options.color == 'never':
1661 1662 self.color = False
1662 1663 else: # 'always', for testing purposes
1663 1664 self.color = pygmentspresent
1664 1665
1665 1666 def addFailure(self, test, reason):
1666 1667 self.failures.append((test, reason))
1667 1668
1668 1669 if self._options.first:
1669 1670 self.stop()
1670 1671 else:
1671 1672 with iolock:
1672 1673 if reason == "timed out":
1673 1674 self.stream.write('t')
1674 1675 else:
1675 1676 if not self._options.nodiff:
1676 1677 self.stream.write('\n')
1677 1678 # Exclude the '\n' from highlighting to lex correctly
1678 1679 formatted = 'ERROR: %s output changed\n' % test
1679 1680 self.stream.write(highlightmsg(formatted, self.color))
1680 1681 self.stream.write('!')
1681 1682
1682 1683 self.stream.flush()
1683 1684
1684 1685 def addSuccess(self, test):
1685 1686 with iolock:
1686 1687 super(TestResult, self).addSuccess(test)
1687 1688 self.successes.append(test)
1688 1689
1689 1690 def addError(self, test, err):
1690 1691 super(TestResult, self).addError(test, err)
1691 1692 if self._options.first:
1692 1693 self.stop()
1693 1694
1694 1695 # Polyfill.
1695 1696 def addSkip(self, test, reason):
1696 1697 self.skipped.append((test, reason))
1697 1698 with iolock:
1698 1699 if self.showAll:
1699 1700 self.stream.writeln('skipped %s' % reason)
1700 1701 else:
1701 1702 self.stream.write('s')
1702 1703 self.stream.flush()
1703 1704
1704 1705 def addIgnore(self, test, reason):
1705 1706 self.ignored.append((test, reason))
1706 1707 with iolock:
1707 1708 if self.showAll:
1708 1709 self.stream.writeln('ignored %s' % reason)
1709 1710 else:
1710 1711 if reason not in ('not retesting', "doesn't match keyword"):
1711 1712 self.stream.write('i')
1712 1713 else:
1713 1714 self.testsRun += 1
1714 1715 self.stream.flush()
1715 1716
1716 1717 def addOutputMismatch(self, test, ret, got, expected):
1717 1718 """Record a mismatch in test output for a particular test."""
1718 1719 if self.shouldStop:
1719 1720 # don't print, some other test case already failed and
1720 1721 # printed, we're just stale and probably failed due to our
1721 1722 # temp dir getting cleaned up.
1722 1723 return
1723 1724
1724 1725 accepted = False
1725 1726 lines = []
1726 1727
1727 1728 with iolock:
1728 1729 if self._options.nodiff:
1729 1730 pass
1730 1731 elif self._options.view:
1731 1732 v = self._options.view
1732 1733 if PYTHON3:
1733 1734 v = _bytespath(v)
1734 1735 os.system(b"%s %s %s" %
1735 1736 (v, test.refpath, test.errpath))
1736 1737 else:
1737 1738 servefail, lines = getdiff(expected, got,
1738 1739 test.refpath, test.errpath)
1739 1740 if servefail:
1740 1741 raise test.failureException(
1741 1742 'server failed to start (HGPORT=%s)' % test._startport)
1742 1743 else:
1743 1744 self.stream.write('\n')
1744 1745 for line in lines:
1745 1746 line = highlightdiff(line, self.color)
1746 1747 if PYTHON3:
1747 1748 self.stream.flush()
1748 1749 self.stream.buffer.write(line)
1749 1750 self.stream.buffer.flush()
1750 1751 else:
1751 1752 self.stream.write(line)
1752 1753 self.stream.flush()
1753 1754
1754 1755 # handle interactive prompt without releasing iolock
1755 1756 if self._options.interactive:
1756 1757 if test.readrefout() != expected:
1757 1758 self.stream.write(
1758 1759 'Reference output has changed (run again to prompt '
1759 1760 'changes)')
1760 1761 else:
1761 1762 self.stream.write('Accept this change? [n] ')
1762 1763 answer = sys.stdin.readline().strip()
1763 1764 if answer.lower() in ('y', 'yes'):
1764 1765 if test.path.endswith(b'.t'):
1765 1766 rename(test.errpath, test.path)
1766 1767 else:
1767 1768 rename(test.errpath, '%s.out' % test.path)
1768 1769 accepted = True
1769 1770 if not accepted:
1770 1771 self.faildata[test.name] = b''.join(lines)
1771 1772
1772 1773 return accepted
1773 1774
1774 1775 def startTest(self, test):
1775 1776 super(TestResult, self).startTest(test)
1776 1777
1777 1778 # os.times() reports the user and system time spent by child
1778 1779 # processes, along with the real elapsed time of this process.
1779 1780 # Its one limitation is that the child-process fields are only
1780 1781 # filled in on Unix-like systems, not on Windows.
1781 1782 test.started = os.times()
1782 1783 if self._firststarttime is None: # thread racy but irrelevant
1783 1784 self._firststarttime = test.started[4]
1784 1785
1785 1786 def stopTest(self, test, interrupted=False):
1786 1787 super(TestResult, self).stopTest(test)
1787 1788
1788 1789 test.stopped = os.times()
1789 1790
1790 1791 starttime = test.started
1791 1792 endtime = test.stopped
1792 1793 origin = self._firststarttime
1793 1794 self.times.append((test.name,
1794 1795 endtime[2] - starttime[2], # user space CPU time
1795 1796 endtime[3] - starttime[3], # sys space CPU time
1796 1797 endtime[4] - starttime[4], # real time
1797 1798 starttime[4] - origin, # start date in run context
1798 1799 endtime[4] - origin, # end date in run context
1799 1800 ))
1800 1801
1801 1802 if interrupted:
1802 1803 with iolock:
1803 1804 self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
1804 1805 test.name, self.times[-1][3]))
1805 1806
1806 1807 class TestSuite(unittest.TestSuite):
1807 1808 """Custom unittest TestSuite that knows how to execute Mercurial tests."""
1808 1809
1809 1810 def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
1810 1811 retest=False, keywords=None, loop=False, runs_per_test=1,
1811 1812 loadtest=None, showchannels=False,
1812 1813 *args, **kwargs):
1813 1814 """Create a new instance that can run tests with a configuration.
1814 1815
1815 1816 testdir specifies the directory where tests are executed from. This
1816 1817 is typically the ``tests`` directory from Mercurial's source
1817 1818 repository.
1818 1819
1819 1820 jobs specifies the number of jobs to run concurrently. Each test
1820 1821 executes on its own thread. Tests actually spawn new processes, so
1821 1822 state mutation should not be an issue.
1822 1823
1823 1824 If there is only one job, it will use the main thread.
1824 1825
1825 1826 whitelist and blacklist denote tests that have been whitelisted and
1826 1827 blacklisted, respectively. These arguments don't belong in TestSuite.
1827 1828 Instead, whitelist and blacklist should be handled by the thing that
1828 1829 populates the TestSuite with tests. They are present to preserve
1829 1830 backwards compatible behavior which reports skipped tests as part
1830 1831 of the results.
1831 1832
1832 1833 retest denotes whether to retest failed tests. This arguably belongs
1833 1834 outside of TestSuite.
1834 1835
1835 1836 keywords denotes key words that will be used to filter which tests
1836 1837 to execute. This arguably belongs outside of TestSuite.
1837 1838
1838 1839 loop denotes whether to loop over tests forever.
1839 1840 """
1840 1841 super(TestSuite, self).__init__(*args, **kwargs)
1841 1842
1842 1843 self._jobs = jobs
1843 1844 self._whitelist = whitelist
1844 1845 self._blacklist = blacklist
1845 1846 self._retest = retest
1846 1847 self._keywords = keywords
1847 1848 self._loop = loop
1848 1849 self._runs_per_test = runs_per_test
1849 1850 self._loadtest = loadtest
1850 1851 self._showchannels = showchannels
1851 1852
1852 1853 def run(self, result):
1853 1854 # We have a number of filters that need to be applied. We do this
1854 1855 # here instead of inside Test because it makes the running logic for
1855 1856 # Test simpler.
1856 1857 tests = []
1857 1858 num_tests = [0]
1858 1859 for test in self._tests:
1859 1860 def get():
1860 1861 num_tests[0] += 1
1861 1862 if getattr(test, 'should_reload', False):
1862 1863 return self._loadtest(test, num_tests[0])
1863 1864 return test
1864 1865 if not os.path.exists(test.path):
1865 1866 result.addSkip(test, "Doesn't exist")
1866 1867 continue
1867 1868
1868 1869 if not (self._whitelist and test.bname in self._whitelist):
1869 1870 if self._blacklist and test.bname in self._blacklist:
1870 1871 result.addSkip(test, 'blacklisted')
1871 1872 continue
1872 1873
1873 1874 if self._retest and not os.path.exists(test.errpath):
1874 1875 result.addIgnore(test, 'not retesting')
1875 1876 continue
1876 1877
1877 1878 if self._keywords:
1878 1879 f = open(test.path, 'rb')
1879 1880 t = f.read().lower() + test.bname.lower()
1880 1881 f.close()
1881 1882 ignored = False
1882 1883 for k in self._keywords.lower().split():
1883 1884 if k not in t:
1884 1885 result.addIgnore(test, "doesn't match keyword")
1885 1886 ignored = True
1886 1887 break
1887 1888
1888 1889 if ignored:
1889 1890 continue
1890 1891 for _ in xrange(self._runs_per_test):
1891 1892 tests.append(get())
1892 1893
1893 1894 runtests = list(tests)
1894 1895 done = queue.Queue()
1895 1896 running = 0
1896 1897
1897 1898 channels = [""] * self._jobs
1898 1899
1899 1900 def job(test, result):
1900 1901 for n, v in enumerate(channels):
1901 1902 if not v:
1902 1903 channel = n
1903 1904 break
1904 1905 else:
1905 1906 raise ValueError('Could not find output channel')
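 # show this test in the channel display, stripping its "test-"
 # prefix and file extension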
1906 1907 channels[channel] = "=" + test.name[5:].split(".")[0]
1907 1908 try:
1908 1909 test(result)
1909 1910 done.put(None)
1910 1911 except KeyboardInterrupt:
1911 1912 pass
1912 1913 except: # re-raises
1913 1914 done.put(('!', test, 'run-test raised an error, see traceback'))
1914 1915 raise
1915 1916 finally:
1916 1917 try:
1917 1918 channels[channel] = ''
1918 1919 except IndexError:
1919 1920 pass
1920 1921
1921 1922 def stat():
1922 1923 count = 0
1923 1924 while channels:
1924 1925 d = '\n%03s ' % count
1925 1926 for n, v in enumerate(channels):
1926 1927 if v:
1927 1928 d += v[0]
1928 1929 channels[n] = v[1:] or '.'
1929 1930 else:
1930 1931 d += ' '
1931 1932 d += ' '
1932 1933 with iolock:
1933 1934 sys.stdout.write(d + ' ')
1934 1935 sys.stdout.flush()
1935 1936 for x in xrange(10):
1936 1937 if channels:
1937 1938 time.sleep(.1)
1938 1939 count += 1
1939 1940
1940 1941 stoppedearly = False
1941 1942
1942 1943 if self._showchannels:
1943 1944 statthread = threading.Thread(target=stat, name="stat")
1944 1945 statthread.start()
1945 1946
1946 1947 try:
1947 1948 while tests or running:
1948 1949 if not done.empty() or running == self._jobs or not tests:
1949 1950 try:
1950 1951 done.get(True, 1)
1951 1952 running -= 1
1952 1953 if result and result.shouldStop:
1953 1954 stoppedearly = True
1954 1955 break
1955 1956 except queue.Empty:
1956 1957 continue
1957 1958 if tests and not running == self._jobs:
1958 1959 test = tests.pop(0)
1959 1960 if self._loop:
1960 1961 if getattr(test, 'should_reload', False):
1961 1962 num_tests[0] += 1
1962 1963 tests.append(
1963 1964 self._loadtest(test, num_tests[0]))
1964 1965 else:
1965 1966 tests.append(test)
1966 1967 if self._jobs == 1:
1967 1968 job(test, result)
1968 1969 else:
1969 1970 t = threading.Thread(target=job, name=test.name,
1970 1971 args=(test, result))
1971 1972 t.start()
1972 1973 running += 1
1973 1974
1974 1975 # If we stop early we still need to wait on started tests to
1975 1976 # finish. Otherwise, there is a race between the test completing
1976 1977 # and the test's cleanup code running. This could result in the
1977 1978 # test reporting incorrectly.
1978 1979 if stoppedearly:
1979 1980 while running:
1980 1981 try:
1981 1982 done.get(True, 1)
1982 1983 running -= 1
1983 1984 except queue.Empty:
1984 1985 continue
1985 1986 except KeyboardInterrupt:
1986 1987 for test in runtests:
1987 1988 test.abort()
1988 1989
1989 1990 channels = []
1990 1991
1991 1992 return result
1992 1993
1993 1994 # Save the most recent 5 wall-clock runtimes of each test to a
1994 1995 # human-readable text file named .testtimes. Tests are sorted
1995 1996 # alphabetically, while times for each test are listed from oldest to
1996 1997 # newest.
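 # Illustrative contents (file names and times are made up):
 #
 #   test-example-a.t 1.234 1.240 1.198
 #   test-example-b.t 0.512 0.530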
1997 1998
1998 1999 def loadtimes(outputdir):
1999 2000 times = []
2000 2001 try:
2001 2002 with open(os.path.join(outputdir, b'.testtimes-')) as fp:
2002 2003 for line in fp:
2003 2004 ts = line.split()
2004 2005 times.append((ts[0], [float(t) for t in ts[1:]]))
2005 2006 except IOError as err:
2006 2007 if err.errno != errno.ENOENT:
2007 2008 raise
2008 2009 return times
2009 2010
2010 2011 def savetimes(outputdir, result):
2011 2012 saved = dict(loadtimes(outputdir))
2012 2013 maxruns = 5
2013 2014 skipped = set([str(t[0]) for t in result.skipped])
2014 2015 for tdata in result.times:
2015 2016 test, real = tdata[0], tdata[3]
2016 2017 if test not in skipped:
2017 2018 ts = saved.setdefault(test, [])
2018 2019 ts.append(real)
2019 2020 ts[:] = ts[-maxruns:]
2020 2021
2021 2022 fd, tmpname = tempfile.mkstemp(prefix=b'.testtimes',
2022 2023 dir=outputdir, text=True)
2023 2024 with os.fdopen(fd, 'w') as fp:
2024 2025 for name, ts in sorted(saved.items()):
2025 2026 fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts])))
2026 2027 timepath = os.path.join(outputdir, b'.testtimes')
2027 2028 try:
2028 2029 os.unlink(timepath)
2029 2030 except OSError:
2030 2031 pass
2031 2032 try:
2032 2033 os.rename(tmpname, timepath)
2033 2034 except OSError:
2034 2035 pass
2035 2036
2036 2037 class TextTestRunner(unittest.TextTestRunner):
2037 2038 """Custom unittest test runner that uses appropriate settings."""
2038 2039
2039 2040 def __init__(self, runner, *args, **kwargs):
2040 2041 super(TextTestRunner, self).__init__(*args, **kwargs)
2041 2042
2042 2043 self._runner = runner
2043 2044
2044 2045 def listtests(self, test):
2045 2046 result = TestResult(self._runner.options, self.stream,
2046 2047 self.descriptions, 0)
2047 2048 test = sorted(test, key=lambda t: t.name)
2048 2049 for t in test:
2049 2050 print(t.name)
2050 2051 result.addSuccess(t)
2051 2052
2052 2053 if self._runner.options.xunit:
2053 2054 with open(self._runner.options.xunit, "wb") as xuf:
2054 2055 self._writexunit(result, xuf)
2055 2056
2056 2057 if self._runner.options.json:
2057 2058 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2058 2059 with open(jsonpath, 'w') as fp:
2059 2060 self._writejson(result, fp)
2060 2061
2061 2062 return result
2062 2063
2063 2064 def run(self, test):
2064 2065 result = TestResult(self._runner.options, self.stream,
2065 2066 self.descriptions, self.verbosity)
2066 2067
2067 2068 test(result)
2068 2069
2069 2070 failed = len(result.failures)
2070 2071 skipped = len(result.skipped)
2071 2072 ignored = len(result.ignored)
2072 2073
2073 2074 with iolock:
2074 2075 self.stream.writeln('')
2075 2076
2076 2077 if not self._runner.options.noskips:
2077 2078 for test, msg in result.skipped:
2078 2079 formatted = 'Skipped %s: %s\n' % (test.name, msg)
2079 2080 self.stream.write(highlightmsg(formatted, result.color))
2080 2081 for test, msg in result.failures:
2081 2082 formatted = 'Failed %s: %s\n' % (test.name, msg)
2082 2083 self.stream.write(highlightmsg(formatted, result.color))
2083 2084 for test, msg in result.errors:
2084 2085 self.stream.writeln('Errored %s: %s' % (test.name, msg))
2085 2086
2086 2087 if self._runner.options.xunit:
2087 2088 with open(self._runner.options.xunit, "wb") as xuf:
2088 2089 self._writexunit(result, xuf)
2089 2090
2090 2091 if self._runner.options.json:
2091 2092 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2092 2093 with open(jsonpath, 'w') as fp:
2093 2094 self._writejson(result, fp)
2094 2095
2095 2096 self._runner._checkhglib('Tested')
2096 2097
2097 2098 savetimes(self._runner._outputdir, result)
2098 2099
2099 2100 if failed and self._runner.options.known_good_rev:
2100 2101 self._bisecttests(t for t, m in result.failures)
2101 2102 self.stream.writeln(
2102 2103 '# Ran %d tests, %d skipped, %d failed.'
2103 2104 % (result.testsRun, skipped + ignored, failed))
2104 2105 if failed:
2105 2106 self.stream.writeln('python hash seed: %s' %
2106 2107 os.environ['PYTHONHASHSEED'])
2107 2108 if self._runner.options.time:
2108 2109 self.printtimes(result.times)
2109 2110 self.stream.flush()
2110 2111
2111 2112 return result
2112 2113
2113 2114 def _bisecttests(self, tests):
2114 2115 bisectcmd = ['hg', 'bisect']
2115 2116 bisectrepo = self._runner.options.bisect_repo
2116 2117 if bisectrepo:
2117 2118 bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
2118 2119 def pread(args):
2119 2120 env = os.environ.copy()
2120 2121 env['HGPLAIN'] = '1'
2121 2122 p = subprocess.Popen(args, stderr=subprocess.STDOUT,
2122 2123 stdout=subprocess.PIPE, env=env)
2123 2124 data = p.stdout.read()
2124 2125 p.wait()
2125 2126 return data
2126 2127 for test in tests:
2127 2128 pread(bisectcmd + ['--reset']),
2128 2129 pread(bisectcmd + ['--bad', '.'])
2129 2130 pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
2130 2131 # TODO: we probably need to forward more options
2131 2132 # that alter hg's behavior inside the tests.
2132 2133 opts = ''
2133 2134 withhg = self._runner.options.with_hg
2134 2135 if withhg:
2135 2136 opts += ' --with-hg=%s ' % shellquote(_strpath(withhg))
2136 2137 rtc = '%s %s %s %s' % (sys.executable, sys.argv[0], opts,
2137 2138 test)
2138 2139 data = pread(bisectcmd + ['--command', rtc])
2139 2140 m = re.search(
2140 2141 (br'\nThe first (?P<goodbad>bad|good) revision '
2141 2142 br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
2142 2143 br'summary: +(?P<summary>[^\n]+)\n'),
2143 2144 data, (re.MULTILINE | re.DOTALL))
2144 2145 if m is None:
2145 2146 self.stream.writeln(
2146 2147 'Failed to identify failure point for %s' % test)
2147 2148 continue
2148 2149 dat = m.groupdict()
2149 2150 verb = 'broken' if dat['goodbad'] == 'bad' else 'fixed'
2150 2151 self.stream.writeln(
2151 2152 '%s %s by %s (%s)' % (
2152 2153 test, verb, dat['node'], dat['summary']))
2153 2154
2154 2155 def printtimes(self, times):
2155 2156 # iolock held by run
2156 2157 self.stream.writeln('# Producing time report')
2157 2158 times.sort(key=lambda t: (t[3]))
2158 2159 cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
2159 2160 self.stream.writeln('%-7s %-7s %-7s %-7s %-7s %s' %
2160 2161 ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
2161 2162 for tdata in times:
2162 2163 test = tdata[0]
2163 2164 cuser, csys, real, start, end = tdata[1:6]
2164 2165 self.stream.writeln(cols % (start, end, cuser, csys, real, test))
2165 2166
2166 2167 @staticmethod
2167 2168 def _writexunit(result, outf):
2168 2169 # See http://llg.cubic.org/docs/junit/ for a reference.
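 # result.times entries are (name, cuser, csys, real, start, end);
 # map each test name to its real (wall-clock) time.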
2169 2170 timesd = dict((t[0], t[3]) for t in result.times)
2170 2171 doc = minidom.Document()
2171 2172 s = doc.createElement('testsuite')
2172 2173 s.setAttribute('name', 'run-tests')
2173 2174 s.setAttribute('tests', str(result.testsRun))
2174 2175 s.setAttribute('errors', "0") # TODO
2175 2176 s.setAttribute('failures', str(len(result.failures)))
2176 2177 s.setAttribute('skipped', str(len(result.skipped) +
2177 2178 len(result.ignored)))
2178 2179 doc.appendChild(s)
2179 2180 for tc in result.successes:
2180 2181 t = doc.createElement('testcase')
2181 2182 t.setAttribute('name', tc.name)
2182 2183 tctime = timesd.get(tc.name)
2183 2184 if tctime is not None:
2184 2185 t.setAttribute('time', '%.3f' % tctime)
2185 2186 s.appendChild(t)
2186 2187 for tc, err in sorted(result.faildata.items()):
2187 2188 t = doc.createElement('testcase')
2188 2189 t.setAttribute('name', tc)
2189 2190 tctime = timesd.get(tc)
2190 2191 if tctime is not None:
2191 2192 t.setAttribute('time', '%.3f' % tctime)
2192 2193 # createCDATASection expects a unicode or it will
2193 2194 # convert using default conversion rules, which will
2194 2195 # fail if string isn't ASCII.
2195 2196 err = cdatasafe(err).decode('utf-8', 'replace')
2196 2197 cd = doc.createCDATASection(err)
2197 2198 # Use 'failure' here instead of 'error' to match errors = 0,
2198 2199 # failures = len(result.failures) in the testsuite element.
2199 2200 failelem = doc.createElement('failure')
2200 2201 failelem.setAttribute('message', 'output changed')
2201 2202 failelem.setAttribute('type', 'output-mismatch')
2202 2203 failelem.appendChild(cd)
2203 2204 t.appendChild(failelem)
2204 2205 s.appendChild(t)
2205 2206 for tc, message in result.skipped:
2206 2207 # According to the schema, 'skipped' has no attributes. So store
2207 2208 # the skip message as a text node instead.
2208 2209 t = doc.createElement('testcase')
2209 2210 t.setAttribute('name', tc.name)
2210 2211 binmessage = message.encode('utf-8')
2211 2212 message = cdatasafe(binmessage).decode('utf-8', 'replace')
2212 2213 cd = doc.createCDATASection(message)
2213 2214 skipelem = doc.createElement('skipped')
2214 2215 skipelem.appendChild(cd)
2215 2216 t.appendChild(skipelem)
2216 2217 s.appendChild(t)
2217 2218 outf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
2218 2219
2219 2220 @staticmethod
2220 2221 def _writejson(result, outf):
2221 2222 timesd = {}
2222 2223 for tdata in result.times:
2223 2224 test = tdata[0]
2224 2225 timesd[test] = tdata[1:]
2225 2226
2226 2227 outcome = {}
2227 2228 groups = [('success', ((tc, None)
2228 2229 for tc in result.successes)),
2229 2230 ('failure', result.failures),
2230 2231 ('skip', result.skipped)]
2231 2232 for res, testcases in groups:
2232 2233 for tc, __ in testcases:
2233 2234 if tc.name in timesd:
2234 2235 diff = result.faildata.get(tc.name, b'')
2235 2236 try:
2236 2237 diff = diff.decode('unicode_escape')
2237 2238 except UnicodeDecodeError as e:
2238 2239 diff = '%r decoding diff, sorry' % e
2239 2240 tres = {'result': res,
2240 2241 'time': ('%0.3f' % timesd[tc.name][2]),
2241 2242 'cuser': ('%0.3f' % timesd[tc.name][0]),
2242 2243 'csys': ('%0.3f' % timesd[tc.name][1]),
2243 2244 'start': ('%0.3f' % timesd[tc.name][3]),
2244 2245 'end': ('%0.3f' % timesd[tc.name][4]),
2245 2246 'diff': diff,
2246 2247 }
2247 2248 else:
2248 2249 # blacklisted test
2249 2250 tres = {'result': res}
2250 2251
2251 2252 outcome[tc.name] = tres
2252 2253 jsonout = json.dumps(outcome, sort_keys=True, indent=4,
2253 2254 separators=(',', ': '))
2254 2255 outf.writelines(("testreport =", jsonout))
2255 2256
2256 2257 class TestRunner(object):
2257 2258 """Holds context for executing tests.
2258 2259
2259 2260 Tests rely on a lot of state. This object holds it for them.
2260 2261 """
2261 2262
2262 2263 # Programs required to run tests.
2263 2264 REQUIREDTOOLS = [
2264 2265 b'diff',
2265 2266 b'grep',
2266 2267 b'unzip',
2267 2268 b'gunzip',
2268 2269 b'bunzip2',
2269 2270 b'sed',
2270 2271 ]
2271 2272
2272 2273 # Maps file extensions to test class.
2273 2274 TESTTYPES = [
2274 2275 (b'.py', PythonTest),
2275 2276 (b'.t', TTest),
2276 2277 ]
2277 2278
2278 2279 def __init__(self):
2279 2280 self.options = None
2280 2281 self._hgroot = None
2281 2282 self._testdir = None
2282 2283 self._outputdir = None
2283 2284 self._hgtmp = None
2284 2285 self._installdir = None
2285 2286 self._bindir = None
2286 2287 self._tmpbinddir = None
2287 2288 self._pythondir = None
2288 2289 self._coveragefile = None
2289 2290 self._createdfiles = []
2290 2291 self._hgcommand = None
2291 2292 self._hgpath = None
2292 2293 self._portoffset = 0
2293 2294 self._ports = {}
2294 2295
2295 2296 def run(self, args, parser=None):
2296 2297 """Run the test suite."""
2297 2298 oldmask = os.umask(0o22)
2298 2299 try:
2299 2300 parser = parser or getparser()
2300 2301 options, args = parseargs(args, parser)
2301 2302 # positional arguments are paths to test files to run, so
2302 2303 # we make sure they're all bytestrings
2303 2304 args = [_bytespath(a) for a in args]
2304 2305 if options.test_list is not None:
2305 2306 for listfile in options.test_list:
2306 2307 with open(listfile, 'rb') as f:
2307 2308 args.extend(t for t in f.read().splitlines() if t)
2308 2309 self.options = options
2309 2310
2310 2311 self._checktools()
2311 2312 testdescs = self.findtests(args)
2312 2313 if options.profile_runner:
2313 2314 import statprof
2314 2315 statprof.start()
2315 2316 result = self._run(testdescs)
2316 2317 if options.profile_runner:
2317 2318 statprof.stop()
2318 2319 statprof.display()
2319 2320 return result
2320 2321
2321 2322 finally:
2322 2323 os.umask(oldmask)
2323 2324
2324 2325 def _run(self, testdescs):
2325 2326 if self.options.random:
2326 2327 random.shuffle(testdescs)
2327 2328 else:
2328 2329 # keywords for slow tests
2329 2330 slow = {b'svn': 10,
2330 2331 b'cvs': 10,
2331 2332 b'hghave': 10,
2332 2333 b'largefiles-update': 10,
2333 2334 b'run-tests': 10,
2334 2335 b'corruption': 10,
2335 2336 b'race': 10,
2336 2337 b'i18n': 10,
2337 2338 b'check': 100,
2338 2339 b'gendoc': 100,
2339 2340 b'contrib-perf': 200,
2340 2341 }
2341 2342 perf = {}
2342 2343 def sortkey(f):
2343 2344 # run largest tests first, as they tend to take the longest
2344 2345 f = f['path']
2345 2346 try:
2346 2347 return perf[f]
2347 2348 except KeyError:
2348 2349 try:
2349 2350 val = -os.stat(f).st_size
2350 2351 except OSError as e:
2351 2352 if e.errno != errno.ENOENT:
2352 2353 raise
2353 2354 perf[f] = -1e9 # file does not exist, tell early
2354 2355 return -1e9
2355 2356 for kw, mul in slow.items():
2356 2357 if kw in f:
2357 2358 val *= mul
2358 2359 if f.endswith(b'.py'):
2359 2360 val /= 10.0
2360 2361 perf[f] = val / 1000.0
2361 2362 return perf[f]
2362 2363 testdescs.sort(key=sortkey)
2363 2364
2364 2365 self._testdir = osenvironb[b'TESTDIR'] = getattr(
2365 2366 os, 'getcwdb', os.getcwd)()
2366 2367 # assume all tests in same folder for now
2367 2368 if testdescs:
2368 2369 pathname = os.path.dirname(testdescs[0]['path'])
2369 2370 if pathname:
2370 2371 osenvironb[b'TESTDIR'] = os.path.join(osenvironb[b'TESTDIR'],
2371 2372 pathname)
2372 2373 if self.options.outputdir:
2373 2374 self._outputdir = canonpath(_bytespath(self.options.outputdir))
2374 2375 else:
2375 2376 self._outputdir = self._testdir
2376 2377 if testdescs and pathname:
2377 2378 self._outputdir = os.path.join(self._outputdir, pathname)
2378 2379
2379 2380 if 'PYTHONHASHSEED' not in os.environ:
2380 2381 # use a random python hash seed all the time
2381 2382 # we do the randomness ourselves to know what seed is used
2382 2383 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
2383 2384
2384 2385 if self.options.tmpdir:
2385 2386 self.options.keep_tmpdir = True
2386 2387 tmpdir = _bytespath(self.options.tmpdir)
2387 2388 if os.path.exists(tmpdir):
2388 2389 # Meaning of tmpdir has changed since 1.3: we used to create
2389 2390 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
2390 2391 # tmpdir already exists.
2391 2392 print("error: temp dir %r already exists" % tmpdir)
2392 2393 return 1
2393 2394
2394 2395 # Automatically removing tmpdir sounds convenient, but could
2395 2396 # really annoy anyone in the habit of using "--tmpdir=/tmp"
2396 2397 # or "--tmpdir=$HOME".
2397 2398 #vlog("# Removing temp dir", tmpdir)
2398 2399 #shutil.rmtree(tmpdir)
2399 2400 os.makedirs(tmpdir)
2400 2401 else:
2401 2402 d = None
2402 2403 if os.name == 'nt':
2403 2404 # without this, we get the default temp dir location, but
2404 2405 # in all lowercase, which causes trouble with paths (issue3490)
2405 2406 d = osenvironb.get(b'TMP', None)
2406 2407 tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)
2407 2408
2408 2409 self._hgtmp = osenvironb[b'HGTMP'] = (
2409 2410 os.path.realpath(tmpdir))
2410 2411
2411 2412 if self.options.with_hg:
2412 2413 self._installdir = None
2413 2414 whg = self.options.with_hg
2414 2415 self._bindir = os.path.dirname(os.path.realpath(whg))
2415 2416 assert isinstance(self._bindir, bytes)
2416 2417 self._hgcommand = os.path.basename(whg)
2417 2418 self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
2418 2419 os.makedirs(self._tmpbindir)
2419 2420
2420 2421 # This looks redundant with how Python initializes sys.path from
2421 2422 # the location of the script being executed. Needed because the
2422 2423 # "hg" specified by --with-hg is not the only Python script
2423 2424 # executed in the test suite that needs to import 'mercurial'
2424 2425 # ... which means it's not really redundant at all.
2425 2426 self._pythondir = self._bindir
2426 2427 else:
2427 2428 self._installdir = os.path.join(self._hgtmp, b"install")
2428 2429 self._bindir = os.path.join(self._installdir, b"bin")
2429 2430 self._hgcommand = b'hg'
2430 2431 self._tmpbindir = self._bindir
2431 2432 self._pythondir = os.path.join(self._installdir, b"lib", b"python")
2432 2433
2433 2434 # set CHGHG, then replace "hg" command by "chg"
2434 2435 chgbindir = self._bindir
2435 2436 if self.options.chg or self.options.with_chg:
2436 2437 osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
2437 2438 else:
2438 2439 osenvironb.pop(b'CHGHG', None) # drop flag for hghave
2439 2440 if self.options.chg:
2440 2441 self._hgcommand = b'chg'
2441 2442 elif self.options.with_chg:
2442 2443 chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
2443 2444 self._hgcommand = os.path.basename(self.options.with_chg)
2444 2445
2445 2446 osenvironb[b"BINDIR"] = self._bindir
2446 2447 osenvironb[b"PYTHON"] = PYTHON
2447 2448
2448 2449 if self.options.with_python3:
2449 2450 osenvironb[b'PYTHON3'] = self.options.with_python3
2450 2451
2451 2452 fileb = _bytespath(__file__)
2452 2453 runtestdir = os.path.abspath(os.path.dirname(fileb))
2453 2454 osenvironb[b'RUNTESTDIR'] = runtestdir
2454 2455 if PYTHON3:
2455 2456 sepb = _bytespath(os.pathsep)
2456 2457 else:
2457 2458 sepb = os.pathsep
2458 2459 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
2459 2460 if os.path.islink(__file__):
2460 2461 # test helper will likely be at the end of the symlink
2461 2462 realfile = os.path.realpath(fileb)
2462 2463 realdir = os.path.abspath(os.path.dirname(realfile))
2463 2464 path.insert(2, realdir)
2464 2465 if chgbindir != self._bindir:
2465 2466 path.insert(1, chgbindir)
2466 2467 if self._testdir != runtestdir:
2467 2468 path = [self._testdir] + path
2468 2469 if self._tmpbindir != self._bindir:
2469 2470 path = [self._tmpbindir] + path
2470 2471 osenvironb[b"PATH"] = sepb.join(path)
2471 2472
2472 2473 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
2473 2474 # can run .../tests/run-tests.py test-foo where test-foo
2474 2475 # adds an extension to HGRC. Also include the run-tests.py directory to
2475 2476 # import modules like heredoctest.
2476 2477 pypath = [self._pythondir, self._testdir, runtestdir]
2477 2478 # We have to augment PYTHONPATH, rather than simply replacing
2478 2479 # it, in case external libraries are only available via current
2479 2480 # PYTHONPATH. (In particular, the Subversion bindings on OS X
2480 2481 # are in /opt/subversion.)
2481 2482 oldpypath = osenvironb.get(IMPL_PATH)
2482 2483 if oldpypath:
2483 2484 pypath.append(oldpypath)
2484 2485 osenvironb[IMPL_PATH] = sepb.join(pypath)
2485 2486
2486 2487 if self.options.pure:
2487 2488 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
2488 2489 os.environ["HGMODULEPOLICY"] = "py"
2489 2490
2490 2491 if self.options.allow_slow_tests:
2491 2492 os.environ["HGTEST_SLOW"] = "slow"
2492 2493 elif 'HGTEST_SLOW' in os.environ:
2493 2494 del os.environ['HGTEST_SLOW']
2494 2495
2495 2496 self._coveragefile = os.path.join(self._testdir, b'.coverage')
2496 2497
2497 2498 vlog("# Using TESTDIR", self._testdir)
2498 2499 vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
2499 2500 vlog("# Using HGTMP", self._hgtmp)
2500 2501 vlog("# Using PATH", os.environ["PATH"])
2501 2502 vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
2502 2503 vlog("# Writing to directory", self._outputdir)
2503 2504
2504 2505 try:
2505 2506 return self._runtests(testdescs) or 0
2506 2507 finally:
2507 2508 time.sleep(.1)
2508 2509 self._cleanup()
2509 2510
2510 2511 def findtests(self, args):
2511 2512 """Finds possible test files from arguments.
2512 2513
2513 2514 If you wish to inject custom tests into the test harness, this would
2514 2515 be a good function to monkeypatch or override in a derived class.
2515 2516 """
2516 2517 if not args:
2517 2518 if self.options.changed:
2518 2519 proc = Popen4('hg st --rev "%s" -man0 .' %
2519 2520 self.options.changed, None, 0)
2520 2521 stdout, stderr = proc.communicate()
2521 2522 args = stdout.strip(b'\0').split(b'\0')
2522 2523 else:
2523 2524 args = os.listdir(b'.')
2524 2525
2525 2526 expanded_args = []
2526 2527 for arg in args:
2527 2528 if os.path.isdir(arg):
2528 2529 if not arg.endswith(b'/'):
2529 2530 arg += b'/'
2530 2531 expanded_args.extend([arg + a for a in os.listdir(arg)])
2531 2532 else:
2532 2533 expanded_args.append(arg)
2533 2534 args = expanded_args
2534 2535
2535 2536 tests = []
2536 2537 for t in args:
2537 2538 if not (os.path.basename(t).startswith(b'test-')
2538 2539 and (t.endswith(b'.py') or t.endswith(b'.t'))):
2539 2540 continue
2540 2541 if t.endswith(b'.t'):
2541 2542 # .t file may contain multiple test cases
2542 2543 cases = sorted(parsettestcases(t))
2543 2544 if cases:
2544 2545 tests += [{'path': t, 'case': c} for c in sorted(cases)]
2545 2546 else:
2546 2547 tests.append({'path': t})
2547 2548 else:
2548 2549 tests.append({'path': t})
2549 2550 return tests
2550 2551
2551 2552 def _runtests(self, testdescs):
2552 2553 def _reloadtest(test, i):
2553 2554 # convert a test back to its description dict
2554 2555 desc = {'path': test.path}
2555 2556 case = getattr(test, '_case', None)
2556 2557 if case:
2557 2558 desc['case'] = case
2558 2559 return self._gettest(desc, i)
2559 2560
2560 2561 try:
2561 2562 if self.options.restart:
2562 2563 orig = list(testdescs)
2563 2564 while testdescs:
2564 2565 desc = testdescs[0]
2565 2566 # desc['path'] is a relative path
2566 2567 if 'case' in desc:
2567 2568 errpath = b'%s.%s.err' % (desc['path'], desc['case'])
2568 2569 else:
2569 2570 errpath = b'%s.err' % desc['path']
2570 2571 errpath = os.path.join(self._outputdir, errpath)
2571 2572 if os.path.exists(errpath):
2572 2573 break
2573 2574 testdescs.pop(0)
2574 2575 if not testdescs:
2575 2576 print("running all tests")
2576 2577 testdescs = orig
2577 2578
2578 2579 tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]
2579 2580
2580 2581 failed = False
2581 2582 kws = self.options.keywords
2582 2583 if kws is not None and PYTHON3:
2583 2584 kws = kws.encode('utf-8')
2584 2585
2585 2586 suite = TestSuite(self._testdir,
2586 2587 jobs=self.options.jobs,
2587 2588 whitelist=self.options.whitelisted,
2588 2589 blacklist=self.options.blacklist,
2589 2590 retest=self.options.retest,
2590 2591 keywords=kws,
2591 2592 loop=self.options.loop,
2592 2593 runs_per_test=self.options.runs_per_test,
2593 2594 showchannels=self.options.showchannels,
2594 2595 tests=tests, loadtest=_reloadtest)
2595 2596 verbosity = 1
2596 2597 if self.options.verbose:
2597 2598 verbosity = 2
2598 2599 runner = TextTestRunner(self, verbosity=verbosity)
2599 2600
2600 2601 if self.options.list_tests:
2601 2602 result = runner.listtests(suite)
2602 2603 else:
2603 2604 if self._installdir:
2604 2605 self._installhg()
2605 2606 self._checkhglib("Testing")
2606 2607 else:
2607 2608 self._usecorrectpython()
2608 2609 if self.options.chg:
2609 2610 assert self._installdir
2610 2611 self._installchg()
2611 2612
2612 2613 result = runner.run(suite)
2613 2614
2614 2615 if result.failures:
2615 2616 failed = True
2616 2617
2617 2618 if self.options.anycoverage:
2618 2619 self._outputcoverage()
2619 2620 except KeyboardInterrupt:
2620 2621 failed = True
2621 2622 print("\ninterrupted!")
2622 2623
2623 2624 if failed:
2624 2625 return 1
2625 2626
2626 2627 def _getport(self, count):
2627 2628 port = self._ports.get(count) # do we have a cached entry?
2628 2629 if port is None:
2629 2630 portneeded = 3
2630 2631 # after 100 tries we just give up and let the test report the failure
2631 2632 for tries in xrange(100):
2632 2633 allfree = True
2633 2634 port = self.options.port + self._portoffset
2634 2635 for idx in xrange(portneeded):
2635 2636 if not checkportisavailable(port + idx):
2636 2637 allfree = False
2637 2638 break
2638 2639 self._portoffset += portneeded
2639 2640 if allfree:
2640 2641 break
2641 2642 self._ports[count] = port
2642 2643 return port
2643 2644
2644 2645 def _gettest(self, testdesc, count):
2645 2646 """Obtain a Test by looking at its filename.
2646 2647
2647 2648 Returns a Test instance. The Test may not be runnable if it doesn't
2648 2649 map to a known type.
2649 2650 """
2650 2651 path = testdesc['path']
2651 2652 lctest = path.lower()
2652 2653 testcls = Test
2653 2654
2654 2655 for ext, cls in self.TESTTYPES:
2655 2656 if lctest.endswith(ext):
2656 2657 testcls = cls
2657 2658 break
2658 2659
2659 2660 refpath = os.path.join(self._testdir, path)
2660 2661 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
2661 2662
2662 2663 # extra keyword parameters. 'case' is used by .t tests
2663 2664 kwds = dict((k, testdesc[k]) for k in ['case'] if k in testdesc)
2664 2665
2665 2666 t = testcls(refpath, self._outputdir, tmpdir,
2666 2667 keeptmpdir=self.options.keep_tmpdir,
2667 2668 debug=self.options.debug,
2668 2669 timeout=self.options.timeout,
2669 2670 startport=self._getport(count),
2670 2671 extraconfigopts=self.options.extra_config_opt,
2671 2672 py3kwarnings=self.options.py3k_warnings,
2672 2673 shell=self.options.shell,
2673 2674 hgcommand=self._hgcommand,
2674 2675 usechg=bool(self.options.with_chg or self.options.chg),
2675 2676 useipv6=useipv6, **kwds)
2676 2677 t.should_reload = True
2677 2678 return t
2678 2679
2679 2680 def _cleanup(self):
2680 2681 """Clean up state from this test invocation."""
2681 2682 if self.options.keep_tmpdir:
2682 2683 return
2683 2684
2684 2685 vlog("# Cleaning up HGTMP", self._hgtmp)
2685 2686 shutil.rmtree(self._hgtmp, True)
2686 2687 for f in self._createdfiles:
2687 2688 try:
2688 2689 os.remove(f)
2689 2690 except OSError:
2690 2691 pass
2691 2692
2692 2693 def _usecorrectpython(self):
2693 2694 """Configure the environment to use the appropriate Python in tests."""
2694 2695 # Tests must use the same interpreter as us or bad things will happen.
2695 2696 pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
2696 2697 if getattr(os, 'symlink', None):
2697 2698 vlog("# Making python executable in test path a symlink to '%s'" %
2698 2699 sys.executable)
2699 2700 mypython = os.path.join(self._tmpbindir, pyexename)
2700 2701 try:
2701 2702 if os.readlink(mypython) == sys.executable:
2702 2703 return
2703 2704 os.unlink(mypython)
2704 2705 except OSError as err:
2705 2706 if err.errno != errno.ENOENT:
2706 2707 raise
2707 2708 if self._findprogram(pyexename) != sys.executable:
2708 2709 try:
2709 2710 os.symlink(sys.executable, mypython)
2710 2711 self._createdfiles.append(mypython)
2711 2712 except OSError as err:
2712 2713 # child processes may race, which is harmless
2713 2714 if err.errno != errno.EEXIST:
2714 2715 raise
2715 2716 else:
2716 2717 exedir, exename = os.path.split(sys.executable)
2717 2718 vlog("# Modifying search path to find %s as %s in '%s'" %
2718 2719 (exename, pyexename, exedir))
2719 2720 path = os.environ['PATH'].split(os.pathsep)
2720 2721 while exedir in path:
2721 2722 path.remove(exedir)
2722 2723 os.environ['PATH'] = os.pathsep.join([exedir] + path)
2723 2724 if not self._findprogram(pyexename):
2724 2725 print("WARNING: Cannot find %s in search path" % pyexename)
2725 2726
2726 2727 def _installhg(self):
2727 2728 """Install hg into the test environment.
2728 2729
2729 2730 This will also configure hg with the appropriate testing settings.
2730 2731 """
2731 2732 vlog("# Performing temporary installation of HG")
2732 2733 installerrs = os.path.join(self._hgtmp, b"install.err")
2733 2734 compiler = ''
2734 2735 if self.options.compiler:
2735 2736 compiler = '--compiler ' + self.options.compiler
2736 2737 if self.options.pure:
2737 2738 pure = b"--pure"
2738 2739 else:
2739 2740 pure = b""
2740 2741
2741 2742 # Run installer in hg root
2742 2743 script = os.path.realpath(sys.argv[0])
2743 2744 exe = sys.executable
2744 2745 if PYTHON3:
2745 2746 compiler = _bytespath(compiler)
2746 2747 script = _bytespath(script)
2747 2748 exe = _bytespath(exe)
2748 2749 hgroot = os.path.dirname(os.path.dirname(script))
2749 2750 self._hgroot = hgroot
2750 2751 os.chdir(hgroot)
2751 2752 nohome = b'--home=""'
2752 2753 if os.name == 'nt':
2753 2754 # The --home="" trick works only on OSes where os.sep == '/'
2754 2755 # because of a distutils convert_path() fast-path. Avoid it at
2755 2756 # least on Windows for now, deal with .pydistutils.cfg bugs
2756 2757 # when they happen.
2757 2758 nohome = b''
2758 2759 cmd = (b'%(exe)s setup.py %(pure)s clean --all'
2759 2760 b' build %(compiler)s --build-base="%(base)s"'
2760 2761 b' install --force --prefix="%(prefix)s"'
2761 2762 b' --install-lib="%(libdir)s"'
2762 2763 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
2763 2764 % {b'exe': exe, b'pure': pure,
2764 2765 b'compiler': compiler,
2765 2766 b'base': os.path.join(self._hgtmp, b"build"),
2766 2767 b'prefix': self._installdir, b'libdir': self._pythondir,
2767 2768 b'bindir': self._bindir,
2768 2769 b'nohome': nohome, b'logfile': installerrs})
2769 2770
2770 2771 # setuptools requires install directories to exist.
2771 2772 def makedirs(p):
2772 2773 try:
2773 2774 os.makedirs(p)
2774 2775 except OSError as e:
2775 2776 if e.errno != errno.EEXIST:
2776 2777 raise
2777 2778 makedirs(self._pythondir)
2778 2779 makedirs(self._bindir)
2779 2780
2780 2781 vlog("# Running", cmd)
2781 2782 if os.system(cmd) == 0:
2782 2783 if not self.options.verbose:
2783 2784 try:
2784 2785 os.remove(installerrs)
2785 2786 except OSError as e:
2786 2787 if e.errno != errno.ENOENT:
2787 2788 raise
2788 2789 else:
2789 2790 f = open(installerrs, 'rb')
2790 2791 for line in f:
2791 2792 if PYTHON3:
2792 2793 sys.stdout.buffer.write(line)
2793 2794 else:
2794 2795 sys.stdout.write(line)
2795 2796 f.close()
2796 2797 sys.exit(1)
2797 2798 os.chdir(self._testdir)
2798 2799
2799 2800 self._usecorrectpython()
2800 2801
2801 2802 if self.options.py3k_warnings and not self.options.anycoverage:
2802 2803 vlog("# Updating hg command to enable Py3k Warnings switch")
2803 2804 f = open(os.path.join(self._bindir, 'hg'), 'rb')
2804 2805 lines = [line.rstrip() for line in f]
2805 2806 lines[0] += ' -3'
2806 2807 f.close()
2807 2808 f = open(os.path.join(self._bindir, 'hg'), 'wb')
2808 2809 for line in lines:
2809 2810 f.write(line + '\n')
2810 2811 f.close()
2811 2812
2812 2813 hgbat = os.path.join(self._bindir, b'hg.bat')
2813 2814 if os.path.isfile(hgbat):
2814 2815 # hg.bat expects to be put in bin/scripts while the run-tests.py
2815 2816 # installation layout puts it in bin/ directly. Fix it.
2816 2817 f = open(hgbat, 'rb')
2817 2818 data = f.read()
2818 2819 f.close()
2819 2820 if b'"%~dp0..\python" "%~dp0hg" %*' in data:
2820 2821 data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
2821 2822 b'"%~dp0python" "%~dp0hg" %*')
2822 2823 f = open(hgbat, 'wb')
2823 2824 f.write(data)
2824 2825 f.close()
2825 2826 else:
2826 2827 print('WARNING: cannot fix hg.bat reference to python.exe')
2827 2828
2828 2829 if self.options.anycoverage:
2829 2830 custom = os.path.join(self._testdir, 'sitecustomize.py')
2830 2831 target = os.path.join(self._pythondir, 'sitecustomize.py')
2831 2832 vlog('# Installing coverage trigger to %s' % target)
2832 2833 shutil.copyfile(custom, target)
2833 2834 rc = os.path.join(self._testdir, '.coveragerc')
2834 2835 vlog('# Installing coverage rc to %s' % rc)
2835 2836 os.environ['COVERAGE_PROCESS_START'] = rc
2836 2837 covdir = os.path.join(self._installdir, '..', 'coverage')
2837 2838 try:
2838 2839 os.mkdir(covdir)
2839 2840 except OSError as e:
2840 2841 if e.errno != errno.EEXIST:
2841 2842 raise
2842 2843
2843 2844 os.environ['COVERAGE_DIR'] = covdir
2844 2845
2845 2846 def _checkhglib(self, verb):
2846 2847 """Ensure that the 'mercurial' package imported by python is
2847 2848 the one we expect it to be. If not, print a warning to stderr."""
2848 2849 if ((self._bindir == self._pythondir) and
2849 2850 (self._bindir != self._tmpbindir)):
2850 2851 # The pythondir has been inferred from --with-hg flag.
2851 2852 # We cannot expect anything sensible here.
2852 2853 return
2853 2854 expecthg = os.path.join(self._pythondir, b'mercurial')
2854 2855 actualhg = self._gethgpath()
2855 2856 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
2856 2857 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
2857 2858 ' (expected %s)\n'
2858 2859 % (verb, actualhg, expecthg))
2859 2860 def _gethgpath(self):
2860 2861 """Return the path to the mercurial package that is actually found by
2861 2862 the current Python interpreter."""
2862 2863 if self._hgpath is not None:
2863 2864 return self._hgpath
2864 2865
2865 2866 cmd = b'%s -c "import mercurial; print (mercurial.__path__[0])"'
2866 2867 cmd = cmd % PYTHON
2867 2868 if PYTHON3:
2868 2869 cmd = _strpath(cmd)
2869 2870 pipe = os.popen(cmd)
2870 2871 try:
2871 2872 self._hgpath = _bytespath(pipe.read().strip())
2872 2873 finally:
2873 2874 pipe.close()
2874 2875
2875 2876 return self._hgpath
2876 2877
2877 2878 def _installchg(self):
2878 2879 """Install chg into the test environment"""
2879 2880 vlog('# Performing temporary installation of CHG')
2880 2881 assert os.path.dirname(self._bindir) == self._installdir
2881 2882 assert self._hgroot, 'must be called after _installhg()'
2882 2883 cmd = (b'"%(make)s" clean install PREFIX="%(prefix)s"'
2883 2884 % {b'make': 'make', # TODO: switch by option or environment?
2884 2885 b'prefix': self._installdir})
2885 2886 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
2886 2887 vlog("# Running", cmd)
2887 2888 proc = subprocess.Popen(cmd, shell=True, cwd=cwd,
2888 2889 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
2889 2890 stderr=subprocess.STDOUT)
2890 2891 out, _err = proc.communicate()
2891 2892 if proc.returncode != 0:
2892 2893 if PYTHON3:
2893 2894 sys.stdout.buffer.write(out)
2894 2895 else:
2895 2896 sys.stdout.write(out)
2896 2897 sys.exit(1)
2897 2898
2898 2899 def _outputcoverage(self):
2899 2900 """Produce code coverage output."""
2900 2901 import coverage
2901 2902 coverage = coverage.coverage
2902 2903
2903 2904 vlog('# Producing coverage report')
2904 2905 # chdir is the easiest way to get short, relative paths in the
2905 2906 # output.
2906 2907 os.chdir(self._hgroot)
2907 2908 covdir = os.path.join(self._installdir, '..', 'coverage')
2908 2909 cov = coverage(data_file=os.path.join(covdir, 'cov'))
2909 2910
2910 2911 # Map install directory paths back to source directory.
2911 2912 cov.config.paths['srcdir'] = ['.', self._pythondir]
2912 2913
2913 2914 cov.combine()
2914 2915
2915 2916 omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
2916 2917 cov.report(ignore_errors=True, omit=omit)
2917 2918
2918 2919 if self.options.htmlcov:
2919 2920 htmldir = os.path.join(self._outputdir, 'htmlcov')
2920 2921 cov.html_report(directory=htmldir, omit=omit)
2921 2922 if self.options.annotate:
2922 2923 adir = os.path.join(self._outputdir, 'annotated')
2923 2924 if not os.path.isdir(adir):
2924 2925 os.mkdir(adir)
2925 2926 cov.annotate(directory=adir, omit=omit)
2926 2927
2927 2928 def _findprogram(self, program):
2928 2929 """Search PATH for an executable program"""
2929 2930 dpb = _bytespath(os.defpath)
2930 2931 sepb = _bytespath(os.pathsep)
2931 2932 for p in osenvironb.get(b'PATH', dpb).split(sepb):
2932 2933 name = os.path.join(p, program)
2933 2934 if os.name == 'nt' or os.access(name, os.X_OK):
2934 2935 return name
2935 2936 return None
2936 2937
2937 2938 def _checktools(self):
2938 2939 """Ensure tools required to run tests are present."""
2939 2940 for p in self.REQUIREDTOOLS:
2940 2941 if os.name == 'nt' and not p.endswith('.exe'):
2941 2942 p += '.exe'
2942 2943 found = self._findprogram(p)
2943 2944 if found:
2944 2945 vlog("# Found prerequisite", p, "at", found)
2945 2946 else:
2946 2947 print("WARNING: Did not find prerequisite tool: %s " %
2947 2948 p.decode("utf-8"))
2948 2949
2949 2950 if __name__ == '__main__':
2950 2951 runner = TestRunner()
2951 2952
2952 2953 try:
2953 2954 import msvcrt
2954 2955 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
2955 2956 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
2956 2957 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
2957 2958 except ImportError:
2958 2959 pass
2959 2960
2960 2961 sys.exit(runner.run(sys.argv[1:]))
@@ -1,1563 +1,1570 b''
1 1 This file tests the behavior of run-tests.py itself.
2 2
3 3 Avoid interference from actual test env:
4 4
5 5 $ . "$TESTDIR/helper-runtests.sh"
6 6
7 7 Smoke test with install
8 8 ============
9 9
10 10 $ run-tests.py $HGTEST_RUN_TESTS_PURE -l
11 11
12 12 # Ran 0 tests, 0 skipped, 0 failed.
13 13
14 14 Define a helper to avoid the install step
15 15 =============
16 16 $ rt()
17 17 > {
18 18 > run-tests.py --with-hg=`which hg` "$@"
19 19 > }
20 20
21 21 error paths
22 22
23 23 #if symlink
24 24 $ ln -s `which true` hg
25 25 $ run-tests.py --with-hg=./hg
26 26 warning: --with-hg should specify an hg script
27 27
28 28 # Ran 0 tests, 0 skipped, 0 failed.
29 29 $ rm hg
30 30 #endif
31 31
32 32 #if execbit
33 33 $ touch hg
34 34 $ run-tests.py --with-hg=./hg
35 35 Usage: run-tests.py [options] [tests]
36 36
37 37 run-tests.py: error: --with-hg must specify an executable hg script
38 38 [2]
39 39 $ rm hg
40 40 #endif
41 41
42 42 Features for testing optional lines
43 43 ===================================
44 44
45 45 $ cat > hghaveaddon.py <<EOF
46 46 > import hghave
47 47 > @hghave.check("custom", "custom hghave feature")
48 48 > def has_custom():
49 49 > return True
50 50 > @hghave.check("missing", "missing hghave feature")
51 51 > def has_missing():
52 52 > return False
53 53 > EOF
54 54
55 55 an empty test
56 56 =======================
57 57
58 58 $ touch test-empty.t
59 59 $ rt
60 60 .
61 61 # Ran 1 tests, 0 skipped, 0 failed.
62 62 $ rm test-empty.t
63 63
64 64 a successful test
65 65 =======================
66 66
67 67 $ cat > test-success.t << EOF
68 68 > $ echo babar
69 69 > babar
70 70 > $ echo xyzzy
71 71 > dont_print (?)
72 72 > nothing[42]line (re) (?)
73 73 > never*happens (glob) (?)
74 74 > more_nothing (?)
75 75 > xyzzy
76 76 > nor this (?)
77 77 > $ printf 'abc\ndef\nxyz\n'
78 78 > 123 (?)
79 79 > abc
80 80 > def (?)
81 81 > 456 (?)
82 82 > xyz
83 83 > $ printf 'zyx\nwvu\ntsr\n'
84 84 > abc (?)
85 85 > zyx (custom !)
86 86 > wvu
87 87 > no_print (no-custom !)
88 88 > tsr (no-missing !)
89 89 > missing (missing !)
90 90 > EOF
91 91
92 92 $ rt
93 93 .
94 94 # Ran 1 tests, 0 skipped, 0 failed.
95 95
96 96 failing test
97 97 ==================
98 98
99 99 test churn with globs
100 100 $ cat > test-failure.t <<EOF
101 > $ echo "bar-baz"; echo "bar-bad"
101 > $ echo "bar-baz"; echo "bar-bad"; echo foo
102 102 > bar*bad (glob)
103 103 > bar*baz (glob)
104 > | fo (re)
104 105 > EOF
105 106 $ rt test-failure.t
106 107
107 108 --- $TESTTMP/test-failure.t
108 109 +++ $TESTTMP/test-failure.t.err
109 @@ -1,3 +1,3 @@
110 $ echo "bar-baz"; echo "bar-bad"
110 @@ -1,4 +1,4 @@
111 $ echo "bar-baz"; echo "bar-bad"; echo foo
111 112 + bar*baz (glob)
112 113 bar*bad (glob)
113 114 - bar*baz (glob)
115 - | fo (re)
116 + foo
114 117
115 118 ERROR: test-failure.t output changed
116 119 !
117 120 Failed test-failure.t: output changed
118 121 # Ran 1 tests, 0 skipped, 1 failed.
119 122 python hash seed: * (glob)
120 123 [1]
121 124
122 125 test diff colorisation
123 126
124 127 #if no-windows pygments
125 128 $ rt test-failure.t --color always
126 129
127 130 \x1b[38;5;124m--- $TESTTMP/test-failure.t\x1b[39m (esc)
128 131 \x1b[38;5;34m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc)
129 \x1b[38;5;90;01m@@ -1,3 +1,3 @@\x1b[39;00m (esc)
130 $ echo "bar-baz"; echo "bar-bad"
132 \x1b[38;5;90;01m@@ -1,4 +1,4 @@\x1b[39;00m (esc)
133 $ echo "bar-baz"; echo "bar-bad"; echo foo
131 134 \x1b[38;5;34m+ bar*baz (glob)\x1b[39m (esc)
132 135 bar*bad (glob)
133 136 \x1b[38;5;124m- bar*baz (glob)\x1b[39m (esc)
137 \x1b[38;5;124m- | fo (re)\x1b[39m (esc)
138 \x1b[38;5;34m+ foo\x1b[39m (esc)
134 139
135 140 \x1b[38;5;88mERROR: \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m output changed\x1b[39m (esc)
136 141 !
137 142 \x1b[38;5;88mFailed \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m: output changed\x1b[39m (esc)
138 143 # Ran 1 tests, 0 skipped, 1 failed.
139 144 python hash seed: * (glob)
140 145 [1]
141 146
142 147 $ rt test-failure.t 2> tmp.log
143 148 [1]
144 149 $ cat tmp.log
145 150
146 151 --- $TESTTMP/test-failure.t
147 152 +++ $TESTTMP/test-failure.t.err
148 @@ -1,3 +1,3 @@
149 $ echo "bar-baz"; echo "bar-bad"
153 @@ -1,4 +1,4 @@
154 $ echo "bar-baz"; echo "bar-bad"; echo foo
150 155 + bar*baz (glob)
151 156 bar*bad (glob)
152 157 - bar*baz (glob)
158 - | fo (re)
159 + foo
153 160
154 161 ERROR: test-failure.t output changed
155 162 !
156 163 Failed test-failure.t: output changed
157 164 # Ran 1 tests, 0 skipped, 1 failed.
158 165 python hash seed: * (glob)
159 166 #endif
160 167
161 168 $ cat > test-failure.t << EOF
162 169 > $ true
163 170 > should go away (true !)
164 171 > $ true
165 172 > should stay (false !)
166 173 >
167 174 > Should remove first line, not second or third
168 175 > $ echo 'testing'
169 176 > baz*foo (glob) (true !)
170 177 > foobar*foo (glob) (false !)
171 178 > te*ting (glob) (true !)
172 179 >
173 180 > Should keep first two lines, remove third and last
174 181 > $ echo 'testing'
175 182 > test.ng (re) (true !)
176 183 > foo.ar (re) (false !)
177 184 > b.r (re) (true !)
178 185 > missing (?)
179 186 > awol (true !)
180 187 >
181 188 > The "missing" line should stay, even though awol is dropped
182 189 > $ echo 'testing'
183 190 > test.ng (re) (true !)
184 191 > foo.ar (?)
185 192 > awol
186 193 > missing (?)
187 194 > EOF
188 195 $ rt test-failure.t
189 196
190 197 --- $TESTTMP/test-failure.t
191 198 +++ $TESTTMP/test-failure.t.err
192 199 @@ -1,11 +1,9 @@
193 200 $ true
194 201 - should go away (true !)
195 202 $ true
196 203 should stay (false !)
197 204
198 205 Should remove first line, not second or third
199 206 $ echo 'testing'
200 207 - baz*foo (glob) (true !)
201 208 foobar*foo (glob) (false !)
202 209 te*ting (glob) (true !)
203 210
204 211 foo.ar (re) (false !)
205 212 missing (?)
206 213 @@ -13,13 +11,10 @@
207 214 $ echo 'testing'
208 215 test.ng (re) (true !)
209 216 foo.ar (re) (false !)
210 217 - b.r (re) (true !)
211 218 missing (?)
212 219 - awol (true !)
213 220
214 221 The "missing" line should stay, even though awol is dropped
215 222 $ echo 'testing'
216 223 test.ng (re) (true !)
217 224 foo.ar (?)
218 225 - awol
219 226 missing (?)
220 227
221 228 ERROR: test-failure.t output changed
222 229 !
223 230 Failed test-failure.t: output changed
224 231 # Ran 1 tests, 0 skipped, 1 failed.
225 232 python hash seed: * (glob)
226 233 [1]
227 234
228 235 basic failing test
229 236 $ cat > test-failure.t << EOF
230 237 > $ echo babar
231 238 > rataxes
232 239 > This is a noop statement so that
233 240 > this test is still more bytes than success.
234 241 > pad pad pad pad............................................................
235 242 > pad pad pad pad............................................................
236 243 > pad pad pad pad............................................................
237 244 > pad pad pad pad............................................................
238 245 > pad pad pad pad............................................................
239 246 > pad pad pad pad............................................................
240 247 > EOF
241 248
242 249 >>> fh = open('test-failure-unicode.t', 'wb')
243 250 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
244 251 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
245 252
246 253 $ rt
247 254
248 255 --- $TESTTMP/test-failure.t
249 256 +++ $TESTTMP/test-failure.t.err
250 257 @@ -1,5 +1,5 @@
251 258 $ echo babar
252 259 - rataxes
253 260 + babar
254 261 This is a noop statement so that
255 262 this test is still more bytes than success.
256 263 pad pad pad pad............................................................
257 264
258 265 ERROR: test-failure.t output changed
259 266 !.
260 267 --- $TESTTMP/test-failure-unicode.t
261 268 +++ $TESTTMP/test-failure-unicode.t.err
262 269 @@ -1,2 +1,2 @@
263 270 $ echo babar\xce\xb1 (esc)
264 271 - l\xce\xb5\xce\xb5t (esc)
265 272 + babar\xce\xb1 (esc)
266 273
267 274 ERROR: test-failure-unicode.t output changed
268 275 !
269 276 Failed test-failure.t: output changed
270 277 Failed test-failure-unicode.t: output changed
271 278 # Ran 3 tests, 0 skipped, 2 failed.
272 279 python hash seed: * (glob)
273 280 [1]
274 281
275 282 test --outputdir
276 283 $ mkdir output
277 284 $ rt --outputdir output
278 285
279 286 --- $TESTTMP/test-failure.t
280 287 +++ $TESTTMP/output/test-failure.t.err
281 288 @@ -1,5 +1,5 @@
282 289 $ echo babar
283 290 - rataxes
284 291 + babar
285 292 This is a noop statement so that
286 293 this test is still more bytes than success.
287 294 pad pad pad pad............................................................
288 295
289 296 ERROR: test-failure.t output changed
290 297 !.
291 298 --- $TESTTMP/test-failure-unicode.t
292 299 +++ $TESTTMP/output/test-failure-unicode.t.err
293 300 @@ -1,2 +1,2 @@
294 301 $ echo babar\xce\xb1 (esc)
295 302 - l\xce\xb5\xce\xb5t (esc)
296 303 + babar\xce\xb1 (esc)
297 304
298 305 ERROR: test-failure-unicode.t output changed
299 306 !
300 307 Failed test-failure.t: output changed
301 308 Failed test-failure-unicode.t: output changed
302 309 # Ran 3 tests, 0 skipped, 2 failed.
303 310 python hash seed: * (glob)
304 311 [1]
305 312 $ ls -a output
306 313 .
307 314 ..
308 315 .testtimes
309 316 test-failure-unicode.t.err
310 317 test-failure.t.err
311 318
312 319 test --xunit support
313 320 $ rt --xunit=xunit.xml
314 321
315 322 --- $TESTTMP/test-failure.t
316 323 +++ $TESTTMP/test-failure.t.err
317 324 @@ -1,5 +1,5 @@
318 325 $ echo babar
319 326 - rataxes
320 327 + babar
321 328 This is a noop statement so that
322 329 this test is still more bytes than success.
323 330 pad pad pad pad............................................................
324 331
325 332 ERROR: test-failure.t output changed
326 333 !.
327 334 --- $TESTTMP/test-failure-unicode.t
328 335 +++ $TESTTMP/test-failure-unicode.t.err
329 336 @@ -1,2 +1,2 @@
330 337 $ echo babar\xce\xb1 (esc)
331 338 - l\xce\xb5\xce\xb5t (esc)
332 339 + babar\xce\xb1 (esc)
333 340
334 341 ERROR: test-failure-unicode.t output changed
335 342 !
336 343 Failed test-failure.t: output changed
337 344 Failed test-failure-unicode.t: output changed
338 345 # Ran 3 tests, 0 skipped, 2 failed.
339 346 python hash seed: * (glob)
340 347 [1]
341 348 $ cat xunit.xml
342 349 <?xml version="1.0" encoding="utf-8"?>
343 350 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
344 351 <testcase name="test-success.t" time="*"/> (glob)
345 352 <testcase name="test-failure-unicode.t" time="*"> (glob)
346 353 <failure message="output changed" type="output-mismatch">
347 354 <![CDATA[--- $TESTTMP/test-failure-unicode.t
348 355 +++ $TESTTMP/test-failure-unicode.t.err
349 356 @@ -1,2 +1,2 @@
350 357 $ echo babar\xce\xb1 (esc)
351 358 - l\xce\xb5\xce\xb5t (esc)
352 359 + babar\xce\xb1 (esc)
353 360 ]]> </failure>
354 361 </testcase>
355 362 <testcase name="test-failure.t" time="*"> (glob)
356 363 <failure message="output changed" type="output-mismatch">
357 364 <![CDATA[--- $TESTTMP/test-failure.t
358 365 +++ $TESTTMP/test-failure.t.err
359 366 @@ -1,5 +1,5 @@
360 367 $ echo babar
361 368 - rataxes
362 369 + babar
363 370 This is a noop statement so that
364 371 this test is still more bytes than success.
365 372 pad pad pad pad............................................................
366 373 ]]> </failure>
367 374 </testcase>
368 375 </testsuite>
369 376
370 377 $ cat .testtimes
371 378 test-failure-unicode.t * (glob)
372 379 test-failure.t * (glob)
373 380 test-success.t * (glob)
374 381
375 382 $ rt --list-tests
376 383 test-failure-unicode.t
377 384 test-failure.t
378 385 test-success.t
379 386
380 387 $ rt --list-tests --json
381 388 test-failure-unicode.t
382 389 test-failure.t
383 390 test-success.t
384 391 $ cat report.json
385 392 testreport ={
386 393 "test-failure-unicode.t": {
387 394 "result": "success"
388 395 },
389 396 "test-failure.t": {
390 397 "result": "success"
391 398 },
392 399 "test-success.t": {
393 400 "result": "success"
394 401 }
395 402 } (no-eol)
396 403
397 404 $ rt --list-tests --xunit=xunit.xml
398 405 test-failure-unicode.t
399 406 test-failure.t
400 407 test-success.t
401 408 $ cat xunit.xml
402 409 <?xml version="1.0" encoding="utf-8"?>
403 410 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
404 411 <testcase name="test-failure-unicode.t"/>
405 412 <testcase name="test-failure.t"/>
406 413 <testcase name="test-success.t"/>
407 414 </testsuite>
408 415
409 416 $ rt --list-tests test-failure* --json --xunit=xunit.xml --outputdir output
410 417 test-failure-unicode.t
411 418 test-failure.t
412 419 $ cat output/report.json
413 420 testreport ={
414 421 "test-failure-unicode.t": {
415 422 "result": "success"
416 423 },
417 424 "test-failure.t": {
418 425 "result": "success"
419 426 }
420 427 } (no-eol)
421 428 $ cat xunit.xml
422 429 <?xml version="1.0" encoding="utf-8"?>
423 430 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
424 431 <testcase name="test-failure-unicode.t"/>
425 432 <testcase name="test-failure.t"/>
426 433 </testsuite>
427 434
428 435 $ rm test-failure-unicode.t
429 436
430 437 test for --retest
431 438 ====================
432 439
433 440 $ rt --retest
434 441
435 442 --- $TESTTMP/test-failure.t
436 443 +++ $TESTTMP/test-failure.t.err
437 444 @@ -1,5 +1,5 @@
438 445 $ echo babar
439 446 - rataxes
440 447 + babar
441 448 This is a noop statement so that
442 449 this test is still more bytes than success.
443 450 pad pad pad pad............................................................
444 451
445 452 ERROR: test-failure.t output changed
446 453 !
447 454 Failed test-failure.t: output changed
448 455 # Ran 2 tests, 1 skipped, 1 failed.
449 456 python hash seed: * (glob)
450 457 [1]
451 458
452 459 --retest works with --outputdir
453 460 $ rm -r output
454 461 $ mkdir output
455 462 $ mv test-failure.t.err output
456 463 $ rt --retest --outputdir output
457 464
458 465 --- $TESTTMP/test-failure.t
459 466 +++ $TESTTMP/output/test-failure.t.err
460 467 @@ -1,5 +1,5 @@
461 468 $ echo babar
462 469 - rataxes
463 470 + babar
464 471 This is a noop statement so that
465 472 this test is still more bytes than success.
466 473 pad pad pad pad............................................................
467 474
468 475 ERROR: test-failure.t output changed
469 476 !
470 477 Failed test-failure.t: output changed
471 478 # Ran 2 tests, 1 skipped, 1 failed.
472 479 python hash seed: * (glob)
473 480 [1]
474 481
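Stated informally, the two runs above show that --retest limits the run to
tests whose previous run left a ".err" file behind (everything else is counted
as skipped), and that with --outputdir the ".err" files are looked up in that
directory instead. A rough sketch of the usual fix-and-rerun loop, with a
made-up test name:

  $ rt test-widget.t                 # fails and leaves test-widget.t.err behind
  # ... edit test-widget.t until the expected output is right ...
  $ rt --retest                      # reruns only tests that still have an .err file
  $ rt --retest --outputdir output   # same, but looks for .err files under ./output
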
475 482 Selecting Tests To Run
476 483 ======================
477 484
478 485 successful
479 486
480 487 $ rt test-success.t
481 488 .
482 489 # Ran 1 tests, 0 skipped, 0 failed.
483 490
484 491 success w/ keyword
485 492 $ rt -k xyzzy
486 493 .
487 494 # Ran 2 tests, 1 skipped, 0 failed.
488 495
489 496 failed
490 497
491 498 $ rt test-failure.t
492 499
493 500 --- $TESTTMP/test-failure.t
494 501 +++ $TESTTMP/test-failure.t.err
495 502 @@ -1,5 +1,5 @@
496 503 $ echo babar
497 504 - rataxes
498 505 + babar
499 506 This is a noop statement so that
500 507 this test is still more bytes than success.
501 508 pad pad pad pad............................................................
502 509
503 510 ERROR: test-failure.t output changed
504 511 !
505 512 Failed test-failure.t: output changed
506 513 # Ran 1 tests, 0 skipped, 1 failed.
507 514 python hash seed: * (glob)
508 515 [1]
509 516
510 517 failure w/ keyword
511 518 $ rt -k rataxes
512 519
513 520 --- $TESTTMP/test-failure.t
514 521 +++ $TESTTMP/test-failure.t.err
515 522 @@ -1,5 +1,5 @@
516 523 $ echo babar
517 524 - rataxes
518 525 + babar
519 526 This is a noop statement so that
520 527 this test is still more bytes than success.
521 528 pad pad pad pad............................................................
522 529
523 530 ERROR: test-failure.t output changed
524 531 !
525 532 Failed test-failure.t: output changed
526 533 # Ran 2 tests, 1 skipped, 1 failed.
527 534 python hash seed: * (glob)
528 535 [1]
529 536
530 537 Verify that when a process fails to start we show a useful message
531 538 ==================================================================
532 539
533 540 $ cat > test-serve-fail.t <<EOF
534 541 > $ echo 'abort: child process failed to start blah'
535 542 > EOF
536 543 $ rt test-serve-fail.t
537 544
538 545 ERROR: test-serve-fail.t output changed
539 546 !
540 547 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
541 548 # Ran 1 tests, 0 skipped, 1 failed.
542 549 python hash seed: * (glob)
543 550 [1]
544 551 $ rm test-serve-fail.t
545 552
546 553 Verify that we can try other ports
547 554 ===================================
548 555 $ hg init inuse
549 556 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
550 557 $ cat blocks.pid >> $DAEMON_PIDS
551 558 $ cat > test-serve-inuse.t <<EOF
552 559 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
553 560 > $ cat hg.pid >> \$DAEMON_PIDS
554 561 > EOF
555 562 $ rt test-serve-inuse.t
556 563 .
557 564 # Ran 1 tests, 0 skipped, 0 failed.
558 565 $ rm test-serve-inuse.t
559 566 $ killdaemons.py $DAEMON_PIDS
560 567 $ rm $DAEMON_PIDS
561 568
562 569 Running In Debug Mode
563 570 ======================
564 571
565 572 $ rt --debug 2>&1 | grep -v pwd
566 573 + echo *SALT* 0 0 (glob)
567 574 *SALT* 0 0 (glob)
568 575 + echo babar
569 576 babar
570 577 + echo *SALT* 10 0 (glob)
571 578 *SALT* 10 0 (glob)
572 579 *+ echo *SALT* 0 0 (glob)
573 580 *SALT* 0 0 (glob)
574 581 + echo babar
575 582 babar
576 583 + echo *SALT* 2 0 (glob)
577 584 *SALT* 2 0 (glob)
578 585 + echo xyzzy
579 586 xyzzy
580 587 + echo *SALT* 9 0 (glob)
581 588 *SALT* 9 0 (glob)
582 589 + printf *abc\ndef\nxyz\n* (glob)
583 590 abc
584 591 def
585 592 xyz
586 593 + echo *SALT* 15 0 (glob)
587 594 *SALT* 15 0 (glob)
588 595 + printf *zyx\nwvu\ntsr\n* (glob)
589 596 zyx
590 597 wvu
591 598 tsr
592 599 + echo *SALT* 22 0 (glob)
593 600 *SALT* 22 0 (glob)
594 601 .
595 602 # Ran 2 tests, 0 skipped, 0 failed.
596 603
597 604 Parallel runs
598 605 ==============
599 606
600 607 (duplicate the failing test to get predictable output)
601 608 $ cp test-failure.t test-failure-copy.t
602 609
603 610 $ rt --jobs 2 test-failure*.t -n
604 611 !!
605 612 Failed test-failure*.t: output changed (glob)
606 613 Failed test-failure*.t: output changed (glob)
607 614 # Ran 2 tests, 0 skipped, 2 failed.
608 615 python hash seed: * (glob)
609 616 [1]
610 617
611 618 failures in parallel with --first should only print one failure
612 619 $ rt --jobs 2 --first test-failure*.t
613 620
614 621 --- $TESTTMP/test-failure*.t (glob)
615 622 +++ $TESTTMP/test-failure*.t.err (glob)
616 623 @@ -1,5 +1,5 @@
617 624 $ echo babar
618 625 - rataxes
619 626 + babar
620 627 This is a noop statement so that
621 628 this test is still more bytes than success.
622 629 pad pad pad pad............................................................
623 630
624 631 Failed test-failure*.t: output changed (glob)
625 632 Failed test-failure*.t: output changed (glob)
626 633 # Ran 2 tests, 0 skipped, 2 failed.
627 634 python hash seed: * (glob)
628 635 [1]
629 636
630 637
631 638 (delete the duplicated test file)
632 639 $ rm test-failure-copy.t
633 640
634 641
635 642 Interactive run
636 643 ===============
637 644
638 645 (backup the failing test)
639 646 $ cp test-failure.t backup
640 647
641 648 Refuse the fix
642 649
643 650 $ echo 'n' | rt -i
644 651
645 652 --- $TESTTMP/test-failure.t
646 653 +++ $TESTTMP/test-failure.t.err
647 654 @@ -1,5 +1,5 @@
648 655 $ echo babar
649 656 - rataxes
650 657 + babar
651 658 This is a noop statement so that
652 659 this test is still more bytes than success.
653 660 pad pad pad pad............................................................
654 661 Accept this change? [n]
655 662 ERROR: test-failure.t output changed
656 663 !.
657 664 Failed test-failure.t: output changed
658 665 # Ran 2 tests, 0 skipped, 1 failed.
659 666 python hash seed: * (glob)
660 667 [1]
661 668
662 669 $ cat test-failure.t
663 670 $ echo babar
664 671 rataxes
665 672 This is a noop statement so that
666 673 this test is still more bytes than success.
667 674 pad pad pad pad............................................................
668 675 pad pad pad pad............................................................
669 676 pad pad pad pad............................................................
670 677 pad pad pad pad............................................................
671 678 pad pad pad pad............................................................
672 679 pad pad pad pad............................................................
673 680
674 681 Interactive with custom view
675 682
676 683 $ echo 'n' | rt -i --view echo
677 684 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
678 685 Accept this change? [n]* (glob)
679 686 ERROR: test-failure.t output changed
680 687 !.
681 688 Failed test-failure.t: output changed
682 689 # Ran 2 tests, 0 skipped, 1 failed.
683 690 python hash seed: * (glob)
684 691 [1]
685 692
686 693 View the fix
687 694
688 695 $ echo 'y' | rt --view echo
689 696 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
690 697
691 698 ERROR: test-failure.t output changed
692 699 !.
693 700 Failed test-failure.t: output changed
694 701 # Ran 2 tests, 0 skipped, 1 failed.
695 702 python hash seed: * (glob)
696 703 [1]
697 704
698 705 Accept the fix
699 706
700 707 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
701 708 $ echo " saved backup bundle to \$TESTTMP/foo.hg" >> test-failure.t
702 709 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
703 710 $ echo " saved backup bundle to \$TESTTMP/foo.hg (glob)" >> test-failure.t
704 711 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
705 712 $ echo " saved backup bundle to \$TESTTMP/*.hg (glob)" >> test-failure.t
706 713 $ echo 'y' | rt -i 2>&1
707 714
708 715 --- $TESTTMP/test-failure.t
709 716 +++ $TESTTMP/test-failure.t.err
710 717 @@ -1,5 +1,5 @@
711 718 $ echo babar
712 719 - rataxes
713 720 + babar
714 721 This is a noop statement so that
715 722 this test is still more bytes than success.
716 723 pad pad pad pad............................................................
717 724 @@ -9,7 +9,7 @@
718 725 pad pad pad pad............................................................
719 726 pad pad pad pad............................................................
720 727 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
721 728 - saved backup bundle to $TESTTMP/foo.hg
722 729 + saved backup bundle to $TESTTMP/foo.hg* (glob)
723 730 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
724 731 saved backup bundle to $TESTTMP/foo.hg* (glob)
725 732 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
726 733 Accept this change? [n] ..
727 734 # Ran 2 tests, 0 skipped, 0 failed.
728 735
729 736 $ sed -e 's,(glob)$,&<,g' test-failure.t
730 737 $ echo babar
731 738 babar
732 739 This is a noop statement so that
733 740 this test is still more bytes than success.
734 741 pad pad pad pad............................................................
735 742 pad pad pad pad............................................................
736 743 pad pad pad pad............................................................
737 744 pad pad pad pad............................................................
738 745 pad pad pad pad............................................................
739 746 pad pad pad pad............................................................
740 747 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
741 748 saved backup bundle to $TESTTMP/foo.hg (glob)<
742 749 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
743 750 saved backup bundle to $TESTTMP/foo.hg (glob)<
744 751 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
745 752 saved backup bundle to $TESTTMP/*.hg (glob)<
746 753
747 754 Race condition - test file was modified while the test is running
748 755
749 756 $ TESTRACEDIR=`pwd`
750 757 $ export TESTRACEDIR
751 758 $ cat > test-race.t <<EOF
752 759 > $ echo 1
753 760 > $ echo "# a new line" >> $TESTRACEDIR/test-race.t
754 761 > EOF
755 762
756 763 $ rt -i test-race.t
757 764
758 765 --- $TESTTMP/test-race.t
759 766 +++ $TESTTMP/test-race.t.err
760 767 @@ -1,2 +1,3 @@
761 768 $ echo 1
762 769 + 1
763 770 $ echo "# a new line" >> $TESTTMP/test-race.t
764 771 Reference output has changed (run again to prompt changes)
765 772 ERROR: test-race.t output changed
766 773 !
767 774 Failed test-race.t: output changed
768 775 # Ran 1 tests, 0 skipped, 1 failed.
769 776 python hash seed: * (glob)
770 777 [1]
771 778
772 779 $ rm test-race.t
773 780
774 781 When "#testcases" is used in .t files
775 782
776 783 $ cat >> test-cases.t <<EOF
777 784 > #testcases a b
778 785 > #if a
779 786 > $ echo 1
780 787 > #endif
781 788 > #if b
782 789 > $ echo 2
783 790 > #endif
784 791 > EOF
785 792
786 793 $ cat <<EOF | rt -i test-cases.t 2>&1
787 794 > y
788 795 > y
789 796 > EOF
790 797
791 798 --- $TESTTMP/test-cases.t
792 799 +++ $TESTTMP/test-cases.t.a.err
793 800 @@ -1,6 +1,7 @@
794 801 #testcases a b
795 802 #if a
796 803 $ echo 1
797 804 + 1
798 805 #endif
799 806 #if b
800 807 $ echo 2
801 808 Accept this change? [n] .
802 809 --- $TESTTMP/test-cases.t
803 810 +++ $TESTTMP/test-cases.t.b.err
804 811 @@ -5,4 +5,5 @@
805 812 #endif
806 813 #if b
807 814 $ echo 2
808 815 + 2
809 816 #endif
810 817 Accept this change? [n] .
811 818 # Ran 2 tests, 0 skipped, 0 failed.
812 819
813 820 $ cat test-cases.t
814 821 #testcases a b
815 822 #if a
816 823 $ echo 1
817 824 1
818 825 #endif
819 826 #if b
820 827 $ echo 2
821 828 2
822 829 #endif
823 830
824 831 $ cat >> test-cases.t <<'EOF'
825 832 > #if a
826 833 > $ NAME=A
827 834 > #else
828 835 > $ NAME=B
829 836 > #endif
830 837 > $ echo $NAME
831 838 > A (a !)
832 839 > B (b !)
833 840 > EOF
834 841 $ rt test-cases.t
835 842 ..
836 843 # Ran 2 tests, 0 skipped, 0 failed.
837 844
838 845 $ rm test-cases.t
839 846
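To summarize what the transcript above demonstrates: "#testcases a b" makes
run-tests execute the same .t file once per listed case, each case gets its
own err file (test-cases.t.a.err, test-cases.t.b.err), the active case can be
tested with #if/#else, and a "(a !)"-style annotation expects a line only for
that case. A minimal sketch with made-up case names, shown here but not run:

  $ cat > test-sizes.t <<'EOF'
  > #testcases small large
  > #if small
  > $ SIZE=1
  > #else
  > $ SIZE=100
  > #endif
  > $ echo "size=$SIZE"
  > size=1 (small !)
  > size=100 (large !)
  > EOF
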
840 847 (reinstall)
841 848 $ mv backup test-failure.t
842 849
843 850 No Diff
844 851 ===============
845 852
846 853 $ rt --nodiff
847 854 !.
848 855 Failed test-failure.t: output changed
849 856 # Ran 2 tests, 0 skipped, 1 failed.
850 857 python hash seed: * (glob)
851 858 [1]
852 859
853 860 test --tmpdir support
854 861 $ rt --tmpdir=$TESTTMP/keep test-success.t
855 862
856 863 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t (glob)
857 864 Keeping threadtmp dir: $TESTTMP/keep/child1 (glob)
858 865 .
859 866 # Ran 1 tests, 0 skipped, 0 failed.
860 867
861 868 timeouts
862 869 ========
863 870 $ cat > test-timeout.t <<EOF
864 871 > $ sleep 2
865 872 > $ echo pass
866 873 > pass
867 874 > EOF
868 875 > echo '#require slow' > test-slow-timeout.t
869 876 > cat test-timeout.t >> test-slow-timeout.t
870 877 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
871 878 st
872 879 Skipped test-slow-timeout.t: missing feature: allow slow tests (use --allow-slow-tests)
873 880 Failed test-timeout.t: timed out
874 881 # Ran 1 tests, 1 skipped, 1 failed.
875 882 python hash seed: * (glob)
876 883 [1]
877 884 $ rt --timeout=1 --slowtimeout=3 \
878 885 > test-timeout.t test-slow-timeout.t --allow-slow-tests
879 886 .t
880 887 Failed test-timeout.t: timed out
881 888 # Ran 2 tests, 0 skipped, 1 failed.
882 889 python hash seed: * (glob)
883 890 [1]
884 891 $ rm test-timeout.t test-slow-timeout.t
885 892
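Informal recap of the two runs above: --timeout applies to ordinary tests,
while tests that declare "#require slow" are skipped outright unless
--allow-slow-tests is given, in which case the (usually larger) --slowtimeout
budget is applied to them instead. A sketch of a more realistic invocation
(the values and test name are arbitrary):

  $ rt --timeout=180 --slowtimeout=1500 --allow-slow-tests test-huge-repo.t
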
886 893 test for --time
887 894 ==================
888 895
889 896 $ rt test-success.t --time
890 897 .
891 898 # Ran 1 tests, 0 skipped, 0 failed.
892 899 # Producing time report
893 900 start end cuser csys real Test
894 901 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
895 902
896 903 test for --time with --jobs enabled
897 904 ====================================
898 905
899 906 $ rt test-success.t --time --jobs 2
900 907 .
901 908 # Ran 1 tests, 0 skipped, 0 failed.
902 909 # Producing time report
903 910 start end cuser csys real Test
904 911 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
905 912
906 913 Skips
907 914 ================
908 915 $ cat > test-skip.t <<EOF
909 916 > $ echo xyzzy
910 917 > #require false
911 918 > EOF
912 919 $ rt --nodiff
913 920 !.s
914 921 Skipped test-skip.t: missing feature: nail clipper
915 922 Failed test-failure.t: output changed
916 923 # Ran 2 tests, 1 skipped, 1 failed.
917 924 python hash seed: * (glob)
918 925 [1]
919 926
920 927 $ rt --keyword xyzzy
921 928 .s
922 929 Skipped test-skip.t: missing feature: nail clipper
923 930 # Ran 2 tests, 2 skipped, 0 failed.
924 931
925 932 Skips with xml
926 933 $ rt --keyword xyzzy \
927 934 > --xunit=xunit.xml
928 935 .s
929 936 Skipped test-skip.t: missing feature: nail clipper
930 937 # Ran 2 tests, 2 skipped, 0 failed.
931 938 $ cat xunit.xml
932 939 <?xml version="1.0" encoding="utf-8"?>
933 940 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
934 941 <testcase name="test-success.t" time="*"/> (glob)
935 942 <testcase name="test-skip.t">
936 943 <skipped>
937 944 <![CDATA[missing feature: nail clipper]]> </skipped>
938 945 </testcase>
939 946 </testsuite>
940 947
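As background for the skip output above: a "#require foo" line asks hghave
whether feature "foo" is available and skips the whole test otherwise, and the
skip message is hghave's description of the missing feature (the
deliberately-unavailable "false" feature is described as "nail clipper", which
is where that text comes from). A minimal sketch using the real "execbit"
feature, shown here but not run:

  $ cat > test-needs-execbit.t <<'EOF'
  > #require execbit
  > $ touch script.sh
  > $ chmod +x script.sh
  > EOF
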
941 948 Tests skipped because they are missing or blacklisted don't count as executed:
942 949 $ echo test-failure.t > blacklist
943 950 $ rt --blacklist=blacklist --json\
944 951 > test-failure.t test-bogus.t
945 952 ss
946 953 Skipped test-bogus.t: Doesn't exist
947 954 Skipped test-failure.t: blacklisted
948 955 # Ran 0 tests, 2 skipped, 0 failed.
949 956 $ cat report.json
950 957 testreport ={
951 958 "test-bogus.t": {
952 959 "result": "skip"
953 960 },
954 961 "test-failure.t": {
955 962 "result": "skip"
956 963 }
957 964 } (no-eol)
958 965
959 966 Whitelist trumps blacklist
960 967 $ echo test-failure.t > whitelist
961 968 $ rt --blacklist=blacklist --whitelist=whitelist --json\
962 969 > test-failure.t test-bogus.t
963 970 s
964 971 --- $TESTTMP/test-failure.t
965 972 +++ $TESTTMP/test-failure.t.err
966 973 @@ -1,5 +1,5 @@
967 974 $ echo babar
968 975 - rataxes
969 976 + babar
970 977 This is a noop statement so that
971 978 this test is still more bytes than success.
972 979 pad pad pad pad............................................................
973 980
974 981 ERROR: test-failure.t output changed
975 982 !
976 983 Skipped test-bogus.t: Doesn't exist
977 984 Failed test-failure.t: output changed
978 985 # Ran 1 tests, 1 skipped, 1 failed.
979 986 python hash seed: * (glob)
980 987 [1]
981 988
982 989 Ensure that --test-list causes only the tests listed in that file to
983 990 be executed.
984 991 $ echo test-success.t >> onlytest
985 992 $ rt --test-list=onlytest
986 993 .
987 994 # Ran 1 tests, 0 skipped, 0 failed.
988 995 $ echo test-bogus.t >> anothertest
989 996 $ rt --test-list=onlytest --test-list=anothertest
990 997 s.
991 998 Skipped test-bogus.t: Doesn't exist
992 999 # Ran 1 tests, 1 skipped, 0 failed.
993 1000 $ rm onlytest anothertest
994 1001
995 1002 test for --json
996 1003 ==================
997 1004
998 1005 $ rt --json
999 1006
1000 1007 --- $TESTTMP/test-failure.t
1001 1008 +++ $TESTTMP/test-failure.t.err
1002 1009 @@ -1,5 +1,5 @@
1003 1010 $ echo babar
1004 1011 - rataxes
1005 1012 + babar
1006 1013 This is a noop statement so that
1007 1014 this test is still more bytes than success.
1008 1015 pad pad pad pad............................................................
1009 1016
1010 1017 ERROR: test-failure.t output changed
1011 1018 !.s
1012 1019 Skipped test-skip.t: missing feature: nail clipper
1013 1020 Failed test-failure.t: output changed
1014 1021 # Ran 2 tests, 1 skipped, 1 failed.
1015 1022 python hash seed: * (glob)
1016 1023 [1]
1017 1024
1018 1025 $ cat report.json
1019 1026 testreport ={
1020 1027 "test-failure.t": [\{] (re)
1021 1028 "csys": "\s*[\d\.]{4,5}", ? (re)
1022 1029 "cuser": "\s*[\d\.]{4,5}", ? (re)
1023 1030 "diff": "---.+\+\+\+.+", ? (re)
1024 1031 "end": "\s*[\d\.]{4,5}", ? (re)
1025 1032 "result": "failure", ? (re)
1026 1033 "start": "\s*[\d\.]{4,5}", ? (re)
1027 1034 "time": "\s*[\d\.]{4,5}" (re)
1028 1035 }, ? (re)
1029 1036 "test-skip.t": {
1030 1037 "csys": "\s*[\d\.]{4,5}", ? (re)
1031 1038 "cuser": "\s*[\d\.]{4,5}", ? (re)
1032 1039 "diff": "", ? (re)
1033 1040 "end": "\s*[\d\.]{4,5}", ? (re)
1034 1041 "result": "skip", ? (re)
1035 1042 "start": "\s*[\d\.]{4,5}", ? (re)
1036 1043 "time": "\s*[\d\.]{4,5}" (re)
1037 1044 }, ? (re)
1038 1045 "test-success.t": [\{] (re)
1039 1046 "csys": "\s*[\d\.]{4,5}", ? (re)
1040 1047 "cuser": "\s*[\d\.]{4,5}", ? (re)
1041 1048 "diff": "", ? (re)
1042 1049 "end": "\s*[\d\.]{4,5}", ? (re)
1043 1050 "result": "success", ? (re)
1044 1051 "start": "\s*[\d\.]{4,5}", ? (re)
1045 1052 "time": "\s*[\d\.]{4,5}" (re)
1046 1053 }
1047 1054 } (no-eol)
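A practical note on the listing above: report.json is not bare JSON; the
object is prefixed with "testreport =" and written without a trailing newline,
so the prefix has to be stripped before handing the file to a JSON parser. A
small shell sketch (assuming a POSIX sed and Python's bundled json.tool
module):

  $ sed 's/^testreport =//' report.json | "$PYTHON" -m json.tool
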
1048 1055 --json with --outputdir
1049 1056
1050 1057 $ rm report.json
1051 1058 $ rm -r output
1052 1059 $ mkdir output
1053 1060 $ rt --json --outputdir output
1054 1061
1055 1062 --- $TESTTMP/test-failure.t
1056 1063 +++ $TESTTMP/output/test-failure.t.err
1057 1064 @@ -1,5 +1,5 @@
1058 1065 $ echo babar
1059 1066 - rataxes
1060 1067 + babar
1061 1068 This is a noop statement so that
1062 1069 this test is still more bytes than success.
1063 1070 pad pad pad pad............................................................
1064 1071
1065 1072 ERROR: test-failure.t output changed
1066 1073 !.s
1067 1074 Skipped test-skip.t: missing feature: nail clipper
1068 1075 Failed test-failure.t: output changed
1069 1076 # Ran 2 tests, 1 skipped, 1 failed.
1070 1077 python hash seed: * (glob)
1071 1078 [1]
1072 1079 $ f report.json
1073 1080 report.json: file not found
1074 1081 $ cat output/report.json
1075 1082 testreport ={
1076 1083 "test-failure.t": [\{] (re)
1077 1084 "csys": "\s*[\d\.]{4,5}", ? (re)
1078 1085 "cuser": "\s*[\d\.]{4,5}", ? (re)
1079 1086 "diff": "---.+\+\+\+.+", ? (re)
1080 1087 "end": "\s*[\d\.]{4,5}", ? (re)
1081 1088 "result": "failure", ? (re)
1082 1089 "start": "\s*[\d\.]{4,5}", ? (re)
1083 1090 "time": "\s*[\d\.]{4,5}" (re)
1084 1091 }, ? (re)
1085 1092 "test-skip.t": {
1086 1093 "csys": "\s*[\d\.]{4,5}", ? (re)
1087 1094 "cuser": "\s*[\d\.]{4,5}", ? (re)
1088 1095 "diff": "", ? (re)
1089 1096 "end": "\s*[\d\.]{4,5}", ? (re)
1090 1097 "result": "skip", ? (re)
1091 1098 "start": "\s*[\d\.]{4,5}", ? (re)
1092 1099 "time": "\s*[\d\.]{4,5}" (re)
1093 1100 }, ? (re)
1094 1101 "test-success.t": [\{] (re)
1095 1102 "csys": "\s*[\d\.]{4,5}", ? (re)
1096 1103 "cuser": "\s*[\d\.]{4,5}", ? (re)
1097 1104 "diff": "", ? (re)
1098 1105 "end": "\s*[\d\.]{4,5}", ? (re)
1099 1106 "result": "success", ? (re)
1100 1107 "start": "\s*[\d\.]{4,5}", ? (re)
1101 1108 "time": "\s*[\d\.]{4,5}" (re)
1102 1109 }
1103 1110 } (no-eol)
1104 1111 $ ls -a output
1105 1112 .
1106 1113 ..
1107 1114 .testtimes
1108 1115 report.json
1109 1116 test-failure.t.err
1110 1117
1111 1118 Test that a failed test accepted through the interactive prompt is properly reported:
1112 1119
1113 1120 $ cp test-failure.t backup
1114 1121 $ echo y | rt --json -i
1115 1122
1116 1123 --- $TESTTMP/test-failure.t
1117 1124 +++ $TESTTMP/test-failure.t.err
1118 1125 @@ -1,5 +1,5 @@
1119 1126 $ echo babar
1120 1127 - rataxes
1121 1128 + babar
1122 1129 This is a noop statement so that
1123 1130 this test is still more bytes than success.
1124 1131 pad pad pad pad............................................................
1125 1132 Accept this change? [n] ..s
1126 1133 Skipped test-skip.t: missing feature: nail clipper
1127 1134 # Ran 2 tests, 1 skipped, 0 failed.
1128 1135
1129 1136 $ cat report.json
1130 1137 testreport ={
1131 1138 "test-failure.t": [\{] (re)
1132 1139 "csys": "\s*[\d\.]{4,5}", ? (re)
1133 1140 "cuser": "\s*[\d\.]{4,5}", ? (re)
1134 1141 "diff": "", ? (re)
1135 1142 "end": "\s*[\d\.]{4,5}", ? (re)
1136 1143 "result": "success", ? (re)
1137 1144 "start": "\s*[\d\.]{4,5}", ? (re)
1138 1145 "time": "\s*[\d\.]{4,5}" (re)
1139 1146 }, ? (re)
1140 1147 "test-skip.t": {
1141 1148 "csys": "\s*[\d\.]{4,5}", ? (re)
1142 1149 "cuser": "\s*[\d\.]{4,5}", ? (re)
1143 1150 "diff": "", ? (re)
1144 1151 "end": "\s*[\d\.]{4,5}", ? (re)
1145 1152 "result": "skip", ? (re)
1146 1153 "start": "\s*[\d\.]{4,5}", ? (re)
1147 1154 "time": "\s*[\d\.]{4,5}" (re)
1148 1155 }, ? (re)
1149 1156 "test-success.t": [\{] (re)
1150 1157 "csys": "\s*[\d\.]{4,5}", ? (re)
1151 1158 "cuser": "\s*[\d\.]{4,5}", ? (re)
1152 1159 "diff": "", ? (re)
1153 1160 "end": "\s*[\d\.]{4,5}", ? (re)
1154 1161 "result": "success", ? (re)
1155 1162 "start": "\s*[\d\.]{4,5}", ? (re)
1156 1163 "time": "\s*[\d\.]{4,5}" (re)
1157 1164 }
1158 1165 } (no-eol)
1159 1166 $ mv backup test-failure.t
1160 1167
1161 1168 backslash at the end of a line with glob matching is handled properly
1162 1169
1163 1170 $ cat > test-glob-backslash.t << EOF
1164 1171 > $ echo 'foo bar \\'
1165 1172 > foo * \ (glob)
1166 1173 > EOF
1167 1174
1168 1175 $ rt test-glob-backslash.t
1169 1176 .
1170 1177 # Ran 1 tests, 0 skipped, 0 failed.
1171 1178
1172 1179 $ rm -f test-glob-backslash.t
1173 1180
1174 1181 Test globbing of local IP addresses
1175 1182 $ echo 172.16.18.1
1176 1183 $LOCALIP (glob)
1177 1184 $ echo dead:beef::1
1178 1185 $LOCALIP (glob)
1179 1186
1180 1187 Test reusability for third party tools
1181 1188 ======================================
1182 1189
1183 1190 $ mkdir "$TESTTMP"/anothertests
1184 1191 $ cd "$TESTTMP"/anothertests
1185 1192
1186 1193 test that `run-tests.py` can execute hghave, even when it is not run
1187 1194 from the Mercurial source tree.
1188 1195
1189 1196 $ cat > test-hghave.t <<EOF
1190 1197 > #require true
1191 1198 > $ echo foo
1192 1199 > foo
1193 1200 > EOF
1194 1201 $ rt test-hghave.t
1195 1202 .
1196 1203 # Ran 1 tests, 0 skipped, 0 failed.
1197 1204
1198 1205 test that RUNTESTDIR refers to the directory in which the currently
1199 1206 running `run-tests.py` is placed.
1200 1207
1201 1208 $ cat > test-runtestdir.t <<EOF
1202 1209 > - $TESTDIR, in which test-run-tests.t is placed
1203 1210 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
1204 1211 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
1205 1212 >
1206 1213 > #if windows
1207 1214 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
1208 1215 > #else
1209 1216 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
1210 1217 > #endif
1211 1218 > $ test "\$RUNTESTDIR" = "$TESTDIR"
1212 1219 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py | sed 's@.!.*python@#!USRBINENVPY@'
1213 1220 > #!USRBINENVPY
1214 1221 > #
1215 1222 > # check-code - a style and portability checker for Mercurial
1216 1223 > EOF
1217 1224 $ rt test-runtestdir.t
1218 1225 .
1219 1226 # Ran 1 tests, 0 skipped, 0 failed.
1220 1227
1221 1228 #if execbit
1222 1229
1223 1230 test that TESTDIR is included in PATH
1224 1231
1225 1232 $ cat > custom-command.sh <<EOF
1226 1233 > #!/bin/sh
1227 1234 > echo "hello world"
1228 1235 > EOF
1229 1236 $ chmod +x custom-command.sh
1230 1237 $ cat > test-testdir-path.t <<EOF
1231 1238 > $ custom-command.sh
1232 1239 > hello world
1233 1240 > EOF
1234 1241 $ rt test-testdir-path.t
1235 1242 .
1236 1243 # Ran 1 tests, 0 skipped, 0 failed.
1237 1244
1238 1245 #endif
1239 1246
1240 1247 test support for --allow-slow-tests
1241 1248 $ cat > test-very-slow-test.t <<EOF
1242 1249 > #require slow
1243 1250 > $ echo pass
1244 1251 > pass
1245 1252 > EOF
1246 1253 $ rt test-very-slow-test.t
1247 1254 s
1248 1255 Skipped test-very-slow-test.t: missing feature: allow slow tests (use --allow-slow-tests)
1249 1256 # Ran 0 tests, 1 skipped, 0 failed.
1250 1257 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
1251 1258 .
1252 1259 # Ran 1 tests, 0 skipped, 0 failed.
1253 1260
1254 1261 support for running a test outside the current directory
1255 1262 $ mkdir nonlocal
1256 1263 $ cat > nonlocal/test-is-not-here.t << EOF
1257 1264 > $ echo pass
1258 1265 > pass
1259 1266 > EOF
1260 1267 $ rt nonlocal/test-is-not-here.t
1261 1268 .
1262 1269 # Ran 1 tests, 0 skipped, 0 failed.
1263 1270
1264 1271 support for automatically discovering tests when the argument is a folder
1265 1272 $ mkdir tmp && cd tmp
1266 1273
1267 1274 $ cat > test-uno.t << EOF
1268 1275 > $ echo line
1269 1276 > line
1270 1277 > EOF
1271 1278
1272 1279 $ cp test-uno.t test-dos.t
1273 1280 $ cd ..
1274 1281 $ cp -R tmp tmpp
1275 1282 $ cp tmp/test-uno.t test-solo.t
1276 1283
1277 1284 $ $PYTHON $TESTDIR/run-tests.py tmp/ test-solo.t tmpp
1278 1285 .....
1279 1286 # Ran 5 tests, 0 skipped, 0 failed.
1280 1287 $ rm -rf tmp tmpp
1281 1288
1282 1289 support for running run-tests.py from another directory
1283 1290 $ mkdir tmp && cd tmp
1284 1291
1285 1292 $ cat > useful-file.sh << EOF
1286 1293 > important command
1287 1294 > EOF
1288 1295
1289 1296 $ cat > test-folder.t << EOF
1290 1297 > $ cat \$TESTDIR/useful-file.sh
1291 1298 > important command
1292 1299 > EOF
1293 1300
1294 1301 $ cat > test-folder-fail.t << EOF
1295 1302 > $ cat \$TESTDIR/useful-file.sh
1296 1303 > important commando
1297 1304 > EOF
1298 1305
1299 1306 $ cd ..
1300 1307 $ $PYTHON $TESTDIR/run-tests.py tmp/test-*.t
1301 1308
1302 1309 --- $TESTTMP/anothertests/tmp/test-folder-fail.t
1303 1310 +++ $TESTTMP/anothertests/tmp/test-folder-fail.t.err
1304 1311 @@ -1,2 +1,2 @@
1305 1312 $ cat $TESTDIR/useful-file.sh
1306 1313 - important commando
1307 1314 + important command
1308 1315
1309 1316 ERROR: test-folder-fail.t output changed
1310 1317 !.
1311 1318 Failed test-folder-fail.t: output changed
1312 1319 # Ran 2 tests, 0 skipped, 1 failed.
1313 1320 python hash seed: * (glob)
1314 1321 [1]
1315 1322
1316 1323 support for bisecting failed tests automatically
1317 1324 $ hg init bisect
1318 1325 $ cd bisect
1319 1326 $ cat >> test-bisect.t <<EOF
1320 1327 > $ echo pass
1321 1328 > pass
1322 1329 > EOF
1323 1330 $ hg add test-bisect.t
1324 1331 $ hg ci -m 'good'
1325 1332 $ cat >> test-bisect.t <<EOF
1326 1333 > $ echo pass
1327 1334 > fail
1328 1335 > EOF
1329 1336 $ hg ci -m 'bad'
1330 1337 $ rt --known-good-rev=0 test-bisect.t
1331 1338
1332 1339 --- $TESTTMP/anothertests/bisect/test-bisect.t
1333 1340 +++ $TESTTMP/anothertests/bisect/test-bisect.t.err
1334 1341 @@ -1,4 +1,4 @@
1335 1342 $ echo pass
1336 1343 pass
1337 1344 $ echo pass
1338 1345 - fail
1339 1346 + pass
1340 1347
1341 1348 ERROR: test-bisect.t output changed
1342 1349 !
1343 1350 Failed test-bisect.t: output changed
1344 1351 test-bisect.t broken by 72cbf122d116 (bad)
1345 1352 # Ran 1 tests, 0 skipped, 1 failed.
1346 1353 python hash seed: * (glob)
1347 1354 [1]
1348 1355
1349 1356 $ cd ..
1350 1357
1351 1358 support bisecting a separate repo
1352 1359
1353 1360 $ hg init bisect-dependent
1354 1361 $ cd bisect-dependent
1355 1362 $ cat > test-bisect-dependent.t <<EOF
1356 1363 > $ tail -1 \$TESTDIR/../bisect/test-bisect.t
1357 1364 > pass
1358 1365 > EOF
1359 1366 $ hg commit -Am dependent test-bisect-dependent.t
1360 1367
1361 1368 $ rt --known-good-rev=0 test-bisect-dependent.t
1362 1369
1363 1370 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1364 1371 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1365 1372 @@ -1,2 +1,2 @@
1366 1373 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1367 1374 - pass
1368 1375 + fail
1369 1376
1370 1377 ERROR: test-bisect-dependent.t output changed
1371 1378 !
1372 1379 Failed test-bisect-dependent.t: output changed
1373 1380 Failed to identify failure point for test-bisect-dependent.t
1374 1381 # Ran 1 tests, 0 skipped, 1 failed.
1375 1382 python hash seed: * (glob)
1376 1383 [1]
1377 1384
1378 1385 $ rt --bisect-repo=../test-bisect test-bisect-dependent.t
1379 1386 Usage: run-tests.py [options] [tests]
1380 1387
1381 1388 run-tests.py: error: --bisect-repo cannot be used without --known-good-rev
1382 1389 [2]
1383 1390
1384 1391 $ rt --known-good-rev=0 --bisect-repo=../bisect test-bisect-dependent.t
1385 1392
1386 1393 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1387 1394 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1388 1395 @@ -1,2 +1,2 @@
1389 1396 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1390 1397 - pass
1391 1398 + fail
1392 1399
1393 1400 ERROR: test-bisect-dependent.t output changed
1394 1401 !
1395 1402 Failed test-bisect-dependent.t: output changed
1396 1403 test-bisect-dependent.t broken by 72cbf122d116 (bad)
1397 1404 # Ran 1 tests, 0 skipped, 1 failed.
1398 1405 python hash seed: * (glob)
1399 1406 [1]
1400 1407
1401 1408 $ cd ..
1402 1409
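Summing up the bisection runs above, informally: with --known-good-rev,
run-tests drives Mercurial's bisect machinery between that revision and the
current checkout to name the changeset that first broke a failing test
("broken by ..."), and --bisect-repo points the bisection at a different
repository when the test lives apart from the code under suspicion (it still
requires --known-good-rev). A rough sketch of the equivalent manual bisection,
assuming run-tests.py is on PATH and using a made-up test name:

  $ hg bisect --reset
  $ hg bisect --bad .
  $ hg bisect --good 0
  $ hg bisect --command 'run-tests.py test-widget.t'
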
1403 1410 Test that a broken #if statement doesn't break run-tests threading.
1404 1411 ==============================================================
1405 1412 $ mkdir broken
1406 1413 $ cd broken
1407 1414 $ cat > test-broken.t <<EOF
1408 1415 > true
1409 1416 > #if notarealhghavefeature
1410 1417 > $ false
1411 1418 > #endif
1412 1419 > EOF
1413 1420 $ for f in 1 2 3 4 ; do
1414 1421 > cat > test-works-$f.t <<EOF
1415 1422 > This is test case $f
1416 1423 > $ sleep 1
1417 1424 > EOF
1418 1425 > done
1419 1426 $ rt -j 2
1420 1427 ....
1421 1428 # Ran 5 tests, 0 skipped, 0 failed.
1422 1429 skipped: unknown feature: notarealhghavefeature
1423 1430
1424 1431 $ cd ..
1425 1432 $ rm -rf broken
1426 1433
1427 1434 Test cases in .t files
1428 1435 ======================
1429 1436 $ mkdir cases
1430 1437 $ cd cases
1431 1438 $ cat > test-cases-abc.t <<'EOF'
1432 1439 > #testcases A B C
1433 1440 > $ V=B
1434 1441 > #if A
1435 1442 > $ V=A
1436 1443 > #endif
1437 1444 > #if C
1438 1445 > $ V=C
1439 1446 > #endif
1440 1447 > $ echo $V | sed 's/A/C/'
1441 1448 > C
1442 1449 > #if C
1443 1450 > $ [ $V = C ]
1444 1451 > #endif
1445 1452 > #if A
1446 1453 > $ [ $V = C ]
1447 1454 > [1]
1448 1455 > #endif
1449 1456 > #if no-C
1450 1457 > $ [ $V = C ]
1451 1458 > [1]
1452 1459 > #endif
1453 1460 > $ [ $V = D ]
1454 1461 > [1]
1455 1462 > EOF
1456 1463 $ rt
1457 1464 .
1458 1465 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1459 1466 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1460 1467 @@ -7,7 +7,7 @@
1461 1468 $ V=C
1462 1469 #endif
1463 1470 $ echo $V | sed 's/A/C/'
1464 1471 - C
1465 1472 + B
1466 1473 #if C
1467 1474 $ [ $V = C ]
1468 1475 #endif
1469 1476
1470 1477 ERROR: test-cases-abc.t (case B) output changed
1471 1478 !.
1472 1479 Failed test-cases-abc.t (case B): output changed
1473 1480 # Ran 3 tests, 0 skipped, 1 failed.
1474 1481 python hash seed: * (glob)
1475 1482 [1]
1476 1483
1477 1484 --restart works
1478 1485
1479 1486 $ rt --restart
1480 1487
1481 1488 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1482 1489 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1483 1490 @@ -7,7 +7,7 @@
1484 1491 $ V=C
1485 1492 #endif
1486 1493 $ echo $V | sed 's/A/C/'
1487 1494 - C
1488 1495 + B
1489 1496 #if C
1490 1497 $ [ $V = C ]
1491 1498 #endif
1492 1499
1493 1500 ERROR: test-cases-abc.t (case B) output changed
1494 1501 !.
1495 1502 Failed test-cases-abc.t (case B): output changed
1496 1503 # Ran 2 tests, 0 skipped, 1 failed.
1497 1504 python hash seed: * (glob)
1498 1505 [1]
1499 1506
1500 1507 --restart works with --outputdir
1501 1508
1502 1509 $ mkdir output
1503 1510 $ mv test-cases-abc.t.B.err output
1504 1511 $ rt --restart --outputdir output
1505 1512
1506 1513 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1507 1514 +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t.B.err
1508 1515 @@ -7,7 +7,7 @@
1509 1516 $ V=C
1510 1517 #endif
1511 1518 $ echo $V | sed 's/A/C/'
1512 1519 - C
1513 1520 + B
1514 1521 #if C
1515 1522 $ [ $V = C ]
1516 1523 #endif
1517 1524
1518 1525 ERROR: test-cases-abc.t (case B) output changed
1519 1526 !.
1520 1527 Failed test-cases-abc.t (case B): output changed
1521 1528 # Ran 2 tests, 0 skipped, 1 failed.
1522 1529 python hash seed: * (glob)
1523 1530 [1]
1524 1531
1525 1532 Test automatic pattern replacement
1526 1533
1527 1534 $ cat << EOF >> common-pattern.py
1528 1535 > substitutions = [
1529 1536 > (br'foo-(.*)\\b',
1530 1537 > br'\$XXX=\\1\$'),
1531 1538 > (br'bar\\n',
1532 1539 > br'\$YYY$\\n'),
1533 1540 > ]
1534 1541 > EOF
1535 1542
1536 1543 $ cat << EOF >> test-substitution.t
1537 1544 > $ echo foo-12
1538 1545 > \$XXX=12$
1539 1546 > $ echo foo-42
1540 1547 > \$XXX=42$
1541 1548 > $ echo bar prior
1542 1549 > bar prior
1543 1550 > $ echo lastbar
1544 1551 > last\$YYY$
1545 1552 > $ echo foo-bar foo-baz
1546 1553 > EOF
1547 1554
1548 1555 $ rt test-substitution.t
1549 1556
1550 1557 --- $TESTTMP/anothertests/cases/test-substitution.t
1551 1558 +++ $TESTTMP/anothertests/cases/test-substitution.t.err
1552 1559 @@ -7,3 +7,4 @@
1553 1560 $ echo lastbar
1554 1561 last$YYY$
1555 1562 $ echo foo-bar foo-baz
1556 1563 + $XXX=bar foo-baz$
1557 1564
1558 1565 ERROR: test-substitution.t output changed
1559 1566 !
1560 1567 Failed test-substitution.t: output changed
1561 1568 # Ran 1 tests, 0 skipped, 1 failed.
1562 1569 python hash seed: * (glob)
1563 1570 [1]
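What the failure above illustrates, stated informally: when a
common-pattern.py file defining a "substitutions" list of (regex, replacement)
byte-string pairs sits next to the tests, run-tests applies those
substitutions to the actual output before comparison, so expected lines must
be written in the already-substituted form (which is why "echo foo-bar
foo-baz" produced "$XXX=bar foo-baz$" rather than the raw text). A minimal
sketch of such a file with a made-up pattern that masks port numbers, shown
here but not run:

  $ cat > common-pattern.py <<'EOF'
  > substitutions = [
  >     # made-up example: hide TCP port numbers that follow a colon
  >     (br':\d+\b', br':$PORT$'),
  > ]
  > EOF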