# Provenance: Mercurial repository, changeset r41011:2465e0b2 (default branch)
# "run-tests: alias hg to hg.exe on Windows" -- Matt Harbison
# (file: tests/run-tests.py, 3272 -> 3275 lines in that change)
1 1 #!/usr/bin/env python
2 2 #
3 3 # run-tests.py - Run a set of tests on Mercurial
4 4 #
5 5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 # Modifying this script is tricky because it has many modes:
11 11 # - serial (default) vs parallel (-jN, N > 1)
12 12 # - no coverage (default) vs coverage (-c, -C, -s)
13 13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 14 # - tests are a mix of shell scripts and Python scripts
15 15 #
16 16 # If you change this script, it is recommended that you ensure you
17 17 # haven't broken it by running it in various modes with a representative
18 18 # sample of test scripts. For example:
19 19 #
20 20 # 1) serial, no coverage, temp install:
21 21 # ./run-tests.py test-s*
22 22 # 2) serial, no coverage, local hg:
23 23 # ./run-tests.py --local test-s*
24 24 # 3) serial, coverage, temp install:
25 25 # ./run-tests.py -c test-s*
26 26 # 4) serial, coverage, local hg:
27 27 # ./run-tests.py -c --local test-s* # unsupported
28 28 # 5) parallel, no coverage, temp install:
29 29 # ./run-tests.py -j2 test-s*
30 30 # 6) parallel, no coverage, local hg:
31 31 # ./run-tests.py -j2 --local test-s*
32 32 # 7) parallel, coverage, temp install:
33 33 # ./run-tests.py -j2 -c test-s* # currently broken
34 34 # 8) parallel, coverage, local install:
35 35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 36 # 9) parallel, custom tmp dir:
37 37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 38 # 10) parallel, pure, tests that call run-tests:
39 39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 40 #
41 41 # (You could use any subset of the tests: test-s* happens to match
42 42 # enough that it's worth doing parallel runs, few enough that it
43 43 # completes fairly quickly, includes both shell and Python scripts, and
44 44 # includes some scripts that run daemon processes.)
45 45
46 46 from __future__ import absolute_import, print_function
47 47
48 48 import argparse
49 49 import collections
50 50 import difflib
51 51 import distutils.version as version
52 52 import errno
53 53 import json
54 54 import multiprocessing
55 55 import os
56 56 import random
57 57 import re
58 58 import shutil
59 59 import signal
60 60 import socket
61 61 import subprocess
62 62 import sys
63 63 import sysconfig
64 64 import tempfile
65 65 import threading
66 66 import time
67 67 import unittest
68 68 import uuid
69 69 import xml.dom.minidom as minidom
70 70
# Py2/Py3 compatibility: the stdlib "Queue" module was renamed to
# "queue" in Python 3.
try:
    import Queue as queue
except ImportError:
    import queue

# shlex.quote() only exists on Python 3; fall back to the deprecated
# Python 2 equivalent, pipes.quote().
try:
    import shlex
    shellquote = shlex.quote
except (ImportError, AttributeError):
    import pipes
    shellquote = pipes.quote

# Opt-in pedantic mode: make implicit bytes<->unicode coercions fail.
# reload()/sys.setdefaultencoding() only exist on Python 2; on Python 3
# the NameError branch is taken and this is a no-op.
if os.environ.get('RTUNICODEPEDANTRY', False):
    try:
        reload(sys)
        sys.setdefaultencoding("undefined")
    except NameError:
        pass

# Serializes subprocess creation across worker threads (see Popen4()).
processlock = threading.Lock()
# Optional colored output support: only enabled when pygments imports
# cleanly, and never on Windows.
pygmentspresent = False
# ANSI color is unsupported prior to Windows 10
if os.name != 'nt':
    try: # is pygments installed
        import pygments
        import pygments.lexers as lexers
        import pygments.lexer as lexer
        import pygments.formatters as formatters
        import pygments.token as token
        import pygments.style as style
        pygmentspresent = True
        # shared pygments objects used by highlightdiff() below
        difflexer = lexers.DiffLexer()
        terminal256formatter = formatters.Terminal256Formatter()
    except ImportError:
        pass
if pygmentspresent:
    # Style/lexer pair used to colorize the harness' own summary lines
    # ("Skipped ...", "Failed ...", "ERROR: ...") -- not the test diffs,
    # which use difflexer above.
    class TestRunnerStyle(style.Style):
        default_style = ""
        # custom token types for the four kinds of text we highlight
        skipped = token.string_to_tokentype("Token.Generic.Skipped")
        failed = token.string_to_tokentype("Token.Generic.Failed")
        skippedname = token.string_to_tokentype("Token.Generic.SName")
        failedname = token.string_to_tokentype("Token.Generic.FName")
        styles = {
            skipped: '#e5e5e5',
            skippedname: '#00ffff',
            failed: '#7f0000',
            failedname: '#ff0000',
        }

    class TestRunnerLexer(lexer.RegexLexer):
        # a test file name, optionally followed by a "#case" suffix
        testpattern = r'[\w-]+\.(t|py)(#[a-zA-Z0-9_\-\.]+)?'
        tokens = {
            'root': [
                (r'^Skipped', token.Generic.Skipped, 'skipped'),
                (r'^Failed ', token.Generic.Failed, 'failed'),
                (r'^ERROR: ', token.Generic.Failed, 'failed'),
            ],
            'skipped': [
                (testpattern, token.Generic.SName),
                (r':.*', token.Generic.Skipped),
            ],
            'failed': [
                (testpattern, token.Generic.FName),
                (r'(:| ).*', token.Generic.Failed),
            ]
        }

    runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
    runnerlexer = TestRunnerLexer()
# Snapshot of the environment before the harness mutates it, so tests can
# be run against a pristine copy.
origenviron = os.environ.copy()

if sys.version_info > (3, 5, 0):
    PYTHON3 = True
    xrange = range # we use xrange in one place, and we'd rather not use range
    def _bytespath(p):
        """Encode a str path to UTF-8 bytes (None passes through)."""
        if p is None:
            return p
        return p.encode('utf-8')

    def _strpath(p):
        """Decode a bytes path to str via UTF-8 (None passes through)."""
        if p is None:
            return p
        return p.decode('utf-8')

    osenvironb = getattr(os, 'environb', None)
    if osenvironb is None:
        # Windows lacks os.environb, for instance. A proxy over the real thing
        # instead of a copy allows the environment to be updated via bytes on
        # all platforms.
        class environbytes(object):
            def __init__(self, strenv):
                # delegate len()/clear() straight to the wrapped str env
                self.__len__ = strenv.__len__
                self.clear = strenv.clear
                self._strenv = strenv
            def __getitem__(self, k):
                v = self._strenv.__getitem__(_strpath(k))
                return _bytespath(v)
            def __setitem__(self, k, v):
                self._strenv.__setitem__(_strpath(k), _strpath(v))
            def __delitem__(self, k):
                self._strenv.__delitem__(_strpath(k))
            def __contains__(self, k):
                return self._strenv.__contains__(_strpath(k))
            def __iter__(self):
                return iter([_bytespath(k) for k in iter(self._strenv)])
            def get(self, k, default=None):
                v = self._strenv.get(_strpath(k), _strpath(default))
                return _bytespath(v)
            def pop(self, k, default=None):
                v = self._strenv.pop(_strpath(k), _strpath(default))
                return _bytespath(v)

        osenvironb = environbytes(os.environ)

    # os.getcwdb exists on Python 3, but on Windows we prefer decoding
    # the str cwd ourselves.
    getcwdb = getattr(os, 'getcwdb')
    if not getcwdb or os.name == 'nt':
        getcwdb = lambda: _bytespath(os.getcwd())

elif sys.version_info >= (3, 0, 0):
    # Python 3.0-3.4 are explicitly unsupported.
    print('%s is only supported on Python 3.5+ and 2.7, not %s' %
          (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
    sys.exit(70) # EX_SOFTWARE from `man 3 sysexit`
else:
    PYTHON3 = False

    # In python 2.x, path operations are generally done using
    # bytestrings by default, so we don't have to do any extra
    # fiddling there. We define the wrapper functions anyway just to
    # help keep code consistent between platforms.
    def _bytespath(p):
        return p

    _strpath = _bytespath
    osenvironb = os.environ
    getcwdb = os.getcwd
# For Windows support
wifexited = getattr(os, "WIFEXITED", lambda x: False)

# Whether to use IPv6
def checksocketfamily(name, port=20058):
    """return true if we can listen on localhost using family=name

    name should be either 'AF_INET', or 'AF_INET6'.
    port being used is okay - EADDRINUSE is considered as successful.
    """
    family = getattr(socket, name, None)
    if family is None:
        return False
    try:
        probe = socket.socket(family, socket.SOCK_STREAM)
        probe.bind(('localhost', port))
        probe.close()
        return True
    except socket.error as exc:
        # a port already in use still proves the family works
        if exc.errno == errno.EADDRINUSE:
            return True
        if exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
            return False
        raise
    return False  # defensive; not reachable in practice
# useipv6 will be set by parseargs
useipv6 = None

def checkportisavailable(port):
    """Return True if a port seems free to bind on localhost.

    Uses IPv6 when the module-level ``useipv6`` flag is truthy, IPv4
    otherwise.  A bind failure with EADDRINUSE/EADDRNOTAVAIL/
    EPROTONOSUPPORT means "not available"; any other socket error is
    re-raised.
    """
    family = socket.AF_INET6 if useipv6 else socket.AF_INET
    s = None
    try:
        s = socket.socket(family, socket.SOCK_STREAM)
        s.bind(('localhost', port))
        return True
    except socket.error as exc:
        if exc.errno not in (errno.EADDRINUSE, errno.EADDRNOTAVAIL,
                             errno.EPROTONOSUPPORT):
            raise
        return False
    finally:
        # Fix: the original leaked the socket whenever bind() raised;
        # always release the probe socket.
        if s is not None:
            s.close()
257 257
# close_fds is cheap on POSIX and historically problematic on Windows
closefds = os.name == 'posix'
def Popen4(cmd, wd, timeout, env=None):
    """Spawn `cmd` through the shell in directory `wd` and return the Popen.

    The returned object gets popen4-style aliases (fromchild/tochild/
    childerr) plus a `timeout` flag.  If `timeout` is nonzero, a watcher
    thread sets p.timeout = True and terminates the process once the
    deadline passes.  Creation is serialized via the module-level
    processlock.
    """
    processlock.acquire()
    p = subprocess.Popen(_strpath(cmd), shell=True, bufsize=-1,
                         cwd=_strpath(wd), env=env,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    processlock.release()

    # popen4-compatible attribute names expected by callers
    p.fromchild = p.stdout
    p.tochild = p.stdin
    p.childerr = p.stderr

    p.timeout = False
    if timeout:
        def t():
            # poll every 100ms until the deadline or process exit;
            # p.returncode is refreshed elsewhere by whoever wait()s
            start = time.time()
            while time.time() - start < timeout and p.returncode is None:
                time.sleep(.1)
            p.timeout = True
            if p.returncode is None:
                terminate(p)
        threading.Thread(target=t).start()

    return p
284 284
# Path to the running interpreter, normalized to forward slashes (bytes).
PYTHON = _bytespath(sys.executable.replace('\\', '/'))
IMPL_PATH = b'PYTHONPATH'
if 'java' in sys.platform:
    IMPL_PATH = b'JYTHONPATH'

# Option defaults: option name -> (environment override, fallback value).
# getparser() later collapses each tuple to a single coerced value.
defaults = {
    'jobs': ('HGTEST_JOBS', multiprocessing.cpu_count()),
    'timeout': ('HGTEST_TIMEOUT', 180),
    'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500),
    'port': ('HGTEST_PORT', 20059),
    'shell': ('HGTEST_SHELL', 'sh'),
}
297 297
def canonpath(path):
    """Return *path* with '~' expanded and symlinks/'..' resolved."""
    expanded = os.path.expanduser(path)
    return os.path.realpath(expanded)
300 300
def parselistfiles(files, listtype, warn=True):
    """Parse blacklist/whitelist files into a dict.

    Each file lists one test name per line; '#' starts a comment and
    blank lines are ignored.  Returns a dict mapping each (bytes) entry
    to the filename it came from.  Missing files are skipped, with a
    warning when `warn` is true; other IOErrors propagate.
    """
    entries = dict()
    for filename in files:
        try:
            path = os.path.expanduser(os.path.expandvars(filename))
            f = open(path, "rb")
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            if warn:
                print("warning: no such %s file: %s" % (listtype, filename))
            continue

        # Fix: use a context manager so the handle is closed even if a
        # read raises (the original leaked it in that case).
        with f:
            for line in f:
                line = line.split(b'#', 1)[0].strip()
                if line:
                    entries[line] = filename
    return entries
321 321
def parsettestcases(path):
    """read a .t test file, return its '#testcases' groups

    Each '#testcases' line yields one sorted list of case names; the
    result is the list of those lists.  If path does not exist, return
    an empty list.
    """
    cases = []
    try:
        fp = open(path, 'rb')
    except IOError as ex:
        if ex.errno != errno.ENOENT:
            raise
        return cases
    with fp:
        for line in fp:
            if line.startswith(b'#testcases '):
                # skip the 11-byte '#testcases ' prefix
                cases.append(sorted(line[11:].split()))
    return cases
337 337
def getparser():
    """Obtain the OptionParser used by the CLI."""
    parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]')

    # --- which tests to run ---
    selection = parser.add_argument_group('Test Selection')
    selection.add_argument('--allow-slow-tests', action='store_true',
        help='allow extremely slow tests')
    selection.add_argument("--blacklist", action="append",
        help="skip tests listed in the specified blacklist file")
    selection.add_argument("--changed",
        help="run tests that are changed in parent rev or working directory")
    selection.add_argument("-k", "--keywords",
        help="run tests matching keywords")
    selection.add_argument("-r", "--retest", action="store_true",
        help = "retest failed tests")
    selection.add_argument("--test-list", action="append",
        help="read tests to run from the specified file")
    selection.add_argument("--whitelist", action="append",
        help="always run tests listed in the specified whitelist file")
    selection.add_argument('tests', metavar='TESTS', nargs='*',
        help='Tests to run')

    # --- how the harness behaves ---
    harness = parser.add_argument_group('Test Harness Behavior')
    harness.add_argument('--bisect-repo',
        metavar='bisect_repo',
        help=("Path of a repo to bisect. Use together with "
              "--known-good-rev"))
    harness.add_argument("-d", "--debug", action="store_true",
        help="debug mode: write output of test scripts to console"
             " rather than capturing and diffing it (disables timeout)")
    harness.add_argument("-f", "--first", action="store_true",
        help="exit on the first test failure")
    harness.add_argument("-i", "--interactive", action="store_true",
        help="prompt to accept changed output")
    harness.add_argument("-j", "--jobs", type=int,
        help="number of jobs to run in parallel"
             " (default: $%s or %d)" % defaults['jobs'])
    harness.add_argument("--keep-tmpdir", action="store_true",
        help="keep temporary directory after running tests")
    harness.add_argument('--known-good-rev',
        metavar="known_good_rev",
        help=("Automatically bisect any failures using this "
              "revision as a known-good revision."))
    harness.add_argument("--list-tests", action="store_true",
        help="list tests instead of running them")
    harness.add_argument("--loop", action="store_true",
        help="loop tests repeatedly")
    harness.add_argument('--random', action="store_true",
        help='run tests in random order')
    harness.add_argument('--order-by-runtime', action="store_true",
        help='run slowest tests first, according to .testtimes')
    harness.add_argument("-p", "--port", type=int,
        help="port on which servers should listen"
             " (default: $%s or %d)" % defaults['port'])
    harness.add_argument('--profile-runner', action='store_true',
        help='run statprof on run-tests')
    harness.add_argument("-R", "--restart", action="store_true",
        help="restart at last error")
    harness.add_argument("--runs-per-test", type=int, dest="runs_per_test",
        help="run each test N times (default=1)", default=1)
    harness.add_argument("--shell",
        help="shell to use (default: $%s or %s)" % defaults['shell'])
    harness.add_argument('--showchannels', action='store_true',
        help='show scheduling channels')
    harness.add_argument("--slowtimeout", type=int,
        help="kill errant slow tests after SLOWTIMEOUT seconds"
             " (default: $%s or %d)" % defaults['slowtimeout'])
    harness.add_argument("-t", "--timeout", type=int,
        help="kill errant tests after TIMEOUT seconds"
             " (default: $%s or %d)" % defaults['timeout'])
    harness.add_argument("--tmpdir",
        help="run tests in the given temporary directory"
             " (implies --keep-tmpdir)")
    harness.add_argument("-v", "--verbose", action="store_true",
        help="output verbose messages")

    # --- which hg / how it is configured ---
    hgconf = parser.add_argument_group('Mercurial Configuration')
    hgconf.add_argument("--chg", action="store_true",
        help="install and use chg wrapper in place of hg")
    hgconf.add_argument("--compiler",
        help="compiler to build with")
    hgconf.add_argument('--extra-config-opt', action="append", default=[],
        help='set the given config opt in the test hgrc')
    hgconf.add_argument("-l", "--local", action="store_true",
        help="shortcut for --with-hg=<testdir>/../hg, "
             "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set")
    hgconf.add_argument("--ipv6", action="store_true",
        help="prefer IPv6 to IPv4 for network related tests")
    hgconf.add_argument("--pure", action="store_true",
        help="use pure Python code instead of C extensions")
    hgconf.add_argument("-3", "--py3-warnings", action="store_true",
        help="enable Py3k warnings on Python 2.7+")
    hgconf.add_argument("--with-chg", metavar="CHG",
        help="use specified chg wrapper in place of hg")
    hgconf.add_argument("--with-hg",
        metavar="HG",
        help="test using specified hg script rather than a "
             "temporary installation")

    # --- reporting / output ---
    reporting = parser.add_argument_group('Results Reporting')
    reporting.add_argument("-C", "--annotate", action="store_true",
        help="output files annotated with coverage")
    reporting.add_argument("--color", choices=["always", "auto", "never"],
        default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
        help="colorisation: always|auto|never (default: auto)")
    reporting.add_argument("-c", "--cover", action="store_true",
        help="print a test coverage report")
    reporting.add_argument('--exceptions', action='store_true',
        help='log all exceptions and generate an exception report')
    reporting.add_argument("-H", "--htmlcov", action="store_true",
        help="create an HTML report of the coverage of the files")
    reporting.add_argument("--json", action="store_true",
        help="store test result data in 'report.json' file")
    reporting.add_argument("--outputdir",
        help="directory to write error logs to (default=test directory)")
    reporting.add_argument("-n", "--nodiff", action="store_true",
        help="skip showing test changes")
    reporting.add_argument("-S", "--noskips", action="store_true",
        help="don't report skip tests verbosely")
    reporting.add_argument("--time", action="store_true",
        help="time how long each test takes")
    reporting.add_argument("--view",
        help="external diff viewer")
    reporting.add_argument("--xunit",
        help="record xunit results at specified path")

    # Apply $HGTEST_* environment overrides as parser defaults.  Note
    # this rebinds each entry of the module-level "defaults" dict from
    # an (envvar, fallback) tuple to the coerced scalar value.
    for option, (envvar, default) in defaults.items():
        defaults[option] = type(default)(os.environ.get(envvar, default))
    parser.set_defaults(**defaults)

    return parser
469 469
def parseargs(args, parser):
    """Parse arguments with our OptionParser and validate results.

    Mutates and returns the options namespace; also sets the module
    globals `useipv6` and `verbose`.  Invalid combinations abort via
    parser.error().
    """
    options = parser.parse_args(args)

    # jython is always pure
    if 'java' in sys.platform or '__pypy__' in sys.modules:
        options.pure = True

    if options.with_hg:
        options.with_hg = canonpath(_bytespath(options.with_hg))
        if not (os.path.isfile(options.with_hg) and
                os.access(options.with_hg, os.X_OK)):
            parser.error('--with-hg must specify an executable hg script')
        if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
            sys.stderr.write('warning: --with-hg should specify an hg script\n')
            sys.stderr.flush()
    if options.local:
        # --local points with_hg (and with_chg if --chg) at the binaries
        # in the repository containing this script.
        testdir = os.path.dirname(_bytespath(canonpath(sys.argv[0])))
        reporootdir = os.path.dirname(testdir)
        pathandattrs = [(b'hg', 'with_hg')]
        if options.chg:
            pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
        for relpath, attr in pathandattrs:
            binpath = os.path.join(reporootdir, relpath)
            if os.name != 'nt' and not os.access(binpath, os.X_OK):
                parser.error('--local specified, but %r not found or '
                             'not executable' % binpath)
            setattr(options, attr, binpath)

    if (options.chg or options.with_chg) and os.name == 'nt':
        parser.error('chg does not work on %s' % os.name)
    if options.with_chg:
        options.chg = False # no installation to temporary location
        options.with_chg = canonpath(_bytespath(options.with_chg))
        if not (os.path.isfile(options.with_chg) and
                os.access(options.with_chg, os.X_OK)):
            parser.error('--with-chg must specify a chg executable')
    if options.chg and options.with_hg:
        # chg shares installation location with hg
        parser.error('--chg does not work when --with-hg is specified '
                     '(use --with-chg instead)')

    if options.color == 'always' and not pygmentspresent:
        sys.stderr.write('warning: --color=always ignored because '
                         'pygments is not installed\n')

    if options.bisect_repo and not options.known_good_rev:
        parser.error("--bisect-repo cannot be used without --known-good-rev")

    global useipv6
    if options.ipv6:
        useipv6 = checksocketfamily('AF_INET6')
    else:
        # only use IPv6 if IPv4 is unavailable and IPv6 is available
        useipv6 = ((not checksocketfamily('AF_INET'))
                   and checksocketfamily('AF_INET6'))

    options.anycoverage = options.cover or options.annotate or options.htmlcov
    if options.anycoverage:
        try:
            import coverage
            covver = version.StrictVersion(coverage.__version__).version
            if covver < (3, 3):
                parser.error('coverage options require coverage 3.3 or later')
        except ImportError:
            parser.error('coverage options now require the coverage package')

    if options.anycoverage and options.local:
        # this needs some path mangling somewhere, I guess
        parser.error("sorry, coverage options do not work when --local "
                     "is specified")

    if options.anycoverage and options.with_hg:
        parser.error("sorry, coverage options do not work when --with-hg "
                     "is specified")

    global verbose
    if options.verbose:
        # '' is falsy but distinct from False; vlog() tests `is False`
        verbose = ''

    if options.tmpdir:
        options.tmpdir = canonpath(options.tmpdir)

    if options.jobs < 1:
        parser.error('--jobs must be positive')
    if options.interactive and options.debug:
        parser.error("-i/--interactive and -d/--debug are incompatible")
    if options.debug:
        # --debug streams output directly, so timeouts are meaningless
        if options.timeout != defaults['timeout']:
            sys.stderr.write(
                'warning: --timeout option ignored with --debug\n')
        if options.slowtimeout != defaults['slowtimeout']:
            sys.stderr.write(
                'warning: --slowtimeout option ignored with --debug\n')
        options.timeout = 0
        options.slowtimeout = 0
    if options.py3_warnings:
        if PYTHON3:
            parser.error(
                '--py3-warnings can only be used on Python 2.7')

    if options.blacklist:
        options.blacklist = parselistfiles(options.blacklist, 'blacklist')
    if options.whitelist:
        options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
    else:
        options.whitelisted = {}

    if options.showchannels:
        options.nodiff = True

    return options
582 582
def rename(src, dst):
    """Move src to dst by copy-then-delete.

    Unlike os.rename(), this supports an already-existing destination
    (and one held open, e.g. on Windows), at the cost of atomicity.
    """
    shutil.copy(src, dst)
    os.remove(src)
589 589
_unified_diff = difflib.unified_diff
if PYTHON3:
    import functools
    # On Python 3 difflib.unified_diff only accepts str; diff_bytes
    # wraps it so the harness can diff bytes output directly.
    _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
594 594
def getdiff(expected, output, ref, err):
    """Return (servefail, lines) for a unified diff of test output.

    lines is the normalized diff (path separators and trailing spaces
    cleaned up in the header lines); servefail flags whether the diff
    shows a child server that failed to start.
    """
    servefail = False
    lines = []
    for line in _unified_diff(expected, output, ref, err):
        if line.startswith(b'+++') or line.startswith(b'---'):
            # normalize separators and strip trailing blanks in headers
            line = line.replace(b'\\', b'/')
            if line.endswith(b' \n'):
                line = line[:-2] + b'\n'
        lines.append(line)
        if (not servefail and
                line.startswith(b'+ abort: child process failed to start')):
            servefail = True
    return servefail, lines
609 609
# Module-level verbosity flag.  parseargs() rebinds it to '' (falsy,
# but distinct from False) when --verbose is given; vlog() therefore
# tests identity against False rather than truthiness.
verbose = False
def vlog(*msg):
    """Log only when in verbose mode."""
    if verbose is False:
        return

    return log(*msg)
617 617
# Bytes that break XML even in a CDATA block: control characters 0-31
# sans \t, \n and \r
CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")

# Match feature conditionalized output lines in the form, capturing the feature
# list in group 2, and the preceeding line output in group 1:
#
#   output..output (feature !)\n
optline = re.compile(b'(.*) \((.+?) !\)\n$')

def cdatasafe(data):
    """Make a string safe to include in a CDATA block.

    Certain control characters are illegal in a CDATA block, and
    there's no way to include a ]]> in a CDATA either. This function
    replaces illegal bytes with ? and adds a space between the ]] so
    that it won't break the CDATA block.
    """
    sanitized = CDATA_EVIL.sub(b'?', data)
    return sanitized.replace(b']]>', b'] ]>')
637 637
def log(*msg):
    """Log something to stdout.

    Arguments are strings to print.
    """
    # NOTE(review): iolock is not defined in this chunk; presumably a
    # module-level threading lock shared with other writers -- verify.
    with iolock:
        if verbose:
            # verbose may carry a prefix string (parseargs sets it to '')
            print(verbose, end=' ')
        for m in msg:
            print(m, end=' ')
        print()
        sys.stdout.flush()
650 650
def highlightdiff(line, color):
    """Colorize a bytes diff line via pygments when color is requested."""
    if not color:
        return line
    assert pygmentspresent
    # latin1 round-trips every byte value, so the result stays bytes-safe
    return pygments.highlight(line.decode('latin1'), difflexer,
                              terminal256formatter).encode('latin1')

def highlightmsg(msg, color):
    """Colorize a str harness summary message when color is requested."""
    if not color:
        return msg
    assert pygmentspresent
    return pygments.highlight(msg, runnerlexer, runnerformatter)
663 663
def terminate(proc):
    """Terminate subprocess"""
    vlog('# Terminating process %d' % proc.pid)
    try:
        proc.terminate()
    except OSError:
        # best-effort: the process may already have exited
        pass

def killdaemons(pidfile):
    """Kill any daemons listed in pidfile (delegates to killdaemons.py)."""
    # deferred import: killdaemons.py lives alongside this script
    import killdaemons as killmod
    return killmod.killdaemons(pidfile, tryhard=False, remove=True,
                               logfn=vlog)
676 676
class Test(unittest.TestCase):
    """Encapsulates a single, runnable test.

    While this class conforms to the unittest.TestCase API, it differs in that
    instances need to be instantiated manually. (Typically, unittest.TestCase
    classes are instantiated automatically by scanning modules.)
    """

    # Status code reserved for skipped tests (used by hghave).  A test
    # process exiting with this code is reported as skipped, not failed.
    SKIPPED_STATUS = 80
687 687
    def __init__(self, path, outputdir, tmpdir, keeptmpdir=False,
                 debug=False,
                 first=False,
                 timeout=None,
                 startport=None, extraconfigopts=None,
                 py3warnings=False, shell=None, hgcommand=None,
                 slowtimeout=None, usechg=False,
                 useipv6=False):
        """Create a test from parameters.

        path is the full path to the file defining the test.

        tmpdir is the main temporary directory to use for this test.

        keeptmpdir determines whether to keep the test's temporary directory
        after execution. It defaults to removal (False).

        debug mode will make the test execute verbosely, with unfiltered
        output.

        timeout controls the maximum run time of the test. It is ignored when
        debug is True. See slowtimeout for tests with #require slow.

        slowtimeout overrides timeout if the test has #require slow.

        startport controls the starting port number to use for this test. Each
        test will reserve 3 port numbers for execution. It is the caller's
        responsibility to allocate a non-overlapping port range to Test
        instances.

        extraconfigopts is an iterable of extra hgrc config options. Values
        must have the form "key=value" (something understood by hgrc). Values
        of the form "foo.key=value" will result in "[foo] key=value".

        py3warnings enables Py3k warnings.

        shell is the shell to execute tests in.
        """
        # fall back to the environment-derived defaults for unset limits
        if timeout is None:
            timeout = defaults['timeout']
        if startport is None:
            startport = defaults['port']
        if slowtimeout is None:
            slowtimeout = defaults['slowtimeout']
        self.path = path
        self.bname = os.path.basename(path)    # bytes basename
        self.name = _strpath(self.bname)       # str name for reporting
        self._testdir = os.path.dirname(path)
        self._outputdir = outputdir
        self._tmpname = os.path.basename(path)
        self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)

        self._threadtmp = tmpdir
        self._keeptmpdir = keeptmpdir
        self._debug = debug
        self._first = first
        self._timeout = timeout
        self._slowtimeout = slowtimeout
        self._startport = startport
        self._extraconfigopts = extraconfigopts or []
        self._py3warnings = py3warnings
        self._shell = _bytespath(shell)
        self._hgcommand = hgcommand or b'hg'
        self._usechg = usechg
        self._useipv6 = useipv6

        # per-run state; reset again in setUp()
        self._aborted = False
        self._daemonpids = []
        self._finished = None
        self._ret = None
        self._out = None
        self._skipped = None
        self._testtmp = None
        self._chgsockdir = None

        # reference (expected) output, loaded once at construction
        self._refout = self.readrefout()
764 764
765 765 def readrefout(self):
766 766 """read reference output"""
767 767 # If we're not in --debug mode and reference output file exists,
768 768 # check test output against it.
769 769 if self._debug:
770 770 return None # to match "out is None"
771 771 elif os.path.exists(self.refpath):
772 772 with open(self.refpath, 'rb') as f:
773 773 return f.read().splitlines(True)
774 774 else:
775 775 return []
776 776
    # needed to get base class __repr__ running
    @property
    def _testMethodName(self):
        return self.name

    def __str__(self):
        # the test file name doubles as the test's display name
        return self.name

    def shortDescription(self):
        # used by unittest result reporting
        return self.name
787 787
    def setUp(self):
        """Tasks to perform before run()."""
        # reset per-run state so a Test instance can be run repeatedly
        self._finished = False
        self._ret = None
        self._out = None
        self._skipped = None

        try:
            os.mkdir(self._threadtmp)
        except OSError as e:
            # the per-thread temp dir is shared by multiple tests and
            # may already exist
            if e.errno != errno.EEXIST:
                raise

        name = self._tmpname
        self._testtmp = os.path.join(self._threadtmp, name)
        os.mkdir(self._testtmp)

        # Remove any previous output files.
        if os.path.exists(self.errpath):
            try:
                os.remove(self.errpath)
            except OSError as e:
                # We might have raced another test to clean up a .err
                # file, so ignore ENOENT when removing a previous .err
                # file.
                if e.errno != errno.ENOENT:
                    raise

        if self._usechg:
            # dedicated directory for the chg server's socket
            self._chgsockdir = os.path.join(self._threadtmp,
                                            b'%s.chgsock' % name)
            os.mkdir(self._chgsockdir)
820 820
    def run(self, result):
        """Run this test and report results against a TestResult instance."""
        # This function is extremely similar to unittest.TestCase.run(). Once
        # we require Python 2.7 (or at least its version of unittest), this
        # function can largely go away.
        self._result = result
        result.startTest(self)
        try:
            try:
                self.setUp()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                return

            success = False
            try:
                self.runTest()
            except KeyboardInterrupt:
                self._aborted = True
                raise
            except unittest.SkipTest as e:
                result.addSkip(self, str(e))
                # The base class will have already counted this as a
                # test we "ran", but we want to exclude skipped tests
                # from those we count towards those run.
                result.testsRun -= 1
            except self.failureException as e:
                # This differs from unittest in that we don't capture
                # the stack trace. This is for historical reasons and
                # this decision could be revisited in the future,
                # especially for PythonTest instances.
                # NOTE(review): relies on addFailure() returning a truthy
                # value to mark success (stdlib's returns None) -- this
                # must be the harness' custom TestResult; confirm.
                if result.addFailure(self, str(e)):
                    success = True
            except Exception:
                result.addError(self, sys.exc_info())
            else:
                success = True

            # tearDown failures turn a passing test into an error
            try:
                self.tearDown()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                success = False

            if success:
                result.addSuccess(self)
        finally:
            # NOTE(review): stopTest() here takes an "interrupted" kwarg,
            # which stdlib TestResult.stopTest does not -- custom result
            # class assumed; confirm.
            result.stopTest(self, interrupted=self._aborted)
875 875
    def runTest(self):
        """Run this test instance.

        This will return a tuple describing the result of the test.
        """
        env = self._getenv()
        self._genrestoreenv(env)
        self._daemonpids.append(env['DAEMON_PIDS'])
        self._createhgrc(env['HGRCPATH'])

        vlog('# Test', self.name)

        ret, out = self._run(env)
        self._finished = True
        self._ret = ret
        self._out = out

        def describe(ret):
            # Human-readable description of a child exit status.
            if ret < 0:
                return 'killed by signal: %d' % -ret
            return 'returned error code %d' % ret

        self._skipped = False

        if ret == self.SKIPPED_STATUS:
            if out is None: # Debug mode, nothing to parse.
                missing = ['unknown']
                failed = None
            else:
                missing, failed = TTest.parsehghaveoutput(out)

            if not missing:
                missing = ['skipped']

            if failed:
                self.fail('hg have failed checking for %s' % failed[-1])
            else:
                self._skipped = True
                raise unittest.SkipTest(missing[-1])
        elif ret == 'timeout':
            self.fail('timed out')
        elif ret is False:
            self.fail('no result code from test')
        elif out != self._refout:
            # Diff generation may rely on written .err file.
            if (ret != 0 or out != self._refout) and not self._skipped \
                and not self._debug:
                with open(self.errpath, 'wb') as f:
                    for line in out:
                        f.write(line)

            # The result object handles diff calculation for us.
            # firstlock serializes the "first failure" bookkeeping across
            # worker threads.
            with firstlock:
                if self._result.addOutputMismatch(self, ret, out, self._refout):
                    # change was accepted, skip failing
                    return
                if self._first:
                    global firsterror
                    firsterror = True

            if ret:
                msg = 'output changed and ' + describe(ret)
            else:
                msg = 'output changed'

            self.fail(msg)
        elif ret:
            self.fail(describe(ret))
944 944
    def tearDown(self):
        """Tasks to perform after run()."""
        for entry in self._daemonpids:
            killdaemons(entry)
        self._daemonpids = []

        if self._keeptmpdir:
            log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
                (self._testtmp.decode('utf-8'),
                 self._threadtmp.decode('utf-8')))
        else:
            shutil.rmtree(self._testtmp, True)
            shutil.rmtree(self._threadtmp, True)

        if self._usechg:
            # chgservers will stop automatically after they find the socket
            # files are deleted
            shutil.rmtree(self._chgsockdir, True)

        # Preserve the output of a failing, non-skipped run in the .err
        # file so it can be inspected or diffed later.
        if (self._ret != 0 or self._out != self._refout) and not self._skipped \
            and not self._debug and self._out:
            with open(self.errpath, 'wb') as f:
                for line in self._out:
                    f.write(line)

        vlog("# Ret was:", self._ret, '(%s)' % self.name)
971 971
    def _run(self, env):
        """Execute the test body; subclasses must override."""
        # This should be implemented in child classes to run tests.
        raise unittest.SkipTest('unknown test type')
975 975
    def abort(self):
        """Terminate execution of this test."""
        # Only sets a flag; the running test observes _aborted at safe
        # points (see run() and subclasses' _run()).
        self._aborted = True
979 979
980 980 def _portmap(self, i):
981 981 offset = b'' if i == 0 else b'%d' % i
982 982 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
983 983
    def _getreplacements(self):
        """Obtain a mapping of text replacements to apply to test output.

        Test output needs to be normalized so it can be compared to expected
        output. This function defines how some of that normalization will
        occur.
        """
        r = [
            # This list should be parallel to defineport in _getenv
            self._portmap(0),
            self._portmap(1),
            self._portmap(2),
            (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
            (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
        ]
        r.append((self._escapepath(self._testtmp), b'$TESTTMP'))

        # Optional per-repository substitutions, loaded from the test
        # directory's common-pattern.py (expected to define 'substitutions').
        replacementfile = os.path.join(self._testdir, b'common-pattern.py')

        if os.path.exists(replacementfile):
            data = {}
            with open(replacementfile, mode='rb') as source:
                # the intermediate 'compile' step help with debugging
                code = compile(source.read(), replacementfile, 'exec')
                exec(code, data)
            for value in data.get('substitutions', ()):
                if len(value) != 2:
                    msg = 'malformatted substitution in %s: %r'
                    msg %= (replacementfile, value)
                    raise ValueError(msg)
                r.append(value)
        return r
1016 1016
1017 1017 def _escapepath(self, p):
1018 1018 if os.name == 'nt':
1019 1019 return (
1020 1020 (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
1021 1021 c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
1022 1022 for c in [p[i:i + 1] for i in range(len(p))]))
1023 1023 )
1024 1024 else:
1025 1025 return re.escape(p)
1026 1026
1027 1027 def _localip(self):
1028 1028 if self._useipv6:
1029 1029 return b'::1'
1030 1030 else:
1031 1031 return b'127.0.0.1'
1032 1032
    def _genrestoreenv(self, testenv):
        """Generate a script that can be used by tests to restore the original
        environment."""
        # Put the restoreenv script inside self._threadtmp
        scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
        testenv['HGTEST_RESTOREENV'] = _strpath(scriptpath)

        # Only restore environment variable names that the shell allows
        # us to export.
        name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')

        # Do not restore these variables; otherwise tests would fail.
        reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}

        with open(scriptpath, 'w') as envf:
            for name, value in origenviron.items():
                if not name_regex.match(name):
                    # Skip environment variables with unusual names not
                    # allowed by most shells.
                    continue
                if name in reqnames:
                    continue
                envf.write('%s=%s\n' % (name, shellquote(value)))

            # Variables introduced for the test run (absent from the
            # original environment) are unset by the restore script.
            for name in testenv:
                if name in origenviron or name in reqnames:
                    continue
                envf.write('unset %s\n' % (name,))
1061 1061
    def _getenv(self):
        """Obtain environment variables to use during test execution."""
        def defineport(i):
            # HGPORT, HGPORT1, HGPORT2 ... (no suffix for port 0).
            offset = '' if i == 0 else '%s' % i
            env["HGPORT%s" % offset] = '%s' % (self._startport + i)
        env = os.environ.copy()
        env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
        env['HGEMITWARNINGS'] = '1'
        env['TESTTMP'] = _strpath(self._testtmp)
        env['TESTNAME'] = self.name
        env['HOME'] = _strpath(self._testtmp)
        # This number should match portneeded in _getport
        for port in xrange(3):
            # This list should be parallel to _portmap in _getreplacements
            defineport(port)
        env["HGRCPATH"] = _strpath(os.path.join(self._threadtmp, b'.hgrc'))
        env["DAEMON_PIDS"] = _strpath(os.path.join(self._threadtmp,
                                                   b'daemon.pids'))
        env["HGEDITOR"] = ('"' + sys.executable + '"'
                           + ' -c "import sys; sys.exit(0)"')
        env["HGUSER"]   = "test"
        env["HGENCODING"] = "ascii"
        env["HGENCODINGMODE"] = "strict"
        env["HGHOSTNAME"] = "test-hostname"
        env['HGIPV6'] = str(int(self._useipv6))
        # See contrib/catapipe.py for how to use this functionality.
        if 'HGTESTCATAPULTSERVERPIPE' not in env:
            # If we don't have HGTESTCATAPULTSERVERPIPE explicitly set, pull the
            # non-test one in as a default, otherwise set to devnull
            env['HGTESTCATAPULTSERVERPIPE'] = \
                env.get('HGCATAPULTSERVERPIPE', os.devnull)

        extraextensions = []
        for opt in self._extraconfigopts:
            section, key = opt.encode('utf-8').split(b'.', 1)
            # NOTE(review): section is bytes but is compared against the
            # str 'extensions'; on Python 3 this comparison is always
            # False — TODO confirm whether this should be b'extensions'.
            if section != 'extensions':
                continue
            name = key.split(b'=', 1)[0]
            extraextensions.append(name)

        if extraextensions:
            env['HGTESTEXTRAEXTENSIONS'] = b' '.join(extraextensions)

        # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
        # IP addresses.
        env['LOCALIP'] = _strpath(self._localip())

        # Reset some environment variables to well-known values so that
        # the tests produce repeatable output.
        env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
        env['TZ'] = 'GMT'
        env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
        env['COLUMNS'] = '80'
        env['TERM'] = 'xterm'

        # Variables that could change test behavior are dropped entirely.
        dropped = [
            'CDPATH',
            'CHGDEBUG',
            'EDITOR',
            'GREP_OPTIONS',
            'HG',
            'HGMERGE',
            'HGPLAIN',
            'HGPLAINEXCEPT',
            'HGPROF',
            'http_proxy',
            'no_proxy',
            'NO_PROXY',
            'PAGER',
            'VISUAL',
        ]

        for k in dropped:
            if k in env:
                del env[k]

        # unset env related to hooks
        for k in list(env):
            if k.startswith('HG_'):
                del env[k]

        if self._usechg:
            env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')

        return env
1147 1147
1148 1148 def _createhgrc(self, path):
1149 1149 """Create an hgrc file for this test."""
1150 1150 with open(path, 'wb') as hgrc:
1151 1151 hgrc.write(b'[ui]\n')
1152 1152 hgrc.write(b'slash = True\n')
1153 1153 hgrc.write(b'interactive = False\n')
1154 1154 hgrc.write(b'merge = internal:merge\n')
1155 1155 hgrc.write(b'mergemarkers = detailed\n')
1156 1156 hgrc.write(b'promptecho = True\n')
1157 1157 hgrc.write(b'[defaults]\n')
1158 1158 hgrc.write(b'[devel]\n')
1159 1159 hgrc.write(b'all-warnings = true\n')
1160 1160 hgrc.write(b'default-date = 0 0\n')
1161 1161 hgrc.write(b'[largefiles]\n')
1162 1162 hgrc.write(b'usercache = %s\n' %
1163 1163 (os.path.join(self._testtmp, b'.cache/largefiles')))
1164 1164 hgrc.write(b'[lfs]\n')
1165 1165 hgrc.write(b'usercache = %s\n' %
1166 1166 (os.path.join(self._testtmp, b'.cache/lfs')))
1167 1167 hgrc.write(b'[web]\n')
1168 1168 hgrc.write(b'address = localhost\n')
1169 1169 hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii'))
1170 1170 hgrc.write(b'server-header = testing stub value\n')
1171 1171
1172 1172 for opt in self._extraconfigopts:
1173 1173 section, key = opt.encode('utf-8').split(b'.', 1)
1174 1174 assert b'=' in key, ('extra config opt %s must '
1175 1175 'have an = for assignment' % opt)
1176 1176 hgrc.write(b'[%s]\n%s\n' % (section, key))
1177 1177
    def fail(self, msg):
        """Report a test failure with message *msg*."""
        # unittest differentiates between errored and failed.
        # Failed is denoted by AssertionError (by default at least).
        raise AssertionError(msg)
1182 1182
    def _runcommand(self, cmd, env, normalizenewlines=False):
        """Run command in a sub-process, capturing the output (stdout and
        stderr).

        Return a tuple (exitcode, output). output is None in debug mode.
        """
        if self._debug:
            # Debug mode: the child inherits our stdio, so nothing is
            # captured and no output processing happens.
            proc = subprocess.Popen(_strpath(cmd), shell=True,
                                    cwd=_strpath(self._testtmp),
                                    env=env)
            ret = proc.wait()
            return (ret, None)

        proc = Popen4(cmd, self._testtmp, self._timeout, env)
        def cleanup():
            # Kill the child and any daemons it spawned; make sure a
            # killed child does not look like a clean exit.
            terminate(proc)
            ret = proc.wait()
            if ret == 0:
                ret = signal.SIGTERM << 8
            killdaemons(env['DAEMON_PIDS'])
            return ret

        output = b''
        proc.tochild.close()

        try:
            output = proc.fromchild.read()
        except KeyboardInterrupt:
            vlog('# Handling keyboard interrupt')
            cleanup()
            raise

        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)

        if proc.timeout:
            # Popen4 sets .timeout when the child exceeded self._timeout.
            ret = 'timeout'

        if ret:
            killdaemons(env['DAEMON_PIDS'])

        # Normalize the raw output so it can be compared with the
        # reference output (ports, paths, IPs, ...).
        for s, r in self._getreplacements():
            output = re.sub(s, r, output)

        if normalizenewlines:
            output = output.replace(b'\r\n', b'\n')

        return ret, output.splitlines(True)
1232 1232
class PythonTest(Test):
    """A Python-based test."""

    @property
    def refpath(self):
        # Reference output lives next to the test as <name>.out.
        return os.path.join(self._testdir, b'%s.out' % self.bname)

    def _run(self, env):
        """Execute the .py test with the configured Python interpreter."""
        # Optionally ask Python 2 to emit py3 compatibility warnings.
        py3switch = b' -3' if self._py3warnings else b''
        # Quote the python(3) executable for Windows
        cmd = b'"%s"%s "%s"' % (PYTHON, py3switch, self.path)
        vlog("# Running", cmd)
        # Windows needs CRLF output normalized before comparison.
        result = self._runcommand(cmd, env,
                                  normalizenewlines=(os.name == 'nt'))
        if self._aborted:
            raise KeyboardInterrupt()

        return result
1252 1252
# Some glob patterns apply only in some circumstances, so the script
# might want to remove (glob) annotations that otherwise should be
# retained. Lines matching any of these patterns never earn a '-glob'
# warning from TTest.globmatch.
checkcodeglobpats = [
    # On Windows it looks like \ doesn't require a (glob), but we know
    # better.
    re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
    re.compile(br'^moving \S+/.*[^)]$'),
    re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
    # Not all platforms have 127.0.0.1 as loopback (though most do),
    # so we always glob that too.
    re.compile(br'.*\$LOCALIP.*$'),
]
1266 1266
# bchr(i): portable "int -> single byte" conversion. On Python 3,
# indexing bytes yields ints and chr() would produce str, so wrap the
# int in a one-element bytes object instead.
bchr = chr
if PYTHON3:
    bchr = lambda x: bytes([x])
1270 1270
1271 1271 class TTest(Test):
1272 1272 """A "t test" is a test backed by a .t file."""
1273 1273
1274 1274 SKIPPED_PREFIX = b'skipped: '
1275 1275 FAILED_PREFIX = b'hghave check failed: '
1276 1276 NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search
1277 1277
1278 1278 ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
1279 1279 ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
1280 1280 ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
1281 1281
1282 1282 def __init__(self, path, *args, **kwds):
1283 1283 # accept an extra "case" parameter
1284 1284 case = kwds.pop('case', [])
1285 1285 self._case = case
1286 1286 self._allcases = {x for y in parsettestcases(path) for x in y}
1287 1287 super(TTest, self).__init__(path, *args, **kwds)
1288 1288 if case:
1289 1289 casepath = b'#'.join(case)
1290 1290 self.name = '%s#%s' % (self.name, _strpath(casepath))
1291 1291 self.errpath = b'%s#%s.err' % (self.errpath[:-4], casepath)
1292 1292 self._tmpname += b'-%s' % casepath
1293 1293 self._have = {}
1294 1294
1295 1295 @property
1296 1296 def refpath(self):
1297 1297 return os.path.join(self._testdir, self.bname)
1298 1298
1299 1299 def _run(self, env):
1300 1300 with open(self.path, 'rb') as f:
1301 1301 lines = f.readlines()
1302 1302
1303 1303 # .t file is both reference output and the test input, keep reference
1304 1304 # output updated with the the test input. This avoids some race
1305 1305 # conditions where the reference output does not match the actual test.
1306 1306 if self._refout is not None:
1307 1307 self._refout = lines
1308 1308
1309 1309 salt, script, after, expected = self._parsetest(lines)
1310 1310
1311 1311 # Write out the generated script.
1312 1312 fname = b'%s.sh' % self._testtmp
1313 1313 with open(fname, 'wb') as f:
1314 1314 for l in script:
1315 1315 f.write(l)
1316 1316
1317 1317 cmd = b'%s "%s"' % (self._shell, fname)
1318 1318 vlog("# Running", cmd)
1319 1319
1320 1320 exitcode, output = self._runcommand(cmd, env)
1321 1321
1322 1322 if self._aborted:
1323 1323 raise KeyboardInterrupt()
1324 1324
1325 1325 # Do not merge output if skipped. Return hghave message instead.
1326 1326 # Similarly, with --debug, output is None.
1327 1327 if exitcode == self.SKIPPED_STATUS or output is None:
1328 1328 return exitcode, output
1329 1329
1330 1330 return self._processoutput(exitcode, output, salt, after, expected)
1331 1331
1332 1332 def _hghave(self, reqs):
1333 1333 allreqs = b' '.join(reqs)
1334 1334 if allreqs in self._have:
1335 1335 return self._have.get(allreqs)
1336 1336
1337 1337 # TODO do something smarter when all other uses of hghave are gone.
1338 1338 runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
1339 1339 tdir = runtestdir.replace(b'\\', b'/')
1340 1340 proc = Popen4(b'%s -c "%s/hghave %s"' %
1341 1341 (self._shell, tdir, allreqs),
1342 1342 self._testtmp, 0, self._getenv())
1343 1343 stdout, stderr = proc.communicate()
1344 1344 ret = proc.wait()
1345 1345 if wifexited(ret):
1346 1346 ret = os.WEXITSTATUS(ret)
1347 1347 if ret == 2:
1348 1348 print(stdout.decode('utf-8'))
1349 1349 sys.exit(1)
1350 1350
1351 1351 if ret != 0:
1352 1352 self._have[allreqs] = (False, stdout)
1353 1353 return False, stdout
1354 1354
1355 1355 if b'slow' in reqs:
1356 1356 self._timeout = self._slowtimeout
1357 1357
1358 1358 self._have[allreqs] = (True, None)
1359 1359 return True, None
1360 1360
1361 1361 def _iftest(self, args):
1362 1362 # implements "#if"
1363 1363 reqs = []
1364 1364 for arg in args:
1365 1365 if arg.startswith(b'no-') and arg[3:] in self._allcases:
1366 1366 if arg[3:] in self._case:
1367 1367 return False
1368 1368 elif arg in self._allcases:
1369 1369 if arg not in self._case:
1370 1370 return False
1371 1371 else:
1372 1372 reqs.append(arg)
1373 1373 return self._hghave(reqs)[0]
1374 1374
1375 1375 def _parsetest(self, lines):
1376 1376 # We generate a shell script which outputs unique markers to line
1377 1377 # up script results with our source. These markers include input
1378 1378 # line number and the last return code.
1379 1379 salt = b"SALT%d" % time.time()
1380 1380 def addsalt(line, inpython):
1381 1381 if inpython:
1382 1382 script.append(b'%s %d 0\n' % (salt, line))
1383 1383 else:
1384 1384 script.append(b'echo %s %d $?\n' % (salt, line))
1385 1385 activetrace = []
1386 1386 session = str(uuid.uuid4())
1387 1387 if PYTHON3:
1388 1388 session = session.encode('ascii')
1389 1389 hgcatapult = os.getenv('HGTESTCATAPULTSERVERPIPE')
1390 1390 def toggletrace(cmd=None):
1391 1391 if not hgcatapult or hgcatapult == os.devnull:
1392 1392 return
1393 1393
1394 1394 if activetrace:
1395 1395 script.append(
1396 1396 b'echo END %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n' % (
1397 1397 session, activetrace[0]))
1398 1398 if cmd is None:
1399 1399 return
1400 1400
1401 1401 if isinstance(cmd, str):
1402 1402 quoted = shellquote(cmd.strip())
1403 1403 else:
1404 1404 quoted = shellquote(cmd.strip().decode('utf8')).encode('utf8')
1405 1405 quoted = quoted.replace(b'\\', b'\\\\')
1406 1406 script.append(
1407 1407 b'echo START %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n' % (
1408 1408 session, quoted))
1409 1409 activetrace[0:] = [quoted]
1410 1410
1411 1411 script = []
1412 1412
1413 1413 # After we run the shell script, we re-unify the script output
1414 1414 # with non-active parts of the source, with synchronization by our
1415 1415 # SALT line number markers. The after table contains the non-active
1416 1416 # components, ordered by line number.
1417 1417 after = {}
1418 1418
1419 1419 # Expected shell script output.
1420 1420 expected = {}
1421 1421
1422 1422 pos = prepos = -1
1423 1423
1424 1424 # True or False when in a true or false conditional section
1425 1425 skipping = None
1426 1426
1427 1427 # We keep track of whether or not we're in a Python block so we
1428 1428 # can generate the surrounding doctest magic.
1429 1429 inpython = False
1430 1430
1431 1431 if self._debug:
1432 1432 script.append(b'set -x\n')
1433 1433 if self._hgcommand != b'hg':
1434 1434 script.append(b'alias hg="%s"\n' % self._hgcommand)
1435 1435 if os.getenv('MSYSTEM'):
1436 1436 script.append(b'alias pwd="pwd -W"\n')
1437 1437
1438 1438 if hgcatapult and hgcatapult != os.devnull:
1439 1439 # Kludge: use a while loop to keep the pipe from getting
1440 1440 # closed by our echo commands. The still-running file gets
1441 1441 # reaped at the end of the script, which causes the while
1442 1442 # loop to exit and closes the pipe. Sigh.
1443 1443 script.append(
1444 1444 b'rtendtracing() {\n'
1445 1445 b' echo END %(session)s %(name)s >> %(catapult)s\n'
1446 1446 b' rm -f "$TESTTMP/.still-running"\n'
1447 1447 b'}\n'
1448 1448 b'trap "rtendtracing" 0\n'
1449 1449 b'touch "$TESTTMP/.still-running"\n'
1450 1450 b'while [ -f "$TESTTMP/.still-running" ]; do sleep 1; done '
1451 1451 b'> %(catapult)s &\n'
1452 1452 b'HGCATAPULTSESSION=%(session)s ; export HGCATAPULTSESSION\n'
1453 1453 b'echo START %(session)s %(name)s >> %(catapult)s\n'
1454 1454 % {
1455 1455 'name': self.name,
1456 1456 'session': session,
1457 1457 'catapult': hgcatapult,
1458 1458 }
1459 1459 )
1460 1460
1461 1461 if self._case:
1462 1462 casestr = b'#'.join(self._case)
1463 1463 if isinstance(self._case, str):
1464 1464 quoted = shellquote(casestr)
1465 1465 else:
1466 1466 quoted = shellquote(casestr.decode('utf8')).encode('utf8')
1467 1467 script.append(b'TESTCASE=%s\n' % quoted)
1468 1468 script.append(b'export TESTCASE\n')
1469 1469
1470 1470 n = 0
1471 1471 for n, l in enumerate(lines):
1472 1472 if not l.endswith(b'\n'):
1473 1473 l += b'\n'
1474 1474 if l.startswith(b'#require'):
1475 1475 lsplit = l.split()
1476 1476 if len(lsplit) < 2 or lsplit[0] != b'#require':
1477 1477 after.setdefault(pos, []).append(' !!! invalid #require\n')
1478 1478 if not skipping:
1479 1479 haveresult, message = self._hghave(lsplit[1:])
1480 1480 if not haveresult:
1481 1481 script = [b'echo "%s"\nexit 80\n' % message]
1482 1482 break
1483 1483 after.setdefault(pos, []).append(l)
1484 1484 elif l.startswith(b'#if'):
1485 1485 lsplit = l.split()
1486 1486 if len(lsplit) < 2 or lsplit[0] != b'#if':
1487 1487 after.setdefault(pos, []).append(' !!! invalid #if\n')
1488 1488 if skipping is not None:
1489 1489 after.setdefault(pos, []).append(' !!! nested #if\n')
1490 1490 skipping = not self._iftest(lsplit[1:])
1491 1491 after.setdefault(pos, []).append(l)
1492 1492 elif l.startswith(b'#else'):
1493 1493 if skipping is None:
1494 1494 after.setdefault(pos, []).append(' !!! missing #if\n')
1495 1495 skipping = not skipping
1496 1496 after.setdefault(pos, []).append(l)
1497 1497 elif l.startswith(b'#endif'):
1498 1498 if skipping is None:
1499 1499 after.setdefault(pos, []).append(' !!! missing #if\n')
1500 1500 skipping = None
1501 1501 after.setdefault(pos, []).append(l)
1502 1502 elif skipping:
1503 1503 after.setdefault(pos, []).append(l)
1504 1504 elif l.startswith(b' >>> '): # python inlines
1505 1505 after.setdefault(pos, []).append(l)
1506 1506 prepos = pos
1507 1507 pos = n
1508 1508 if not inpython:
1509 1509 # We've just entered a Python block. Add the header.
1510 1510 inpython = True
1511 1511 addsalt(prepos, False) # Make sure we report the exit code.
1512 1512 script.append(b'"%s" -m heredoctest <<EOF\n' % PYTHON)
1513 1513 addsalt(n, True)
1514 1514 script.append(l[2:])
1515 1515 elif l.startswith(b' ... '): # python inlines
1516 1516 after.setdefault(prepos, []).append(l)
1517 1517 script.append(l[2:])
1518 1518 elif l.startswith(b' $ '): # commands
1519 1519 if inpython:
1520 1520 script.append(b'EOF\n')
1521 1521 inpython = False
1522 1522 after.setdefault(pos, []).append(l)
1523 1523 prepos = pos
1524 1524 pos = n
1525 1525 addsalt(n, False)
1526 1526 rawcmd = l[4:]
1527 1527 cmd = rawcmd.split()
1528 1528 toggletrace(rawcmd)
1529 1529 if len(cmd) == 2 and cmd[0] == b'cd':
1530 1530 l = b' $ cd %s || exit 1\n' % cmd[1]
1531 1531 script.append(rawcmd)
1532 1532 elif l.startswith(b' > '): # continuations
1533 1533 after.setdefault(prepos, []).append(l)
1534 1534 script.append(l[4:])
1535 1535 elif l.startswith(b' '): # results
1536 1536 # Queue up a list of expected results.
1537 1537 expected.setdefault(pos, []).append(l[2:])
1538 1538 else:
1539 1539 if inpython:
1540 1540 script.append(b'EOF\n')
1541 1541 inpython = False
1542 1542 # Non-command/result. Queue up for merged output.
1543 1543 after.setdefault(pos, []).append(l)
1544 1544
1545 1545 if inpython:
1546 1546 script.append(b'EOF\n')
1547 1547 if skipping is not None:
1548 1548 after.setdefault(pos, []).append(' !!! missing #endif\n')
1549 1549 addsalt(n + 1, False)
1550 1550 # Need to end any current per-command trace
1551 1551 if activetrace:
1552 1552 toggletrace()
1553 1553 return salt, script, after, expected
1554 1554
1555 1555 def _processoutput(self, exitcode, output, salt, after, expected):
1556 1556 # Merge the script output back into a unified test.
1557 1557 warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
1558 1558 if exitcode != 0:
1559 1559 warnonly = 3
1560 1560
1561 1561 pos = -1
1562 1562 postout = []
1563 1563 for l in output:
1564 1564 lout, lcmd = l, None
1565 1565 if salt in l:
1566 1566 lout, lcmd = l.split(salt, 1)
1567 1567
1568 1568 while lout:
1569 1569 if not lout.endswith(b'\n'):
1570 1570 lout += b' (no-eol)\n'
1571 1571
1572 1572 # Find the expected output at the current position.
1573 1573 els = [None]
1574 1574 if expected.get(pos, None):
1575 1575 els = expected[pos]
1576 1576
1577 1577 optional = []
1578 1578 for i, el in enumerate(els):
1579 1579 r = False
1580 1580 if el:
1581 1581 r, exact = self.linematch(el, lout)
1582 1582 if isinstance(r, str):
1583 1583 if r == '-glob':
1584 1584 lout = ''.join(el.rsplit(' (glob)', 1))
1585 1585 r = '' # Warn only this line.
1586 1586 elif r == "retry":
1587 1587 postout.append(b' ' + el)
1588 1588 else:
1589 1589 log('\ninfo, unknown linematch result: %r\n' % r)
1590 1590 r = False
1591 1591 if r:
1592 1592 els.pop(i)
1593 1593 break
1594 1594 if el:
1595 1595 if el.endswith(b" (?)\n"):
1596 1596 optional.append(i)
1597 1597 else:
1598 1598 m = optline.match(el)
1599 1599 if m:
1600 1600 conditions = [
1601 1601 c for c in m.group(2).split(b' ')]
1602 1602
1603 1603 if not self._iftest(conditions):
1604 1604 optional.append(i)
1605 1605 if exact:
1606 1606 # Don't allow line to be matches against a later
1607 1607 # line in the output
1608 1608 els.pop(i)
1609 1609 break
1610 1610
1611 1611 if r:
1612 1612 if r == "retry":
1613 1613 continue
1614 1614 # clean up any optional leftovers
1615 1615 for i in optional:
1616 1616 postout.append(b' ' + els[i])
1617 1617 for i in reversed(optional):
1618 1618 del els[i]
1619 1619 postout.append(b' ' + el)
1620 1620 else:
1621 1621 if self.NEEDESCAPE(lout):
1622 1622 lout = TTest._stringescape(b'%s (esc)\n' %
1623 1623 lout.rstrip(b'\n'))
1624 1624 postout.append(b' ' + lout) # Let diff deal with it.
1625 1625 if r != '': # If line failed.
1626 1626 warnonly = 3 # for sure not
1627 1627 elif warnonly == 1: # Is "not yet" and line is warn only.
1628 1628 warnonly = 2 # Yes do warn.
1629 1629 break
1630 1630 else:
1631 1631 # clean up any optional leftovers
1632 1632 while expected.get(pos, None):
1633 1633 el = expected[pos].pop(0)
1634 1634 if el:
1635 1635 if not el.endswith(b" (?)\n"):
1636 1636 m = optline.match(el)
1637 1637 if m:
1638 1638 conditions = [c for c in m.group(2).split(b' ')]
1639 1639
1640 1640 if self._iftest(conditions):
1641 1641 # Don't append as optional line
1642 1642 continue
1643 1643 else:
1644 1644 continue
1645 1645 postout.append(b' ' + el)
1646 1646
1647 1647 if lcmd:
1648 1648 # Add on last return code.
1649 1649 ret = int(lcmd.split()[1])
1650 1650 if ret != 0:
1651 1651 postout.append(b' [%d]\n' % ret)
1652 1652 if pos in after:
1653 1653 # Merge in non-active test bits.
1654 1654 postout += after.pop(pos)
1655 1655 pos = int(lcmd.split()[0])
1656 1656
1657 1657 if pos in after:
1658 1658 postout += after.pop(pos)
1659 1659
1660 1660 if warnonly == 2:
1661 1661 exitcode = False # Set exitcode to warned.
1662 1662
1663 1663 return exitcode, postout
1664 1664
1665 1665 @staticmethod
1666 1666 def rematch(el, l):
1667 1667 try:
1668 1668 el = b'(?:' + el + b')'
1669 1669 # use \Z to ensure that the regex matches to the end of the string
1670 1670 if os.name == 'nt':
1671 1671 return re.match(el + br'\r?\n\Z', l)
1672 1672 return re.match(el + br'\n\Z', l)
1673 1673 except re.error:
1674 1674 # el is an invalid regex
1675 1675 return False
1676 1676
1677 1677 @staticmethod
1678 1678 def globmatch(el, l):
1679 1679 # The only supported special characters are * and ? plus / which also
1680 1680 # matches \ on windows. Escaping of these characters is supported.
1681 1681 if el + b'\n' == l:
1682 1682 if os.altsep:
1683 1683 # matching on "/" is not needed for this line
1684 1684 for pat in checkcodeglobpats:
1685 1685 if pat.match(el):
1686 1686 return True
1687 1687 return b'-glob'
1688 1688 return True
1689 1689 el = el.replace(b'$LOCALIP', b'*')
1690 1690 i, n = 0, len(el)
1691 1691 res = b''
1692 1692 while i < n:
1693 1693 c = el[i:i + 1]
1694 1694 i += 1
1695 1695 if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
1696 1696 res += el[i - 1:i + 1]
1697 1697 i += 1
1698 1698 elif c == b'*':
1699 1699 res += b'.*'
1700 1700 elif c == b'?':
1701 1701 res += b'.'
1702 1702 elif c == b'/' and os.altsep:
1703 1703 res += b'[/\\\\]'
1704 1704 else:
1705 1705 res += re.escape(c)
1706 1706 return TTest.rematch(res, l)
1707 1707
1708 1708 def linematch(self, el, l):
1709 1709 if el == l: # perfect match (fast)
1710 1710 return True, True
1711 1711 retry = False
1712 1712 if el.endswith(b" (?)\n"):
1713 1713 retry = "retry"
1714 1714 el = el[:-5] + b"\n"
1715 1715 else:
1716 1716 m = optline.match(el)
1717 1717 if m:
1718 1718 conditions = [c for c in m.group(2).split(b' ')]
1719 1719
1720 1720 el = m.group(1) + b"\n"
1721 1721 if not self._iftest(conditions):
1722 1722 retry = "retry" # Not required by listed features
1723 1723
1724 1724 if el.endswith(b" (esc)\n"):
1725 1725 if PYTHON3:
1726 1726 el = el[:-7].decode('unicode_escape') + '\n'
1727 1727 el = el.encode('utf-8')
1728 1728 else:
1729 1729 el = el[:-7].decode('string-escape') + '\n'
1730 1730 if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
1731 1731 return True, True
1732 1732 if el.endswith(b" (re)\n"):
1733 1733 return (TTest.rematch(el[:-6], l) or retry), False
1734 1734 if el.endswith(b" (glob)\n"):
1735 1735 # ignore '(glob)' added to l by 'replacements'
1736 1736 if l.endswith(b" (glob)\n"):
1737 1737 l = l[:-8] + b"\n"
1738 1738 return (TTest.globmatch(el[:-8], l) or retry), False
1739 1739 if os.altsep:
1740 1740 _l = l.replace(b'\\', b'/')
1741 1741 if el == _l or os.name == 'nt' and el[:-1] + b'\r\n' == _l:
1742 1742 return True, True
1743 1743 return retry, True
1744 1744
1745 1745 @staticmethod
1746 1746 def parsehghaveoutput(lines):
1747 1747 '''Parse hghave log lines.
1748 1748
1749 1749 Return tuple of lists (missing, failed):
1750 1750 * the missing/unknown features
1751 1751 * the features for which existence check failed'''
1752 1752 missing = []
1753 1753 failed = []
1754 1754 for line in lines:
1755 1755 if line.startswith(TTest.SKIPPED_PREFIX):
1756 1756 line = line.splitlines()[0]
1757 1757 missing.append(line[len(TTest.SKIPPED_PREFIX):].decode('utf-8'))
1758 1758 elif line.startswith(TTest.FAILED_PREFIX):
1759 1759 line = line.splitlines()[0]
1760 1760 failed.append(line[len(TTest.FAILED_PREFIX):].decode('utf-8'))
1761 1761
1762 1762 return missing, failed
1763 1763
    @staticmethod
    def _escapef(m):
        # re.sub() callback: translate a matched character to its escaped
        # representation via the TTest.ESCAPEMAP table.
        return TTest.ESCAPEMAP[m.group(0)]
1767 1767
    @staticmethod
    def _stringescape(s):
        # Escape every character matched by TTest.ESCAPESUB using the
        # _escapef() callback above; used to sanitize output lines.
        return TTest.ESCAPESUB(TTest._escapef, s)
1771 1771
# Serializes console output across concurrently running test threads.
iolock = threading.RLock()
# Guards first-failure bookkeeping when --first is in effect.
firstlock = threading.RLock()
# Set once some test has already failed and printed; later mismatches are
# suppressed as stale (see TestResult.addOutputMismatch).
firsterror = False
1775 1775
class TestResult(unittest._TextTestResult):
    """Holds results when executing via unittest."""
    # Don't worry too much about accessing the non-public _TextTestResult.
    # It is relatively common in Python testing tools.
    def __init__(self, options, *args, **kwargs):
        super(TestResult, self).__init__(*args, **kwargs)

        self._options = options

        # unittest.TestResult didn't have skipped until 2.7. We need to
        # polyfill it.
        self.skipped = []

        # We have a custom "ignored" result that isn't present in any Python
        # unittest implementation. It is very similar to skipped. It may make
        # sense to map it into skip some day.
        self.ignored = []

        # (name, cuser, csys, real, start, end) tuples; see stopTest().
        self.times = []
        self._firststarttime = None
        # Data stored for the benefit of generating xunit reports.
        self.successes = []
        self.faildata = {}

        if options.color == 'auto':
            self.color = pygmentspresent and self.stream.isatty()
        elif options.color == 'never':
            self.color = False
        else: # 'always', for testing purposes
            self.color = pygmentspresent

    def onStart(self, test):
        """ Can be overriden by custom TestResult
        """

    def onEnd(self):
        """ Can be overriden by custom TestResult
        """

    def addFailure(self, test, reason):
        self.failures.append((test, reason))

        if self._options.first:
            self.stop()
        else:
            with iolock:
                # Progress marker: 't' for a timeout, '!' for other failures.
                if reason == "timed out":
                    self.stream.write('t')
                else:
                    if not self._options.nodiff:
                        self.stream.write('\n')
                        # Exclude the '\n' from highlighting to lex correctly
                        formatted = 'ERROR: %s output changed\n' % test
                        self.stream.write(highlightmsg(formatted, self.color))
                    self.stream.write('!')

                self.stream.flush()

    def addSuccess(self, test):
        with iolock:
            super(TestResult, self).addSuccess(test)
        self.successes.append(test)

    def addError(self, test, err):
        super(TestResult, self).addError(test, err)
        if self._options.first:
            self.stop()

    # Polyfill.
    def addSkip(self, test, reason):
        self.skipped.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('skipped %s' % reason)
            else:
                self.stream.write('s')
                self.stream.flush()

    def addIgnore(self, test, reason):
        self.ignored.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('ignored %s' % reason)
            else:
                if reason not in ('not retesting', "doesn't match keyword"):
                    self.stream.write('i')
                else:
                    # Ignored-for-filtering tests still count as run.
                    self.testsRun += 1
                self.stream.flush()

    def addOutputMismatch(self, test, ret, got, expected):
        """Record a mismatch in test output for a particular test."""
        if self.shouldStop or firsterror:
            # don't print, some other test case already failed and
            # printed, we're just stale and probably failed due to our
            # temp dir getting cleaned up.
            return

        accepted = False
        lines = []

        with iolock:
            if self._options.nodiff:
                pass
            elif self._options.view:
                # Show the diff in an external viewer instead of inline.
                v = self._options.view
                os.system(r"%s %s %s" %
                          (v, _strpath(test.refpath), _strpath(test.errpath)))
            else:
                servefail, lines = getdiff(expected, got,
                                           test.refpath, test.errpath)
                self.stream.write('\n')
                for line in lines:
                    line = highlightdiff(line, self.color)
                    if PYTHON3:
                        self.stream.flush()
                        self.stream.buffer.write(line)
                        self.stream.buffer.flush()
                    else:
                        self.stream.write(line)
                        self.stream.flush()

                if servefail:
                    raise test.failureException(
                        'server failed to start (HGPORT=%s)' % test._startport)

            # handle interactive prompt without releasing iolock
            if self._options.interactive:
                if test.readrefout() != expected:
                    self.stream.write(
                        'Reference output has changed (run again to prompt '
                        'changes)')
                else:
                    self.stream.write('Accept this change? [n] ')
                    self.stream.flush()
                    answer = sys.stdin.readline().strip()
                    if answer.lower() in ('y', 'yes'):
                        if test.path.endswith(b'.t'):
                            rename(test.errpath, test.path)
                        else:
                            rename(test.errpath, '%s.out' % test.path)
                        accepted = True
            if not accepted:
                self.faildata[test.name] = b''.join(lines)

        return accepted

    def startTest(self, test):
        super(TestResult, self).startTest(test)

        # os.times module computes the user time and system time spent by
        # child's processes along with real elapsed time taken by a process.
        # This module has one limitation. It can only work for Linux user
        # and not for Windows.
        test.started = os.times()
        if self._firststarttime is None: # thread racy but irrelevant
            self._firststarttime = test.started[4]

    def stopTest(self, test, interrupted=False):
        super(TestResult, self).stopTest(test)

        test.stopped = os.times()

        starttime = test.started
        endtime = test.stopped
        origin = self._firststarttime
        self.times.append((test.name,
                           endtime[2] - starttime[2], # user space CPU time
                           endtime[3] - starttime[3], # sys  space CPU time
                           endtime[4] - starttime[4], # real time
                           starttime[4] - origin, # start date in run context
                           endtime[4] - origin, # end date in run context
                           ))

        if interrupted:
            with iolock:
                self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
                    test.name, self.times[-1][3]))
1954 1954
def getTestResult():
    """
    Returns the relevant test result
    """
    custom = os.environ.get("CUSTOM_TEST_RESULT")
    if custom is not None:
        # Allow an external module to supply the TestResult class.
        return __import__(custom).TestResult
    return TestResult
1964 1964
class TestSuite(unittest.TestSuite):
    """Custom unittest TestSuite that knows how to execute Mercurial tests."""

    def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
                 retest=False, keywords=None, loop=False, runs_per_test=1,
                 loadtest=None, showchannels=False,
                 *args, **kwargs):
        """Create a new instance that can run tests with a configuration.

        testdir specifies the directory where tests are executed from. This
        is typically the ``tests`` directory from Mercurial's source
        repository.

        jobs specifies the number of jobs to run concurrently. Each test
        executes on its own thread. Tests actually spawn new processes, so
        state mutation should not be an issue.

        If there is only one job, it will use the main thread.

        whitelist and blacklist denote tests that have been whitelisted and
        blacklisted, respectively. These arguments don't belong in TestSuite.
        Instead, whitelist and blacklist should be handled by the thing that
        populates the TestSuite with tests. They are present to preserve
        backwards compatible behavior which reports skipped tests as part
        of the results.

        retest denotes whether to retest failed tests. This arguably belongs
        outside of TestSuite.

        keywords denotes key words that will be used to filter which tests
        to execute. This arguably belongs outside of TestSuite.

        loop denotes whether to loop over tests forever.
        """
        super(TestSuite, self).__init__(*args, **kwargs)

        self._jobs = jobs
        self._whitelist = whitelist
        self._blacklist = blacklist
        self._retest = retest
        self._keywords = keywords
        self._loop = loop
        self._runs_per_test = runs_per_test
        self._loadtest = loadtest
        self._showchannels = showchannels

    def run(self, result):
        # We have a number of filters that need to be applied. We do this
        # here instead of inside Test because it makes the running logic for
        # Test simpler.
        tests = []
        num_tests = [0]
        for test in self._tests:
            def get():
                num_tests[0] += 1
                if getattr(test, 'should_reload', False):
                    return self._loadtest(test, num_tests[0])
                return test
            if not os.path.exists(test.path):
                result.addSkip(test, "Doesn't exist")
                continue

            # Whitelisted tests bypass the blacklist/retest/keyword filters.
            if not (self._whitelist and test.bname in self._whitelist):
                if self._blacklist and test.bname in self._blacklist:
                    result.addSkip(test, 'blacklisted')
                    continue

                if self._retest and not os.path.exists(test.errpath):
                    result.addIgnore(test, 'not retesting')
                    continue

                if self._keywords:
                    with open(test.path, 'rb') as f:
                        t = f.read().lower() + test.bname.lower()
                    ignored = False
                    for k in self._keywords.lower().split():
                        if k not in t:
                            result.addIgnore(test, "doesn't match keyword")
                            ignored = True
                            break

                    if ignored:
                        continue
            for _ in xrange(self._runs_per_test):
                tests.append(get())

        runtests = list(tests)
        done = queue.Queue()
        running = 0

        # One display slot per job for the --showchannels progress view.
        channels = [""] * self._jobs

        def job(test, result):
            # Claim the first free channel, run the test, then release it.
            for n, v in enumerate(channels):
                if not v:
                    channel = n
                    break
            else:
                raise ValueError('Could not find output channel')
            channels[channel] = "=" + test.name[5:].split(".")[0]
            try:
                test(result)
                done.put(None)
            except KeyboardInterrupt:
                pass
            except: # re-raises
                done.put(('!', test, 'run-test raised an error, see traceback'))
                raise
            finally:
                try:
                    channels[channel] = ''
                except IndexError:
                    pass

        def stat():
            # Background thread: periodically render channel activity.
            count = 0
            while channels:
                d = '\n%03s ' % count
                for n, v in enumerate(channels):
                    if v:
                        d += v[0]
                        channels[n] = v[1:] or '.'
                    else:
                        d += ' '
                    d += ' '
                with iolock:
                    sys.stdout.write(d + ' ')
                    sys.stdout.flush()
                for x in xrange(10):
                    if channels:
                        time.sleep(.1)
                count += 1

        stoppedearly = False

        if self._showchannels:
            statthread = threading.Thread(target=stat, name="stat")
            statthread.start()

        try:
            while tests or running:
                # Reap a finished job when all workers are busy or the
                # queue of pending tests is exhausted.
                if not done.empty() or running == self._jobs or not tests:
                    try:
                        done.get(True, 1)
                        running -= 1
                        if result and result.shouldStop:
                            stoppedearly = True
                            break
                    except queue.Empty:
                        continue
                if tests and not running == self._jobs:
                    test = tests.pop(0)
                    if self._loop:
                        if getattr(test, 'should_reload', False):
                            num_tests[0] += 1
                            tests.append(
                                self._loadtest(test, num_tests[0]))
                        else:
                            tests.append(test)
                    if self._jobs == 1:
                        job(test, result)
                    else:
                        t = threading.Thread(target=job, name=test.name,
                                             args=(test, result))
                        t.start()
                    running += 1

            # If we stop early we still need to wait on started tests to
            # finish. Otherwise, there is a race between the test completing
            # and the test's cleanup code running. This could result in the
            # test reporting incorrect.
            if stoppedearly:
                while running:
                    try:
                        done.get(True, 1)
                        running -= 1
                    except queue.Empty:
                        continue
        except KeyboardInterrupt:
            for test in runtests:
                test.abort()

        # Emptying channels makes the stat() thread's loop terminate.
        channels = []

        return result
2150 2150
2151 2151 # Save the most recent 5 wall-clock runtimes of each test to a
2152 2152 # human-readable text file named .testtimes. Tests are sorted
2153 2153 # alphabetically, while times for each test are listed from oldest to
2154 2154 # newest.
2155 2155
def loadtimes(outputdir):
    """Load recorded test runtimes from ``.testtimes`` in outputdir.

    Returns a list of ``(testname, [runtimes])`` tuples, oldest run first.
    A missing file yields an empty list; other IOErrors propagate.
    Malformed lines are skipped instead of crashing.
    """
    times = []
    try:
        with open(os.path.join(outputdir, b'.testtimes')) as fp:
            for line in fp:
                m = re.match('(.*?) ([0-9. ]+)', line)
                # Guard against blank/corrupt lines: re.match() returns None
                # for them, and the old code crashed with AttributeError.
                if m:
                    times.append((m.group(1),
                                  [float(t) for t in m.group(2).split()]))
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
    return times
2168 2168
def savetimes(outputdir, result):
    """Merge this run's wall-clock runtimes into ``.testtimes``.

    Keeps at most the five most recent times per test; skipped tests keep
    their previously recorded history.
    """
    maxruns = 5
    history = dict(loadtimes(outputdir))
    skippednames = set(str(entry[0]) for entry in result.skipped)
    for tdata in result.times:
        name, real = tdata[0], tdata[3]
        if name in skippednames:
            continue
        runs = history.setdefault(name, [])
        runs.append(real)
        runs[:] = runs[-maxruns:]

    # Write a temp file in the same directory, then swap it into place.
    fd, tmpname = tempfile.mkstemp(prefix=b'.testtimes',
                                   dir=outputdir, text=True)
    with os.fdopen(fd, 'w') as fp:
        for name in sorted(history):
            cols = ' '.join(['%.3f' % (t,) for t in history[name]])
            fp.write('%s %s\n' % (name, cols))
    timepath = os.path.join(outputdir, b'.testtimes')
    try:
        os.unlink(timepath)
    except OSError:
        pass
    try:
        os.rename(tmpname, timepath)
    except OSError:
        pass
2194 2194
class TextTestRunner(unittest.TextTestRunner):
    """Custom unittest test runner that uses appropriate settings."""

    def __init__(self, runner, *args, **kwargs):
        super(TextTestRunner, self).__init__(*args, **kwargs)

        # The owning TestRunner, consulted for options and output paths.
        self._runner = runner

        self._result = getTestResult()(self._runner.options, self.stream,
                                       self.descriptions, self.verbosity)

    def listtests(self, test):
        """List test names (and report them all as successes)."""
        test = sorted(test, key=lambda t: t.name)

        self._result.onStart(test)

        for t in test:
            print(t.name)
            self._result.addSuccess(t)

        if self._runner.options.xunit:
            with open(self._runner.options.xunit, "wb") as xuf:
                self._writexunit(self._result, xuf)

        if self._runner.options.json:
            jsonpath = os.path.join(self._runner._outputdir, b'report.json')
            with open(jsonpath, 'w') as fp:
                self._writejson(self._result, fp)

        return self._result

    def run(self, test):
        """Execute the suite and emit the summary/reports."""
        self._result.onStart(test)
        test(self._result)

        failed = len(self._result.failures)
        skipped = len(self._result.skipped)
        ignored = len(self._result.ignored)

        with iolock:
            self.stream.writeln('')

            if not self._runner.options.noskips:
                for test, msg in self._result.skipped:
                    formatted = 'Skipped %s: %s\n' % (test.name, msg)
                    msg = highlightmsg(formatted, self._result.color)
                    self.stream.write(msg)
            for test, msg in self._result.failures:
                formatted = 'Failed %s: %s\n' % (test.name, msg)
                self.stream.write(highlightmsg(formatted, self._result.color))
            for test, msg in self._result.errors:
                self.stream.writeln('Errored %s: %s' % (test.name, msg))

            if self._runner.options.xunit:
                with open(self._runner.options.xunit, "wb") as xuf:
                    self._writexunit(self._result, xuf)

            if self._runner.options.json:
                jsonpath = os.path.join(self._runner._outputdir, b'report.json')
                with open(jsonpath, 'w') as fp:
                    self._writejson(self._result, fp)

            self._runner._checkhglib('Tested')

            savetimes(self._runner._outputdir, self._result)

            if failed and self._runner.options.known_good_rev:
                self._bisecttests(t for t, m in self._result.failures)
            self.stream.writeln(
                '# Ran %d tests, %d skipped, %d failed.'
                % (self._result.testsRun, skipped + ignored, failed))
            if failed:
                self.stream.writeln('python hash seed: %s' %
                    os.environ['PYTHONHASHSEED'])
            if self._runner.options.time:
                self.printtimes(self._result.times)

            if self._runner.options.exceptions:
                exceptions = aggregateexceptions(
                    os.path.join(self._runner._outputdir, b'exceptions'))

                self.stream.writeln('Exceptions Report:')
                self.stream.writeln('%d total from %d frames' %
                                    (exceptions['total'],
                                     len(exceptions['exceptioncounts'])))
                combined = exceptions['combined']
                for key in sorted(combined, key=combined.get, reverse=True):
                    frame, line, exc = key
                    totalcount, testcount, leastcount, leasttest = combined[key]

                    self.stream.writeln('%d (%d tests)\t%s: %s (%s - %d total)'
                                        % (totalcount,
                                           testcount,
                                           frame, exc,
                                           leasttest, leastcount))

            self.stream.flush()

        return self._result

    def _bisecttests(self, tests):
        # Use `hg bisect` between known_good_rev and '.' to find, for each
        # failing test, the changeset that broke (or fixed) it.
        bisectcmd = ['hg', 'bisect']
        bisectrepo = self._runner.options.bisect_repo
        if bisectrepo:
            bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
        def pread(args):
            # Run a command with HGPLAIN=1 and return its combined output.
            env = os.environ.copy()
            env['HGPLAIN'] = '1'
            p = subprocess.Popen(args, stderr=subprocess.STDOUT,
                                 stdout=subprocess.PIPE, env=env)
            data = p.stdout.read()
            p.wait()
            return data
        for test in tests:
            pread(bisectcmd + ['--reset']),
            pread(bisectcmd + ['--bad', '.'])
            pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
            # TODO: we probably need to forward more options
            # that alter hg's behavior inside the tests.
            opts = ''
            withhg = self._runner.options.with_hg
            if withhg:
                opts += ' --with-hg=%s ' % shellquote(_strpath(withhg))
            rtc = '%s %s %s %s' % (sys.executable, sys.argv[0], opts,
                                   test)
            data = pread(bisectcmd + ['--command', rtc])
            m = re.search(
                (br'\nThe first (?P<goodbad>bad|good) revision '
                 br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
                 br'summary: +(?P<summary>[^\n]+)\n'),
                data, (re.MULTILINE | re.DOTALL))
            if m is None:
                self.stream.writeln(
                    'Failed to identify failure point for %s' % test)
                continue
            dat = m.groupdict()
            verb = 'broken' if dat['goodbad'] == b'bad' else 'fixed'
            self.stream.writeln(
                '%s %s by %s (%s)' % (
                    test, verb, dat['node'].decode('ascii'),
                    dat['summary'].decode('utf8', 'ignore')))

    def printtimes(self, times):
        # iolock held by run
        self.stream.writeln('# Producing time report')
        times.sort(key=lambda t: (t[3]))
        cols = '%7.3f %7.3f %7.3f %7.3f %7.3f   %s'
        self.stream.writeln('%-7s %-7s %-7s %-7s %-7s   %s' %
                            ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
        for tdata in times:
            test = tdata[0]
            cuser, csys, real, start, end = tdata[1:6]
            self.stream.writeln(cols % (start, end, cuser, csys, real, test))

    @staticmethod
    def _writexunit(result, outf):
        # See http://llg.cubic.org/docs/junit/ for a reference.
        timesd = dict((t[0], t[3]) for t in result.times)
        doc = minidom.Document()
        s = doc.createElement('testsuite')
        s.setAttribute('name', 'run-tests')
        s.setAttribute('tests', str(result.testsRun))
        s.setAttribute('errors', "0") # TODO
        s.setAttribute('failures', str(len(result.failures)))
        s.setAttribute('skipped', str(len(result.skipped) +
                                      len(result.ignored)))
        doc.appendChild(s)
        for tc in result.successes:
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            tctime = timesd.get(tc.name)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            s.appendChild(t)
        for tc, err in sorted(result.faildata.items()):
            t = doc.createElement('testcase')
            t.setAttribute('name', tc)
            tctime = timesd.get(tc)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            # createCDATASection expects a unicode or it will
            # convert using default conversion rules, which will
            # fail if string isn't ASCII.
            err = cdatasafe(err).decode('utf-8', 'replace')
            cd = doc.createCDATASection(err)
            # Use 'failure' here instead of 'error' to match errors = 0,
            # failures = len(result.failures) in the testsuite element.
            failelem = doc.createElement('failure')
            failelem.setAttribute('message', 'output changed')
            failelem.setAttribute('type', 'output-mismatch')
            failelem.appendChild(cd)
            t.appendChild(failelem)
            s.appendChild(t)
        for tc, message in result.skipped:
            # According to the schema, 'skipped' has no attributes. So store
            # the skip message as a text node instead.
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            binmessage = message.encode('utf-8')
            message = cdatasafe(binmessage).decode('utf-8', 'replace')
            cd = doc.createCDATASection(message)
            skipelem = doc.createElement('skipped')
            skipelem.appendChild(cd)
            t.appendChild(skipelem)
            s.appendChild(t)
        outf.write(doc.toprettyxml(indent='  ', encoding='utf-8'))

    @staticmethod
    def _writejson(result, outf):
        timesd = {}
        for tdata in result.times:
            test = tdata[0]
            timesd[test] = tdata[1:]

        outcome = {}
        groups = [('success', ((tc, None)
                   for tc in result.successes)),
                  ('failure', result.failures),
                  ('skip', result.skipped)]
        for res, testcases in groups:
            for tc, __ in testcases:
                if tc.name in timesd:
                    diff = result.faildata.get(tc.name, b'')
                    try:
                        diff = diff.decode('unicode_escape')
                    except UnicodeDecodeError as e:
                        diff = '%r decoding diff, sorry' % e
                    tres = {'result': res,
                            'time': ('%0.3f' % timesd[tc.name][2]),
                            'cuser': ('%0.3f' % timesd[tc.name][0]),
                            'csys': ('%0.3f' % timesd[tc.name][1]),
                            'start': ('%0.3f' % timesd[tc.name][3]),
                            'end': ('%0.3f' % timesd[tc.name][4]),
                            'diff': diff,
                            }
                else:
                    # blacklisted test
                    tres = {'result': res}

                outcome[tc.name] = tres
        # The report is a JS assignment, not bare JSON, hence the prefix.
        jsonout = json.dumps(outcome, sort_keys=True, indent=4,
                             separators=(',', ': '))
        outf.writelines(("testreport =", jsonout))
2438 2438
def sorttests(testdescs, previoustimes, shuffle=False):
    """Do an in-place sort of tests (slowest-expected first), or shuffle."""
    if shuffle:
        random.shuffle(testdescs)
        return

    if previoustimes:
        def sortkey(desc):
            path = desc['path']
            if path in previoustimes:
                # Use most recent time as estimate
                return -previoustimes[path][-1]
            # Default to a rather arbitrary value of 1 second for new tests
            return -1.0
    else:
        # keywords for slow tests
        slow = {b'svn': 10,
                b'cvs': 10,
                b'hghave': 10,
                b'largefiles-update': 10,
                b'run-tests': 10,
                b'corruption': 10,
                b'race': 10,
                b'i18n': 10,
                b'check': 100,
                b'gendoc': 100,
                b'contrib-perf': 200,
                }
        estimates = {}

        def sortkey(desc):
            # run largest tests first, as they tend to take the longest
            path = desc['path']
            if path in estimates:
                return estimates[path]
            try:
                size = -os.stat(path).st_size
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
                estimates[path] = -1e9 # file does not exist, tell early
                return -1e9
            for keyword, factor in slow.items():
                if keyword in path:
                    size *= factor
            if path.endswith(b'.py'):
                size /= 10.0
            estimates[path] = size / 1000.0
            return estimates[path]

    testdescs.sort(key=sortkey)
2492 2492
class TestRunner(object):
    """Holds context for executing tests.

    Tests rely on a lot of state. This object holds it for them.
    """

    # Programs required to run tests.
    REQUIREDTOOLS = [
        b'diff',
        b'grep',
        b'unzip',
        b'gunzip',
        b'bunzip2',
        b'sed',
    ]

    # Maps file extensions to test class.
    TESTTYPES = [
        (b'.py', PythonTest),
        (b'.t', TTest),
    ]
2514 2514
2515 2515 def __init__(self):
2516 2516 self.options = None
2517 2517 self._hgroot = None
2518 2518 self._testdir = None
2519 2519 self._outputdir = None
2520 2520 self._hgtmp = None
2521 2521 self._installdir = None
2522 2522 self._bindir = None
2523 2523 self._tmpbinddir = None
2524 2524 self._pythondir = None
2525 2525 self._coveragefile = None
2526 2526 self._createdfiles = []
2527 2527 self._hgcommand = None
2528 2528 self._hgpath = None
2529 2529 self._portoffset = 0
2530 2530 self._ports = {}
2531 2531
    def run(self, args, parser=None):
        """Run the test suite."""
        # Run under a fixed umask; the original value is restored on exit.
        oldmask = os.umask(0o22)
        try:
            parser = parser or getparser()
            options = parseargs(args, parser)
            tests = [_bytespath(a) for a in options.tests]
            if options.test_list is not None:
                # --test-list files contribute one test name per line.
                for listfile in options.test_list:
                    with open(listfile, 'rb') as f:
                        tests.extend(t for t in f.read().splitlines() if t)
            self.options = options

            self._checktools()
            testdescs = self.findtests(tests)
            if options.profile_runner:
                import statprof
                statprof.start()
            result = self._run(testdescs)
            if options.profile_runner:
                statprof.stop()
                statprof.display()
            return result

        finally:
            os.umask(oldmask)
2558 2558
2559 2559 def _run(self, testdescs):
2560 2560 testdir = getcwdb()
2561 2561 self._testdir = osenvironb[b'TESTDIR'] = getcwdb()
2562 2562 # assume all tests in same folder for now
2563 2563 if testdescs:
2564 2564 pathname = os.path.dirname(testdescs[0]['path'])
2565 2565 if pathname:
2566 2566 testdir = os.path.join(testdir, pathname)
2567 2567 self._testdir = osenvironb[b'TESTDIR'] = testdir
2568 2568 if self.options.outputdir:
2569 2569 self._outputdir = canonpath(_bytespath(self.options.outputdir))
2570 2570 else:
2571 2571 self._outputdir = getcwdb()
2572 2572 if testdescs and pathname:
2573 2573 self._outputdir = os.path.join(self._outputdir, pathname)
2574 2574 previoustimes = {}
2575 2575 if self.options.order_by_runtime:
2576 2576 previoustimes = dict(loadtimes(self._outputdir))
2577 2577 sorttests(testdescs, previoustimes, shuffle=self.options.random)
2578 2578
2579 2579 if 'PYTHONHASHSEED' not in os.environ:
2580 2580 # use a random python hash seed all the time
2581 2581 # we do the randomness ourself to know what seed is used
2582 2582 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
2583 2583
2584 2584 if self.options.tmpdir:
2585 2585 self.options.keep_tmpdir = True
2586 2586 tmpdir = _bytespath(self.options.tmpdir)
2587 2587 if os.path.exists(tmpdir):
2588 2588 # Meaning of tmpdir has changed since 1.3: we used to create
2589 2589 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
2590 2590 # tmpdir already exists.
2591 2591 print("error: temp dir %r already exists" % tmpdir)
2592 2592 return 1
2593 2593
2594 2594 os.makedirs(tmpdir)
2595 2595 else:
2596 2596 d = None
2597 2597 if os.name == 'nt':
2598 2598 # without this, we get the default temp dir location, but
2599 2599 # in all lowercase, which causes troubles with paths (issue3490)
2600 2600 d = osenvironb.get(b'TMP', None)
2601 2601 tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)
2602 2602
2603 2603 self._hgtmp = osenvironb[b'HGTMP'] = (
2604 2604 os.path.realpath(tmpdir))
2605 2605
2606 2606 if self.options.with_hg:
2607 2607 self._installdir = None
2608 2608 whg = self.options.with_hg
2609 2609 self._bindir = os.path.dirname(os.path.realpath(whg))
2610 2610 assert isinstance(self._bindir, bytes)
2611 2611 self._hgcommand = os.path.basename(whg)
2612 2612 self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
2613 2613 os.makedirs(self._tmpbindir)
2614 2614
2615 2615 normbin = os.path.normpath(os.path.abspath(whg))
2616 2616 normbin = normbin.replace(os.sep.encode('ascii'), b'/')
2617 2617
2618 2618 # Other Python scripts in the test harness need to
2619 2619 # `import mercurial`. If `hg` is a Python script, we assume
2620 2620 # the Mercurial modules are relative to its path and tell the tests
2621 2621 # to load Python modules from its directory.
2622 2622 with open(whg, 'rb') as fh:
2623 2623 initial = fh.read(1024)
2624 2624
2625 2625 if re.match(b'#!.*python', initial):
2626 2626 self._pythondir = self._bindir
2627 2627 # If it looks like our in-repo Rust binary, use the source root.
2628 2628 # This is a bit hacky. But rhg is still not supported outside the
2629 2629 # source directory. So until it is, do the simple thing.
2630 2630 elif re.search(b'/rust/target/[^/]+/hg', normbin):
2631 2631 self._pythondir = os.path.dirname(self._testdir)
2632 2632 # Fall back to the legacy behavior.
2633 2633 else:
2634 2634 self._pythondir = self._bindir
2635 2635
2636 2636 else:
2637 2637 self._installdir = os.path.join(self._hgtmp, b"install")
2638 2638 self._bindir = os.path.join(self._installdir, b"bin")
2639 2639 self._hgcommand = b'hg'
2640 2640 self._tmpbindir = self._bindir
2641 2641 self._pythondir = os.path.join(self._installdir, b"lib", b"python")
2642 2642
2643 if os.name == 'nt' and not self._hgcommand.endswith(b'.exe'):
2644 self._hgcommand += b'.exe'
2645
2643 2646 # set CHGHG, then replace "hg" command by "chg"
2644 2647 chgbindir = self._bindir
2645 2648 if self.options.chg or self.options.with_chg:
2646 2649 osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
2647 2650 else:
2648 2651 osenvironb.pop(b'CHGHG', None) # drop flag for hghave
2649 2652 if self.options.chg:
2650 2653 self._hgcommand = b'chg'
2651 2654 elif self.options.with_chg:
2652 2655 chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
2653 2656 self._hgcommand = os.path.basename(self.options.with_chg)
2654 2657
2655 2658 osenvironb[b"BINDIR"] = self._bindir
2656 2659 osenvironb[b"PYTHON"] = PYTHON
2657 2660
2658 2661 fileb = _bytespath(__file__)
2659 2662 runtestdir = os.path.abspath(os.path.dirname(fileb))
2660 2663 osenvironb[b'RUNTESTDIR'] = runtestdir
2661 2664 if PYTHON3:
2662 2665 sepb = _bytespath(os.pathsep)
2663 2666 else:
2664 2667 sepb = os.pathsep
2665 2668 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
2666 2669 if os.path.islink(__file__):
2667 2670 # test helper will likely be at the end of the symlink
2668 2671 realfile = os.path.realpath(fileb)
2669 2672 realdir = os.path.abspath(os.path.dirname(realfile))
2670 2673 path.insert(2, realdir)
2671 2674 if chgbindir != self._bindir:
2672 2675 path.insert(1, chgbindir)
2673 2676 if self._testdir != runtestdir:
2674 2677 path = [self._testdir] + path
2675 2678 if self._tmpbindir != self._bindir:
2676 2679 path = [self._tmpbindir] + path
2677 2680 osenvironb[b"PATH"] = sepb.join(path)
2678 2681
2679 2682 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
2680 2683 # can run .../tests/run-tests.py test-foo where test-foo
2681 2684 # adds an extension to HGRC. Also include run-test.py directory to
2682 2685 # import modules like heredoctest.
2683 2686 pypath = [self._pythondir, self._testdir, runtestdir]
2684 2687 # We have to augment PYTHONPATH, rather than simply replacing
2685 2688 # it, in case external libraries are only available via current
2686 2689 # PYTHONPATH. (In particular, the Subversion bindings on OS X
2687 2690 # are in /opt/subversion.)
2688 2691 oldpypath = osenvironb.get(IMPL_PATH)
2689 2692 if oldpypath:
2690 2693 pypath.append(oldpypath)
2691 2694 osenvironb[IMPL_PATH] = sepb.join(pypath)
2692 2695
2693 2696 if self.options.pure:
2694 2697 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
2695 2698 os.environ["HGMODULEPOLICY"] = "py"
2696 2699
2697 2700 if self.options.allow_slow_tests:
2698 2701 os.environ["HGTEST_SLOW"] = "slow"
2699 2702 elif 'HGTEST_SLOW' in os.environ:
2700 2703 del os.environ['HGTEST_SLOW']
2701 2704
2702 2705 self._coveragefile = os.path.join(self._testdir, b'.coverage')
2703 2706
2704 2707 if self.options.exceptions:
2705 2708 exceptionsdir = os.path.join(self._outputdir, b'exceptions')
2706 2709 try:
2707 2710 os.makedirs(exceptionsdir)
2708 2711 except OSError as e:
2709 2712 if e.errno != errno.EEXIST:
2710 2713 raise
2711 2714
2712 2715 # Remove all existing exception reports.
2713 2716 for f in os.listdir(exceptionsdir):
2714 2717 os.unlink(os.path.join(exceptionsdir, f))
2715 2718
2716 2719 osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir
2717 2720 logexceptions = os.path.join(self._testdir, b'logexceptions.py')
2718 2721 self.options.extra_config_opt.append(
2719 2722 'extensions.logexceptions=%s' % logexceptions.decode('utf-8'))
2720 2723
2721 2724 vlog("# Using TESTDIR", self._testdir)
2722 2725 vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
2723 2726 vlog("# Using HGTMP", self._hgtmp)
2724 2727 vlog("# Using PATH", os.environ["PATH"])
2725 2728 vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
2726 2729 vlog("# Writing to directory", self._outputdir)
2727 2730
2728 2731 try:
2729 2732 return self._runtests(testdescs) or 0
2730 2733 finally:
2731 2734 time.sleep(.1)
2732 2735 self._cleanup()
2733 2736
2734 2737 def findtests(self, args):
2735 2738 """Finds possible test files from arguments.
2736 2739
2737 2740 If you wish to inject custom tests into the test harness, this would
2738 2741 be a good function to monkeypatch or override in a derived class.
2739 2742 """
2740 2743 if not args:
2741 2744 if self.options.changed:
2742 2745 proc = Popen4('hg st --rev "%s" -man0 .' %
2743 2746 self.options.changed, None, 0)
2744 2747 stdout, stderr = proc.communicate()
2745 2748 args = stdout.strip(b'\0').split(b'\0')
2746 2749 else:
2747 2750 args = os.listdir(b'.')
2748 2751
2749 2752 expanded_args = []
2750 2753 for arg in args:
2751 2754 if os.path.isdir(arg):
2752 2755 if not arg.endswith(b'/'):
2753 2756 arg += b'/'
2754 2757 expanded_args.extend([arg + a for a in os.listdir(arg)])
2755 2758 else:
2756 2759 expanded_args.append(arg)
2757 2760 args = expanded_args
2758 2761
2759 2762 testcasepattern = re.compile(br'([\w-]+\.t|py)(#([a-zA-Z0-9_\-\.#]+))')
2760 2763 tests = []
2761 2764 for t in args:
2762 2765 case = []
2763 2766
2764 2767 if not (os.path.basename(t).startswith(b'test-')
2765 2768 and (t.endswith(b'.py') or t.endswith(b'.t'))):
2766 2769
2767 2770 m = testcasepattern.match(t)
2768 2771 if m is not None:
2769 2772 t, _, casestr = m.groups()
2770 2773 if casestr:
2771 2774 case = casestr.split(b'#')
2772 2775 else:
2773 2776 continue
2774 2777
2775 2778 if t.endswith(b'.t'):
2776 2779 # .t file may contain multiple test cases
2777 2780 casedimensions = parsettestcases(t)
2778 2781 if casedimensions:
2779 2782 cases = []
2780 2783 def addcases(case, casedimensions):
2781 2784 if not casedimensions:
2782 2785 cases.append(case)
2783 2786 else:
2784 2787 for c in casedimensions[0]:
2785 2788 addcases(case + [c], casedimensions[1:])
2786 2789 addcases([], casedimensions)
2787 2790 if case and case in cases:
2788 2791 cases = [case]
2789 2792 elif case:
2790 2793 # Ignore invalid cases
2791 2794 cases = []
2792 2795 else:
2793 2796 pass
2794 2797 tests += [{'path': t, 'case': c} for c in sorted(cases)]
2795 2798 else:
2796 2799 tests.append({'path': t})
2797 2800 else:
2798 2801 tests.append({'path': t})
2799 2802 return tests
2800 2803
    def _runtests(self, testdescs):
        """Instantiate and run the given test descriptions.

        Returns 1 when any test failed or the run was interrupted,
        otherwise None.  Handles --restart resumption, optional temporary
        installation of hg/chg, and coverage reporting.
        """
        def _reloadtest(test, i):
            # convert a test back to its description dict
            desc = {'path': test.path}
            case = getattr(test, '_case', [])
            if case:
                desc['case'] = case
            return self._gettest(desc, i)

        try:
            if self.options.restart:
                # Resume at the first test whose .err file survived the
                # previous run; everything before it already passed.
                orig = list(testdescs)
                while testdescs:
                    desc = testdescs[0]
                    # desc['path'] is a relative path
                    if 'case' in desc:
                        casestr = b'#'.join(desc['case'])
                        errpath = b'%s#%s.err' % (desc['path'], casestr)
                    else:
                        errpath = b'%s.err' % desc['path']
                    errpath = os.path.join(self._outputdir, errpath)
                    if os.path.exists(errpath):
                        break
                    testdescs.pop(0)
                if not testdescs:
                    # No leftover failure found: rerun the whole set.
                    print("running all tests")
                    testdescs = orig

            tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]

            jobs = min(len(tests), self.options.jobs)

            failed = False
            kws = self.options.keywords
            if kws is not None and PYTHON3:
                kws = kws.encode('utf-8')

            suite = TestSuite(self._testdir,
                              jobs=jobs,
                              whitelist=self.options.whitelisted,
                              blacklist=self.options.blacklist,
                              retest=self.options.retest,
                              keywords=kws,
                              loop=self.options.loop,
                              runs_per_test=self.options.runs_per_test,
                              showchannels=self.options.showchannels,
                              tests=tests, loadtest=_reloadtest)
            # verbosity: 0 for --list-tests, 2 for --verbose, else 1.
            verbosity = 1
            if self.options.list_tests:
                verbosity = 0
            elif self.options.verbose:
                verbosity = 2
            runner = TextTestRunner(self, verbosity=verbosity)

            if self.options.list_tests:
                result = runner.listtests(suite)
            else:
                if self._installdir:
                    # Temporary-install mode: build hg first, then sanity
                    # check that the importable mercurial is the one built.
                    self._installhg()
                    self._checkhglib("Testing")
                else:
                    self._usecorrectpython()
                if self.options.chg:
                    assert self._installdir
                    self._installchg()

                log('running %d tests using %d parallel processes' % (
                    len(tests), jobs))

                result = runner.run(suite)

            if result.failures:
                failed = True

            result.onEnd()

            if self.options.anycoverage:
                self._outputcoverage()
        except KeyboardInterrupt:
            failed = True
            print("\ninterrupted!")

        if failed:
            return 1
2885 2888
2886 2889 def _getport(self, count):
2887 2890 port = self._ports.get(count) # do we have a cached entry?
2888 2891 if port is None:
2889 2892 portneeded = 3
2890 2893 # above 100 tries we just give up and let test reports failure
2891 2894 for tries in xrange(100):
2892 2895 allfree = True
2893 2896 port = self.options.port + self._portoffset
2894 2897 for idx in xrange(portneeded):
2895 2898 if not checkportisavailable(port + idx):
2896 2899 allfree = False
2897 2900 break
2898 2901 self._portoffset += portneeded
2899 2902 if allfree:
2900 2903 break
2901 2904 self._ports[count] = port
2902 2905 return port
2903 2906
    def _gettest(self, testdesc, count):
        """Obtain a Test by looking at its filename.

        Returns a Test instance. The Test may not be runnable if it doesn't
        map to a known type.
        """
        path = testdesc['path']
        lctest = path.lower()
        testcls = Test

        # Pick the most specific test class for the file extension; fall
        # back to the generic Test class when nothing matches.
        for ext, cls in self.TESTTYPES:
            if lctest.endswith(ext):
                testcls = cls
                break

        refpath = os.path.join(getcwdb(), path)
        # Each test slot gets its own scratch directory under HGTMP.
        tmpdir = os.path.join(self._hgtmp, b'child%d' % count)

        # extra keyword parameters. 'case' is used by .t tests
        kwds = dict((k, testdesc[k]) for k in ['case'] if k in testdesc)

        t = testcls(refpath, self._outputdir, tmpdir,
                    keeptmpdir=self.options.keep_tmpdir,
                    debug=self.options.debug,
                    first=self.options.first,
                    timeout=self.options.timeout,
                    startport=self._getport(count),
                    extraconfigopts=self.options.extra_config_opt,
                    py3warnings=self.options.py3_warnings,
                    shell=self.options.shell,
                    hgcommand=self._hgcommand,
                    usechg=bool(self.options.with_chg or self.options.chg),
                    useipv6=useipv6, **kwds)
        # Allow --loop / --runs-per-test to re-instantiate this test via
        # the loadtest callback.
        t.should_reload = True
        return t
2939 2942
2940 2943 def _cleanup(self):
2941 2944 """Clean up state from this test invocation."""
2942 2945 if self.options.keep_tmpdir:
2943 2946 return
2944 2947
2945 2948 vlog("# Cleaning up HGTMP", self._hgtmp)
2946 2949 shutil.rmtree(self._hgtmp, True)
2947 2950 for f in self._createdfiles:
2948 2951 try:
2949 2952 os.remove(f)
2950 2953 except OSError:
2951 2954 pass
2952 2955
    def _usecorrectpython(self):
        """Configure the environment to use the appropriate Python in tests."""
        # Tests must use the same interpreter as us or bad things will happen.
        pyexename = sys.platform == 'win32' and b'python.exe' or b'python'

        # os.symlink() is a thing with py3 on Windows, but it requires
        # Administrator rights.
        if getattr(os, 'symlink', None) and os.name != 'nt':
            vlog("# Making python executable in test path a symlink to '%s'" %
                 sys.executable)
            mypython = os.path.join(self._tmpbindir, pyexename)
            try:
                # Reuse an existing, already-correct symlink; replace a
                # stale one pointing elsewhere.
                if os.readlink(mypython) == sys.executable:
                    return
                os.unlink(mypython)
            except OSError as err:
                # ENOENT just means no symlink exists yet.
                if err.errno != errno.ENOENT:
                    raise
            if self._findprogram(pyexename) != sys.executable:
                try:
                    os.symlink(sys.executable, mypython)
                    self._createdfiles.append(mypython)
                except OSError as err:
                    # child processes may race, which is harmless
                    if err.errno != errno.EEXIST:
                        raise
        else:
            # No usable symlinks: put our interpreter's directory first on
            # PATH so it is found under the expected executable name.
            exedir, exename = os.path.split(sys.executable)
            vlog("# Modifying search path to find %s as %s in '%s'" %
                 (exename, pyexename, exedir))
            path = os.environ['PATH'].split(os.pathsep)
            while exedir in path:
                path.remove(exedir)
            os.environ['PATH'] = os.pathsep.join([exedir] + path)
            if not self._findprogram(pyexename):
                print("WARNING: Cannot find %s in search path" % pyexename)
2989 2992
    def _installhg(self):
        """Install hg into the test environment.

        This will also configure hg with the appropriate testing settings.
        """
        vlog("# Performing temporary installation of HG")
        installerrs = os.path.join(self._hgtmp, b"install.err")
        compiler = ''
        if self.options.compiler:
            compiler = '--compiler ' + self.options.compiler
        if self.options.pure:
            pure = b"--pure"
        else:
            pure = b""

        # Run installer in hg root
        script = os.path.realpath(sys.argv[0])
        exe = sys.executable
        if PYTHON3:
            compiler = _bytespath(compiler)
            script = _bytespath(script)
            exe = _bytespath(exe)
        hgroot = os.path.dirname(os.path.dirname(script))
        self._hgroot = hgroot
        os.chdir(hgroot)
        nohome = b'--home=""'
        if os.name == 'nt':
            # The --home="" trick works only on OS where os.sep == '/'
            # because of a distutils convert_path() fast-path. Avoid it at
            # least on Windows for now, deal with .pydistutils.cfg bugs
            # when they happen.
            nohome = b''
        # Full setup.py invocation; stdout/stderr are captured into
        # install.err so a quiet successful run leaves no noise.
        cmd = (b'"%(exe)s" setup.py %(pure)s clean --all'
               b' build %(compiler)s --build-base="%(base)s"'
               b' install --force --prefix="%(prefix)s"'
               b' --install-lib="%(libdir)s"'
               b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
               % {b'exe': exe, b'pure': pure,
                  b'compiler': compiler,
                  b'base': os.path.join(self._hgtmp, b"build"),
                  b'prefix': self._installdir, b'libdir': self._pythondir,
                  b'bindir': self._bindir,
                  b'nohome': nohome, b'logfile': installerrs})

        # setuptools requires install directories to exist.
        def makedirs(p):
            try:
                os.makedirs(p)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise
        makedirs(self._pythondir)
        makedirs(self._bindir)

        vlog("# Running", cmd)
        if subprocess.call(_strpath(cmd), shell=True) == 0:
            if not self.options.verbose:
                try:
                    os.remove(installerrs)
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise
        else:
            # Install failed: replay the captured log and abort the run.
            with open(installerrs, 'rb') as f:
                for line in f:
                    if PYTHON3:
                        sys.stdout.buffer.write(line)
                    else:
                        sys.stdout.write(line)
            sys.exit(1)
        os.chdir(self._testdir)

        self._usecorrectpython()

        if self.options.py3_warnings and not self.options.anycoverage:
            # Rewrite the installed 'hg' script's shebang to pass -3.
            vlog("# Updating hg command to enable Py3k Warnings switch")
            with open(os.path.join(self._bindir, 'hg'), 'rb') as f:
                lines = [line.rstrip() for line in f]
                lines[0] += ' -3'
            with open(os.path.join(self._bindir, 'hg'), 'wb') as f:
                for line in lines:
                    f.write(line + '\n')

        hgbat = os.path.join(self._bindir, b'hg.bat')
        if os.path.isfile(hgbat):
            # hg.bat expects to be put in bin/scripts while run-tests.py
            # installation layout put it in bin/ directly. Fix it
            with open(hgbat, 'rb') as f:
                data = f.read()
            if b'"%~dp0..\python" "%~dp0hg" %*' in data:
                data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
                                    b'"%~dp0python" "%~dp0hg" %*')
                with open(hgbat, 'wb') as f:
                    f.write(data)
            else:
                print('WARNING: cannot fix hg.bat reference to python.exe')

        if self.options.anycoverage:
            # Wire up coverage collection for every spawned Python process.
            custom = os.path.join(self._testdir, 'sitecustomize.py')
            target = os.path.join(self._pythondir, 'sitecustomize.py')
            vlog('# Installing coverage trigger to %s' % target)
            shutil.copyfile(custom, target)
            rc = os.path.join(self._testdir, '.coveragerc')
            vlog('# Installing coverage rc to %s' % rc)
            os.environ['COVERAGE_PROCESS_START'] = rc
            covdir = os.path.join(self._installdir, '..', 'coverage')
            try:
                os.mkdir(covdir)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise

            os.environ['COVERAGE_DIR'] = covdir
3104 3107 def _checkhglib(self, verb):
3105 3108 """Ensure that the 'mercurial' package imported by python is
3106 3109 the one we expect it to be. If not, print a warning to stderr."""
3107 3110 if ((self._bindir == self._pythondir) and
3108 3111 (self._bindir != self._tmpbindir)):
3109 3112 # The pythondir has been inferred from --with-hg flag.
3110 3113 # We cannot expect anything sensible here.
3111 3114 return
3112 3115 expecthg = os.path.join(self._pythondir, b'mercurial')
3113 3116 actualhg = self._gethgpath()
3114 3117 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
3115 3118 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
3116 3119 ' (expected %s)\n'
3117 3120 % (verb, actualhg, expecthg))
3118 3121 def _gethgpath(self):
3119 3122 """Return the path to the mercurial package that is actually found by
3120 3123 the current Python interpreter."""
3121 3124 if self._hgpath is not None:
3122 3125 return self._hgpath
3123 3126
3124 3127 cmd = b'"%s" -c "import mercurial; print (mercurial.__path__[0])"'
3125 3128 cmd = cmd % PYTHON
3126 3129 if PYTHON3:
3127 3130 cmd = _strpath(cmd)
3128 3131
3129 3132 p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
3130 3133 out, err = p.communicate()
3131 3134
3132 3135 self._hgpath = out.strip()
3133 3136
3134 3137 return self._hgpath
3135 3138
3136 3139 def _installchg(self):
3137 3140 """Install chg into the test environment"""
3138 3141 vlog('# Performing temporary installation of CHG')
3139 3142 assert os.path.dirname(self._bindir) == self._installdir
3140 3143 assert self._hgroot, 'must be called after _installhg()'
3141 3144 cmd = (b'"%(make)s" clean install PREFIX="%(prefix)s"'
3142 3145 % {b'make': 'make', # TODO: switch by option or environment?
3143 3146 b'prefix': self._installdir})
3144 3147 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
3145 3148 vlog("# Running", cmd)
3146 3149 proc = subprocess.Popen(cmd, shell=True, cwd=cwd,
3147 3150 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
3148 3151 stderr=subprocess.STDOUT)
3149 3152 out, _err = proc.communicate()
3150 3153 if proc.returncode != 0:
3151 3154 if PYTHON3:
3152 3155 sys.stdout.buffer.write(out)
3153 3156 else:
3154 3157 sys.stdout.write(out)
3155 3158 sys.exit(1)
3156 3159
    def _outputcoverage(self):
        """Produce code coverage output."""
        # Imported lazily: the coverage package is only needed when one of
        # the coverage options was requested.
        import coverage
        coverage = coverage.coverage

        vlog('# Producing coverage report')
        # chdir is the easiest way to get short, relative paths in the
        # output.
        os.chdir(self._hgroot)
        covdir = os.path.join(self._installdir, '..', 'coverage')
        cov = coverage(data_file=os.path.join(covdir, 'cov'))

        # Map install directory paths back to source directory.
        cov.config.paths['srcdir'] = ['.', self._pythondir]

        # Merge the per-process data files written during the run.
        cov.combine()

        # Exclude the installed binaries and the test scripts themselves
        # from the report.
        omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
        cov.report(ignore_errors=True, omit=omit)

        if self.options.htmlcov:
            htmldir = os.path.join(self._outputdir, 'htmlcov')
            cov.html_report(directory=htmldir, omit=omit)
        if self.options.annotate:
            adir = os.path.join(self._outputdir, 'annotated')
            if not os.path.isdir(adir):
                os.mkdir(adir)
            cov.annotate(directory=adir, omit=omit)
3185 3188
3186 3189 def _findprogram(self, program):
3187 3190 """Search PATH for a executable program"""
3188 3191 dpb = _bytespath(os.defpath)
3189 3192 sepb = _bytespath(os.pathsep)
3190 3193 for p in osenvironb.get(b'PATH', dpb).split(sepb):
3191 3194 name = os.path.join(p, program)
3192 3195 if os.name == 'nt' or os.access(name, os.X_OK):
3193 3196 return name
3194 3197 return None
3195 3198
3196 3199 def _checktools(self):
3197 3200 """Ensure tools required to run tests are present."""
3198 3201 for p in self.REQUIREDTOOLS:
3199 3202 if os.name == 'nt' and not p.endswith(b'.exe'):
3200 3203 p += b'.exe'
3201 3204 found = self._findprogram(p)
3202 3205 if found:
3203 3206 vlog("# Found prerequisite", p, "at", found)
3204 3207 else:
3205 3208 print("WARNING: Did not find prerequisite tool: %s " %
3206 3209 p.decode("utf-8"))
3207 3210
def aggregateexceptions(path):
    """Summarize the exception report files found in *path*.

    Each report holds five NUL-separated UTF-8 fields: exception, main
    frame, hg frame, hg line, and test name.  Returns a dict with
    per-failure counts, the tests hit by each failure, and for each
    failure the test exhibiting the fewest distinct failures (the
    cheapest reproducer).
    """
    exceptioncounts = collections.Counter()
    testsbyfailure = collections.defaultdict(set)
    failuresbytest = collections.defaultdict(set)

    for entry in os.listdir(path):
        with open(os.path.join(path, entry), 'rb') as fh:
            fields = fh.read().split(b'\0')
            # Skip anything that is not a well-formed 5-field report.
            if len(fields) != 5:
                continue

            exc, mainframe, hgframe, hgline, testname = (
                f.decode('utf-8') for f in fields)

            key = (hgframe, hgline, exc)
            exceptioncounts[key] += 1
            testsbyfailure[key].add(testname)
            failuresbytest[testname].add(key)

    # Find test having fewest failures for each failure.  Ties resolve to
    # the alphabetically first test, matching a strict-< scan over
    # sorted(tests).
    leastfailing = {}
    for key, tests in testsbyfailure.items():
        best = min(sorted(tests), key=lambda t: len(failuresbytest[t]))
        leastfailing[key] = (len(failuresbytest[best]), best)

    # Create a combined counter so we can sort by total occurrences and
    # impacted tests.
    combined = {
        key: (exceptioncounts[key],
              len(testsbyfailure[key]),
              leastfailing[key][0],
              leastfailing[key][1])
        for key in exceptioncounts
    }

    return {
        'exceptioncounts': exceptioncounts,
        'total': sum(exceptioncounts.values()),
        'combined': combined,
        'leastfailing': leastfailing,
        'byfailure': testsbyfailure,
        'bytest': failuresbytest,
    }
3260 3263
if __name__ == '__main__':
    runner = TestRunner()

    # On Windows, switch the standard streams to binary mode so test
    # output is not mangled by CRLF translation; msvcrt does not exist on
    # other platforms, where this is a no-op.
    try:
        import msvcrt
        msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
    except ImportError:
        pass

    sys.exit(runner.run(sys.argv[1:]))
@@ -1,266 +1,266 b''
1 1 hg debuginstall
2 2 $ hg debuginstall
3 3 checking encoding (ascii)...
4 4 checking Python executable (*) (glob)
5 5 checking Python version (2.*) (glob) (no-py3 !)
6 6 checking Python version (3.*) (glob) (py3 !)
7 checking Python lib (*lib*)... (glob)
7 checking Python lib (.*[Ll]ib.*)... (re)
8 8 checking Python security support (*) (glob)
9 9 TLS 1.2 not supported by Python install; network connections lack modern security (?)
10 10 SNI not supported by Python install; may have connectivity issues with some servers (?)
11 11 checking Mercurial version (*) (glob)
12 12 checking Mercurial custom build (*) (glob)
13 13 checking module policy (*) (glob)
14 14 checking installed modules (*mercurial)... (glob)
15 15 checking registered compression engines (*zlib*) (glob)
16 16 checking available compression engines (*zlib*) (glob)
17 17 checking available compression engines for wire protocol (*zlib*) (glob)
18 18 checking "re2" regexp engine \((available|missing)\) (re)
19 19 checking templates (*mercurial?templates)... (glob)
20 20 checking default template (*mercurial?templates?map-cmdline.default) (glob)
21 21 checking commit editor... (*) (glob)
22 22 checking username (test)
23 23 no problems detected
24 24
25 25 hg debuginstall JSON
26 26 $ hg debuginstall -Tjson | sed 's|\\\\|\\|g'
27 27 [
28 28 {
29 29 "compengines": ["bz2", "bz2truncated", "none", "zlib"*], (glob)
30 30 "compenginesavail": ["bz2", "bz2truncated", "none", "zlib"*], (glob)
31 31 "compenginesserver": [*"zlib"*], (glob)
32 32 "defaulttemplate": "*mercurial?templates?map-cmdline.default", (glob)
33 33 "defaulttemplateerror": null,
34 34 "defaulttemplatenotfound": "default",
35 35 "editor": "*", (glob)
36 36 "editornotfound": false,
37 37 "encoding": "ascii",
38 38 "encodingerror": null,
39 39 "extensionserror": null, (no-pure !)
40 40 "hgmodulepolicy": "*", (glob)
41 41 "hgmodules": "*mercurial", (glob)
42 42 "hgver": "*", (glob)
43 43 "hgverextra": "*", (glob)
44 44 "problems": 0,
45 45 "pythonexe": "*", (glob)
46 46 "pythonlib": "*", (glob)
47 47 "pythonsecurity": [*], (glob)
48 48 "pythonver": "*.*.*", (glob)
49 49 "re2": (true|false), (re)
50 50 "templatedirs": "*mercurial?templates", (glob)
51 51 "username": "test",
52 52 "usernameerror": null,
53 53 "vinotfound": false
54 54 }
55 55 ]
56 56
57 57 hg debuginstall with no username
58 58 $ HGUSER= hg debuginstall
59 59 checking encoding (ascii)...
60 60 checking Python executable (*) (glob)
61 61 checking Python version (2.*) (glob) (no-py3 !)
62 62 checking Python version (3.*) (glob) (py3 !)
63 checking Python lib (*lib*)... (glob)
63 checking Python lib (.*[Ll]ib.*)... (re)
64 64 checking Python security support (*) (glob)
65 65 TLS 1.2 not supported by Python install; network connections lack modern security (?)
66 66 SNI not supported by Python install; may have connectivity issues with some servers (?)
67 67 checking Mercurial version (*) (glob)
68 68 checking Mercurial custom build (*) (glob)
69 69 checking module policy (*) (glob)
70 70 checking installed modules (*mercurial)... (glob)
71 71 checking registered compression engines (*zlib*) (glob)
72 72 checking available compression engines (*zlib*) (glob)
73 73 checking available compression engines for wire protocol (*zlib*) (glob)
74 74 checking "re2" regexp engine \((available|missing)\) (re)
75 75 checking templates (*mercurial?templates)... (glob)
76 76 checking default template (*mercurial?templates?map-cmdline.default) (glob)
77 77 checking commit editor... (*) (glob)
78 78 checking username...
79 79 no username supplied
80 80 (specify a username in your configuration file)
81 81 1 problems detected, please check your install!
82 82 [1]
83 83
84 84 hg debuginstall with invalid encoding
85 85 $ HGENCODING=invalidenc hg debuginstall | grep encoding
86 86 checking encoding (invalidenc)...
87 87 unknown encoding: invalidenc
88 88
89 89 exception message in JSON
90 90
91 91 $ HGENCODING=invalidenc HGUSER= hg debuginstall -Tjson | grep error
92 92 "defaulttemplateerror": null,
93 93 "encodingerror": "unknown encoding: invalidenc",
94 94 "extensionserror": null, (no-pure !)
95 95 "usernameerror": "no username supplied",
96 96
97 97 path variables are expanded (~ is the same as $TESTTMP)
98 98 $ mkdir tools
99 99 $ touch tools/testeditor.exe
100 100 #if execbit
101 101 $ chmod 755 tools/testeditor.exe
102 102 #endif
103 103 $ HGEDITOR="~/tools/testeditor.exe" hg debuginstall
104 104 checking encoding (ascii)...
105 105 checking Python executable (*) (glob)
106 106 checking Python version (2.*) (glob) (no-py3 !)
107 107 checking Python version (3.*) (glob) (py3 !)
108 checking Python lib (*lib*)... (glob)
108 checking Python lib (.*[Ll]ib.*)... (re)
109 109 checking Python security support (*) (glob)
110 110 TLS 1.2 not supported by Python install; network connections lack modern security (?)
111 111 SNI not supported by Python install; may have connectivity issues with some servers (?)
112 112 checking Mercurial version (*) (glob)
113 113 checking Mercurial custom build (*) (glob)
114 114 checking module policy (*) (glob)
115 115 checking installed modules (*mercurial)... (glob)
116 116 checking registered compression engines (*zlib*) (glob)
117 117 checking available compression engines (*zlib*) (glob)
118 118 checking available compression engines for wire protocol (*zlib*) (glob)
119 119 checking "re2" regexp engine \((available|missing)\) (re)
120 120 checking templates (*mercurial?templates)... (glob)
121 121 checking default template (*mercurial?templates?map-cmdline.default) (glob)
122 122 checking commit editor... ($TESTTMP/tools/testeditor.exe)
123 123 checking username (test)
124 124 no problems detected
125 125
126 126 print out the binary post-shlexsplit in the error message when commit editor is
127 127 not found (this is intentionally using backslashes to mimic a windows usecase).
128 128 $ HGEDITOR="c:\foo\bar\baz.exe -y -z" hg debuginstall
129 129 checking encoding (ascii)...
130 130 checking Python executable (*) (glob)
131 131 checking Python version (2.*) (glob) (no-py3 !)
132 132 checking Python version (3.*) (glob) (py3 !)
133 checking Python lib (*lib*)... (glob)
133 checking Python lib (.*[Ll]ib.*)... (re)
134 134 checking Python security support (*) (glob)
135 135 TLS 1.2 not supported by Python install; network connections lack modern security (?)
136 136 SNI not supported by Python install; may have connectivity issues with some servers (?)
137 137 checking Mercurial version (*) (glob)
138 138 checking Mercurial custom build (*) (glob)
139 139 checking module policy (*) (glob)
140 140 checking installed modules (*mercurial)... (glob)
141 141 checking registered compression engines (*zlib*) (glob)
142 142 checking available compression engines (*zlib*) (glob)
143 143 checking available compression engines for wire protocol (*zlib*) (glob)
144 144 checking "re2" regexp engine \((available|missing)\) (re)
145 145 checking templates (*mercurial?templates)... (glob)
146 146 checking default template (*mercurial?templates?map-cmdline.default) (glob)
147 147 checking commit editor... (c:\foo\bar\baz.exe) (windows !)
148 148 Can't find editor 'c:\foo\bar\baz.exe' in PATH (windows !)
149 149 checking commit editor... (c:foobarbaz.exe) (no-windows !)
150 150 Can't find editor 'c:foobarbaz.exe' in PATH (no-windows !)
151 151 (specify a commit editor in your configuration file)
152 152 checking username (test)
153 153 1 problems detected, please check your install!
154 154 [1]
155 155
156 156 #if test-repo
157 157 $ . "$TESTDIR/helpers-testrepo.sh"
158 158
159 159 $ cat >> wixxml.py << EOF
160 160 > import os
161 161 > import subprocess
162 162 > import sys
163 163 > import xml.etree.ElementTree as ET
164 164 >
165 165 > # MSYS mangles the path if it expands $TESTDIR
166 166 > testdir = os.environ['TESTDIR']
167 167 > ns = {'wix' : 'http://schemas.microsoft.com/wix/2006/wi'}
168 168 >
169 169 > def directory(node, relpath):
170 170 > '''generator of files in the xml node, rooted at relpath'''
171 171 > dirs = node.findall('./{%(wix)s}Directory' % ns)
172 172 >
173 173 > for d in dirs:
174 174 > for subfile in directory(d, relpath + d.attrib['Name'] + '/'):
175 175 > yield subfile
176 176 >
177 177 > files = node.findall('./{%(wix)s}Component/{%(wix)s}File' % ns)
178 178 >
179 179 > for f in files:
180 180 > yield relpath + f.attrib['Name']
181 181 >
182 182 > def hgdirectory(relpath):
183 183 > '''generator of tracked files, rooted at relpath'''
184 184 > hgdir = "%s/../mercurial" % (testdir)
185 185 > args = ['hg', '--cwd', hgdir, 'files', relpath]
186 186 > proc = subprocess.Popen(args, stdout=subprocess.PIPE,
187 187 > stderr=subprocess.PIPE)
188 188 > output = proc.communicate()[0]
189 189 >
190 190 > slash = '/'
191 191 > for line in output.splitlines():
192 192 > if os.name == 'nt':
193 193 > yield line.replace(os.sep, slash)
194 194 > else:
195 195 > yield line
196 196 >
197 197 > tracked = [f for f in hgdirectory(sys.argv[1])]
198 198 >
199 199 > xml = ET.parse("%s/../contrib/wix/%s.wxs" % (testdir, sys.argv[1]))
200 200 > root = xml.getroot()
201 201 > dir = root.find('.//{%(wix)s}DirectoryRef' % ns)
202 202 >
203 203 > installed = [f for f in directory(dir, '')]
204 204 >
205 205 > print('Not installed:')
206 206 > for f in sorted(set(tracked) - set(installed)):
207 207 > print(' %s' % f)
208 208 >
209 209 > print('Not tracked:')
210 210 > for f in sorted(set(installed) - set(tracked)):
211 211 > print(' %s' % f)
212 212 > EOF
213 213
214 214 $ ( testrepohgenv; "$PYTHON" wixxml.py help )
215 215 Not installed:
216 216 help/common.txt
217 217 help/hg-ssh.8.txt
218 218 help/hg.1.txt
219 219 help/hgignore.5.txt
220 220 help/hgrc.5.txt
221 221 Not tracked:
222 222
223 223 $ ( testrepohgenv; "$PYTHON" wixxml.py templates )
224 224 Not installed:
225 225 Not tracked:
226 226
227 227 #endif
228 228
229 229 #if virtualenv
230 230
231 231 Verify that Mercurial is installable with pip. Note that this MUST be
232 232 the last test in this file, because we do some nasty things to the
233 233 shell environment in order to make the virtualenv work reliably.
234 234
235 235 $ cd $TESTTMP
236 236 Note: --no-site-packages is deprecated, but some places have an
237 237 ancient virtualenv from their linux distro or similar and it's not yet
238 238 the default for them.
239 239 $ unset PYTHONPATH
240 240 $ "$PYTHON" -m virtualenv --no-site-packages --never-download installenv >> pip.log
241 241 Note: we use this weird path to run pip and hg to avoid platform differences,
242 242 since it's bin on most platforms but Scripts on Windows.
243 243 $ ./installenv/*/pip install --no-index $TESTDIR/.. >> pip.log
244 244 $ ./installenv/*/hg debuginstall || cat pip.log
245 245 checking encoding (ascii)...
246 246 checking Python executable (*) (glob)
247 247 checking Python version (2.*) (glob) (no-py3 !)
248 248 checking Python version (3.*) (glob) (py3 !)
249 249 checking Python lib (*)... (glob)
250 250 checking Python security support (*) (glob)
251 251 TLS 1.2 not supported by Python install; network connections lack modern security (?)
252 252 SNI not supported by Python install; may have connectivity issues with some servers (?)
253 253 checking Mercurial version (*) (glob)
254 254 checking Mercurial custom build (*) (glob)
255 255 checking module policy (*) (glob)
256 256 checking installed modules (*/mercurial)... (glob)
257 257 checking registered compression engines (*) (glob)
258 258 checking available compression engines (*) (glob)
259 259 checking available compression engines for wire protocol (*) (glob)
260 260 checking "re2" regexp engine \((available|missing)\) (re)
261 261 checking templates ($TESTTMP/installenv/*/site-packages/mercurial/templates)... (glob)
262 262 checking default template ($TESTTMP/installenv/*/site-packages/mercurial/templates/map-cmdline.default) (glob)
263 263 checking commit editor... (*) (glob)
264 264 checking username (test)
265 265 no problems detected
266 266 #endif
@@ -1,1930 +1,1932 b''
1 1 This file tests the behavior of run-tests.py itself.
2 2
3 3 Avoid interference from actual test env:
4 4
5 5 $ . "$TESTDIR/helper-runtests.sh"
6 6
7 7 Smoke test with install
8 8 ============
9 9 $ "$PYTHON" $TESTDIR/run-tests.py $HGTEST_RUN_TESTS_PURE -l
10 10 running 0 tests using 0 parallel processes
11 11
12 12 # Ran 0 tests, 0 skipped, 0 failed.
13 13
14 14 Define a helper to avoid the install step
15 15 =============
16 16 $ rt()
17 17 > {
18 18 > "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` -j1 "$@"
19 19 > }
20 20
21 21 error paths
22 22
23 23 #if symlink
24 24 $ ln -s `which true` hg
25 25 $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
26 26 warning: --with-hg should specify an hg script
27 27 running 0 tests using 0 parallel processes
28 28
29 29 # Ran 0 tests, 0 skipped, 0 failed.
30 30 $ rm hg
31 31 #endif
32 32
33 33 #if execbit
34 34 $ touch hg
35 35 $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
36 36 usage: run-tests.py [options] [tests]
37 37 run-tests.py: error: --with-hg must specify an executable hg script
38 38 [2]
39 39 $ rm hg
40 40 #endif
41 41
42 42 Features for testing optional lines
43 43 ===================================
44 44
45 45 $ cat > hghaveaddon.py <<EOF
46 46 > import hghave
47 47 > @hghave.check("custom", "custom hghave feature")
48 48 > def has_custom():
49 49 > return True
50 50 > @hghave.check("missing", "missing hghave feature")
51 51 > def has_missing():
52 52 > return False
53 53 > EOF
54 54
55 55 an empty test
56 56 =======================
57 57
58 58 $ touch test-empty.t
59 59 $ rt
60 60 running 1 tests using 1 parallel processes
61 61 .
62 62 # Ran 1 tests, 0 skipped, 0 failed.
63 63 $ rm test-empty.t
64 64
65 65 a succesful test
66 66 =======================
67 67
68 68 $ cat > test-success.t << EOF
69 69 > $ echo babar
70 70 > babar
71 71 > $ echo xyzzy
72 72 > dont_print (?)
73 73 > nothing[42]line (re) (?)
74 74 > never*happens (glob) (?)
75 75 > more_nothing (?)
76 76 > xyzzy
77 77 > nor this (?)
78 78 > $ printf 'abc\ndef\nxyz\n'
79 79 > 123 (?)
80 80 > abc
81 81 > def (?)
82 82 > 456 (?)
83 83 > xyz
84 84 > $ printf 'zyx\nwvu\ntsr\n'
85 85 > abc (?)
86 86 > zyx (custom !)
87 87 > wvu
88 88 > no_print (no-custom !)
89 89 > tsr (no-missing !)
90 90 > missing (missing !)
91 91 > EOF
92 92
93 93 $ rt
94 94 running 1 tests using 1 parallel processes
95 95 .
96 96 # Ran 1 tests, 0 skipped, 0 failed.
97 97
98 98 failing test
99 99 ==================
100 100
101 101 test churn with globs
102 102 $ cat > test-failure.t <<EOF
103 103 > $ echo "bar-baz"; echo "bar-bad"; echo foo
104 104 > bar*bad (glob)
105 105 > bar*baz (glob)
106 106 > | fo (re)
107 107 > EOF
108 108 $ rt test-failure.t
109 109 running 1 tests using 1 parallel processes
110 110
111 111 --- $TESTTMP/test-failure.t
112 112 +++ $TESTTMP/test-failure.t.err
113 113 @@ -1,4 +1,4 @@
114 114 $ echo "bar-baz"; echo "bar-bad"; echo foo
115 115 + bar*baz (glob)
116 116 bar*bad (glob)
117 117 - bar*baz (glob)
118 118 - | fo (re)
119 119 + foo
120 120
121 121 ERROR: test-failure.t output changed
122 122 !
123 123 Failed test-failure.t: output changed
124 124 # Ran 1 tests, 0 skipped, 1 failed.
125 125 python hash seed: * (glob)
126 126 [1]
127 127
128 128 test how multiple globs gets matched with lines in output
129 129 $ cat > test-failure-globs.t <<EOF
130 130 > $ echo "context"; echo "context"; \
131 131 > echo "key: 1"; echo "value: not a"; \
132 132 > echo "key: 2"; echo "value: not b"; \
133 133 > echo "key: 3"; echo "value: c"; \
134 134 > echo "key: 4"; echo "value: d"
135 135 > context
136 136 > context
137 137 > key: 1
138 138 > value: a
139 139 > key: 2
140 140 > value: b
141 141 > key: 3
142 142 > value: * (glob)
143 143 > key: 4
144 144 > value: * (glob)
145 145 > EOF
146 146 $ rt test-failure-globs.t
147 147 running 1 tests using 1 parallel processes
148 148
149 149 --- $TESTTMP/test-failure-globs.t
150 150 +++ $TESTTMP/test-failure-globs.t.err
151 151 @@ -2,9 +2,9 @@
152 152 context
153 153 context
154 154 key: 1
155 155 - value: a
156 156 + value: not a
157 157 key: 2
158 158 - value: b
159 159 + value: not b
160 160 key: 3
161 161 value: * (glob)
162 162 key: 4
163 163
164 164 ERROR: test-failure-globs.t output changed
165 165 !
166 166 Failed test-failure-globs.t: output changed
167 167 # Ran 1 tests, 0 skipped, 1 failed.
168 168 python hash seed: * (glob)
169 169 [1]
170 170 $ rm test-failure-globs.t
171 171
172 172 test diff colorisation
173 173
174 174 #if no-windows pygments
175 175 $ rt test-failure.t --color always
176 176 running 1 tests using 1 parallel processes
177 177
178 178 \x1b[38;5;124m--- $TESTTMP/test-failure.t\x1b[39m (esc)
179 179 \x1b[38;5;34m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc)
180 180 \x1b[38;5;90;01m@@ -1,4 +1,4 @@\x1b[39;00m (esc)
181 181 $ echo "bar-baz"; echo "bar-bad"; echo foo
182 182 \x1b[38;5;34m+ bar*baz (glob)\x1b[39m (esc)
183 183 bar*bad (glob)
184 184 \x1b[38;5;124m- bar*baz (glob)\x1b[39m (esc)
185 185 \x1b[38;5;124m- | fo (re)\x1b[39m (esc)
186 186 \x1b[38;5;34m+ foo\x1b[39m (esc)
187 187
188 188 \x1b[38;5;88mERROR: \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m output changed\x1b[39m (esc)
189 189 !
190 190 \x1b[38;5;88mFailed \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m: output changed\x1b[39m (esc)
191 191 # Ran 1 tests, 0 skipped, 1 failed.
192 192 python hash seed: * (glob)
193 193 [1]
194 194
195 195 $ rt test-failure.t 2> tmp.log
196 196 running 1 tests using 1 parallel processes
197 197 [1]
198 198 $ cat tmp.log
199 199
200 200 --- $TESTTMP/test-failure.t
201 201 +++ $TESTTMP/test-failure.t.err
202 202 @@ -1,4 +1,4 @@
203 203 $ echo "bar-baz"; echo "bar-bad"; echo foo
204 204 + bar*baz (glob)
205 205 bar*bad (glob)
206 206 - bar*baz (glob)
207 207 - | fo (re)
208 208 + foo
209 209
210 210 ERROR: test-failure.t output changed
211 211 !
212 212 Failed test-failure.t: output changed
213 213 # Ran 1 tests, 0 skipped, 1 failed.
214 214 python hash seed: * (glob)
215 215 #endif
216 216
217 217 $ cat > test-failure.t << EOF
218 218 > $ true
219 219 > should go away (true !)
220 220 > $ true
221 221 > should stay (false !)
222 222 >
223 223 > Should remove first line, not second or third
224 224 > $ echo 'testing'
225 225 > baz*foo (glob) (true !)
226 226 > foobar*foo (glob) (false !)
227 227 > te*ting (glob) (true !)
228 228 >
229 229 > Should keep first two lines, remove third and last
230 230 > $ echo 'testing'
231 231 > test.ng (re) (true !)
232 232 > foo.ar (re) (false !)
233 233 > b.r (re) (true !)
234 234 > missing (?)
235 235 > awol (true !)
236 236 >
237 237 > The "missing" line should stay, even though awol is dropped
238 238 > $ echo 'testing'
239 239 > test.ng (re) (true !)
240 240 > foo.ar (?)
241 241 > awol
242 242 > missing (?)
243 243 > EOF
244 244 $ rt test-failure.t
245 245 running 1 tests using 1 parallel processes
246 246
247 247 --- $TESTTMP/test-failure.t
248 248 +++ $TESTTMP/test-failure.t.err
249 249 @@ -1,11 +1,9 @@
250 250 $ true
251 251 - should go away (true !)
252 252 $ true
253 253 should stay (false !)
254 254
255 255 Should remove first line, not second or third
256 256 $ echo 'testing'
257 257 - baz*foo (glob) (true !)
258 258 foobar*foo (glob) (false !)
259 259 te*ting (glob) (true !)
260 260
261 261 foo.ar (re) (false !)
262 262 missing (?)
263 263 @@ -13,13 +11,10 @@
264 264 $ echo 'testing'
265 265 test.ng (re) (true !)
266 266 foo.ar (re) (false !)
267 267 - b.r (re) (true !)
268 268 missing (?)
269 269 - awol (true !)
270 270
271 271 The "missing" line should stay, even though awol is dropped
272 272 $ echo 'testing'
273 273 test.ng (re) (true !)
274 274 foo.ar (?)
275 275 - awol
276 276 missing (?)
277 277
278 278 ERROR: test-failure.t output changed
279 279 !
280 280 Failed test-failure.t: output changed
281 281 # Ran 1 tests, 0 skipped, 1 failed.
282 282 python hash seed: * (glob)
283 283 [1]
284 284
285 285 basic failing test
286 286 $ cat > test-failure.t << EOF
287 287 > $ echo babar
288 288 > rataxes
289 289 > This is a noop statement so that
290 290 > this test is still more bytes than success.
291 291 > pad pad pad pad............................................................
292 292 > pad pad pad pad............................................................
293 293 > pad pad pad pad............................................................
294 294 > pad pad pad pad............................................................
295 295 > pad pad pad pad............................................................
296 296 > pad pad pad pad............................................................
297 297 > EOF
298 298
299 299 >>> fh = open('test-failure-unicode.t', 'wb')
300 300 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
301 301 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
302 302
303 303 $ rt
304 304 running 3 tests using 1 parallel processes
305 305
306 306 --- $TESTTMP/test-failure.t
307 307 +++ $TESTTMP/test-failure.t.err
308 308 @@ -1,5 +1,5 @@
309 309 $ echo babar
310 310 - rataxes
311 311 + babar
312 312 This is a noop statement so that
313 313 this test is still more bytes than success.
314 314 pad pad pad pad............................................................
315 315
316 316 ERROR: test-failure.t output changed
317 317 !.
318 318 --- $TESTTMP/test-failure-unicode.t
319 319 +++ $TESTTMP/test-failure-unicode.t.err
320 320 @@ -1,2 +1,2 @@
321 321 $ echo babar\xce\xb1 (esc)
322 322 - l\xce\xb5\xce\xb5t (esc)
323 323 + babar\xce\xb1 (esc)
324 324
325 325 ERROR: test-failure-unicode.t output changed
326 326 !
327 327 Failed test-failure.t: output changed
328 328 Failed test-failure-unicode.t: output changed
329 329 # Ran 3 tests, 0 skipped, 2 failed.
330 330 python hash seed: * (glob)
331 331 [1]
332 332
333 333 test --outputdir
334 334 $ mkdir output
335 335 $ rt --outputdir output
336 336 running 3 tests using 1 parallel processes
337 337
338 338 --- $TESTTMP/test-failure.t
339 339 +++ $TESTTMP/output/test-failure.t.err
340 340 @@ -1,5 +1,5 @@
341 341 $ echo babar
342 342 - rataxes
343 343 + babar
344 344 This is a noop statement so that
345 345 this test is still more bytes than success.
346 346 pad pad pad pad............................................................
347 347
348 348 ERROR: test-failure.t output changed
349 349 !.
350 350 --- $TESTTMP/test-failure-unicode.t
351 351 +++ $TESTTMP/output/test-failure-unicode.t.err
352 352 @@ -1,2 +1,2 @@
353 353 $ echo babar\xce\xb1 (esc)
354 354 - l\xce\xb5\xce\xb5t (esc)
355 355 + babar\xce\xb1 (esc)
356 356
357 357 ERROR: test-failure-unicode.t output changed
358 358 !
359 359 Failed test-failure.t: output changed
360 360 Failed test-failure-unicode.t: output changed
361 361 # Ran 3 tests, 0 skipped, 2 failed.
362 362 python hash seed: * (glob)
363 363 [1]
364 364 $ ls -a output
365 365 .
366 366 ..
367 367 .testtimes
368 368 test-failure-unicode.t.err
369 369 test-failure.t.err
370 370
371 371 test --xunit support
372 372 $ rt --xunit=xunit.xml
373 373 running 3 tests using 1 parallel processes
374 374
375 375 --- $TESTTMP/test-failure.t
376 376 +++ $TESTTMP/test-failure.t.err
377 377 @@ -1,5 +1,5 @@
378 378 $ echo babar
379 379 - rataxes
380 380 + babar
381 381 This is a noop statement so that
382 382 this test is still more bytes than success.
383 383 pad pad pad pad............................................................
384 384
385 385 ERROR: test-failure.t output changed
386 386 !.
387 387 --- $TESTTMP/test-failure-unicode.t
388 388 +++ $TESTTMP/test-failure-unicode.t.err
389 389 @@ -1,2 +1,2 @@
390 390 $ echo babar\xce\xb1 (esc)
391 391 - l\xce\xb5\xce\xb5t (esc)
392 392 + babar\xce\xb1 (esc)
393 393
394 394 ERROR: test-failure-unicode.t output changed
395 395 !
396 396 Failed test-failure.t: output changed
397 397 Failed test-failure-unicode.t: output changed
398 398 # Ran 3 tests, 0 skipped, 2 failed.
399 399 python hash seed: * (glob)
400 400 [1]
401 401 $ cat xunit.xml
402 402 <?xml version="1.0" encoding="utf-8"?>
403 403 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
404 404 <testcase name="test-success.t" time="*"/> (glob)
405 405 <testcase name="test-failure-unicode.t" time="*"> (glob)
406 406 <failure message="output changed" type="output-mismatch">
407 407 <![CDATA[--- $TESTTMP/test-failure-unicode.t
408 408 +++ $TESTTMP/test-failure-unicode.t.err
409 409 @@ -1,2 +1,2 @@
410 410 $ echo babar\xce\xb1 (esc)
411 411 - l\xce\xb5\xce\xb5t (esc)
412 412 + babar\xce\xb1 (esc)
413 413 ]]> </failure>
414 414 </testcase>
415 415 <testcase name="test-failure.t" time="*"> (glob)
416 416 <failure message="output changed" type="output-mismatch">
417 417 <![CDATA[--- $TESTTMP/test-failure.t
418 418 +++ $TESTTMP/test-failure.t.err
419 419 @@ -1,5 +1,5 @@
420 420 $ echo babar
421 421 - rataxes
422 422 + babar
423 423 This is a noop statement so that
424 424 this test is still more bytes than success.
425 425 pad pad pad pad............................................................
426 426 ]]> </failure>
427 427 </testcase>
428 428 </testsuite>
429 429
430 430 $ cat .testtimes
431 431 test-empty.t * (glob)
432 432 test-failure-globs.t * (glob)
433 433 test-failure-unicode.t * (glob)
434 434 test-failure.t * (glob)
435 435 test-success.t * (glob)
436 436
437 437 $ rt --list-tests
438 438 test-failure-unicode.t
439 439 test-failure.t
440 440 test-success.t
441 441
442 442 $ rt --list-tests --json
443 443 test-failure-unicode.t
444 444 test-failure.t
445 445 test-success.t
446 446 $ cat report.json
447 447 testreport ={
448 448 "test-failure-unicode.t": {
449 449 "result": "success"
450 450 },
451 451 "test-failure.t": {
452 452 "result": "success"
453 453 },
454 454 "test-success.t": {
455 455 "result": "success"
456 456 }
457 457 } (no-eol)
458 458
459 459 $ rt --list-tests --xunit=xunit.xml
460 460 test-failure-unicode.t
461 461 test-failure.t
462 462 test-success.t
463 463 $ cat xunit.xml
464 464 <?xml version="1.0" encoding="utf-8"?>
465 465 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
466 466 <testcase name="test-failure-unicode.t"/>
467 467 <testcase name="test-failure.t"/>
468 468 <testcase name="test-success.t"/>
469 469 </testsuite>
470 470
471 471 $ rt --list-tests test-failure* --json --xunit=xunit.xml --outputdir output
472 472 test-failure-unicode.t
473 473 test-failure.t
474 474 $ cat output/report.json
475 475 testreport ={
476 476 "test-failure-unicode.t": {
477 477 "result": "success"
478 478 },
479 479 "test-failure.t": {
480 480 "result": "success"
481 481 }
482 482 } (no-eol)
483 483 $ cat xunit.xml
484 484 <?xml version="1.0" encoding="utf-8"?>
485 485 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
486 486 <testcase name="test-failure-unicode.t"/>
487 487 <testcase name="test-failure.t"/>
488 488 </testsuite>
489 489
490 490 $ rm test-failure-unicode.t
491 491
492 492 test for --retest
493 493 ====================
494 494
495 495 $ rt --retest
496 496 running 2 tests using 1 parallel processes
497 497
498 498 --- $TESTTMP/test-failure.t
499 499 +++ $TESTTMP/test-failure.t.err
500 500 @@ -1,5 +1,5 @@
501 501 $ echo babar
502 502 - rataxes
503 503 + babar
504 504 This is a noop statement so that
505 505 this test is still more bytes than success.
506 506 pad pad pad pad............................................................
507 507
508 508 ERROR: test-failure.t output changed
509 509 !
510 510 Failed test-failure.t: output changed
511 511 # Ran 2 tests, 1 skipped, 1 failed.
512 512 python hash seed: * (glob)
513 513 [1]
514 514
515 515 --retest works with --outputdir
516 516 $ rm -r output
517 517 $ mkdir output
518 518 $ mv test-failure.t.err output
519 519 $ rt --retest --outputdir output
520 520 running 2 tests using 1 parallel processes
521 521
522 522 --- $TESTTMP/test-failure.t
523 523 +++ $TESTTMP/output/test-failure.t.err
524 524 @@ -1,5 +1,5 @@
525 525 $ echo babar
526 526 - rataxes
527 527 + babar
528 528 This is a noop statement so that
529 529 this test is still more bytes than success.
530 530 pad pad pad pad............................................................
531 531
532 532 ERROR: test-failure.t output changed
533 533 !
534 534 Failed test-failure.t: output changed
535 535 # Ran 2 tests, 1 skipped, 1 failed.
536 536 python hash seed: * (glob)
537 537 [1]
538 538
539 539 Selecting Tests To Run
540 540 ======================
541 541
542 542 successful
543 543
544 544 $ rt test-success.t
545 545 running 1 tests using 1 parallel processes
546 546 .
547 547 # Ran 1 tests, 0 skipped, 0 failed.
548 548
549 549 success w/ keyword
550 550 $ rt -k xyzzy
551 551 running 2 tests using 1 parallel processes
552 552 .
553 553 # Ran 2 tests, 1 skipped, 0 failed.
554 554
555 555 failed
556 556
557 557 $ rt test-failure.t
558 558 running 1 tests using 1 parallel processes
559 559
560 560 --- $TESTTMP/test-failure.t
561 561 +++ $TESTTMP/test-failure.t.err
562 562 @@ -1,5 +1,5 @@
563 563 $ echo babar
564 564 - rataxes
565 565 + babar
566 566 This is a noop statement so that
567 567 this test is still more bytes than success.
568 568 pad pad pad pad............................................................
569 569
570 570 ERROR: test-failure.t output changed
571 571 !
572 572 Failed test-failure.t: output changed
573 573 # Ran 1 tests, 0 skipped, 1 failed.
574 574 python hash seed: * (glob)
575 575 [1]
576 576
577 577 failure w/ keyword
578 578 $ rt -k rataxes
579 579 running 2 tests using 1 parallel processes
580 580
581 581 --- $TESTTMP/test-failure.t
582 582 +++ $TESTTMP/test-failure.t.err
583 583 @@ -1,5 +1,5 @@
584 584 $ echo babar
585 585 - rataxes
586 586 + babar
587 587 This is a noop statement so that
588 588 this test is still more bytes than success.
589 589 pad pad pad pad............................................................
590 590
591 591 ERROR: test-failure.t output changed
592 592 !
593 593 Failed test-failure.t: output changed
594 594 # Ran 2 tests, 1 skipped, 1 failed.
595 595 python hash seed: * (glob)
596 596 [1]
597 597
598 598 Verify that when a process fails to start we show a useful message
599 599 ==================================================================
600 600
601 601 $ cat > test-serve-fail.t <<EOF
602 602 > $ echo 'abort: child process failed to start blah'
603 603 > EOF
604 604 $ rt test-serve-fail.t
605 605 running 1 tests using 1 parallel processes
606 606
607 607 --- $TESTTMP/test-serve-fail.t
608 608 +++ $TESTTMP/test-serve-fail.t.err
609 609 @@ -1* +1,2 @@ (glob)
610 610 $ echo 'abort: child process failed to start blah'
611 611 + abort: child process failed to start blah
612 612
613 613 ERROR: test-serve-fail.t output changed
614 614 !
615 615 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
616 616 # Ran 1 tests, 0 skipped, 1 failed.
617 617 python hash seed: * (glob)
618 618 [1]
619 619 $ rm test-serve-fail.t
620 620
621 621 Verify that we can try other ports
622 622 ===================================
623 623
624 624 Extensions aren't inherited by the invoked run-tests.py. An extension
625 625 introducing a repository requirement could cause this to fail. So we force
626 626 HGRCPATH to get a clean environment.
627 627
628 628 $ HGRCPATH= hg init inuse
629 629 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
630 630 $ cat blocks.pid >> $DAEMON_PIDS
631 631 $ cat > test-serve-inuse.t <<EOF
632 632 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
633 633 > $ cat hg.pid >> \$DAEMON_PIDS
634 634 > EOF
635 635 $ rt test-serve-inuse.t
636 636 running 1 tests using 1 parallel processes
637 637 .
638 638 # Ran 1 tests, 0 skipped, 0 failed.
639 639 $ rm test-serve-inuse.t
640 640 $ killdaemons.py $DAEMON_PIDS
641 641
642 642 Running In Debug Mode
643 643 ======================
644 644
645 645 $ rt --debug 2>&1 | grep -v pwd
646 646 running 2 tests using 1 parallel processes
647 + alias hg=hg.exe (windows !)
647 648 + echo *SALT* 0 0 (glob)
648 649 *SALT* 0 0 (glob)
649 650 + echo babar
650 651 babar
651 652 + echo *SALT* 10 0 (glob)
652 653 *SALT* 10 0 (glob)
654 .+ alias hg=hg.exe (windows !)
653 655 *+ echo *SALT* 0 0 (glob)
654 656 *SALT* 0 0 (glob)
655 657 + echo babar
656 658 babar
657 659 + echo *SALT* 2 0 (glob)
658 660 *SALT* 2 0 (glob)
659 661 + echo xyzzy
660 662 xyzzy
661 663 + echo *SALT* 9 0 (glob)
662 664 *SALT* 9 0 (glob)
663 665 + printf *abc\ndef\nxyz\n* (glob)
664 666 abc
665 667 def
666 668 xyz
667 669 + echo *SALT* 15 0 (glob)
668 670 *SALT* 15 0 (glob)
669 671 + printf *zyx\nwvu\ntsr\n* (glob)
670 672 zyx
671 673 wvu
672 674 tsr
673 675 + echo *SALT* 22 0 (glob)
674 676 *SALT* 22 0 (glob)
675 677 .
676 678 # Ran 2 tests, 0 skipped, 0 failed.
677 679
678 680 Parallel runs
679 681 ==============
680 682
681 683 (duplicate the failing test to get predictable output)
682 684 $ cp test-failure.t test-failure-copy.t
683 685
684 686 $ rt --jobs 2 test-failure*.t -n
685 687 running 2 tests using 2 parallel processes
686 688 !!
687 689 Failed test-failure*.t: output changed (glob)
688 690 Failed test-failure*.t: output changed (glob)
689 691 # Ran 2 tests, 0 skipped, 2 failed.
690 692 python hash seed: * (glob)
691 693 [1]
692 694
693 695 failures in parallel with --first should only print one failure
694 696 $ rt --jobs 2 --first test-failure*.t
695 697 running 2 tests using 2 parallel processes
696 698
697 699 --- $TESTTMP/test-failure*.t (glob)
698 700 +++ $TESTTMP/test-failure*.t.err (glob)
699 701 @@ -1,5 +1,5 @@
700 702 $ echo babar
701 703 - rataxes
702 704 + babar
703 705 This is a noop statement so that
704 706 this test is still more bytes than success.
705 707 pad pad pad pad............................................................
706 708
707 709 Failed test-failure*.t: output changed (glob)
708 710 Failed test-failure*.t: output changed (glob)
709 711 # Ran 2 tests, 0 skipped, 2 failed.
710 712 python hash seed: * (glob)
711 713 [1]
712 714
713 715
714 716 (delete the duplicated test file)
715 717 $ rm test-failure-copy.t
716 718
717 719
718 720 Interactive run
719 721 ===============
720 722
721 723 (backup the failing test)
722 724 $ cp test-failure.t backup
723 725
724 726 Refuse the fix
725 727
726 728 $ echo 'n' | rt -i
727 729 running 2 tests using 1 parallel processes
728 730
729 731 --- $TESTTMP/test-failure.t
730 732 +++ $TESTTMP/test-failure.t.err
731 733 @@ -1,5 +1,5 @@
732 734 $ echo babar
733 735 - rataxes
734 736 + babar
735 737 This is a noop statement so that
736 738 this test is still more bytes than success.
737 739 pad pad pad pad............................................................
738 740 Accept this change? [n]
739 741 ERROR: test-failure.t output changed
740 742 !.
741 743 Failed test-failure.t: output changed
742 744 # Ran 2 tests, 0 skipped, 1 failed.
743 745 python hash seed: * (glob)
744 746 [1]
745 747
746 748 $ cat test-failure.t
747 749 $ echo babar
748 750 rataxes
749 751 This is a noop statement so that
750 752 this test is still more bytes than success.
751 753 pad pad pad pad............................................................
752 754 pad pad pad pad............................................................
753 755 pad pad pad pad............................................................
754 756 pad pad pad pad............................................................
755 757 pad pad pad pad............................................................
756 758 pad pad pad pad............................................................
757 759
758 760 Interactive with custom view
759 761
760 762 $ echo 'n' | rt -i --view echo
761 763 running 2 tests using 1 parallel processes
762 764 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
763 765 Accept this change? [n]* (glob)
764 766 ERROR: test-failure.t output changed
765 767 !.
766 768 Failed test-failure.t: output changed
767 769 # Ran 2 tests, 0 skipped, 1 failed.
768 770 python hash seed: * (glob)
769 771 [1]
770 772
771 773 View the fix
772 774
773 775 $ echo 'y' | rt --view echo
774 776 running 2 tests using 1 parallel processes
775 777 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
776 778
777 779 ERROR: test-failure.t output changed
778 780 !.
779 781 Failed test-failure.t: output changed
780 782 # Ran 2 tests, 0 skipped, 1 failed.
781 783 python hash seed: * (glob)
782 784 [1]
783 785
784 786 Accept the fix
785 787
786 788 $ cat >> test-failure.t <<EOF
787 789 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
788 790 > saved backup bundle to \$TESTTMP/foo.hg
789 791 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
790 792 > saved backup bundle to $TESTTMP\\foo.hg
791 793 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
792 794 > saved backup bundle to \$TESTTMP/*.hg (glob)
793 795 > EOF
794 796 $ echo 'y' | rt -i 2>&1
795 797 running 2 tests using 1 parallel processes
796 798
797 799 --- $TESTTMP/test-failure.t
798 800 +++ $TESTTMP/test-failure.t.err
799 801 @@ -1,5 +1,5 @@
800 802 $ echo babar
801 803 - rataxes
802 804 + babar
803 805 This is a noop statement so that
804 806 this test is still more bytes than success.
805 807 pad pad pad pad............................................................
806 808 @@ -11,6 +11,6 @@
807 809 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
808 810 saved backup bundle to $TESTTMP/foo.hg
809 811 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
810 812 - saved backup bundle to $TESTTMP\foo.hg
811 813 + saved backup bundle to $TESTTMP/foo.hg
812 814 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
813 815 saved backup bundle to $TESTTMP/*.hg (glob)
814 816 Accept this change? [n] ..
815 817 # Ran 2 tests, 0 skipped, 0 failed.
816 818
817 819 $ sed -e 's,(glob)$,&<,g' test-failure.t
818 820 $ echo babar
819 821 babar
820 822 This is a noop statement so that
821 823 this test is still more bytes than success.
822 824 pad pad pad pad............................................................
823 825 pad pad pad pad............................................................
824 826 pad pad pad pad............................................................
825 827 pad pad pad pad............................................................
826 828 pad pad pad pad............................................................
827 829 pad pad pad pad............................................................
828 830 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
829 831 saved backup bundle to $TESTTMP/foo.hg
830 832 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
831 833 saved backup bundle to $TESTTMP/foo.hg
832 834 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
833 835 saved backup bundle to $TESTTMP/*.hg (glob)<
834 836
835 837 Race condition - test file was modified when test is running
836 838
837 839 $ TESTRACEDIR=`pwd`
838 840 $ export TESTRACEDIR
839 841 $ cat > test-race.t <<EOF
840 842 > $ echo 1
841 843 > $ echo "# a new line" >> $TESTRACEDIR/test-race.t
842 844 > EOF
843 845
844 846 $ rt -i test-race.t
845 847 running 1 tests using 1 parallel processes
846 848
847 849 --- $TESTTMP/test-race.t
848 850 +++ $TESTTMP/test-race.t.err
849 851 @@ -1,2 +1,3 @@
850 852 $ echo 1
851 853 + 1
852 854 $ echo "# a new line" >> $TESTTMP/test-race.t
853 855 Reference output has changed (run again to prompt changes)
854 856 ERROR: test-race.t output changed
855 857 !
856 858 Failed test-race.t: output changed
857 859 # Ran 1 tests, 0 skipped, 1 failed.
858 860 python hash seed: * (glob)
859 861 [1]
860 862
861 863 $ rm test-race.t
862 864
863 865 When "#testcases" is used in .t files
864 866
865 867 $ cat >> test-cases.t <<EOF
866 868 > #testcases a b
867 869 > #if a
868 870 > $ echo 1
869 871 > #endif
870 872 > #if b
871 873 > $ echo 2
872 874 > #endif
873 875 > EOF
874 876
875 877 $ cat <<EOF | rt -i test-cases.t 2>&1
876 878 > y
877 879 > y
878 880 > EOF
879 881 running 2 tests using 1 parallel processes
880 882
881 883 --- $TESTTMP/test-cases.t
882 884 +++ $TESTTMP/test-cases.t#a.err
883 885 @@ -1,6 +1,7 @@
884 886 #testcases a b
885 887 #if a
886 888 $ echo 1
887 889 + 1
888 890 #endif
889 891 #if b
890 892 $ echo 2
891 893 Accept this change? [n] .
892 894 --- $TESTTMP/test-cases.t
893 895 +++ $TESTTMP/test-cases.t#b.err
894 896 @@ -5,4 +5,5 @@
895 897 #endif
896 898 #if b
897 899 $ echo 2
898 900 + 2
899 901 #endif
900 902 Accept this change? [n] .
901 903 # Ran 2 tests, 0 skipped, 0 failed.
902 904
903 905 $ cat test-cases.t
904 906 #testcases a b
905 907 #if a
906 908 $ echo 1
907 909 1
908 910 #endif
909 911 #if b
910 912 $ echo 2
911 913 2
912 914 #endif
913 915
914 916 $ cat >> test-cases.t <<'EOF'
915 917 > #if a
916 918 > $ NAME=A
917 919 > #else
918 920 > $ NAME=B
919 921 > #endif
920 922 > $ echo $NAME
921 923 > A (a !)
922 924 > B (b !)
923 925 > EOF
924 926 $ rt test-cases.t
925 927 running 2 tests using 1 parallel processes
926 928 ..
927 929 # Ran 2 tests, 0 skipped, 0 failed.
928 930
929 931 When using multiple dimensions of "#testcases" in .t files
930 932
931 933 $ cat > test-cases.t <<'EOF'
932 934 > #testcases a b
933 935 > #testcases c d
934 936 > #if a d
935 937 > $ echo $TESTCASE
936 938 > a#d
937 939 > #endif
938 940 > #if b c
939 941 > $ echo yes
940 942 > no
941 943 > #endif
942 944 > EOF
943 945 $ rt test-cases.t
944 946 running 4 tests using 1 parallel processes
945 947 ..
946 948 --- $TESTTMP/test-cases.t
947 949 +++ $TESTTMP/test-cases.t#b#c.err
948 950 @@ -6,5 +6,5 @@
949 951 #endif
950 952 #if b c
951 953 $ echo yes
952 954 - no
953 955 + yes
954 956 #endif
955 957
956 958 ERROR: test-cases.t#b#c output changed
957 959 !.
958 960 Failed test-cases.t#b#c: output changed
959 961 # Ran 4 tests, 0 skipped, 1 failed.
960 962 python hash seed: * (glob)
961 963 [1]
962 964
963 965 $ rm test-cases.t#b#c.err
964 966 $ rm test-cases.t
965 967
966 968 (reinstall)
967 969 $ mv backup test-failure.t
968 970
969 971 No Diff
970 972 ===============
971 973
972 974 $ rt --nodiff
973 975 running 2 tests using 1 parallel processes
974 976 !.
975 977 Failed test-failure.t: output changed
976 978 # Ran 2 tests, 0 skipped, 1 failed.
977 979 python hash seed: * (glob)
978 980 [1]
979 981
980 982 test --tmpdir support
981 983 $ rt --tmpdir=$TESTTMP/keep test-success.t
982 984 running 1 tests using 1 parallel processes
983 985
984 986 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t
985 987 Keeping threadtmp dir: $TESTTMP/keep/child1
986 988 .
987 989 # Ran 1 tests, 0 skipped, 0 failed.
988 990
989 991 timeouts
990 992 ========
991 993 $ cat > test-timeout.t <<EOF
992 994 > $ sleep 2
993 995 > $ echo pass
994 996 > pass
995 997 > EOF
996 998 > echo '#require slow' > test-slow-timeout.t
997 999 > cat test-timeout.t >> test-slow-timeout.t
998 1000 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
999 1001 running 2 tests using 1 parallel processes
1000 1002 st
1001 1003 Skipped test-slow-timeout.t: missing feature: allow slow tests (use --allow-slow-tests)
1002 1004 Failed test-timeout.t: timed out
1003 1005 # Ran 1 tests, 1 skipped, 1 failed.
1004 1006 python hash seed: * (glob)
1005 1007 [1]
1006 1008 $ rt --timeout=1 --slowtimeout=3 \
1007 1009 > test-timeout.t test-slow-timeout.t --allow-slow-tests
1008 1010 running 2 tests using 1 parallel processes
1009 1011 .t
1010 1012 Failed test-timeout.t: timed out
1011 1013 # Ran 2 tests, 0 skipped, 1 failed.
1012 1014 python hash seed: * (glob)
1013 1015 [1]
1014 1016 $ rm test-timeout.t test-slow-timeout.t
1015 1017
1016 1018 test for --time
1017 1019 ==================
1018 1020
1019 1021 $ rt test-success.t --time
1020 1022 running 1 tests using 1 parallel processes
1021 1023 .
1022 1024 # Ran 1 tests, 0 skipped, 0 failed.
1023 1025 # Producing time report
1024 1026 start end cuser csys real Test
1025 1027 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
1026 1028
1027 1029 test for --time with --job enabled
1028 1030 ====================================
1029 1031
1030 1032 $ rt test-success.t --time --jobs 2
1031 1033 running 1 tests using 1 parallel processes
1032 1034 .
1033 1035 # Ran 1 tests, 0 skipped, 0 failed.
1034 1036 # Producing time report
1035 1037 start end cuser csys real Test
1036 1038 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
1037 1039
1038 1040 Skips
1039 1041 ================
1040 1042 $ cat > test-skip.t <<EOF
1041 1043 > $ echo xyzzy
1042 1044 > #if true
1043 1045 > #require false
1044 1046 > #end
1045 1047 > EOF
1046 1048 $ cat > test-noskip.t <<EOF
1047 1049 > #if false
1048 1050 > #require false
1049 1051 > #endif
1050 1052 > EOF
1051 1053 $ rt --nodiff
1052 1054 running 4 tests using 1 parallel processes
1053 1055 !.s.
1054 1056 Skipped test-skip.t: missing feature: nail clipper
1055 1057 Failed test-failure.t: output changed
1056 1058 # Ran 3 tests, 1 skipped, 1 failed.
1057 1059 python hash seed: * (glob)
1058 1060 [1]
1059 1061
1060 1062 $ rm test-noskip.t
1061 1063 $ rt --keyword xyzzy
1062 1064 running 3 tests using 1 parallel processes
1063 1065 .s
1064 1066 Skipped test-skip.t: missing feature: nail clipper
1065 1067 # Ran 2 tests, 2 skipped, 0 failed.
1066 1068
1067 1069 Skips with xml
1068 1070 $ rt --keyword xyzzy \
1069 1071 > --xunit=xunit.xml
1070 1072 running 3 tests using 1 parallel processes
1071 1073 .s
1072 1074 Skipped test-skip.t: missing feature: nail clipper
1073 1075 # Ran 2 tests, 2 skipped, 0 failed.
1074 1076 $ cat xunit.xml
1075 1077 <?xml version="1.0" encoding="utf-8"?>
1076 1078 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
1077 1079 <testcase name="test-success.t" time="*"/> (glob)
1078 1080 <testcase name="test-skip.t">
1079 1081 <skipped>
1080 1082 <![CDATA[missing feature: nail clipper]]> </skipped>
1081 1083 </testcase>
1082 1084 </testsuite>
1083 1085
1084 1086 Missing skips or blacklisted skips don't count as executed:
1085 1087 $ echo test-failure.t > blacklist
1086 1088 $ rt --blacklist=blacklist --json\
1087 1089 > test-failure.t test-bogus.t
1088 1090 running 2 tests using 1 parallel processes
1089 1091 ss
1090 1092 Skipped test-bogus.t: Doesn't exist
1091 1093 Skipped test-failure.t: blacklisted
1092 1094 # Ran 0 tests, 2 skipped, 0 failed.
1093 1095 $ cat report.json
1094 1096 testreport ={
1095 1097 "test-bogus.t": {
1096 1098 "result": "skip"
1097 1099 },
1098 1100 "test-failure.t": {
1099 1101 "result": "skip"
1100 1102 }
1101 1103 } (no-eol)
1102 1104
1103 1105 Whitelist trumps blacklist
1104 1106 $ echo test-failure.t > whitelist
1105 1107 $ rt --blacklist=blacklist --whitelist=whitelist --json\
1106 1108 > test-failure.t test-bogus.t
1107 1109 running 2 tests using 1 parallel processes
1108 1110 s
1109 1111 --- $TESTTMP/test-failure.t
1110 1112 +++ $TESTTMP/test-failure.t.err
1111 1113 @@ -1,5 +1,5 @@
1112 1114 $ echo babar
1113 1115 - rataxes
1114 1116 + babar
1115 1117 This is a noop statement so that
1116 1118 this test is still more bytes than success.
1117 1119 pad pad pad pad............................................................
1118 1120
1119 1121 ERROR: test-failure.t output changed
1120 1122 !
1121 1123 Skipped test-bogus.t: Doesn't exist
1122 1124 Failed test-failure.t: output changed
1123 1125 # Ran 1 tests, 1 skipped, 1 failed.
1124 1126 python hash seed: * (glob)
1125 1127 [1]
1126 1128
1127 1129 Ensure that --test-list causes only the tests listed in that file to
1128 1130 be executed.
1129 1131 $ echo test-success.t >> onlytest
1130 1132 $ rt --test-list=onlytest
1131 1133 running 1 tests using 1 parallel processes
1132 1134 .
1133 1135 # Ran 1 tests, 0 skipped, 0 failed.
1134 1136 $ echo test-bogus.t >> anothertest
1135 1137 $ rt --test-list=onlytest --test-list=anothertest
1136 1138 running 2 tests using 1 parallel processes
1137 1139 s.
1138 1140 Skipped test-bogus.t: Doesn't exist
1139 1141 # Ran 1 tests, 1 skipped, 0 failed.
1140 1142 $ rm onlytest anothertest
1141 1143
1142 1144 test for --json
1143 1145 ==================
1144 1146
1145 1147 $ rt --json
1146 1148 running 3 tests using 1 parallel processes
1147 1149
1148 1150 --- $TESTTMP/test-failure.t
1149 1151 +++ $TESTTMP/test-failure.t.err
1150 1152 @@ -1,5 +1,5 @@
1151 1153 $ echo babar
1152 1154 - rataxes
1153 1155 + babar
1154 1156 This is a noop statement so that
1155 1157 this test is still more bytes than success.
1156 1158 pad pad pad pad............................................................
1157 1159
1158 1160 ERROR: test-failure.t output changed
1159 1161 !.s
1160 1162 Skipped test-skip.t: missing feature: nail clipper
1161 1163 Failed test-failure.t: output changed
1162 1164 # Ran 2 tests, 1 skipped, 1 failed.
1163 1165 python hash seed: * (glob)
1164 1166 [1]
1165 1167
1166 1168 $ cat report.json
1167 1169 testreport ={
1168 1170 "test-failure.t": [\{] (re)
1169 1171 "csys": "\s*[\d\.]{4,5}", ? (re)
1170 1172 "cuser": "\s*[\d\.]{4,5}", ? (re)
1171 1173 "diff": "---.+\+\+\+.+", ? (re)
1172 1174 "end": "\s*[\d\.]{4,5}", ? (re)
1173 1175 "result": "failure", ? (re)
1174 1176 "start": "\s*[\d\.]{4,5}", ? (re)
1175 1177 "time": "\s*[\d\.]{4,5}" (re)
1176 1178 }, ? (re)
1177 1179 "test-skip.t": {
1178 1180 "csys": "\s*[\d\.]{4,5}", ? (re)
1179 1181 "cuser": "\s*[\d\.]{4,5}", ? (re)
1180 1182 "diff": "", ? (re)
1181 1183 "end": "\s*[\d\.]{4,5}", ? (re)
1182 1184 "result": "skip", ? (re)
1183 1185 "start": "\s*[\d\.]{4,5}", ? (re)
1184 1186 "time": "\s*[\d\.]{4,5}" (re)
1185 1187 }, ? (re)
1186 1188 "test-success.t": [\{] (re)
1187 1189 "csys": "\s*[\d\.]{4,5}", ? (re)
1188 1190 "cuser": "\s*[\d\.]{4,5}", ? (re)
1189 1191 "diff": "", ? (re)
1190 1192 "end": "\s*[\d\.]{4,5}", ? (re)
1191 1193 "result": "success", ? (re)
1192 1194 "start": "\s*[\d\.]{4,5}", ? (re)
1193 1195 "time": "\s*[\d\.]{4,5}" (re)
1194 1196 }
1195 1197 } (no-eol)
1196 1198 --json with --outputdir
1197 1199
1198 1200 $ rm report.json
1199 1201 $ rm -r output
1200 1202 $ mkdir output
1201 1203 $ rt --json --outputdir output
1202 1204 running 3 tests using 1 parallel processes
1203 1205
1204 1206 --- $TESTTMP/test-failure.t
1205 1207 +++ $TESTTMP/output/test-failure.t.err
1206 1208 @@ -1,5 +1,5 @@
1207 1209 $ echo babar
1208 1210 - rataxes
1209 1211 + babar
1210 1212 This is a noop statement so that
1211 1213 this test is still more bytes than success.
1212 1214 pad pad pad pad............................................................
1213 1215
1214 1216 ERROR: test-failure.t output changed
1215 1217 !.s
1216 1218 Skipped test-skip.t: missing feature: nail clipper
1217 1219 Failed test-failure.t: output changed
1218 1220 # Ran 2 tests, 1 skipped, 1 failed.
1219 1221 python hash seed: * (glob)
1220 1222 [1]
1221 1223 $ f report.json
1222 1224 report.json: file not found
1223 1225 $ cat output/report.json
1224 1226 testreport ={
1225 1227 "test-failure.t": [\{] (re)
1226 1228 "csys": "\s*[\d\.]{4,5}", ? (re)
1227 1229 "cuser": "\s*[\d\.]{4,5}", ? (re)
1228 1230 "diff": "---.+\+\+\+.+", ? (re)
1229 1231 "end": "\s*[\d\.]{4,5}", ? (re)
1230 1232 "result": "failure", ? (re)
1231 1233 "start": "\s*[\d\.]{4,5}", ? (re)
1232 1234 "time": "\s*[\d\.]{4,5}" (re)
1233 1235 }, ? (re)
1234 1236 "test-skip.t": {
1235 1237 "csys": "\s*[\d\.]{4,5}", ? (re)
1236 1238 "cuser": "\s*[\d\.]{4,5}", ? (re)
1237 1239 "diff": "", ? (re)
1238 1240 "end": "\s*[\d\.]{4,5}", ? (re)
1239 1241 "result": "skip", ? (re)
1240 1242 "start": "\s*[\d\.]{4,5}", ? (re)
1241 1243 "time": "\s*[\d\.]{4,5}" (re)
1242 1244 }, ? (re)
1243 1245 "test-success.t": [\{] (re)
1244 1246 "csys": "\s*[\d\.]{4,5}", ? (re)
1245 1247 "cuser": "\s*[\d\.]{4,5}", ? (re)
1246 1248 "diff": "", ? (re)
1247 1249 "end": "\s*[\d\.]{4,5}", ? (re)
1248 1250 "result": "success", ? (re)
1249 1251 "start": "\s*[\d\.]{4,5}", ? (re)
1250 1252 "time": "\s*[\d\.]{4,5}" (re)
1251 1253 }
1252 1254 } (no-eol)
1253 1255 $ ls -a output
1254 1256 .
1255 1257 ..
1256 1258 .testtimes
1257 1259 report.json
1258 1260 test-failure.t.err
1259 1261
1260 1262 Test that failed test accepted through interactive are properly reported:
1261 1263
1262 1264 $ cp test-failure.t backup
1263 1265 $ echo y | rt --json -i
1264 1266 running 3 tests using 1 parallel processes
1265 1267
1266 1268 --- $TESTTMP/test-failure.t
1267 1269 +++ $TESTTMP/test-failure.t.err
1268 1270 @@ -1,5 +1,5 @@
1269 1271 $ echo babar
1270 1272 - rataxes
1271 1273 + babar
1272 1274 This is a noop statement so that
1273 1275 this test is still more bytes than success.
1274 1276 pad pad pad pad............................................................
1275 1277 Accept this change? [n] ..s
1276 1278 Skipped test-skip.t: missing feature: nail clipper
1277 1279 # Ran 2 tests, 1 skipped, 0 failed.
1278 1280
1279 1281 $ cat report.json
1280 1282 testreport ={
1281 1283 "test-failure.t": [\{] (re)
1282 1284 "csys": "\s*[\d\.]{4,5}", ? (re)
1283 1285 "cuser": "\s*[\d\.]{4,5}", ? (re)
1284 1286 "diff": "", ? (re)
1285 1287 "end": "\s*[\d\.]{4,5}", ? (re)
1286 1288 "result": "success", ? (re)
1287 1289 "start": "\s*[\d\.]{4,5}", ? (re)
1288 1290 "time": "\s*[\d\.]{4,5}" (re)
1289 1291 }, ? (re)
1290 1292 "test-skip.t": {
1291 1293 "csys": "\s*[\d\.]{4,5}", ? (re)
1292 1294 "cuser": "\s*[\d\.]{4,5}", ? (re)
1293 1295 "diff": "", ? (re)
1294 1296 "end": "\s*[\d\.]{4,5}", ? (re)
1295 1297 "result": "skip", ? (re)
1296 1298 "start": "\s*[\d\.]{4,5}", ? (re)
1297 1299 "time": "\s*[\d\.]{4,5}" (re)
1298 1300 }, ? (re)
1299 1301 "test-success.t": [\{] (re)
1300 1302 "csys": "\s*[\d\.]{4,5}", ? (re)
1301 1303 "cuser": "\s*[\d\.]{4,5}", ? (re)
1302 1304 "diff": "", ? (re)
1303 1305 "end": "\s*[\d\.]{4,5}", ? (re)
1304 1306 "result": "success", ? (re)
1305 1307 "start": "\s*[\d\.]{4,5}", ? (re)
1306 1308 "time": "\s*[\d\.]{4,5}" (re)
1307 1309 }
1308 1310 } (no-eol)
1309 1311 $ mv backup test-failure.t
1310 1312
1311 1313 backslash on end of line with glob matching is handled properly
1312 1314
1313 1315 $ cat > test-glob-backslash.t << EOF
1314 1316 > $ echo 'foo bar \\'
1315 1317 > foo * \ (glob)
1316 1318 > EOF
1317 1319
1318 1320 $ rt test-glob-backslash.t
1319 1321 running 1 tests using 1 parallel processes
1320 1322 .
1321 1323 # Ran 1 tests, 0 skipped, 0 failed.
1322 1324
1323 1325 $ rm -f test-glob-backslash.t
1324 1326
1325 1327 Test globbing of local IP addresses
1326 1328 $ echo 172.16.18.1
1327 1329 $LOCALIP (glob)
1328 1330 $ echo dead:beef::1
1329 1331 $LOCALIP (glob)
1330 1332
1331 1333 Add support for external test formatter
1332 1334 =======================================
1333 1335
1334 1336 $ CUSTOM_TEST_RESULT=basic_test_result "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` -j1 "$@" test-success.t test-failure.t
1335 1337 running 2 tests using 1 parallel processes
1336 1338
1337 1339 # Ran 2 tests, 0 skipped, 0 failed.
1338 1340 ON_START! <__main__.TestSuite tests=[<__main__.TTest testMethod=test-failure.t>, <__main__.TTest testMethod=test-success.t>]>
1339 1341 FAILURE! test-failure.t output changed
1340 1342 SUCCESS! test-success.t
1341 1343 ON_END!
1342 1344
1343 1345 Test reusability for third party tools
1344 1346 ======================================
1345 1347
1346 1348 $ mkdir "$TESTTMP"/anothertests
1347 1349 $ cd "$TESTTMP"/anothertests
1348 1350
1349 1351 test that `run-tests.py` can execute hghave, even if it runs not in
1350 1352 Mercurial source tree.
1351 1353
1352 1354 $ cat > test-hghave.t <<EOF
1353 1355 > #require true
1354 1356 > $ echo foo
1355 1357 > foo
1356 1358 > EOF
1357 1359 $ rt test-hghave.t
1358 1360 running 1 tests using 1 parallel processes
1359 1361 .
1360 1362 # Ran 1 tests, 0 skipped, 0 failed.
1361 1363
1362 1364 test that RUNTESTDIR refers the directory, in which `run-tests.py` now
1363 1365 running is placed.
1364 1366
1365 1367 $ cat > test-runtestdir.t <<EOF
1366 1368 > - $TESTDIR, in which test-run-tests.t is placed
1367 1369 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
1368 1370 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
1369 1371 >
1370 1372 > #if windows
1371 1373 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
1372 1374 > #else
1373 1375 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
1374 1376 > #endif
1375 1377 > If this prints a path, that means RUNTESTDIR didn't equal
1376 1378 > TESTDIR as it should have.
1377 1379 > $ test "\$RUNTESTDIR" = "$TESTDIR" || echo "\$RUNTESTDIR"
1378 1380 > This should print the start of check-code. If this passes but the
1379 1381 > previous check failed, that means we found a copy of check-code at whatever
1380 1382 > RUNTESTSDIR ended up containing, even though it doesn't match TESTDIR.
1381 1383 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py | sed 's@.!.*python@#!USRBINENVPY@'
1382 1384 > #!USRBINENVPY
1383 1385 > #
1384 1386 > # check-code - a style and portability checker for Mercurial
1385 1387 > EOF
1386 1388 $ rt test-runtestdir.t
1387 1389 running 1 tests using 1 parallel processes
1388 1390 .
1389 1391 # Ran 1 tests, 0 skipped, 0 failed.
1390 1392
1391 1393 #if execbit
1392 1394
1393 1395 test that TESTDIR is referred in PATH
1394 1396
1395 1397 $ cat > custom-command.sh <<EOF
1396 1398 > #!/bin/sh
1397 1399 > echo "hello world"
1398 1400 > EOF
1399 1401 $ chmod +x custom-command.sh
1400 1402 $ cat > test-testdir-path.t <<EOF
1401 1403 > $ custom-command.sh
1402 1404 > hello world
1403 1405 > EOF
1404 1406 $ rt test-testdir-path.t
1405 1407 running 1 tests using 1 parallel processes
1406 1408 .
1407 1409 # Ran 1 tests, 0 skipped, 0 failed.
1408 1410
1409 1411 #endif
1410 1412
1411 1413 test support for --allow-slow-tests
1412 1414 $ cat > test-very-slow-test.t <<EOF
1413 1415 > #require slow
1414 1416 > $ echo pass
1415 1417 > pass
1416 1418 > EOF
1417 1419 $ rt test-very-slow-test.t
1418 1420 running 1 tests using 1 parallel processes
1419 1421 s
1420 1422 Skipped test-very-slow-test.t: missing feature: allow slow tests (use --allow-slow-tests)
1421 1423 # Ran 0 tests, 1 skipped, 0 failed.
1422 1424 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
1423 1425 running 1 tests using 1 parallel processes
1424 1426 .
1425 1427 # Ran 1 tests, 0 skipped, 0 failed.
1426 1428
1427 1429 support for running a test outside the current directory
1428 1430 $ mkdir nonlocal
1429 1431 $ cat > nonlocal/test-is-not-here.t << EOF
1430 1432 > $ echo pass
1431 1433 > pass
1432 1434 > EOF
1433 1435 $ rt nonlocal/test-is-not-here.t
1434 1436 running 1 tests using 1 parallel processes
1435 1437 .
1436 1438 # Ran 1 tests, 0 skipped, 0 failed.
1437 1439
1438 1440 support for automatically discovering test if arg is a folder
1439 1441 $ mkdir tmp && cd tmp
1440 1442
1441 1443 $ cat > test-uno.t << EOF
1442 1444 > $ echo line
1443 1445 > line
1444 1446 > EOF
1445 1447
1446 1448 $ cp test-uno.t test-dos.t
1447 1449 $ cd ..
1448 1450 $ cp -R tmp tmpp
1449 1451 $ cp tmp/test-uno.t test-solo.t
1450 1452
1451 1453 $ rt tmp/ test-solo.t tmpp
1452 1454 running 5 tests using 1 parallel processes
1453 1455 .....
1454 1456 # Ran 5 tests, 0 skipped, 0 failed.
1455 1457 $ rm -rf tmp tmpp
1456 1458
1457 1459 support for running run-tests.py from another directory
1458 1460 $ mkdir tmp && cd tmp
1459 1461
1460 1462 $ cat > useful-file.sh << EOF
1461 1463 > important command
1462 1464 > EOF
1463 1465
1464 1466 $ cat > test-folder.t << EOF
1465 1467 > $ cat \$TESTDIR/useful-file.sh
1466 1468 > important command
1467 1469 > EOF
1468 1470
1469 1471 $ cat > test-folder-fail.t << EOF
1470 1472 > $ cat \$TESTDIR/useful-file.sh
1471 1473 > important commando
1472 1474 > EOF
1473 1475
1474 1476 $ cd ..
1475 1477 $ rt tmp/test-*.t
1476 1478 running 2 tests using 1 parallel processes
1477 1479
1478 1480 --- $TESTTMP/anothertests/tmp/test-folder-fail.t
1479 1481 +++ $TESTTMP/anothertests/tmp/test-folder-fail.t.err
1480 1482 @@ -1,2 +1,2 @@
1481 1483 $ cat $TESTDIR/useful-file.sh
1482 1484 - important commando
1483 1485 + important command
1484 1486
1485 1487 ERROR: test-folder-fail.t output changed
1486 1488 !.
1487 1489 Failed test-folder-fail.t: output changed
1488 1490 # Ran 2 tests, 0 skipped, 1 failed.
1489 1491 python hash seed: * (glob)
1490 1492 [1]
1491 1493
1492 1494 support for bisecting failed tests automatically
1493 1495 $ hg init bisect
1494 1496 $ cd bisect
1495 1497 $ cat >> test-bisect.t <<EOF
1496 1498 > $ echo pass
1497 1499 > pass
1498 1500 > EOF
1499 1501 $ hg add test-bisect.t
1500 1502 $ hg ci -m 'good'
1501 1503 $ cat >> test-bisect.t <<EOF
1502 1504 > $ echo pass
1503 1505 > fail
1504 1506 > EOF
1505 1507 $ hg ci -m 'bad'
1506 1508 $ rt --known-good-rev=0 test-bisect.t
1507 1509 running 1 tests using 1 parallel processes
1508 1510
1509 1511 --- $TESTTMP/anothertests/bisect/test-bisect.t
1510 1512 +++ $TESTTMP/anothertests/bisect/test-bisect.t.err
1511 1513 @@ -1,4 +1,4 @@
1512 1514 $ echo pass
1513 1515 pass
1514 1516 $ echo pass
1515 1517 - fail
1516 1518 + pass
1517 1519
1518 1520 ERROR: test-bisect.t output changed
1519 1521 !
1520 1522 Failed test-bisect.t: output changed
1521 1523 test-bisect.t broken by 72cbf122d116 (bad)
1522 1524 # Ran 1 tests, 0 skipped, 1 failed.
1523 1525 python hash seed: * (glob)
1524 1526 [1]
1525 1527
1526 1528 $ cd ..
1527 1529
1528 1530 support bisecting a separate repo
1529 1531
1530 1532 $ hg init bisect-dependent
1531 1533 $ cd bisect-dependent
1532 1534 $ cat > test-bisect-dependent.t <<EOF
1533 1535 > $ tail -1 \$TESTDIR/../bisect/test-bisect.t
1534 1536 > pass
1535 1537 > EOF
1536 1538 $ hg commit -Am dependent test-bisect-dependent.t
1537 1539
1538 1540 $ rt --known-good-rev=0 test-bisect-dependent.t
1539 1541 running 1 tests using 1 parallel processes
1540 1542
1541 1543 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1542 1544 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1543 1545 @@ -1,2 +1,2 @@
1544 1546 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1545 1547 - pass
1546 1548 + fail
1547 1549
1548 1550 ERROR: test-bisect-dependent.t output changed
1549 1551 !
1550 1552 Failed test-bisect-dependent.t: output changed
1551 1553 Failed to identify failure point for test-bisect-dependent.t
1552 1554 # Ran 1 tests, 0 skipped, 1 failed.
1553 1555 python hash seed: * (glob)
1554 1556 [1]
1555 1557
1556 1558 $ rt --bisect-repo=../test-bisect test-bisect-dependent.t
1557 1559 usage: run-tests.py [options] [tests]
1558 1560 run-tests.py: error: --bisect-repo cannot be used without --known-good-rev
1559 1561 [2]
1560 1562
1561 1563 $ rt --known-good-rev=0 --bisect-repo=../bisect test-bisect-dependent.t
1562 1564 running 1 tests using 1 parallel processes
1563 1565
1564 1566 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1565 1567 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1566 1568 @@ -1,2 +1,2 @@
1567 1569 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1568 1570 - pass
1569 1571 + fail
1570 1572
1571 1573 ERROR: test-bisect-dependent.t output changed
1572 1574 !
1573 1575 Failed test-bisect-dependent.t: output changed
1574 1576 test-bisect-dependent.t broken by 72cbf122d116 (bad)
1575 1577 # Ran 1 tests, 0 skipped, 1 failed.
1576 1578 python hash seed: * (glob)
1577 1579 [1]
1578 1580
1579 1581 $ cd ..
1580 1582
1581 1583 Test a broken #if statement doesn't break run-tests threading.
1582 1584 ==============================================================
1583 1585 $ mkdir broken
1584 1586 $ cd broken
1585 1587 $ cat > test-broken.t <<EOF
1586 1588 > true
1587 1589 > #if notarealhghavefeature
1588 1590 > $ false
1589 1591 > #endif
1590 1592 > EOF
1591 1593 $ for f in 1 2 3 4 ; do
1592 1594 > cat > test-works-$f.t <<EOF
1593 1595 > This is test case $f
1594 1596 > $ sleep 1
1595 1597 > EOF
1596 1598 > done
1597 1599 $ rt -j 2
1598 1600 running 5 tests using 2 parallel processes
1599 1601 ....
1600 1602 # Ran 5 tests, 0 skipped, 0 failed.
1601 1603 skipped: unknown feature: notarealhghavefeature
1602 1604
1603 1605 $ cd ..
1604 1606 $ rm -rf broken
1605 1607
1606 1608 Test cases in .t files
1607 1609 ======================
1608 1610 $ mkdir cases
1609 1611 $ cd cases
1610 1612 $ cat > test-cases-abc.t <<'EOF'
1611 1613 > #testcases A B C
1612 1614 > $ V=B
1613 1615 > #if A
1614 1616 > $ V=A
1615 1617 > #endif
1616 1618 > #if C
1617 1619 > $ V=C
1618 1620 > #endif
1619 1621 > $ echo $V | sed 's/A/C/'
1620 1622 > C
1621 1623 > #if C
1622 1624 > $ [ $V = C ]
1623 1625 > #endif
1624 1626 > #if A
1625 1627 > $ [ $V = C ]
1626 1628 > [1]
1627 1629 > #endif
1628 1630 > #if no-C
1629 1631 > $ [ $V = C ]
1630 1632 > [1]
1631 1633 > #endif
1632 1634 > $ [ $V = D ]
1633 1635 > [1]
1634 1636 > EOF
1635 1637 $ rt
1636 1638 running 3 tests using 1 parallel processes
1637 1639 .
1638 1640 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1639 1641 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1640 1642 @@ -7,7 +7,7 @@
1641 1643 $ V=C
1642 1644 #endif
1643 1645 $ echo $V | sed 's/A/C/'
1644 1646 - C
1645 1647 + B
1646 1648 #if C
1647 1649 $ [ $V = C ]
1648 1650 #endif
1649 1651
1650 1652 ERROR: test-cases-abc.t#B output changed
1651 1653 !.
1652 1654 Failed test-cases-abc.t#B: output changed
1653 1655 # Ran 3 tests, 0 skipped, 1 failed.
1654 1656 python hash seed: * (glob)
1655 1657 [1]
1656 1658
1657 1659 --restart works
1658 1660
1659 1661 $ rt --restart
1660 1662 running 2 tests using 1 parallel processes
1661 1663
1662 1664 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1663 1665 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1664 1666 @@ -7,7 +7,7 @@
1665 1667 $ V=C
1666 1668 #endif
1667 1669 $ echo $V | sed 's/A/C/'
1668 1670 - C
1669 1671 + B
1670 1672 #if C
1671 1673 $ [ $V = C ]
1672 1674 #endif
1673 1675
1674 1676 ERROR: test-cases-abc.t#B output changed
1675 1677 !.
1676 1678 Failed test-cases-abc.t#B: output changed
1677 1679 # Ran 2 tests, 0 skipped, 1 failed.
1678 1680 python hash seed: * (glob)
1679 1681 [1]
1680 1682
1681 1683 --restart works with outputdir
1682 1684
1683 1685 $ mkdir output
1684 1686 $ mv test-cases-abc.t#B.err output
1685 1687 $ rt --restart --outputdir output
1686 1688 running 2 tests using 1 parallel processes
1687 1689
1688 1690 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1689 1691 +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t#B.err
1690 1692 @@ -7,7 +7,7 @@
1691 1693 $ V=C
1692 1694 #endif
1693 1695 $ echo $V | sed 's/A/C/'
1694 1696 - C
1695 1697 + B
1696 1698 #if C
1697 1699 $ [ $V = C ]
1698 1700 #endif
1699 1701
1700 1702 ERROR: test-cases-abc.t#B output changed
1701 1703 !.
1702 1704 Failed test-cases-abc.t#B: output changed
1703 1705 # Ran 2 tests, 0 skipped, 1 failed.
1704 1706 python hash seed: * (glob)
1705 1707 [1]
1706 1708
1707 1709 Test TESTCASE variable
1708 1710
1709 1711 $ cat > test-cases-ab.t <<'EOF'
1710 1712 > $ dostuff() {
1711 1713 > > echo "In case $TESTCASE"
1712 1714 > > }
1713 1715 > #testcases A B
1714 1716 > #if A
1715 1717 > $ dostuff
1716 1718 > In case A
1717 1719 > #endif
1718 1720 > #if B
1719 1721 > $ dostuff
1720 1722 > In case B
1721 1723 > #endif
1722 1724 > EOF
1723 1725 $ rt test-cases-ab.t
1724 1726 running 2 tests using 1 parallel processes
1725 1727 ..
1726 1728 # Ran 2 tests, 0 skipped, 0 failed.
1727 1729
1728 1730 Support running a specific test case
1729 1731
1730 1732 $ rt "test-cases-abc.t#B"
1731 1733 running 1 tests using 1 parallel processes
1732 1734
1733 1735 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1734 1736 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1735 1737 @@ -7,7 +7,7 @@
1736 1738 $ V=C
1737 1739 #endif
1738 1740 $ echo $V | sed 's/A/C/'
1739 1741 - C
1740 1742 + B
1741 1743 #if C
1742 1744 $ [ $V = C ]
1743 1745 #endif
1744 1746
1745 1747 ERROR: test-cases-abc.t#B output changed
1746 1748 !
1747 1749 Failed test-cases-abc.t#B: output changed
1748 1750 # Ran 1 tests, 0 skipped, 1 failed.
1749 1751 python hash seed: * (glob)
1750 1752 [1]
1751 1753
1752 1754 Support running multiple test cases in the same file
1753 1755
1754 1756 $ rt test-cases-abc.t#B test-cases-abc.t#C
1755 1757 running 2 tests using 1 parallel processes
1756 1758
1757 1759 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1758 1760 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1759 1761 @@ -7,7 +7,7 @@
1760 1762 $ V=C
1761 1763 #endif
1762 1764 $ echo $V | sed 's/A/C/'
1763 1765 - C
1764 1766 + B
1765 1767 #if C
1766 1768 $ [ $V = C ]
1767 1769 #endif
1768 1770
1769 1771 ERROR: test-cases-abc.t#B output changed
1770 1772 !.
1771 1773 Failed test-cases-abc.t#B: output changed
1772 1774 # Ran 2 tests, 0 skipped, 1 failed.
1773 1775 python hash seed: * (glob)
1774 1776 [1]
1775 1777
1776 1778 Support ignoring invalid test cases
1777 1779
1778 1780 $ rt test-cases-abc.t#B test-cases-abc.t#D
1779 1781 running 1 tests using 1 parallel processes
1780 1782
1781 1783 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1782 1784 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1783 1785 @@ -7,7 +7,7 @@
1784 1786 $ V=C
1785 1787 #endif
1786 1788 $ echo $V | sed 's/A/C/'
1787 1789 - C
1788 1790 + B
1789 1791 #if C
1790 1792 $ [ $V = C ]
1791 1793 #endif
1792 1794
1793 1795 ERROR: test-cases-abc.t#B output changed
1794 1796 !
1795 1797 Failed test-cases-abc.t#B: output changed
1796 1798 # Ran 1 tests, 0 skipped, 1 failed.
1797 1799 python hash seed: * (glob)
1798 1800 [1]
1799 1801
1800 1802 Support running complex test cases names
1801 1803
1802 1804 $ cat > test-cases-advanced-cases.t <<'EOF'
1803 1805 > #testcases simple case-with-dashes casewith_-.chars
1804 1806 > $ echo $TESTCASE
1805 1807 > simple
1806 1808 > EOF
1807 1809
1808 1810 $ cat test-cases-advanced-cases.t
1809 1811 #testcases simple case-with-dashes casewith_-.chars
1810 1812 $ echo $TESTCASE
1811 1813 simple
1812 1814
1813 1815 $ rt test-cases-advanced-cases.t
1814 1816 running 3 tests using 1 parallel processes
1815 1817
1816 1818 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1817 1819 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
1818 1820 @@ -1,3 +1,3 @@
1819 1821 #testcases simple case-with-dashes casewith_-.chars
1820 1822 $ echo $TESTCASE
1821 1823 - simple
1822 1824 + case-with-dashes
1823 1825
1824 1826 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1825 1827 !
1826 1828 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1827 1829 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
1828 1830 @@ -1,3 +1,3 @@
1829 1831 #testcases simple case-with-dashes casewith_-.chars
1830 1832 $ echo $TESTCASE
1831 1833 - simple
1832 1834 + casewith_-.chars
1833 1835
1834 1836 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1835 1837 !.
1836 1838 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1837 1839 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1838 1840 # Ran 3 tests, 0 skipped, 2 failed.
1839 1841 python hash seed: * (glob)
1840 1842 [1]
1841 1843
1842 1844 $ rt "test-cases-advanced-cases.t#case-with-dashes"
1843 1845 running 1 tests using 1 parallel processes
1844 1846
1845 1847 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1846 1848 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
1847 1849 @@ -1,3 +1,3 @@
1848 1850 #testcases simple case-with-dashes casewith_-.chars
1849 1851 $ echo $TESTCASE
1850 1852 - simple
1851 1853 + case-with-dashes
1852 1854
1853 1855 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1854 1856 !
1855 1857 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1856 1858 # Ran 1 tests, 0 skipped, 1 failed.
1857 1859 python hash seed: * (glob)
1858 1860 [1]
1859 1861
1860 1862 $ rt "test-cases-advanced-cases.t#casewith_-.chars"
1861 1863 running 1 tests using 1 parallel processes
1862 1864
1863 1865 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1864 1866 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
1865 1867 @@ -1,3 +1,3 @@
1866 1868 #testcases simple case-with-dashes casewith_-.chars
1867 1869 $ echo $TESTCASE
1868 1870 - simple
1869 1871 + casewith_-.chars
1870 1872
1871 1873 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1872 1874 !
1873 1875 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1874 1876 # Ran 1 tests, 0 skipped, 1 failed.
1875 1877 python hash seed: * (glob)
1876 1878 [1]
1877 1879
1878 1880 Test automatic pattern replacement
1879 1881 ==================================
1880 1882
1881 1883 $ cat << EOF >> common-pattern.py
1882 1884 > substitutions = [
1883 1885 > (br'foo-(.*)\\b',
1884 1886 > br'\$XXX=\\1\$'),
1885 1887 > (br'bar\\n',
1886 1888 > br'\$YYY$\\n'),
1887 1889 > ]
1888 1890 > EOF
1889 1891
1890 1892 $ cat << EOF >> test-substitution.t
1891 1893 > $ echo foo-12
1892 1894 > \$XXX=12$
1893 1895 > $ echo foo-42
1894 1896 > \$XXX=42$
1895 1897 > $ echo bar prior
1896 1898 > bar prior
1897 1899 > $ echo lastbar
1898 1900 > last\$YYY$
1899 1901 > $ echo foo-bar foo-baz
1900 1902 > EOF
1901 1903
1902 1904 $ rt test-substitution.t
1903 1905 running 1 tests using 1 parallel processes
1904 1906
1905 1907 --- $TESTTMP/anothertests/cases/test-substitution.t
1906 1908 +++ $TESTTMP/anothertests/cases/test-substitution.t.err
1907 1909 @@ -7,3 +7,4 @@
1908 1910 $ echo lastbar
1909 1911 last$YYY$
1910 1912 $ echo foo-bar foo-baz
1911 1913 + $XXX=bar foo-baz$
1912 1914
1913 1915 ERROR: test-substitution.t output changed
1914 1916 !
1915 1917 Failed test-substitution.t: output changed
1916 1918 # Ran 1 tests, 0 skipped, 1 failed.
1917 1919 python hash seed: * (glob)
1918 1920 [1]
1919 1921
1920 1922 --extra-config-opt works
1921 1923
1922 1924 $ cat << EOF >> test-config-opt.t
1923 1925 > $ hg init test-config-opt
1924 1926 > $ hg -R test-config-opt purge
1925 1927 > EOF
1926 1928
1927 1929 $ rt --extra-config-opt extensions.purge= test-config-opt.t
1928 1930 running 1 tests using 1 parallel processes
1929 1931 .
1930 1932 # Ran 1 tests, 0 skipped, 0 failed.
General Comments 0
You need to be logged in to leave comments. Login now