##// END OF EJS Templates
run-tests: report timeouts in a less alarming fashion...
Matt Mackall -
r27393:a40b623e default
parent child Browse files
Show More
@@ -1,2265 +1,2268 b''
1 1 #!/usr/bin/env python
2 2 #
3 3 # run-tests.py - Run a set of tests on Mercurial
4 4 #
5 5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 # Modifying this script is tricky because it has many modes:
11 11 # - serial (default) vs parallel (-jN, N > 1)
12 12 # - no coverage (default) vs coverage (-c, -C, -s)
13 13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 14 # - tests are a mix of shell scripts and Python scripts
15 15 #
16 16 # If you change this script, it is recommended that you ensure you
17 17 # haven't broken it by running it in various modes with a representative
18 18 # sample of test scripts. For example:
19 19 #
20 20 # 1) serial, no coverage, temp install:
21 21 # ./run-tests.py test-s*
22 22 # 2) serial, no coverage, local hg:
23 23 # ./run-tests.py --local test-s*
24 24 # 3) serial, coverage, temp install:
25 25 # ./run-tests.py -c test-s*
26 26 # 4) serial, coverage, local hg:
27 27 # ./run-tests.py -c --local test-s* # unsupported
28 28 # 5) parallel, no coverage, temp install:
29 29 # ./run-tests.py -j2 test-s*
30 30 # 6) parallel, no coverage, local hg:
31 31 # ./run-tests.py -j2 --local test-s*
32 32 # 7) parallel, coverage, temp install:
33 33 # ./run-tests.py -j2 -c test-s* # currently broken
34 34 # 8) parallel, coverage, local install:
35 35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 36 # 9) parallel, custom tmp dir:
37 37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 38 # 10) parallel, pure, tests that call run-tests:
39 39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 40 #
41 41 # (You could use any subset of the tests: test-s* happens to match
42 42 # enough that it's worth doing parallel runs, few enough that it
43 43 # completes fairly quickly, includes both shell and Python scripts, and
44 44 # includes some scripts that run daemon processes.)
45 45
46 46 from __future__ import print_function
47 47
48 48 from distutils import version
49 49 import difflib
50 50 import errno
51 51 import optparse
52 52 import os
53 53 import shutil
54 54 import subprocess
55 55 import signal
56 56 import socket
57 57 import sys
58 58 import tempfile
59 59 import time
60 60 import random
61 61 import re
62 62 import threading
63 63 import killdaemons as killmod
64 64 try:
65 65 import Queue as queue
66 66 except ImportError:
67 67 import queue
68 68 from xml.dom import minidom
69 69 import unittest
70 70
71 71 osenvironb = getattr(os, 'environb', os.environ)
72 72
73 73 try:
74 74 import json
75 75 except ImportError:
76 76 try:
77 77 import simplejson as json
78 78 except ImportError:
79 79 json = None
80 80
81 81 processlock = threading.Lock()
82 82
# Version gate: Python 3.5+ and 2.6/2.7 are supported; 3.0-3.4 are rejected.
# _bytespath/_strpath convert between str and the bytes paths this script
# uses internally (a no-op on Python 2, where paths are already bytes).
if sys.version_info > (3, 5, 0):
    PYTHON3 = True
    xrange = range # we use xrange in one place, and we'd rather not use range
    def _bytespath(p):
        # str -> bytes for use in filesystem paths / subprocess args.
        return p.encode('utf-8')

    def _strpath(p):
        # bytes -> str for display and unittest reporting.
        return p.decode('utf-8')

elif sys.version_info >= (3, 0, 0):
    print('%s is only supported on Python 3.5+ and 2.6-2.7, not %s' %
          (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
    sys.exit(70) # EX_SOFTWARE from `man 3 sysexit`
else:
    PYTHON3 = False

    # In python 2.x, path operations are generally done using
    # bytestrings by default, so we don't have to do any extra
    # fiddling there. We define the wrapper functions anyway just to
    # help keep code consistent between platforms.
    def _bytespath(p):
        return p

    _strpath = _bytespath
107 107
108 108 # For Windows support
109 109 wifexited = getattr(os, "WIFEXITED", lambda x: False)
110 110
def checkportisavailable(port):
    """return true if a port seems free to bind on localhost"""
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.bind(('localhost', port))
        return True
    except socket.error as exc:
        # EADDRINUSE means "taken"; anything else is a real error.
        if exc.errno != errno.EADDRINUSE:
            raise
        return False
    finally:
        # Always release the probe socket; the original leaked the file
        # descriptor whenever bind() raised.
        s.close()
122 122
closefds = os.name == 'posix'
def Popen4(cmd, wd, timeout, env=None):
    """Spawn `cmd` through the shell in directory `wd`.

    Returns the subprocess.Popen object, augmented with popen4-style
    aliases (fromchild/tochild/childerr) and a `timeout` attribute that
    a watchdog thread sets to True before terminating the process when
    `timeout` seconds elapse. A falsy timeout disables the watchdog.
    """
    # Serialize process creation (presumably to avoid races around
    # inherited handles -- confirm). Release the lock even when Popen
    # raises; the original would leave it held forever in that case.
    processlock.acquire()
    try:
        p = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=wd, env=env,
                             close_fds=closefds,
                             stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
    finally:
        processlock.release()

    # popen4-style handle aliases used by callers.
    p.fromchild = p.stdout
    p.tochild = p.stdin
    p.childerr = p.stderr

    p.timeout = False
    if timeout:
        def t():
            start = time.time()
            # Poll until the deadline passes or someone (the caller's
            # wait()) has recorded a returncode.
            while time.time() - start < timeout and p.returncode is None:
                time.sleep(.1)
            # Only flag a timeout if the process is still unreaped; the
            # original set p.timeout unconditionally here, which could
            # report a spurious timeout for a process that finished.
            if p.returncode is None:
                p.timeout = True
                terminate(p)
        threading.Thread(target=t).start()

    return p
148 148
# Path of the running interpreter, normalized to forward slashes (bytes).
PYTHON = _bytespath(sys.executable.replace('\\', '/'))
# Environment variable used to point the tests at this install's modules.
IMPL_PATH = b'PYTHONPATH'
if 'java' in sys.platform:
    IMPL_PATH = b'JYTHONPATH'

# Option name -> (environment override variable, fallback value).
# getparser() converts these in place to concrete default values.
defaults = {
    'jobs': ('HGTEST_JOBS', 1),
    'timeout': ('HGTEST_TIMEOUT', 180),
    'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500),
    'port': ('HGTEST_PORT', 20059),
    'shell': ('HGTEST_SHELL', 'sh'),
}
161 161
def parselistfiles(files, listtype, warn=True):
    """Parse blacklist/whitelist files into a dict.

    Returns a dict mapping each listed test name (bytes) to the filename
    it came from. Blank lines and '#' comments are ignored. Missing
    files are skipped (with a warning unless warn is False); other I/O
    errors propagate.
    """
    entries = dict()
    for filename in files:
        path = os.path.expanduser(os.path.expandvars(filename))
        try:
            f = open(path, "rb")
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            if warn:
                print("warning: no such %s file: %s" % (listtype, filename))
            continue

        # Close the handle even if reading raises; the original leaked
        # it on error.
        try:
            for line in f:
                # Strip trailing comments and surrounding whitespace.
                line = line.split(b'#', 1)[0].strip()
                if line:
                    entries[line] = filename
        finally:
            f.close()
    return entries
182 182
def getparser():
    """Obtain the OptionParser used by the CLI."""
    parser = optparse.OptionParser("%prog [options] [tests]")

    # keep these sorted
    parser.add_option("--blacklist", action="append",
        help="skip tests listed in the specified blacklist file")
    parser.add_option("--whitelist", action="append",
        help="always run tests listed in the specified whitelist file")
    parser.add_option("--changed", type="string",
        help="run tests that are changed in parent rev or working directory")
    parser.add_option("-C", "--annotate", action="store_true",
        help="output files annotated with coverage")
    parser.add_option("-c", "--cover", action="store_true",
        help="print a test coverage report")
    parser.add_option("-d", "--debug", action="store_true",
        help="debug mode: write output of test scripts to console"
             " rather than capturing and diffing it (disables timeout)")
    parser.add_option("-f", "--first", action="store_true",
        help="exit on the first test failure")
    parser.add_option("-H", "--htmlcov", action="store_true",
        help="create an HTML report of the coverage of the files")
    parser.add_option("-i", "--interactive", action="store_true",
        help="prompt to accept changed output")
    parser.add_option("-j", "--jobs", type="int",
        help="number of jobs to run in parallel"
             " (default: $%s or %d)" % defaults['jobs'])
    parser.add_option("--keep-tmpdir", action="store_true",
        help="keep temporary directory after running tests")
    parser.add_option("-k", "--keywords",
        help="run tests matching keywords")
    parser.add_option("-l", "--local", action="store_true",
        help="shortcut for --with-hg=<testdir>/../hg")
    parser.add_option("--loop", action="store_true",
        help="loop tests repeatedly")
    parser.add_option("--runs-per-test", type="int", dest="runs_per_test",
        help="run each test N times (default=1)", default=1)
    parser.add_option("-n", "--nodiff", action="store_true",
        help="skip showing test changes")
    parser.add_option("-p", "--port", type="int",
        help="port on which servers should listen"
             " (default: $%s or %d)" % defaults['port'])
    parser.add_option("--compiler", type="string",
        help="compiler to build with")
    parser.add_option("--pure", action="store_true",
        help="use pure Python code instead of C extensions")
    parser.add_option("-R", "--restart", action="store_true",
        help="restart at last error")
    parser.add_option("-r", "--retest", action="store_true",
        help="retest failed tests")
    parser.add_option("-S", "--noskips", action="store_true",
        help="don't report skip tests verbosely")
    parser.add_option("--shell", type="string",
        help="shell to use (default: $%s or %s)" % defaults['shell'])
    parser.add_option("-t", "--timeout", type="int",
        help="kill errant tests after TIMEOUT seconds"
             " (default: $%s or %d)" % defaults['timeout'])
    parser.add_option("--slowtimeout", type="int",
        help="kill errant slow tests after SLOWTIMEOUT seconds"
             " (default: $%s or %d)" % defaults['slowtimeout'])
    parser.add_option("--time", action="store_true",
        help="time how long each test takes")
    parser.add_option("--json", action="store_true",
        help="store test result data in 'report.json' file")
    parser.add_option("--tmpdir", type="string",
        help="run tests in the given temporary directory"
             " (implies --keep-tmpdir)")
    parser.add_option("-v", "--verbose", action="store_true",
        help="output verbose messages")
    parser.add_option("--xunit", type="string",
        help="record xunit results at specified path")
    parser.add_option("--view", type="string",
        help="external diff viewer")
    parser.add_option("--with-hg", type="string",
        metavar="HG",
        help="test using specified hg script rather than a "
             "temporary installation")
    parser.add_option("-3", "--py3k-warnings", action="store_true",
        help="enable Py3k warnings on Python 2.6+")
    parser.add_option('--extra-config-opt', action="append",
        help='set the given config opt in the test hgrc')
    parser.add_option('--random', action="store_true",
        help='run tests in random order')
    parser.add_option('--profile-runner', action='store_true',
        help='run statprof on run-tests')
    parser.add_option('--allow-slow-tests', action='store_true',
        help='allow extremely slow tests')

    # Resolve each default against its $HGTEST_* environment override,
    # coercing to the fallback's type. NOTE: this rewrites the
    # module-level `defaults` dict in place (option -> concrete value).
    for option, (envvar, default) in defaults.items():
        defaults[option] = type(default)(os.environ.get(envvar, default))
    parser.set_defaults(**defaults)

    return parser
276 276
def parseargs(args, parser):
    """Parse arguments with our OptionParser and validate results.

    Returns (options, args). May call parser.error() (which exits) on
    invalid combinations, and mutates the module-level `verbose` flag.
    """
    (options, args) = parser.parse_args(args)

    # jython is always pure
    if 'java' in sys.platform or '__pypy__' in sys.modules:
        options.pure = True

    if options.with_hg:
        options.with_hg = os.path.expanduser(options.with_hg)
        if not (os.path.isfile(options.with_hg) and
                os.access(options.with_hg, os.X_OK)):
            parser.error('--with-hg must specify an executable hg script')
        if not os.path.basename(options.with_hg) == 'hg':
            sys.stderr.write('warning: --with-hg should specify an hg script\n')
    if options.local:
        # --local is shorthand for --with-hg=<testdir>/../hg.
        testdir = os.path.dirname(_bytespath(os.path.realpath(sys.argv[0])))
        hgbin = os.path.join(os.path.dirname(testdir), b'hg')
        if os.name != 'nt' and not os.access(hgbin, os.X_OK):
            parser.error('--local specified, but %r not found or not executable'
                         % hgbin)
        options.with_hg = hgbin

    options.anycoverage = options.cover or options.annotate or options.htmlcov
    if options.anycoverage:
        try:
            import coverage
            covver = version.StrictVersion(coverage.__version__).version
            if covver < (3, 3):
                parser.error('coverage options require coverage 3.3 or later')
        except ImportError:
            parser.error('coverage options now require the coverage package')

    if options.anycoverage and options.local:
        # this needs some path mangling somewhere, I guess
        parser.error("sorry, coverage options do not work when --local "
                     "is specified")

    if options.anycoverage and options.with_hg:
        parser.error("sorry, coverage options do not work when --with-hg "
                     "is specified")

    global verbose
    if options.verbose:
        # Note: verbose becomes the empty string (a prefix for log()),
        # not True; vlog() tests it with `is False`, not truthiness.
        verbose = ''

    if options.tmpdir:
        options.tmpdir = os.path.expanduser(options.tmpdir)

    if options.jobs < 1:
        parser.error('--jobs must be positive')
    if options.interactive and options.debug:
        parser.error("-i/--interactive and -d/--debug are incompatible")
    if options.debug:
        # --debug streams output directly, so timeouts make no sense.
        if options.timeout != defaults['timeout']:
            sys.stderr.write(
                'warning: --timeout option ignored with --debug\n')
        if options.slowtimeout != defaults['slowtimeout']:
            sys.stderr.write(
                'warning: --slowtimeout option ignored with --debug\n')
        options.timeout = 0
        options.slowtimeout = 0
    if options.py3k_warnings:
        if PYTHON3:
            parser.error(
                '--py3k-warnings can only be used on Python 2.6 and 2.7')
    if options.blacklist:
        options.blacklist = parselistfiles(options.blacklist, 'blacklist')
    if options.whitelist:
        options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
    else:
        options.whitelisted = {}

    return (options, args)
351 351
def rename(src, dst):
    """Like os.rename(), trade atomicity and opened files friendliness
    for existing destination support.
    """
    # Copy first, then unlink the source: this succeeds where a plain
    # rename would refuse (e.g. an already-existing destination).
    shutil.copy(src, dst)
    os.unlink(src)
358 358
# Diff function operating on the bytes line-lists this script keeps.
# Python 3's difflib.unified_diff only accepts str, so wrap it with
# diff_bytes there; Python 2's handles byte strings directly.
_unified_diff = difflib.unified_diff
if PYTHON3:
    import functools
    _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
363 363
def getdiff(expected, output, ref, err):
    """Unified-diff `expected` against `output` (lists of bytes lines).

    Returns (servefail, lines): `lines` is the rendered diff with
    normalized file-header lines, and `servefail` is True when the diff
    contains a "child process failed to start" abort line.
    """
    servefail = False
    difflines = []
    for hunkline in _unified_diff(expected, output, ref, err):
        if hunkline.startswith((b'+++', b'---')):
            # Normalize path separators and trailing space in headers.
            hunkline = hunkline.replace(b'\\', b'/')
            if hunkline.endswith(b' \n'):
                hunkline = hunkline[:-2] + b'\n'
        difflines.append(hunkline)
        servefail = servefail or hunkline.startswith(
            b'+ abort: child process failed to start')

    return servefail, difflines
378 378
# Off until parseargs() turns it on; once enabled it holds a (possibly
# empty) string prefix, so identity against False is the real test.
verbose = False
def vlog(*msg):
    """Log only when in verbose mode."""
    if verbose is not False:
        return log(*msg)
386 386
# Bytes that break XML even in a CDATA block: control characters 0-31
# sans \t, \n and \r
CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")

def cdatasafe(data):
    """Make a string safe to include in a CDATA block.

    Certain control characters are illegal in a CDATA block, and
    there's no way to include a ]]> in a CDATA either. This function
    replaces illegal bytes with ? and adds a space between the ]] so
    that it won't break the CDATA block.
    """
    scrubbed = CDATA_EVIL.sub(b'?', data)
    return scrubbed.replace(b']]>', b'] ]>')
400 400
def log(*msg):
    """Log something to stdout.

    Arguments are printed space-separated on one line, serialized by
    the global output lock; any truthy `verbose` value is printed as a
    leading prefix.
    """
    with iolock:
        if verbose:
            print(verbose, end=' ')
        for piece in msg:
            print(piece, end=' ')
        print()
        sys.stdout.flush()
413 413
def terminate(proc):
    """Terminate subprocess (with fallback for Python versions < 2.6)"""
    vlog('# Terminating process %d' % proc.pid)
    # Popen.terminate() appeared in Python 2.6; older versions fall
    # back to sending SIGTERM by hand.
    kill = getattr(proc, 'terminate',
                   lambda: os.kill(proc.pid, signal.SIGTERM))
    try:
        kill()
    except OSError:
        # The process may already be gone; that's fine.
        pass
421 421
def killdaemons(pidfile):
    """Kill the daemon processes listed in `pidfile` and remove it."""
    return killmod.killdaemons(pidfile, logfn=vlog, tryhard=False,
                               remove=True)
425 425
class Test(unittest.TestCase):
    """Encapsulates a single, runnable test.

    While this class conforms to the unittest.TestCase API, it differs in that
    instances need to be instantiated manually. (Typically, unittest.TestCase
    classes are instantiated automatically by scanning modules.)
    """

    # Status code reserved for skipped tests (used by hghave).
    SKIPPED_STATUS = 80

    def __init__(self, path, tmpdir, keeptmpdir=False,
                 debug=False,
                 timeout=defaults['timeout'],
                 startport=defaults['port'], extraconfigopts=None,
                 py3kwarnings=False, shell=None,
                 slowtimeout=defaults['slowtimeout']):
        """Create a test from parameters.

        path is the full path to the file defining the test.

        tmpdir is the main temporary directory to use for this test.

        keeptmpdir determines whether to keep the test's temporary directory
        after execution. It defaults to removal (False).

        debug mode will make the test execute verbosely, with unfiltered
        output.

        timeout controls the maximum run time of the test. It is ignored when
        debug is True. See slowtimeout for tests with #require slow.

        slowtimeout overrides timeout if the test has #require slow.

        startport controls the starting port number to use for this test. Each
        test will reserve 3 port numbers for execution. It is the caller's
        responsibility to allocate a non-overlapping port range to Test
        instances.

        extraconfigopts is an iterable of extra hgrc config options. Values
        must have the form "key=value" (something understood by hgrc). Values
        of the form "foo.key=value" will result in "[foo] key=value".

        py3kwarnings enables Py3k warnings.

        shell is the shell to execute tests in.
        """
        # path is bytes; name is its str form, used for unittest reporting.
        self.path = path
        self.bname = os.path.basename(path)
        self.name = _strpath(self.bname)
        self._testdir = os.path.dirname(path)
        # Where actual output is written when it mismatches the reference.
        self.errpath = os.path.join(self._testdir, b'%s.err' % self.bname)

        self._threadtmp = tmpdir
        self._keeptmpdir = keeptmpdir
        self._debug = debug
        self._timeout = timeout
        self._slowtimeout = slowtimeout
        self._startport = startport
        self._extraconfigopts = extraconfigopts or []
        self._py3kwarnings = py3kwarnings
        self._shell = _bytespath(shell)

        # Per-run state; (re)initialized in setUp().
        self._aborted = False
        self._daemonpids = []
        self._finished = None
        self._ret = None
        self._out = None
        self._skipped = None
        self._testtmp = None

        # If we're not in --debug mode and reference output file exists,
        # check test output against it.
        if debug:
            self._refout = None # to match "out is None"
        elif os.path.exists(self.refpath):
            f = open(self.refpath, 'rb')
            self._refout = f.read().splitlines(True)
            f.close()
        else:
            self._refout = []

    # needed to get base class __repr__ running
    @property
    def _testMethodName(self):
        return self.name

    def __str__(self):
        return self.name

    def shortDescription(self):
        return self.name

    def setUp(self):
        """Tasks to perform before run()."""
        self._finished = False
        self._ret = None
        self._out = None
        self._skipped = None

        try:
            os.mkdir(self._threadtmp)
        except OSError as e:
            # Several tests can share a thread directory; only the first
            # mkdir succeeds, so EEXIST is expected here.
            if e.errno != errno.EEXIST:
                raise

        # Per-test scratch directory inside the thread directory.
        self._testtmp = os.path.join(self._threadtmp,
                                     os.path.basename(self.path))
        os.mkdir(self._testtmp)

        # Remove any previous output files.
        if os.path.exists(self.errpath):
            try:
                os.remove(self.errpath)
            except OSError as e:
                # We might have raced another test to clean up a .err
                # file, so ignore ENOENT when removing a previous .err
                # file.
                if e.errno != errno.ENOENT:
                    raise

    def run(self, result):
        """Run this test and report results against a TestResult instance."""
        # This function is extremely similar to unittest.TestCase.run(). Once
        # we require Python 2.7 (or at least its version of unittest), this
        # function can largely go away.
        self._result = result
        result.startTest(self)
        try:
            try:
                self.setUp()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                return

            success = False
            try:
                self.runTest()
            except KeyboardInterrupt:
                self._aborted = True
                raise
            except SkipTest as e:
                result.addSkip(self, str(e))
                # The base class will have already counted this as a
                # test we "ran", but we want to exclude skipped tests
                # from those we count towards those run.
                result.testsRun -= 1
            except IgnoreTest as e:
                result.addIgnore(self, str(e))
                # As with skips, ignores also should be excluded from
                # the number of tests executed.
                result.testsRun -= 1
            except WarnTest as e:
                result.addWarn(self, str(e))
            except self.failureException as e:
                # This differs from unittest in that we don't capture
                # the stack trace. This is for historical reasons and
                # this decision could be revisited in the future,
                # especially for PythonTest instances.
                if result.addFailure(self, str(e)):
                    success = True
            except Exception:
                result.addError(self, sys.exc_info())
            else:
                success = True

            # tearDown always runs; its failure converts a pass to an error.
            try:
                self.tearDown()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                success = False

            if success:
                result.addSuccess(self)
        finally:
            result.stopTest(self, interrupted=self._aborted)

    def runTest(self):
        """Run this test instance.

        This will return a tuple describing the result of the test.
        """
        env = self._getenv()
        self._daemonpids.append(env['DAEMON_PIDS'])
        self._createhgrc(env['HGRCPATH'])

        vlog('# Test', self.name)

        ret, out = self._run(env)
        self._finished = True
        self._ret = ret
        self._out = out

        def describe(ret):
            # Turn an exit status into a human-readable phrase.
            if ret < 0:
                return 'killed by signal: %d' % -ret
            return 'returned error code %d' % ret

        self._skipped = False

        if ret == self.SKIPPED_STATUS:
            if out is None: # Debug mode, nothing to parse.
                missing = ['unknown']
                failed = None
            else:
                missing, failed = TTest.parsehghaveoutput(out)

            if not missing:
                missing = ['skipped']

            if failed:
                self.fail('hg have failed checking for %s' % failed[-1])
            else:
                self._skipped = True
                raise SkipTest(missing[-1])
        elif ret == 'timeout':
            # _runcommand reports watchdog-killed runs with this marker.
            self.fail('timed out')
        elif ret is False:
            raise WarnTest('no result code from test')
        elif out != self._refout:
            # Diff generation may rely on written .err file.
            if (ret != 0 or out != self._refout) and not self._skipped \
                and not self._debug:
                f = open(self.errpath, 'wb')
                for line in out:
                    f.write(line)
                f.close()

            # The result object handles diff calculation for us.
            if self._result.addOutputMismatch(self, ret, out, self._refout):
                # change was accepted, skip failing
                return

            if ret:
                msg = 'output changed and ' + describe(ret)
            else:
                msg = 'output changed'

            self.fail(msg)
        elif ret:
            self.fail(describe(ret))

    def tearDown(self):
        """Tasks to perform after run()."""
        # Reap any daemons the test left behind.
        for entry in self._daemonpids:
            killdaemons(entry)
        self._daemonpids = []

        if self._keeptmpdir:
            log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
                (self._testtmp, self._threadtmp))
        else:
            shutil.rmtree(self._testtmp, True)
            shutil.rmtree(self._threadtmp, True)

        # (Re)write the .err file for failing, non-skipped runs so the
        # mismatching output survives for inspection.
        if (self._ret != 0 or self._out != self._refout) and not self._skipped \
            and not self._debug and self._out:
            f = open(self.errpath, 'wb')
            for line in self._out:
                f.write(line)
            f.close()

        vlog("# Ret was:", self._ret, '(%s)' % self.name)

    def _run(self, env):
        # This should be implemented in child classes to run tests.
        raise SkipTest('unknown test type')

    def abort(self):
        """Terminate execution of this test."""
        self._aborted = True

    def _getreplacements(self):
        """Obtain a mapping of text replacements to apply to test output.

        Test output needs to be normalized so it can be compared to expected
        output. This function defines how some of that normalization will
        occur.
        """
        # Reserved ports and the test directory become stable tokens.
        r = [
            (br':%d\b' % self._startport, b':$HGPORT'),
            (br':%d\b' % (self._startport + 1), b':$HGPORT1'),
            (br':%d\b' % (self._startport + 2), b':$HGPORT2'),
            (br'(?m)^(saved backup bundle to .*\.hg)( \(glob\))?$',
             br'\1 (glob)'),
        ]

        if os.name == 'nt':
            # Build a case-insensitive, slash-agnostic regex matching the
            # test directory character by character.
            # NOTE(review): iterating a bytes object yields ints on
            # Python 3, which would break the c.isalpha() calls below --
            # this branch looks Python-2-on-Windows specific; confirm.
            r.append(
                (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
                    c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
                    for c in self._testtmp), b'$TESTTMP'))
        else:
            r.append((re.escape(self._testtmp), b'$TESTTMP'))

        return r

    def _getenv(self):
        """Obtain environment variables to use during test execution."""
        env = os.environ.copy()
        env['TESTTMP'] = self._testtmp
        env['HOME'] = self._testtmp
        # Three consecutive ports are reserved per test (see __init__).
        env["HGPORT"] = str(self._startport)
        env["HGPORT1"] = str(self._startport + 1)
        env["HGPORT2"] = str(self._startport + 2)
        env["HGRCPATH"] = os.path.join(self._threadtmp, b'.hgrc')
        env["DAEMON_PIDS"] = os.path.join(self._threadtmp, b'daemon.pids')
        # An editor that exits immediately without touching anything.
        env["HGEDITOR"] = ('"' + sys.executable + '"'
                           + ' -c "import sys; sys.exit(0)"')
        env["HGMERGE"] = "internal:merge"
        env["HGUSER"] = "test"
        env["HGENCODING"] = "ascii"
        env["HGENCODINGMODE"] = "strict"

        # Reset some environment variables to well-known values so that
        # the tests produce repeatable output.
        env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
        env['TZ'] = 'GMT'
        env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
        env['COLUMNS'] = '80'
        env['TERM'] = 'xterm'

        for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
                  'NO_PROXY').split():
            if k in env:
                del env[k]

        # unset env related to hooks
        # NOTE(review): deleting while iterating env.keys() raises
        # RuntimeError on Python 3 if any HG_* variable is present;
        # iterating list(env.keys()) would be safe -- confirm.
        for k in env.keys():
            if k.startswith('HG_'):
                del env[k]

        return env

    def _createhgrc(self, path):
        """Create an hgrc file for this test."""
        hgrc = open(path, 'wb')
        hgrc.write(b'[ui]\n')
        hgrc.write(b'slash = True\n')
        hgrc.write(b'interactive = False\n')
        hgrc.write(b'mergemarkers = detailed\n')
        hgrc.write(b'promptecho = True\n')
        # Pin dates so test output is reproducible.
        hgrc.write(b'[defaults]\n')
        hgrc.write(b'backout = -d "0 0"\n')
        hgrc.write(b'commit = -d "0 0"\n')
        hgrc.write(b'shelve = --date "0 0"\n')
        hgrc.write(b'tag = -d "0 0"\n')
        hgrc.write(b'[devel]\n')
        hgrc.write(b'all-warnings = true\n')
        hgrc.write(b'[largefiles]\n')
        hgrc.write(b'usercache = %s\n' %
                   (os.path.join(self._testtmp, b'.cache/largefiles')))

        # Append user-supplied --extra-config-opt entries ("foo.key=value"
        # becomes "[foo]\nkey=value").
        for opt in self._extraconfigopts:
            section, key = opt.split('.', 1)
            assert '=' in key, ('extra config opt %s must '
                                'have an = for assignment' % opt)
            hgrc.write(b'[%s]\n%s\n' % (section, key))
        hgrc.close()

    def fail(self, msg):
        # unittest differentiates between errored and failed.
        # Failed is denoted by AssertionError (by default at least).
        raise AssertionError(msg)

    def _runcommand(self, cmd, env, normalizenewlines=False):
        """Run command in a sub-process, capturing the output (stdout and
        stderr).

        Return a tuple (exitcode, output). output is None in debug mode.
        """
        if self._debug:
            # Let output stream straight to the console; nothing captured.
            proc = subprocess.Popen(cmd, shell=True, cwd=self._testtmp,
                                    env=env)
            ret = proc.wait()
            return (ret, None)

        proc = Popen4(cmd, self._testtmp, self._timeout, env)
        def cleanup():
            # Kill the test process and its daemons; report SIGTERM if
            # the process otherwise claims success.
            terminate(proc)
            ret = proc.wait()
            if ret == 0:
                ret = signal.SIGTERM << 8
            killdaemons(env['DAEMON_PIDS'])
            return ret

        output = ''
        proc.tochild.close()

        try:
            output = proc.fromchild.read()
        except KeyboardInterrupt:
            vlog('# Handling keyboard interrupt')
            cleanup()
            raise

        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)

        # Popen4's watchdog sets this flag when it killed the process.
        if proc.timeout:
            ret = 'timeout'

        if ret:
            killdaemons(env['DAEMON_PIDS'])

        # Normalize ports/paths so output is comparable to the reference.
        for s, r in self._getreplacements():
            output = re.sub(s, r, output)

        if normalizenewlines:
            output = output.replace('\r\n', '\n')

        return ret, output.splitlines(True)
845 845
class PythonTest(Test):
    """A Python-based test."""

    @property
    def refpath(self):
        # Expected output lives next to the test in a ".out" file.
        return os.path.join(self._testdir, b'%s.out' % self.bname)

    def _run(self, env):
        """Execute the test script with the interpreter and capture output."""
        # -3 enables Py3k warnings when requested.
        py3kswitch = b' -3' if self._py3kwarnings else b''
        cmd = b'%s%s "%s"' % (PYTHON, py3kswitch, self.path)
        vlog("# Running", cmd)
        # Windows output needs \r\n collapsed before comparison.
        normalizenewlines = os.name == 'nt'
        result = self._runcommand(cmd, env,
                                  normalizenewlines=normalizenewlines)
        if self._aborted:
            raise KeyboardInterrupt()

        return result
864 864
# This script may want to drop globs from lines matching these patterns on
# Windows, but check-code.py wants a glob on these lines unconditionally. Don't
# warn if that is the case for anything matching these lines.
checkcodeglobpats = [
    re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
    re.compile(br'^moving \S+/.*[^)]$'),
    re.compile(br'^pulling from \$TESTTMP/.*[^)]$')
]

# bchr(i): small integer -> one-byte bytestring. Plain chr() already
# yields a byte string on Python 2; Python 3 needs bytes([i]).
bchr = chr
if PYTHON3:
    bchr = lambda x: bytes([x])
877 877
class TTest(Test):
    """A "t test" is a test backed by a .t file."""

    # Output prefixes recognized when parsing hghave results
    # (presumably consumed by parsehghaveoutput -- see runTest).
    SKIPPED_PREFIX = 'skipped: '
    FAILED_PREFIX = 'hghave check failed: '
    # Predicate: does this byte string contain bytes that need escaping?
    NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search

    # Escaping machinery: ESCAPESUB substitutes each troublesome byte,
    # ESCAPEMAP maps every byte to a \xNN form, with readable overrides
    # for backslash and carriage return.
    ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
    ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
    ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
888 888
889 889 @property
890 890 def refpath(self):
891 891 return os.path.join(self._testdir, self.bname)
892 892
893 893 def _run(self, env):
894 894 f = open(self.path, 'rb')
895 895 lines = f.readlines()
896 896 f.close()
897 897
898 898 salt, script, after, expected = self._parsetest(lines)
899 899
900 900 # Write out the generated script.
901 901 fname = b'%s.sh' % self._testtmp
902 902 f = open(fname, 'wb')
903 903 for l in script:
904 904 f.write(l)
905 905 f.close()
906 906
907 907 cmd = b'%s "%s"' % (self._shell, fname)
908 908 vlog("# Running", cmd)
909 909
910 910 exitcode, output = self._runcommand(cmd, env)
911 911
912 912 if self._aborted:
913 913 raise KeyboardInterrupt()
914 914
915 915 # Do not merge output if skipped. Return hghave message instead.
916 916 # Similarly, with --debug, output is None.
917 917 if exitcode == self.SKIPPED_STATUS or output is None:
918 918 return exitcode, output
919 919
920 920 return self._processoutput(exitcode, output, salt, after, expected)
921 921
922 922 def _hghave(self, reqs):
923 923 # TODO do something smarter when all other uses of hghave are gone.
924 924 runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
925 925 tdir = runtestdir.replace(b'\\', b'/')
926 926 proc = Popen4(b'%s -c "%s/hghave %s"' %
927 927 (self._shell, tdir, b' '.join(reqs)),
928 928 self._testtmp, 0, self._getenv())
929 929 stdout, stderr = proc.communicate()
930 930 ret = proc.wait()
931 931 if wifexited(ret):
932 932 ret = os.WEXITSTATUS(ret)
933 933 if ret == 2:
934 934 print(stdout)
935 935 sys.exit(1)
936 936
937 937 if ret != 0:
938 938 return False
939 939
940 940 if 'slow' in reqs:
941 941 self._timeout = self._slowtimeout
942 942 return True
943 943
944 944 def _parsetest(self, lines):
945 945 # We generate a shell script which outputs unique markers to line
946 946 # up script results with our source. These markers include input
947 947 # line number and the last return code.
948 948 salt = b"SALT%d" % time.time()
949 949 def addsalt(line, inpython):
950 950 if inpython:
951 951 script.append(b'%s %d 0\n' % (salt, line))
952 952 else:
953 953 script.append(b'echo %s %d $?\n' % (salt, line))
954 954
955 955 script = []
956 956
957 957 # After we run the shell script, we re-unify the script output
958 958 # with non-active parts of the source, with synchronization by our
959 959 # SALT line number markers. The after table contains the non-active
960 960 # components, ordered by line number.
961 961 after = {}
962 962
963 963 # Expected shell script output.
964 964 expected = {}
965 965
966 966 pos = prepos = -1
967 967
968 968 # True or False when in a true or false conditional section
969 969 skipping = None
970 970
971 971 # We keep track of whether or not we're in a Python block so we
972 972 # can generate the surrounding doctest magic.
973 973 inpython = False
974 974
975 975 if self._debug:
976 976 script.append(b'set -x\n')
977 977 if os.getenv('MSYSTEM'):
978 978 script.append(b'alias pwd="pwd -W"\n')
979 979
980 980 for n, l in enumerate(lines):
981 981 if not l.endswith(b'\n'):
982 982 l += b'\n'
983 983 if l.startswith(b'#require'):
984 984 lsplit = l.split()
985 985 if len(lsplit) < 2 or lsplit[0] != b'#require':
986 986 after.setdefault(pos, []).append(' !!! invalid #require\n')
987 987 if not self._hghave(lsplit[1:]):
988 988 script = [b"exit 80\n"]
989 989 break
990 990 after.setdefault(pos, []).append(l)
991 991 elif l.startswith(b'#if'):
992 992 lsplit = l.split()
993 993 if len(lsplit) < 2 or lsplit[0] != b'#if':
994 994 after.setdefault(pos, []).append(' !!! invalid #if\n')
995 995 if skipping is not None:
996 996 after.setdefault(pos, []).append(' !!! nested #if\n')
997 997 skipping = not self._hghave(lsplit[1:])
998 998 after.setdefault(pos, []).append(l)
999 999 elif l.startswith(b'#else'):
1000 1000 if skipping is None:
1001 1001 after.setdefault(pos, []).append(' !!! missing #if\n')
1002 1002 skipping = not skipping
1003 1003 after.setdefault(pos, []).append(l)
1004 1004 elif l.startswith(b'#endif'):
1005 1005 if skipping is None:
1006 1006 after.setdefault(pos, []).append(' !!! missing #if\n')
1007 1007 skipping = None
1008 1008 after.setdefault(pos, []).append(l)
1009 1009 elif skipping:
1010 1010 after.setdefault(pos, []).append(l)
1011 1011 elif l.startswith(b' >>> '): # python inlines
1012 1012 after.setdefault(pos, []).append(l)
1013 1013 prepos = pos
1014 1014 pos = n
1015 1015 if not inpython:
1016 1016 # We've just entered a Python block. Add the header.
1017 1017 inpython = True
1018 1018 addsalt(prepos, False) # Make sure we report the exit code.
1019 1019 script.append(b'%s -m heredoctest <<EOF\n' % PYTHON)
1020 1020 addsalt(n, True)
1021 1021 script.append(l[2:])
1022 1022 elif l.startswith(b' ... '): # python inlines
1023 1023 after.setdefault(prepos, []).append(l)
1024 1024 script.append(l[2:])
1025 1025 elif l.startswith(b' $ '): # commands
1026 1026 if inpython:
1027 1027 script.append(b'EOF\n')
1028 1028 inpython = False
1029 1029 after.setdefault(pos, []).append(l)
1030 1030 prepos = pos
1031 1031 pos = n
1032 1032 addsalt(n, False)
1033 1033 cmd = l[4:].split()
1034 1034 if len(cmd) == 2 and cmd[0] == b'cd':
1035 1035 l = b' $ cd %s || exit 1\n' % cmd[1]
1036 1036 script.append(l[4:])
1037 1037 elif l.startswith(b' > '): # continuations
1038 1038 after.setdefault(prepos, []).append(l)
1039 1039 script.append(l[4:])
1040 1040 elif l.startswith(b' '): # results
1041 1041 # Queue up a list of expected results.
1042 1042 expected.setdefault(pos, []).append(l[2:])
1043 1043 else:
1044 1044 if inpython:
1045 1045 script.append(b'EOF\n')
1046 1046 inpython = False
1047 1047 # Non-command/result. Queue up for merged output.
1048 1048 after.setdefault(pos, []).append(l)
1049 1049
1050 1050 if inpython:
1051 1051 script.append(b'EOF\n')
1052 1052 if skipping is not None:
1053 1053 after.setdefault(pos, []).append(' !!! missing #endif\n')
1054 1054 addsalt(n + 1, False)
1055 1055
1056 1056 return salt, script, after, expected
1057 1057
1058 1058 def _processoutput(self, exitcode, output, salt, after, expected):
1059 1059 # Merge the script output back into a unified test.
1060 1060 warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
1061 1061 if exitcode != 0:
1062 1062 warnonly = 3
1063 1063
1064 1064 pos = -1
1065 1065 postout = []
1066 1066 for l in output:
1067 1067 lout, lcmd = l, None
1068 1068 if salt in l:
1069 1069 lout, lcmd = l.split(salt, 1)
1070 1070
1071 1071 while lout:
1072 1072 if not lout.endswith(b'\n'):
1073 1073 lout += b' (no-eol)\n'
1074 1074
1075 1075 # Find the expected output at the current position.
1076 1076 el = None
1077 1077 if expected.get(pos, None):
1078 1078 el = expected[pos].pop(0)
1079 1079
1080 1080 r = TTest.linematch(el, lout)
1081 1081 if isinstance(r, str):
1082 1082 if r == '+glob':
1083 1083 lout = el[:-1] + ' (glob)\n'
1084 1084 r = '' # Warn only this line.
1085 1085 elif r == '-glob':
1086 1086 lout = ''.join(el.rsplit(' (glob)', 1))
1087 1087 r = '' # Warn only this line.
1088 1088 elif r == "retry":
1089 1089 postout.append(b' ' + el)
1090 1090 continue
1091 1091 else:
1092 1092 log('\ninfo, unknown linematch result: %r\n' % r)
1093 1093 r = False
1094 1094 if r:
1095 1095 postout.append(b' ' + el)
1096 1096 else:
1097 1097 if self.NEEDESCAPE(lout):
1098 1098 lout = TTest._stringescape(b'%s (esc)\n' %
1099 1099 lout.rstrip(b'\n'))
1100 1100 postout.append(b' ' + lout) # Let diff deal with it.
1101 1101 if r != '': # If line failed.
1102 1102 warnonly = 3 # for sure not
1103 1103 elif warnonly == 1: # Is "not yet" and line is warn only.
1104 1104 warnonly = 2 # Yes do warn.
1105 1105 break
1106 1106
1107 1107 # clean up any optional leftovers
1108 1108 while expected.get(pos, None):
1109 1109 el = expected[pos].pop(0)
1110 1110 if not el.endswith(b" (?)\n"):
1111 1111 expected[pos].insert(0, el)
1112 1112 break
1113 1113 postout.append(b' ' + el)
1114 1114
1115 1115 if lcmd:
1116 1116 # Add on last return code.
1117 1117 ret = int(lcmd.split()[1])
1118 1118 if ret != 0:
1119 1119 postout.append(b' [%d]\n' % ret)
1120 1120 if pos in after:
1121 1121 # Merge in non-active test bits.
1122 1122 postout += after.pop(pos)
1123 1123 pos = int(lcmd.split()[0])
1124 1124
1125 1125 if pos in after:
1126 1126 postout += after.pop(pos)
1127 1127
1128 1128 if warnonly == 2:
1129 1129 exitcode = False # Set exitcode to warned.
1130 1130
1131 1131 return exitcode, postout
1132 1132
1133 1133 @staticmethod
1134 1134 def rematch(el, l):
1135 1135 try:
1136 1136 # use \Z to ensure that the regex matches to the end of the string
1137 1137 if os.name == 'nt':
1138 1138 return re.match(el + br'\r?\n\Z', l)
1139 1139 return re.match(el + br'\n\Z', l)
1140 1140 except re.error:
1141 1141 # el is an invalid regex
1142 1142 return False
1143 1143
1144 1144 @staticmethod
1145 1145 def globmatch(el, l):
1146 1146 # The only supported special characters are * and ? plus / which also
1147 1147 # matches \ on windows. Escaping of these characters is supported.
1148 1148 if el + b'\n' == l:
1149 1149 if os.altsep:
1150 1150 # matching on "/" is not needed for this line
1151 1151 for pat in checkcodeglobpats:
1152 1152 if pat.match(el):
1153 1153 return True
1154 1154 return b'-glob'
1155 1155 return True
1156 1156 i, n = 0, len(el)
1157 1157 res = b''
1158 1158 while i < n:
1159 1159 c = el[i:i + 1]
1160 1160 i += 1
1161 1161 if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
1162 1162 res += el[i - 1:i + 1]
1163 1163 i += 1
1164 1164 elif c == b'*':
1165 1165 res += b'.*'
1166 1166 elif c == b'?':
1167 1167 res += b'.'
1168 1168 elif c == b'/' and os.altsep:
1169 1169 res += b'[/\\\\]'
1170 1170 else:
1171 1171 res += re.escape(c)
1172 1172 return TTest.rematch(res, l)
1173 1173
1174 1174 @staticmethod
1175 1175 def linematch(el, l):
1176 1176 retry = False
1177 1177 if el == l: # perfect match (fast)
1178 1178 return True
1179 1179 if el:
1180 1180 if el.endswith(b" (?)\n"):
1181 1181 retry = "retry"
1182 1182 el = el[:-5] + "\n"
1183 1183 if el.endswith(b" (esc)\n"):
1184 1184 if PYTHON3:
1185 1185 el = el[:-7].decode('unicode_escape') + '\n'
1186 1186 el = el.encode('utf-8')
1187 1187 else:
1188 1188 el = el[:-7].decode('string-escape') + '\n'
1189 1189 if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
1190 1190 return True
1191 1191 if el.endswith(b" (re)\n"):
1192 1192 return TTest.rematch(el[:-6], l) or retry
1193 1193 if el.endswith(b" (glob)\n"):
1194 1194 # ignore '(glob)' added to l by 'replacements'
1195 1195 if l.endswith(b" (glob)\n"):
1196 1196 l = l[:-8] + b"\n"
1197 1197 return TTest.globmatch(el[:-8], l)
1198 1198 if os.altsep and l.replace(b'\\', b'/') == el:
1199 1199 return b'+glob'
1200 1200 return retry
1201 1201
1202 1202 @staticmethod
1203 1203 def parsehghaveoutput(lines):
1204 1204 '''Parse hghave log lines.
1205 1205
1206 1206 Return tuple of lists (missing, failed):
1207 1207 * the missing/unknown features
1208 1208 * the features for which existence check failed'''
1209 1209 missing = []
1210 1210 failed = []
1211 1211 for line in lines:
1212 1212 if line.startswith(TTest.SKIPPED_PREFIX):
1213 1213 line = line.splitlines()[0]
1214 1214 missing.append(line[len(TTest.SKIPPED_PREFIX):])
1215 1215 elif line.startswith(TTest.FAILED_PREFIX):
1216 1216 line = line.splitlines()[0]
1217 1217 failed.append(line[len(TTest.FAILED_PREFIX):])
1218 1218
1219 1219 return missing, failed
1220 1220
1221 1221 @staticmethod
1222 1222 def _escapef(m):
1223 1223 return TTest.ESCAPEMAP[m.group(0)]
1224 1224
1225 1225 @staticmethod
1226 1226 def _stringescape(s):
1227 1227 return TTest.ESCAPESUB(TTest._escapef, s)
1228 1228
1229 1229 iolock = threading.RLock()
1230 1230
class SkipTest(Exception):
    """Raised to indicate that a test is to be skipped (not run at all)."""
1233 1233
class IgnoreTest(Exception):
    """Raised to indicate that a test is to be ignored (filtered out)."""
1236 1236
class WarnTest(Exception):
    """Raised to indicate that a test warned (soft failure)."""
1239 1239
class TestResult(unittest._TextTestResult):
    """Holds results when executing via unittest."""
    # Don't worry too much about accessing the non-public _TextTestResult.
    # It is relatively common in Python testing tools.
    def __init__(self, options, *args, **kwargs):
        super(TestResult, self).__init__(*args, **kwargs)

        self._options = options

        # unittest.TestResult didn't have skipped until 2.7. We need to
        # polyfill it.
        self.skipped = []

        # We have a custom "ignored" result that isn't present in any Python
        # unittest implementation. It is very similar to skipped. It may make
        # sense to map it into skip some day.
        self.ignored = []

        # We have a custom "warned" result that isn't present in any Python
        # unittest implementation. It is very similar to failed. It may make
        # sense to map it into fail some day.
        self.warned = []

        self.times = []
        self._firststarttime = None
        # Data stored for the benefit of generating xunit reports.
        self.successes = []
        self.faildata = {}

    def addFailure(self, test, reason):
        """Record a failure and emit a one-character progress marker:
        't' for a timeout (less alarming), '!' for an output change."""
        self.failures.append((test, reason))

        if self._options.first:
            self.stop()
        else:
            with iolock:
                if reason == "timed out":
                    self.stream.write('t')
                else:
                    if not self._options.nodiff:
                        self.stream.write('\nERROR: %s output changed\n' % test)
                    self.stream.write('!')

                self.stream.flush()

    def addSuccess(self, test):
        # Only the stream write needs the lock; the successes list is
        # appended outside it.
        with iolock:
            super(TestResult, self).addSuccess(test)
        self.successes.append(test)

    def addError(self, test, err):
        super(TestResult, self).addError(test, err)
        if self._options.first:
            self.stop()

    # Polyfill.
    def addSkip(self, test, reason):
        self.skipped.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('skipped %s' % reason)
            else:
                self.stream.write('s')
                self.stream.flush()

    def addIgnore(self, test, reason):
        self.ignored.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('ignored %s' % reason)
            else:
                if reason not in ('not retesting', "doesn't match keyword"):
                    self.stream.write('i')
                else:
                    # Filtered-out tests shouldn't show a marker, but do
                    # count toward the run total.
                    self.testsRun += 1
                self.stream.flush()

    def addWarn(self, test, reason):
        """Record a soft failure ('~' marker)."""
        self.warned.append((test, reason))

        if self._options.first:
            self.stop()

        with iolock:
            if self.showAll:
                self.stream.writeln('warned %s' % reason)
            else:
                self.stream.write('~')
                self.stream.flush()

    def addOutputMismatch(self, test, ret, got, expected):
        """Record a mismatch in test output for a particular test."""
        if self.shouldStop:
            # don't print, some other test case already failed and
            # printed, we're just stale and probably failed due to our
            # temp dir getting cleaned up.
            return

        accepted = False
        failed = False
        lines = []

        with iolock:
            if self._options.nodiff:
                pass
            elif self._options.view:
                # Show the diff in an external viewer instead of inline.
                v = self._options.view
                if PYTHON3:
                    v = _bytespath(v)
                os.system(b"%s %s %s" %
                          (v, test.refpath, test.errpath))
            else:
                servefail, lines = getdiff(expected, got,
                                           test.refpath, test.errpath)
                if servefail:
                    self.addFailure(
                        test,
                        'server failed to start (HGPORT=%s)' % test._startport)
                else:
                    self.stream.write('\n')
                    for line in lines:
                        if PYTHON3:
                            # Diff lines are bytes; write via the
                            # underlying binary buffer on Python 3.
                            self.stream.flush()
                            self.stream.buffer.write(line)
                            self.stream.buffer.flush()
                        else:
                            self.stream.write(line)
                            self.stream.flush()

            # handle interactive prompt without releasing iolock
            if self._options.interactive:
                self.stream.write('Accept this change? [n] ')
                answer = sys.stdin.readline().strip()
                if answer.lower() in ('y', 'yes'):
                    # Accepting overwrites the reference output with the
                    # actual output.
                    if test.name.endswith('.t'):
                        rename(test.errpath, test.path)
                    else:
                        rename(test.errpath, '%s.out' % test.path)
                    accepted = True
            if not accepted and not failed:
                self.faildata[test.name] = b''.join(lines)

        return accepted

    def startTest(self, test):
        super(TestResult, self).startTest(test)

        # os.times module computes the user time and system time spent by
        # child's processes along with real elapsed time taken by a process.
        # This module has one limitation. It can only work for Linux user
        # and not for Windows.
        test.started = os.times()
        if self._firststarttime is None: # thread racy but irrelevant
            self._firststarttime = test.started[4]

    def stopTest(self, test, interrupted=False):
        super(TestResult, self).stopTest(test)

        test.stopped = os.times()

        starttime = test.started
        endtime = test.stopped
        origin = self._firststarttime
        self.times.append((test.name,
                           endtime[2] - starttime[2], # user space CPU time
                           endtime[3] - starttime[3], # sys space CPU time
                           endtime[4] - starttime[4], # real time
                           starttime[4] - origin, # start date in run context
                           endtime[4] - origin, # end date in run context
                           ))

        if interrupted:
            with iolock:
                self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
                    test.name, self.times[-1][3]))
1412 1415
class TestSuite(unittest.TestSuite):
    """Custom unittest TestSuite that knows how to execute Mercurial tests."""

    def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
                 retest=False, keywords=None, loop=False, runs_per_test=1,
                 loadtest=None,
                 *args, **kwargs):
        """Create a new instance that can run tests with a configuration.

        testdir specifies the directory where tests are executed from. This
        is typically the ``tests`` directory from Mercurial's source
        repository.

        jobs specifies the number of jobs to run concurrently. Each test
        executes on its own thread. Tests actually spawn new processes, so
        state mutation should not be an issue.

        whitelist and blacklist denote tests that have been whitelisted and
        blacklisted, respectively. These arguments don't belong in TestSuite.
        Instead, whitelist and blacklist should be handled by the thing that
        populates the TestSuite with tests. They are present to preserve
        backwards compatible behavior which reports skipped tests as part
        of the results.

        retest denotes whether to retest failed tests. This arguably belongs
        outside of TestSuite.

        keywords denotes key words that will be used to filter which tests
        to execute. This arguably belongs outside of TestSuite.

        loop denotes whether to loop over tests forever.
        """
        super(TestSuite, self).__init__(*args, **kwargs)

        self._jobs = jobs
        self._whitelist = whitelist
        self._blacklist = blacklist
        self._retest = retest
        self._keywords = keywords
        self._loop = loop
        self._runs_per_test = runs_per_test
        self._loadtest = loadtest

    def run(self, result):
        """Filter the suite's tests, then run them on worker threads,
        feeding outcomes into *result*."""
        # We have a number of filters that need to be applied. We do this
        # here instead of inside Test because it makes the running logic for
        # Test simpler.
        tests = []
        num_tests = [0]
        for test in self._tests:
            def get():
                # NOTE(review): closes over the loop variable 'test'; safe
                # only because get() is invoked within the same iteration.
                num_tests[0] += 1
                if getattr(test, 'should_reload', False):
                    return self._loadtest(test.bname, num_tests[0])
                return test
            if not os.path.exists(test.path):
                result.addSkip(test, "Doesn't exist")
                continue

            # Whitelisted tests bypass the blacklist/retest/keyword filters.
            if not (self._whitelist and test.name in self._whitelist):
                if self._blacklist and test.bname in self._blacklist:
                    result.addSkip(test, 'blacklisted')
                    continue

                if self._retest and not os.path.exists(test.errpath):
                    result.addIgnore(test, 'not retesting')
                    continue

                if self._keywords:
                    f = open(test.path, 'rb')
                    t = f.read().lower() + test.bname.lower()
                    f.close()
                    ignored = False
                    for k in self._keywords.lower().split():
                        if k not in t:
                            result.addIgnore(test, "doesn't match keyword")
                            ignored = True
                            break

                    if ignored:
                        continue
            for _ in xrange(self._runs_per_test):
                tests.append(get())

        runtests = list(tests)
        done = queue.Queue()
        running = 0

        def job(test, result):
            # Worker-thread entry point: run one test and signal completion
            # via the 'done' queue.
            try:
                test(result)
                done.put(None)
            except KeyboardInterrupt:
                pass
            except: # re-raises
                done.put(('!', test, 'run-test raised an error, see traceback'))
                raise

        stoppedearly = False

        try:
            while tests or running:
                # Reap a finished job when the pool is full (or when only
                # running jobs remain); the 1-second timeout keeps the loop
                # responsive to KeyboardInterrupt.
                if not done.empty() or running == self._jobs or not tests:
                    try:
                        done.get(True, 1)
                        running -= 1
                        if result and result.shouldStop:
                            stoppedearly = True
                            break
                    except queue.Empty:
                        continue
                if tests and not running == self._jobs:
                    test = tests.pop(0)
                    if self._loop:
                        # In --loop mode, requeue the test indefinitely.
                        if getattr(test, 'should_reload', False):
                            num_tests[0] += 1
                            tests.append(
                                self._loadtest(test.name, num_tests[0]))
                        else:
                            tests.append(test)
                    t = threading.Thread(target=job, name=test.name,
                                         args=(test, result))
                    t.start()
                    running += 1

            # If we stop early we still need to wait on started tests to
            # finish. Otherwise, there is a race between the test completing
            # and the test's cleanup code running. This could result in the
            # test reporting incorrect.
            if stoppedearly:
                while running:
                    try:
                        done.get(True, 1)
                        running -= 1
                    except queue.Empty:
                        continue
        except KeyboardInterrupt:
            for test in runtests:
                test.abort()

        return result
1554 1557
class TextTestRunner(unittest.TextTestRunner):
    """Custom unittest test runner that uses appropriate settings."""

    def __init__(self, runner, *args, **kwargs):
        super(TextTestRunner, self).__init__(*args, **kwargs)

        self._runner = runner

    def run(self, test):
        """Execute *test*, then print the per-outcome summaries and the
        optional xunit/json reports under iolock."""
        result = TestResult(self._runner.options, self.stream,
                            self.descriptions, self.verbosity)

        test(result)

        failed = len(result.failures)
        warned = len(result.warned)
        skipped = len(result.skipped)
        ignored = len(result.ignored)

        with iolock:
            self.stream.writeln('')

            # NOTE(review): the loop variables below shadow the 'test'
            # parameter; harmless since it is no longer used afterwards.
            if not self._runner.options.noskips:
                for test, msg in result.skipped:
                    self.stream.writeln('Skipped %s: %s' % (test.name, msg))
            for test, msg in result.warned:
                self.stream.writeln('Warned %s: %s' % (test.name, msg))
            for test, msg in result.failures:
                self.stream.writeln('Failed %s: %s' % (test.name, msg))
            for test, msg in result.errors:
                self.stream.writeln('Errored %s: %s' % (test.name, msg))

            if self._runner.options.xunit:
                # Emit an xUnit XML report (e.g. for CI consumption).
                xuf = open(self._runner.options.xunit, 'wb')
                try:
                    # t[3] is the real elapsed time recorded by stopTest.
                    timesd = dict((t[0], t[3]) for t in result.times)
                    doc = minidom.Document()
                    s = doc.createElement('testsuite')
                    s.setAttribute('name', 'run-tests')
                    s.setAttribute('tests', str(result.testsRun))
                    s.setAttribute('errors', "0") # TODO
                    s.setAttribute('failures', str(failed))
                    s.setAttribute('skipped', str(skipped + ignored))
                    doc.appendChild(s)
                    for tc in result.successes:
                        t = doc.createElement('testcase')
                        t.setAttribute('name', tc.name)
                        t.setAttribute('time', '%.3f' % timesd[tc.name])
                        s.appendChild(t)
                    for tc, err in sorted(result.faildata.items()):
                        t = doc.createElement('testcase')
                        t.setAttribute('name', tc)
                        t.setAttribute('time', '%.3f' % timesd[tc])
                        # createCDATASection expects a unicode or it will
                        # convert using default conversion rules, which will
                        # fail if string isn't ASCII.
                        err = cdatasafe(err).decode('utf-8', 'replace')
                        cd = doc.createCDATASection(err)
                        t.appendChild(cd)
                        s.appendChild(t)
                    xuf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
                finally:
                    xuf.close()

            if self._runner.options.json:
                if json is None:
                    raise ImportError("json module not installed")
                jsonpath = os.path.join(self._runner._testdir, 'report.json')
                fp = open(jsonpath, 'w')
                try:
                    # timesd maps test name -> (cuser, csys, real, start, end)
                    timesd = {}
                    for tdata in result.times:
                        test = tdata[0]
                        timesd[test] = tdata[1:]

                    outcome = {}
                    groups = [('success', ((tc, None)
                                           for tc in result.successes)),
                              ('failure', result.failures),
                              ('skip', result.skipped)]
                    for res, testcases in groups:
                        for tc, __ in testcases:
                            tres = {'result': res,
                                    'time': ('%0.3f' % timesd[tc.name][2]),
                                    'cuser': ('%0.3f' % timesd[tc.name][0]),
                                    'csys': ('%0.3f' % timesd[tc.name][1]),
                                    'start': ('%0.3f' % timesd[tc.name][3]),
                                    'end': ('%0.3f' % timesd[tc.name][4])}
                            outcome[tc.name] = tres
                    jsonout = json.dumps(outcome, sort_keys=True, indent=4)
                    fp.writelines(("testreport =", jsonout))
                finally:
                    fp.close()

            self._runner._checkhglib('Tested')

            self.stream.writeln(
                '# Ran %d tests, %d skipped, %d warned, %d failed.'
                % (result.testsRun,
                   skipped + ignored, warned, failed))
            if failed:
                self.stream.writeln('python hash seed: %s' %
                                    os.environ['PYTHONHASHSEED'])
            if self._runner.options.time:
                self.printtimes(result.times)

        return result

    def printtimes(self, times):
        """Write a per-test timing table, sorted by real elapsed time."""
        # iolock held by run
        self.stream.writeln('# Producing time report')
        times.sort(key=lambda t: (t[3]))
        cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
        self.stream.writeln('%-7s %-7s %-7s %-7s %-7s %s' %
                            ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
        for tdata in times:
            test = tdata[0]
            cuser, csys, real, start, end = tdata[1:6]
            self.stream.writeln(cols % (start, end, cuser, csys, real, test))
1674 1677
1675 1678 class TestRunner(object):
1676 1679 """Holds context for executing tests.
1677 1680
1678 1681 Tests rely on a lot of state. This object holds it for them.
1679 1682 """
1680 1683
1681 1684 # Programs required to run tests.
1682 1685 REQUIREDTOOLS = [
1683 1686 os.path.basename(_bytespath(sys.executable)),
1684 1687 b'diff',
1685 1688 b'grep',
1686 1689 b'unzip',
1687 1690 b'gunzip',
1688 1691 b'bunzip2',
1689 1692 b'sed',
1690 1693 ]
1691 1694
1692 1695 # Maps file extensions to test class.
1693 1696 TESTTYPES = [
1694 1697 (b'.py', PythonTest),
1695 1698 (b'.t', TTest),
1696 1699 ]
1697 1700
1698 1701 def __init__(self):
1699 1702 self.options = None
1700 1703 self._hgroot = None
1701 1704 self._testdir = None
1702 1705 self._hgtmp = None
1703 1706 self._installdir = None
1704 1707 self._bindir = None
1705 1708 self._tmpbinddir = None
1706 1709 self._pythondir = None
1707 1710 self._coveragefile = None
1708 1711 self._createdfiles = []
1709 1712 self._hgpath = None
1710 1713 self._portoffset = 0
1711 1714 self._ports = {}
1712 1715
1713 1716 def run(self, args, parser=None):
1714 1717 """Run the test suite."""
1715 1718 oldmask = os.umask(0o22)
1716 1719 try:
1717 1720 parser = parser or getparser()
1718 1721 options, args = parseargs(args, parser)
1719 1722 # positional arguments are paths to test files to run, so
1720 1723 # we make sure they're all bytestrings
1721 1724 args = [_bytespath(a) for a in args]
1722 1725 self.options = options
1723 1726
1724 1727 self._checktools()
1725 1728 tests = self.findtests(args)
1726 1729 if options.profile_runner:
1727 1730 import statprof
1728 1731 statprof.start()
1729 1732 result = self._run(tests)
1730 1733 if options.profile_runner:
1731 1734 statprof.stop()
1732 1735 statprof.display()
1733 1736 return result
1734 1737
1735 1738 finally:
1736 1739 os.umask(oldmask)
1737 1740
1738 1741 def _run(self, tests):
1739 1742 if self.options.random:
1740 1743 random.shuffle(tests)
1741 1744 else:
1742 1745 # keywords for slow tests
1743 1746 slow = {b'svn': 10,
1744 1747 b'gendoc': 10,
1745 1748 b'check-code-hg': 100,
1746 1749 b'contrib-perf': 200,
1747 1750 }
1748 1751 def sortkey(f):
1749 1752 # run largest tests first, as they tend to take the longest
1750 1753 try:
1751 1754 val = -os.stat(f).st_size
1752 1755 except OSError as e:
1753 1756 if e.errno != errno.ENOENT:
1754 1757 raise
1755 1758 return -1e9 # file does not exist, tell early
1756 1759 for kw, mul in slow.items():
1757 1760 if kw in f:
1758 1761 val *= mul
1759 1762 return val
1760 1763 tests.sort(key=sortkey)
1761 1764
1762 1765 self._testdir = osenvironb[b'TESTDIR'] = getattr(
1763 1766 os, 'getcwdb', os.getcwd)()
1764 1767
1765 1768 if 'PYTHONHASHSEED' not in os.environ:
1766 1769 # use a random python hash seed all the time
1767 1770 # we do the randomness ourself to know what seed is used
1768 1771 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
1769 1772
1770 1773 if self.options.tmpdir:
1771 1774 self.options.keep_tmpdir = True
1772 1775 tmpdir = _bytespath(self.options.tmpdir)
1773 1776 if os.path.exists(tmpdir):
1774 1777 # Meaning of tmpdir has changed since 1.3: we used to create
1775 1778 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
1776 1779 # tmpdir already exists.
1777 1780 print("error: temp dir %r already exists" % tmpdir)
1778 1781 return 1
1779 1782
1780 1783 # Automatically removing tmpdir sounds convenient, but could
1781 1784 # really annoy anyone in the habit of using "--tmpdir=/tmp"
1782 1785 # or "--tmpdir=$HOME".
1783 1786 #vlog("# Removing temp dir", tmpdir)
1784 1787 #shutil.rmtree(tmpdir)
1785 1788 os.makedirs(tmpdir)
1786 1789 else:
1787 1790 d = None
1788 1791 if os.name == 'nt':
1789 1792 # without this, we get the default temp dir location, but
1790 1793 # in all lowercase, which causes troubles with paths (issue3490)
1791 1794 d = osenvironb.get(b'TMP', None)
1792 1795 tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)
1793 1796
1794 1797 self._hgtmp = osenvironb[b'HGTMP'] = (
1795 1798 os.path.realpath(tmpdir))
1796 1799
1797 1800 if self.options.with_hg:
1798 1801 self._installdir = None
1799 1802 whg = self.options.with_hg
1800 1803 # If --with-hg is not specified, we have bytes already,
1801 1804 # but if it was specified in python3 we get a str, so we
1802 1805 # have to encode it back into a bytes.
1803 1806 if PYTHON3:
1804 1807 if not isinstance(whg, bytes):
1805 1808 whg = _bytespath(whg)
1806 1809 self._bindir = os.path.dirname(os.path.realpath(whg))
1807 1810 assert isinstance(self._bindir, bytes)
1808 1811 self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
1809 1812 os.makedirs(self._tmpbindir)
1810 1813
1811 1814 # This looks redundant with how Python initializes sys.path from
1812 1815 # the location of the script being executed. Needed because the
1813 1816 # "hg" specified by --with-hg is not the only Python script
1814 1817 # executed in the test suite that needs to import 'mercurial'
1815 1818 # ... which means it's not really redundant at all.
1816 1819 self._pythondir = self._bindir
1817 1820 else:
1818 1821 self._installdir = os.path.join(self._hgtmp, b"install")
1819 1822 self._bindir = osenvironb[b"BINDIR"] = \
1820 1823 os.path.join(self._installdir, b"bin")
1821 1824 self._tmpbindir = self._bindir
1822 1825 self._pythondir = os.path.join(self._installdir, b"lib", b"python")
1823 1826
1824 1827 osenvironb[b"BINDIR"] = self._bindir
1825 1828 osenvironb[b"PYTHON"] = PYTHON
1826 1829
1827 1830 fileb = _bytespath(__file__)
1828 1831 runtestdir = os.path.abspath(os.path.dirname(fileb))
1829 1832 osenvironb[b'RUNTESTDIR'] = runtestdir
1830 1833 if PYTHON3:
1831 1834 sepb = _bytespath(os.pathsep)
1832 1835 else:
1833 1836 sepb = os.pathsep
1834 1837 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
1835 1838 if os.path.islink(__file__):
1836 1839 # test helper will likely be at the end of the symlink
1837 1840 realfile = os.path.realpath(fileb)
1838 1841 realdir = os.path.abspath(os.path.dirname(realfile))
1839 1842 path.insert(2, realdir)
1840 1843 if self._testdir != runtestdir:
1841 1844 path = [self._testdir] + path
1842 1845 if self._tmpbindir != self._bindir:
1843 1846 path = [self._tmpbindir] + path
1844 1847 osenvironb[b"PATH"] = sepb.join(path)
1845 1848
1846 1849 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
1847 1850 # can run .../tests/run-tests.py test-foo where test-foo
1848 1851 # adds an extension to HGRC. Also include run-test.py directory to
1849 1852 # import modules like heredoctest.
1850 1853 pypath = [self._pythondir, self._testdir, runtestdir]
1851 1854 # We have to augment PYTHONPATH, rather than simply replacing
1852 1855 # it, in case external libraries are only available via current
1853 1856 # PYTHONPATH. (In particular, the Subversion bindings on OS X
1854 1857 # are in /opt/subversion.)
1855 1858 oldpypath = osenvironb.get(IMPL_PATH)
1856 1859 if oldpypath:
1857 1860 pypath.append(oldpypath)
1858 1861 osenvironb[IMPL_PATH] = sepb.join(pypath)
1859 1862
1860 1863 if self.options.pure:
1861 1864 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
1862 1865
1863 1866 if self.options.allow_slow_tests:
1864 1867 os.environ["HGTEST_SLOW"] = "slow"
1865 1868 elif 'HGTEST_SLOW' in os.environ:
1866 1869 del os.environ['HGTEST_SLOW']
1867 1870
1868 1871 self._coveragefile = os.path.join(self._testdir, b'.coverage')
1869 1872
1870 1873 vlog("# Using TESTDIR", self._testdir)
1871 1874 vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
1872 1875 vlog("# Using HGTMP", self._hgtmp)
1873 1876 vlog("# Using PATH", os.environ["PATH"])
1874 1877 vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
1875 1878
1876 1879 try:
1877 1880 return self._runtests(tests) or 0
1878 1881 finally:
1879 1882 time.sleep(.1)
1880 1883 self._cleanup()
1881 1884
1882 1885 def findtests(self, args):
1883 1886 """Finds possible test files from arguments.
1884 1887
1885 1888 If you wish to inject custom tests into the test harness, this would
1886 1889 be a good function to monkeypatch or override in a derived class.
1887 1890 """
1888 1891 if not args:
1889 1892 if self.options.changed:
1890 1893 proc = Popen4('hg st --rev "%s" -man0 .' %
1891 1894 self.options.changed, None, 0)
1892 1895 stdout, stderr = proc.communicate()
1893 1896 args = stdout.strip(b'\0').split(b'\0')
1894 1897 else:
1895 1898 args = os.listdir(b'.')
1896 1899
1897 1900 return [t for t in args
1898 1901 if os.path.basename(t).startswith(b'test-')
1899 1902 and (t.endswith(b'.py') or t.endswith(b'.t'))]
1900 1903
1901 1904 def _runtests(self, tests):
1902 1905 try:
1903 1906 if self._installdir:
1904 1907 self._installhg()
1905 1908 self._checkhglib("Testing")
1906 1909 else:
1907 1910 self._usecorrectpython()
1908 1911
1909 1912 if self.options.restart:
1910 1913 orig = list(tests)
1911 1914 while tests:
1912 1915 if os.path.exists(tests[0] + ".err"):
1913 1916 break
1914 1917 tests.pop(0)
1915 1918 if not tests:
1916 1919 print("running all tests")
1917 1920 tests = orig
1918 1921
1919 1922 tests = [self._gettest(t, i) for i, t in enumerate(tests)]
1920 1923
1921 1924 failed = False
1922 1925 warned = False
1923 1926 kws = self.options.keywords
1924 1927 if kws is not None and PYTHON3:
1925 1928 kws = kws.encode('utf-8')
1926 1929
1927 1930 suite = TestSuite(self._testdir,
1928 1931 jobs=self.options.jobs,
1929 1932 whitelist=self.options.whitelisted,
1930 1933 blacklist=self.options.blacklist,
1931 1934 retest=self.options.retest,
1932 1935 keywords=kws,
1933 1936 loop=self.options.loop,
1934 1937 runs_per_test=self.options.runs_per_test,
1935 1938 tests=tests, loadtest=self._gettest)
1936 1939 verbosity = 1
1937 1940 if self.options.verbose:
1938 1941 verbosity = 2
1939 1942 runner = TextTestRunner(self, verbosity=verbosity)
1940 1943 result = runner.run(suite)
1941 1944
1942 1945 if result.failures:
1943 1946 failed = True
1944 1947 if result.warned:
1945 1948 warned = True
1946 1949
1947 1950 if self.options.anycoverage:
1948 1951 self._outputcoverage()
1949 1952 except KeyboardInterrupt:
1950 1953 failed = True
1951 1954 print("\ninterrupted!")
1952 1955
1953 1956 if failed:
1954 1957 return 1
1955 1958 if warned:
1956 1959 return 80
1957 1960
1958 1961 def _getport(self, count):
1959 1962 port = self._ports.get(count) # do we have a cached entry?
1960 1963 if port is None:
1961 1964 port = self.options.port + self._portoffset
1962 1965 portneeded = 3
1963 1966 # above 100 tries we just give up and let test reports failure
1964 1967 for tries in xrange(100):
1965 1968 allfree = True
1966 1969 for idx in xrange(portneeded):
1967 1970 if not checkportisavailable(port + idx):
1968 1971 allfree = False
1969 1972 break
1970 1973 self._portoffset += portneeded
1971 1974 if allfree:
1972 1975 break
1973 1976 self._ports[count] = port
1974 1977 return port
1975 1978
1976 1979 def _gettest(self, test, count):
1977 1980 """Obtain a Test by looking at its filename.
1978 1981
1979 1982 Returns a Test instance. The Test may not be runnable if it doesn't
1980 1983 map to a known type.
1981 1984 """
1982 1985 lctest = test.lower()
1983 1986 testcls = Test
1984 1987
1985 1988 for ext, cls in self.TESTTYPES:
1986 1989 if lctest.endswith(ext):
1987 1990 testcls = cls
1988 1991 break
1989 1992
1990 1993 refpath = os.path.join(self._testdir, test)
1991 1994 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
1992 1995
1993 1996 t = testcls(refpath, tmpdir,
1994 1997 keeptmpdir=self.options.keep_tmpdir,
1995 1998 debug=self.options.debug,
1996 1999 timeout=self.options.timeout,
1997 2000 startport=self._getport(count),
1998 2001 extraconfigopts=self.options.extra_config_opt,
1999 2002 py3kwarnings=self.options.py3k_warnings,
2000 2003 shell=self.options.shell)
2001 2004 t.should_reload = True
2002 2005 return t
2003 2006
2004 2007 def _cleanup(self):
2005 2008 """Clean up state from this test invocation."""
2006 2009
2007 2010 if self.options.keep_tmpdir:
2008 2011 return
2009 2012
2010 2013 vlog("# Cleaning up HGTMP", self._hgtmp)
2011 2014 shutil.rmtree(self._hgtmp, True)
2012 2015 for f in self._createdfiles:
2013 2016 try:
2014 2017 os.remove(f)
2015 2018 except OSError:
2016 2019 pass
2017 2020
2018 2021 def _usecorrectpython(self):
2019 2022 """Configure the environment to use the appropriate Python in tests."""
2020 2023 # Tests must use the same interpreter as us or bad things will happen.
2021 2024 pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
2022 2025 if getattr(os, 'symlink', None):
2023 2026 vlog("# Making python executable in test path a symlink to '%s'" %
2024 2027 sys.executable)
2025 2028 mypython = os.path.join(self._tmpbindir, pyexename)
2026 2029 try:
2027 2030 if os.readlink(mypython) == sys.executable:
2028 2031 return
2029 2032 os.unlink(mypython)
2030 2033 except OSError as err:
2031 2034 if err.errno != errno.ENOENT:
2032 2035 raise
2033 2036 if self._findprogram(pyexename) != sys.executable:
2034 2037 try:
2035 2038 os.symlink(sys.executable, mypython)
2036 2039 self._createdfiles.append(mypython)
2037 2040 except OSError as err:
2038 2041 # child processes may race, which is harmless
2039 2042 if err.errno != errno.EEXIST:
2040 2043 raise
2041 2044 else:
2042 2045 exedir, exename = os.path.split(sys.executable)
2043 2046 vlog("# Modifying search path to find %s as %s in '%s'" %
2044 2047 (exename, pyexename, exedir))
2045 2048 path = os.environ['PATH'].split(os.pathsep)
2046 2049 while exedir in path:
2047 2050 path.remove(exedir)
2048 2051 os.environ['PATH'] = os.pathsep.join([exedir] + path)
2049 2052 if not self._findprogram(pyexename):
2050 2053 print("WARNING: Cannot find %s in search path" % pyexename)
2051 2054
2052 2055 def _installhg(self):
2053 2056 """Install hg into the test environment.
2054 2057
2055 2058 This will also configure hg with the appropriate testing settings.
2056 2059 """
2057 2060 vlog("# Performing temporary installation of HG")
2058 2061 installerrs = os.path.join(b"tests", b"install.err")
2059 2062 compiler = ''
2060 2063 if self.options.compiler:
2061 2064 compiler = '--compiler ' + self.options.compiler
2062 2065 if self.options.pure:
2063 2066 pure = b"--pure"
2064 2067 else:
2065 2068 pure = b""
2066 2069 py3 = ''
2067 2070
2068 2071 # Run installer in hg root
2069 2072 script = os.path.realpath(sys.argv[0])
2070 2073 exe = sys.executable
2071 2074 if PYTHON3:
2072 2075 py3 = b'--c2to3'
2073 2076 compiler = _bytespath(compiler)
2074 2077 script = _bytespath(script)
2075 2078 exe = _bytespath(exe)
2076 2079 hgroot = os.path.dirname(os.path.dirname(script))
2077 2080 self._hgroot = hgroot
2078 2081 os.chdir(hgroot)
2079 2082 nohome = b'--home=""'
2080 2083 if os.name == 'nt':
2081 2084 # The --home="" trick works only on OS where os.sep == '/'
2082 2085 # because of a distutils convert_path() fast-path. Avoid it at
2083 2086 # least on Windows for now, deal with .pydistutils.cfg bugs
2084 2087 # when they happen.
2085 2088 nohome = b''
2086 2089 cmd = (b'%(exe)s setup.py %(py3)s %(pure)s clean --all'
2087 2090 b' build %(compiler)s --build-base="%(base)s"'
2088 2091 b' install --force --prefix="%(prefix)s"'
2089 2092 b' --install-lib="%(libdir)s"'
2090 2093 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
2091 2094 % {b'exe': exe, b'py3': py3, b'pure': pure,
2092 2095 b'compiler': compiler,
2093 2096 b'base': os.path.join(self._hgtmp, b"build"),
2094 2097 b'prefix': self._installdir, b'libdir': self._pythondir,
2095 2098 b'bindir': self._bindir,
2096 2099 b'nohome': nohome, b'logfile': installerrs})
2097 2100
2098 2101 # setuptools requires install directories to exist.
2099 2102 def makedirs(p):
2100 2103 try:
2101 2104 os.makedirs(p)
2102 2105 except OSError as e:
2103 2106 if e.errno != errno.EEXIST:
2104 2107 raise
2105 2108 makedirs(self._pythondir)
2106 2109 makedirs(self._bindir)
2107 2110
2108 2111 vlog("# Running", cmd)
2109 2112 if os.system(cmd) == 0:
2110 2113 if not self.options.verbose:
2111 2114 try:
2112 2115 os.remove(installerrs)
2113 2116 except OSError as e:
2114 2117 if e.errno != errno.ENOENT:
2115 2118 raise
2116 2119 else:
2117 2120 f = open(installerrs, 'rb')
2118 2121 for line in f:
2119 2122 if PYTHON3:
2120 2123 sys.stdout.buffer.write(line)
2121 2124 else:
2122 2125 sys.stdout.write(line)
2123 2126 f.close()
2124 2127 sys.exit(1)
2125 2128 os.chdir(self._testdir)
2126 2129
2127 2130 self._usecorrectpython()
2128 2131
2129 2132 if self.options.py3k_warnings and not self.options.anycoverage:
2130 2133 vlog("# Updating hg command to enable Py3k Warnings switch")
2131 2134 f = open(os.path.join(self._bindir, 'hg'), 'rb')
2132 2135 lines = [line.rstrip() for line in f]
2133 2136 lines[0] += ' -3'
2134 2137 f.close()
2135 2138 f = open(os.path.join(self._bindir, 'hg'), 'wb')
2136 2139 for line in lines:
2137 2140 f.write(line + '\n')
2138 2141 f.close()
2139 2142
2140 2143 hgbat = os.path.join(self._bindir, b'hg.bat')
2141 2144 if os.path.isfile(hgbat):
2142 2145 # hg.bat expects to be put in bin/scripts while run-tests.py
2143 2146 # installation layout put it in bin/ directly. Fix it
2144 2147 f = open(hgbat, 'rb')
2145 2148 data = f.read()
2146 2149 f.close()
2147 2150 if b'"%~dp0..\python" "%~dp0hg" %*' in data:
2148 2151 data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
2149 2152 b'"%~dp0python" "%~dp0hg" %*')
2150 2153 f = open(hgbat, 'wb')
2151 2154 f.write(data)
2152 2155 f.close()
2153 2156 else:
2154 2157 print('WARNING: cannot fix hg.bat reference to python.exe')
2155 2158
2156 2159 if self.options.anycoverage:
2157 2160 custom = os.path.join(self._testdir, 'sitecustomize.py')
2158 2161 target = os.path.join(self._pythondir, 'sitecustomize.py')
2159 2162 vlog('# Installing coverage trigger to %s' % target)
2160 2163 shutil.copyfile(custom, target)
2161 2164 rc = os.path.join(self._testdir, '.coveragerc')
2162 2165 vlog('# Installing coverage rc to %s' % rc)
2163 2166 os.environ['COVERAGE_PROCESS_START'] = rc
2164 2167 covdir = os.path.join(self._installdir, '..', 'coverage')
2165 2168 try:
2166 2169 os.mkdir(covdir)
2167 2170 except OSError as e:
2168 2171 if e.errno != errno.EEXIST:
2169 2172 raise
2170 2173
2171 2174 os.environ['COVERAGE_DIR'] = covdir
2172 2175
2173 2176 def _checkhglib(self, verb):
2174 2177 """Ensure that the 'mercurial' package imported by python is
2175 2178 the one we expect it to be. If not, print a warning to stderr."""
2176 2179 if ((self._bindir == self._pythondir) and
2177 2180 (self._bindir != self._tmpbindir)):
2178 2181 # The pythondir has been inferred from --with-hg flag.
2179 2182 # We cannot expect anything sensible here.
2180 2183 return
2181 2184 expecthg = os.path.join(self._pythondir, b'mercurial')
2182 2185 actualhg = self._gethgpath()
2183 2186 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
2184 2187 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
2185 2188 ' (expected %s)\n'
2186 2189 % (verb, actualhg, expecthg))
2187 2190 def _gethgpath(self):
2188 2191 """Return the path to the mercurial package that is actually found by
2189 2192 the current Python interpreter."""
2190 2193 if self._hgpath is not None:
2191 2194 return self._hgpath
2192 2195
2193 2196 cmd = b'%s -c "import mercurial; print (mercurial.__path__[0])"'
2194 2197 cmd = cmd % PYTHON
2195 2198 if PYTHON3:
2196 2199 cmd = _strpath(cmd)
2197 2200 pipe = os.popen(cmd)
2198 2201 try:
2199 2202 self._hgpath = _bytespath(pipe.read().strip())
2200 2203 finally:
2201 2204 pipe.close()
2202 2205
2203 2206 return self._hgpath
2204 2207
2205 2208 def _outputcoverage(self):
2206 2209 """Produce code coverage output."""
2207 2210 from coverage import coverage
2208 2211
2209 2212 vlog('# Producing coverage report')
2210 2213 # chdir is the easiest way to get short, relative paths in the
2211 2214 # output.
2212 2215 os.chdir(self._hgroot)
2213 2216 covdir = os.path.join(self._installdir, '..', 'coverage')
2214 2217 cov = coverage(data_file=os.path.join(covdir, 'cov'))
2215 2218
2216 2219 # Map install directory paths back to source directory.
2217 2220 cov.config.paths['srcdir'] = ['.', self._pythondir]
2218 2221
2219 2222 cov.combine()
2220 2223
2221 2224 omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
2222 2225 cov.report(ignore_errors=True, omit=omit)
2223 2226
2224 2227 if self.options.htmlcov:
2225 2228 htmldir = os.path.join(self._testdir, 'htmlcov')
2226 2229 cov.html_report(directory=htmldir, omit=omit)
2227 2230 if self.options.annotate:
2228 2231 adir = os.path.join(self._testdir, 'annotated')
2229 2232 if not os.path.isdir(adir):
2230 2233 os.mkdir(adir)
2231 2234 cov.annotate(directory=adir, omit=omit)
2232 2235
2233 2236 def _findprogram(self, program):
2234 2237 """Search PATH for a executable program"""
2235 2238 dpb = _bytespath(os.defpath)
2236 2239 sepb = _bytespath(os.pathsep)
2237 2240 for p in osenvironb.get(b'PATH', dpb).split(sepb):
2238 2241 name = os.path.join(p, program)
2239 2242 if os.name == 'nt' or os.access(name, os.X_OK):
2240 2243 return name
2241 2244 return None
2242 2245
2243 2246 def _checktools(self):
2244 2247 """Ensure tools required to run tests are present."""
2245 2248 for p in self.REQUIREDTOOLS:
2246 2249 if os.name == 'nt' and not p.endswith('.exe'):
2247 2250 p += '.exe'
2248 2251 found = self._findprogram(p)
2249 2252 if found:
2250 2253 vlog("# Found prerequisite", p, "at", found)
2251 2254 else:
2252 2255 print("WARNING: Did not find prerequisite tool: %s " % p)
2253 2256
2254 2257 if __name__ == '__main__':
2255 2258 runner = TestRunner()
2256 2259
2257 2260 try:
2258 2261 import msvcrt
2259 2262 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
2260 2263 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
2261 2264 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
2262 2265 except ImportError:
2263 2266 pass
2264 2267
2265 2268 sys.exit(runner.run(sys.argv[1:]))
@@ -1,679 +1,675 b''
1 1 This file tests the behavior of run-tests.py itself.
2 2
3 3 Avoid interference from actual test env:
4 4
5 5 $ unset HGTEST_JOBS
6 6 $ unset HGTEST_TIMEOUT
7 7 $ unset HGTEST_PORT
8 8 $ unset HGTEST_SHELL
9 9
10 10 Smoke test
11 11 ============
12 12
13 13 $ run-tests.py $HGTEST_RUN_TESTS_PURE
14 14
15 15 # Ran 0 tests, 0 skipped, 0 warned, 0 failed.
16 16
17 17 a succesful test
18 18 =======================
19 19
20 20 $ cat > test-success.t << EOF
21 21 > $ echo babar
22 22 > babar
23 23 > $ echo xyzzy
24 24 > never happens (?)
25 25 > xyzzy
26 26 > nor this (?)
27 27 > EOF
28 28
29 29 $ run-tests.py --with-hg=`which hg`
30 30 .
31 31 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
32 32
33 33 failing test
34 34 ==================
35 35
36 36 $ cat > test-failure.t << EOF
37 37 > $ echo babar
38 38 > rataxes
39 39 > This is a noop statement so that
40 40 > this test is still more bytes than success.
41 41 > EOF
42 42
43 43 >>> fh = open('test-failure-unicode.t', 'wb')
44 44 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
45 45 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
46 46
47 47 $ run-tests.py --with-hg=`which hg`
48 48
49 49 --- $TESTTMP/test-failure.t
50 50 +++ $TESTTMP/test-failure.t.err
51 51 @@ -1,4 +1,4 @@
52 52 $ echo babar
53 53 - rataxes
54 54 + babar
55 55 This is a noop statement so that
56 56 this test is still more bytes than success.
57 57
58 58 ERROR: test-failure.t output changed
59 59 !.
60 60 --- $TESTTMP/test-failure-unicode.t
61 61 +++ $TESTTMP/test-failure-unicode.t.err
62 62 @@ -1,2 +1,2 @@
63 63 $ echo babar\xce\xb1 (esc)
64 64 - l\xce\xb5\xce\xb5t (esc)
65 65 + babar\xce\xb1 (esc)
66 66
67 67 ERROR: test-failure-unicode.t output changed
68 68 !
69 69 Failed test-failure.t: output changed
70 70 Failed test-failure-unicode.t: output changed
71 71 # Ran 3 tests, 0 skipped, 0 warned, 2 failed.
72 72 python hash seed: * (glob)
73 73 [1]
74 74
75 75 test --xunit support
76 76 $ run-tests.py --with-hg=`which hg` --xunit=xunit.xml
77 77
78 78 --- $TESTTMP/test-failure.t
79 79 +++ $TESTTMP/test-failure.t.err
80 80 @@ -1,4 +1,4 @@
81 81 $ echo babar
82 82 - rataxes
83 83 + babar
84 84 This is a noop statement so that
85 85 this test is still more bytes than success.
86 86
87 87 ERROR: test-failure.t output changed
88 88 !.
89 89 --- $TESTTMP/test-failure-unicode.t
90 90 +++ $TESTTMP/test-failure-unicode.t.err
91 91 @@ -1,2 +1,2 @@
92 92 $ echo babar\xce\xb1 (esc)
93 93 - l\xce\xb5\xce\xb5t (esc)
94 94 + babar\xce\xb1 (esc)
95 95
96 96 ERROR: test-failure-unicode.t output changed
97 97 !
98 98 Failed test-failure.t: output changed
99 99 Failed test-failure-unicode.t: output changed
100 100 # Ran 3 tests, 0 skipped, 0 warned, 2 failed.
101 101 python hash seed: * (glob)
102 102 [1]
103 103 $ cat xunit.xml
104 104 <?xml version="1.0" encoding="utf-8"?>
105 105 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
106 106 <testcase name="test-success.t" time="*"/> (glob)
107 107 <testcase name="test-failure-unicode.t" time="*"> (glob)
108 108 <![CDATA[--- $TESTTMP/test-failure-unicode.t
109 109 +++ $TESTTMP/test-failure-unicode.t.err
110 110 @@ -1,2 +1,2 @@
111 111 $ echo babar\xce\xb1 (esc)
112 112 - l\xce\xb5\xce\xb5t (esc)
113 113 + babar\xce\xb1 (esc)
114 114 ]]> </testcase>
115 115 <testcase name="test-failure.t" time="*"> (glob)
116 116 <![CDATA[--- $TESTTMP/test-failure.t
117 117 +++ $TESTTMP/test-failure.t.err
118 118 @@ -1,4 +1,4 @@
119 119 $ echo babar
120 120 - rataxes
121 121 + babar
122 122 This is a noop statement so that
123 123 this test is still more bytes than success.
124 124 ]]> </testcase>
125 125 </testsuite>
126 126
127 127 $ rm test-failure-unicode.t
128 128
129 129 test for --retest
130 130 ====================
131 131
132 132 $ run-tests.py --with-hg=`which hg` --retest
133 133
134 134 --- $TESTTMP/test-failure.t
135 135 +++ $TESTTMP/test-failure.t.err
136 136 @@ -1,4 +1,4 @@
137 137 $ echo babar
138 138 - rataxes
139 139 + babar
140 140 This is a noop statement so that
141 141 this test is still more bytes than success.
142 142
143 143 ERROR: test-failure.t output changed
144 144 !
145 145 Failed test-failure.t: output changed
146 146 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
147 147 python hash seed: * (glob)
148 148 [1]
149 149
150 150 Selecting Tests To Run
151 151 ======================
152 152
153 153 successful
154 154
155 155 $ run-tests.py --with-hg=`which hg` test-success.t
156 156 .
157 157 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
158 158
159 159 success w/ keyword
160 160 $ run-tests.py --with-hg=`which hg` -k xyzzy
161 161 .
162 162 # Ran 2 tests, 1 skipped, 0 warned, 0 failed.
163 163
164 164 failed
165 165
166 166 $ run-tests.py --with-hg=`which hg` test-failure.t
167 167
168 168 --- $TESTTMP/test-failure.t
169 169 +++ $TESTTMP/test-failure.t.err
170 170 @@ -1,4 +1,4 @@
171 171 $ echo babar
172 172 - rataxes
173 173 + babar
174 174 This is a noop statement so that
175 175 this test is still more bytes than success.
176 176
177 177 ERROR: test-failure.t output changed
178 178 !
179 179 Failed test-failure.t: output changed
180 180 # Ran 1 tests, 0 skipped, 0 warned, 1 failed.
181 181 python hash seed: * (glob)
182 182 [1]
183 183
184 184 failure w/ keyword
185 185 $ run-tests.py --with-hg=`which hg` -k rataxes
186 186
187 187 --- $TESTTMP/test-failure.t
188 188 +++ $TESTTMP/test-failure.t.err
189 189 @@ -1,4 +1,4 @@
190 190 $ echo babar
191 191 - rataxes
192 192 + babar
193 193 This is a noop statement so that
194 194 this test is still more bytes than success.
195 195
196 196 ERROR: test-failure.t output changed
197 197 !
198 198 Failed test-failure.t: output changed
199 199 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
200 200 python hash seed: * (glob)
201 201 [1]
202 202
203 203 Verify that when a process fails to start we show a useful message
204 204 ==================================================================
205 205 NOTE: there is currently a bug where this shows "2 failed" even though
206 206 it's actually the same test being reported for failure twice.
207 207
208 208 $ cat > test-serve-fail.t <<EOF
209 209 > $ echo 'abort: child process failed to start blah'
210 210 > EOF
211 211 $ run-tests.py --with-hg=`which hg` test-serve-fail.t
212 212
213 213 ERROR: test-serve-fail.t output changed
214 214 !
215 215 ERROR: test-serve-fail.t output changed
216 216 !
217 217 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
218 218 Failed test-serve-fail.t: output changed
219 219 # Ran 1 tests, 0 skipped, 0 warned, 2 failed.
220 220 python hash seed: * (glob)
221 221 [1]
222 222 $ rm test-serve-fail.t
223 223
224 224 Running In Debug Mode
225 225 ======================
226 226
227 227 $ run-tests.py --with-hg=`which hg` --debug 2>&1 | grep -v pwd
228 228 + echo *SALT* 0 0 (glob)
229 229 *SALT* 0 0 (glob)
230 230 + echo babar
231 231 babar
232 232 + echo *SALT* 4 0 (glob)
233 233 *SALT* 4 0 (glob)
234 234 .+ echo *SALT* 0 0 (glob)
235 235 *SALT* 0 0 (glob)
236 236 + echo babar
237 237 babar
238 238 + echo *SALT* 2 0 (glob)
239 239 *SALT* 2 0 (glob)
240 240 + echo xyzzy
241 241 xyzzy
242 242 + echo *SALT* 6 0 (glob)
243 243 *SALT* 6 0 (glob)
244 244 .
245 245 # Ran 2 tests, 0 skipped, 0 warned, 0 failed.
246 246
247 247 Parallel runs
248 248 ==============
249 249
250 250 (duplicate the failing test to get predictable output)
251 251 $ cp test-failure.t test-failure-copy.t
252 252
253 253 $ run-tests.py --with-hg=`which hg` --jobs 2 test-failure*.t -n
254 254 !!
255 255 Failed test-failure*.t: output changed (glob)
256 256 Failed test-failure*.t: output changed (glob)
257 257 # Ran 2 tests, 0 skipped, 0 warned, 2 failed.
258 258 python hash seed: * (glob)
259 259 [1]
260 260
261 261 failures in parallel with --first should only print one failure
262 262 >>> f = open('test-nothing.t', 'w')
263 263 >>> f.write('foo\n' * 1024) and None
264 264 >>> f.write(' $ sleep 1') and None
265 265 $ run-tests.py --with-hg=`which hg` --jobs 2 --first
266 266
267 267 --- $TESTTMP/test-failure*.t (glob)
268 268 +++ $TESTTMP/test-failure*.t.err (glob)
269 269 @@ -1,4 +1,4 @@
270 270 $ echo babar
271 271 - rataxes
272 272 + babar
273 273 This is a noop statement so that
274 274 this test is still more bytes than success.
275 275
276 276 Failed test-failure*.t: output changed (glob)
277 277 Failed test-nothing.t: output changed
278 278 # Ran 2 tests, 0 skipped, 0 warned, 2 failed.
279 279 python hash seed: * (glob)
280 280 [1]
281 281
282 282
283 283 (delete the duplicated test file)
284 284 $ rm test-failure-copy.t test-nothing.t
285 285
286 286
287 287 Interactive run
288 288 ===============
289 289
290 290 (backup the failing test)
291 291 $ cp test-failure.t backup
292 292
293 293 Refuse the fix
294 294
295 295 $ echo 'n' | run-tests.py --with-hg=`which hg` -i
296 296
297 297 --- $TESTTMP/test-failure.t
298 298 +++ $TESTTMP/test-failure.t.err
299 299 @@ -1,4 +1,4 @@
300 300 $ echo babar
301 301 - rataxes
302 302 + babar
303 303 This is a noop statement so that
304 304 this test is still more bytes than success.
305 305 Accept this change? [n]
306 306 ERROR: test-failure.t output changed
307 307 !.
308 308 Failed test-failure.t: output changed
309 309 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
310 310 python hash seed: * (glob)
311 311 [1]
312 312
313 313 $ cat test-failure.t
314 314 $ echo babar
315 315 rataxes
316 316 This is a noop statement so that
317 317 this test is still more bytes than success.
318 318
319 319 Interactive with custom view
320 320
321 321 $ echo 'n' | run-tests.py --with-hg=`which hg` -i --view echo
322 322 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
323 323 Accept this change? [n]* (glob)
324 324 ERROR: test-failure.t output changed
325 325 !.
326 326 Failed test-failure.t: output changed
327 327 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
328 328 python hash seed: * (glob)
329 329 [1]
330 330
331 331 View the fix
332 332
333 333 $ echo 'y' | run-tests.py --with-hg=`which hg` --view echo
334 334 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
335 335
336 336 ERROR: test-failure.t output changed
337 337 !.
338 338 Failed test-failure.t: output changed
339 339 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
340 340 python hash seed: * (glob)
341 341 [1]
342 342
343 343 Accept the fix
344 344
345 345 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
346 346 $ echo " saved backup bundle to \$TESTTMP/foo.hg" >> test-failure.t
347 347 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
348 348 $ echo " saved backup bundle to \$TESTTMP/foo.hg (glob)" >> test-failure.t
349 349 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
350 350 $ echo " saved backup bundle to \$TESTTMP/*.hg (glob)" >> test-failure.t
351 351 $ echo 'y' | run-tests.py --with-hg=`which hg` -i 2>&1 | \
352 352 > sed -e 's,(glob)$,&<,g'
353 353
354 354 --- $TESTTMP/test-failure.t
355 355 +++ $TESTTMP/test-failure.t.err
356 356 @@ -1,9 +1,9 @@
357 357 $ echo babar
358 358 - rataxes
359 359 + babar
360 360 This is a noop statement so that
361 361 this test is still more bytes than success.
362 362 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
363 363 - saved backup bundle to $TESTTMP/foo.hg
364 364 + saved backup bundle to $TESTTMP/foo.hg (glob)<
365 365 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
366 366 saved backup bundle to $TESTTMP/foo.hg (glob)<
367 367 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
368 368 Accept this change? [n] ..
369 369 # Ran 2 tests, 0 skipped, 0 warned, 0 failed.
370 370
371 371 $ sed -e 's,(glob)$,&<,g' test-failure.t
372 372 $ echo babar
373 373 babar
374 374 This is a noop statement so that
375 375 this test is still more bytes than success.
376 376 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
377 377 saved backup bundle to $TESTTMP/foo.hg (glob)<
378 378 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
379 379 saved backup bundle to $TESTTMP/foo.hg (glob)<
380 380 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
381 381 saved backup bundle to $TESTTMP/*.hg (glob)<
382 382
383 383 (reinstall)
384 384 $ mv backup test-failure.t
385 385
386 386 No Diff
387 387 ===============
388 388
389 389 $ run-tests.py --with-hg=`which hg` --nodiff
390 390 !.
391 391 Failed test-failure.t: output changed
392 392 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
393 393 python hash seed: * (glob)
394 394 [1]
395 395
396 396 test --tmpdir support
397 397 $ run-tests.py --with-hg=`which hg` --tmpdir=$TESTTMP/keep test-success.t
398 398
399 399 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t (glob)
400 400 Keeping threadtmp dir: $TESTTMP/keep/child1 (glob)
401 401 .
402 402 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
403 403
404 404 timeouts
405 405 ========
406 406 $ cat > test-timeout.t <<EOF
407 407 > $ sleep 2
408 408 > $ echo pass
409 409 > pass
410 410 > EOF
411 411 > echo '#require slow' > test-slow-timeout.t
412 412 > cat test-timeout.t >> test-slow-timeout.t
413 413 $ run-tests.py --with-hg=`which hg` --timeout=1 --slowtimeout=3 \
414 414 > test-timeout.t test-slow-timeout.t
415 s
416 ERROR: test-timeout.t output changed
417 !
415 st
418 416 Skipped test-slow-timeout.t: skipped
419 417 Failed test-timeout.t: timed out
420 418 # Ran 1 tests, 1 skipped, 0 warned, 1 failed.
421 419 python hash seed: * (glob)
422 420 [1]
423 421 $ run-tests.py --with-hg=`which hg` --timeout=1 --slowtimeout=3 \
424 422 > test-timeout.t test-slow-timeout.t --allow-slow-tests
425 .
426 ERROR: test-timeout.t output changed
427 !
423 .t
428 424 Failed test-timeout.t: timed out
429 425 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
430 426 python hash seed: * (glob)
431 427 [1]
432 428 $ rm test-timeout.t test-slow-timeout.t
433 429
434 430 test for --time
435 431 ==================
436 432
437 433 $ run-tests.py --with-hg=`which hg` test-success.t --time
438 434 .
439 435 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
440 436 # Producing time report
441 437 start end cuser csys real Test
442 438 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
443 439
444 440 test for --time with --job enabled
445 441 ====================================
446 442
447 443 $ run-tests.py --with-hg=`which hg` test-success.t --time --jobs 2
448 444 .
449 445 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
450 446 # Producing time report
451 447 start end cuser csys real Test
452 448 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
453 449
454 450 Skips
455 451 ================
456 452 $ cat > test-skip.t <<EOF
457 453 > $ echo xyzzy
458 454 > #require false
459 455 > EOF
460 456 $ run-tests.py --with-hg=`which hg` --nodiff
461 457 !.s
462 458 Skipped test-skip.t: skipped
463 459 Failed test-failure.t: output changed
464 460 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
465 461 python hash seed: * (glob)
466 462 [1]
467 463
468 464 $ run-tests.py --with-hg=`which hg` --keyword xyzzy
469 465 .s
470 466 Skipped test-skip.t: skipped
471 467 # Ran 2 tests, 2 skipped, 0 warned, 0 failed.
472 468
473 469 Skips with xml
474 470 $ run-tests.py --with-hg=`which hg` --keyword xyzzy \
475 471 > --xunit=xunit.xml
476 472 .s
477 473 Skipped test-skip.t: skipped
478 474 # Ran 2 tests, 2 skipped, 0 warned, 0 failed.
479 475 $ cat xunit.xml
480 476 <?xml version="1.0" encoding="utf-8"?>
481 477 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
482 478 <testcase name="test-success.t" time="*"/> (glob)
483 479 </testsuite>
484 480
485 481 Missing skips or blacklisted skips don't count as executed:
486 482 $ echo test-failure.t > blacklist
487 483 $ run-tests.py --with-hg=`which hg` --blacklist=blacklist \
488 484 > test-failure.t test-bogus.t
489 485 ss
490 486 Skipped test-bogus.t: Doesn't exist
491 487 Skipped test-failure.t: blacklisted
492 488 # Ran 0 tests, 2 skipped, 0 warned, 0 failed.
493 489
494 490 #if json
495 491
496 492 test for --json
497 493 ==================
498 494
499 495 $ run-tests.py --with-hg=`which hg` --json
500 496
501 497 --- $TESTTMP/test-failure.t
502 498 +++ $TESTTMP/test-failure.t.err
503 499 @@ -1,4 +1,4 @@
504 500 $ echo babar
505 501 - rataxes
506 502 + babar
507 503 This is a noop statement so that
508 504 this test is still more bytes than success.
509 505
510 506 ERROR: test-failure.t output changed
511 507 !.s
512 508 Skipped test-skip.t: skipped
513 509 Failed test-failure.t: output changed
514 510 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
515 511 python hash seed: * (glob)
516 512 [1]
517 513
518 514 $ cat report.json
519 515 testreport ={
520 516 "test-failure.t": [\{] (re)
521 517 "csys": "\s*[\d\.]{4,5}", ? (re)
522 518 "cuser": "\s*[\d\.]{4,5}", ? (re)
523 519 "end": "\s*[\d\.]{4,5}", ? (re)
524 520 "result": "failure", ? (re)
525 521 "start": "\s*[\d\.]{4,5}", ? (re)
526 522 "time": "\s*[\d\.]{4,5}" (re)
527 523 }, ? (re)
528 524 "test-skip.t": {
529 525 "csys": "\s*[\d\.]{4,5}", ? (re)
530 526 "cuser": "\s*[\d\.]{4,5}", ? (re)
531 527 "end": "\s*[\d\.]{4,5}", ? (re)
532 528 "result": "skip", ? (re)
533 529 "start": "\s*[\d\.]{4,5}", ? (re)
534 530 "time": "\s*[\d\.]{4,5}" (re)
535 531 }, ? (re)
536 532 "test-success.t": [\{] (re)
537 533 "csys": "\s*[\d\.]{4,5}", ? (re)
538 534 "cuser": "\s*[\d\.]{4,5}", ? (re)
539 535 "end": "\s*[\d\.]{4,5}", ? (re)
540 536 "result": "success", ? (re)
541 537 "start": "\s*[\d\.]{4,5}", ? (re)
542 538 "time": "\s*[\d\.]{4,5}" (re)
543 539 }
544 540 } (no-eol)
545 541
546 542 Test that failed test accepted through interactive are properly reported:
547 543
548 544 $ cp test-failure.t backup
549 545 $ echo y | run-tests.py --with-hg=`which hg` --json -i
550 546
551 547 --- $TESTTMP/test-failure.t
552 548 +++ $TESTTMP/test-failure.t.err
553 549 @@ -1,4 +1,4 @@
554 550 $ echo babar
555 551 - rataxes
556 552 + babar
557 553 This is a noop statement so that
558 554 this test is still more bytes than success.
559 555 Accept this change? [n] ..s
560 556 Skipped test-skip.t: skipped
561 557 # Ran 2 tests, 1 skipped, 0 warned, 0 failed.
562 558
563 559 $ cat report.json
564 560 testreport ={
565 561 "test-failure.t": [\{] (re)
566 562 "csys": "\s*[\d\.]{4,5}", ? (re)
567 563 "cuser": "\s*[\d\.]{4,5}", ? (re)
568 564 "end": "\s*[\d\.]{4,5}", ? (re)
569 565 "result": "success", ? (re)
570 566 "start": "\s*[\d\.]{4,5}", ? (re)
571 567 "time": "\s*[\d\.]{4,5}" (re)
572 568 }, ? (re)
573 569 "test-skip.t": {
574 570 "csys": "\s*[\d\.]{4,5}", ? (re)
575 571 "cuser": "\s*[\d\.]{4,5}", ? (re)
576 572 "end": "\s*[\d\.]{4,5}", ? (re)
577 573 "result": "skip", ? (re)
578 574 "start": "\s*[\d\.]{4,5}", ? (re)
579 575 "time": "\s*[\d\.]{4,5}" (re)
580 576 }, ? (re)
581 577 "test-success.t": [\{] (re)
582 578 "csys": "\s*[\d\.]{4,5}", ? (re)
583 579 "cuser": "\s*[\d\.]{4,5}", ? (re)
584 580 "end": "\s*[\d\.]{4,5}", ? (re)
585 581 "result": "success", ? (re)
586 582 "start": "\s*[\d\.]{4,5}", ? (re)
587 583 "time": "\s*[\d\.]{4,5}" (re)
588 584 }
589 585 } (no-eol)
590 586 $ mv backup test-failure.t
591 587
592 588 #endif
593 589
594 590 backslash on end of line with glob matching is handled properly
595 591
596 592 $ cat > test-glob-backslash.t << EOF
597 593 > $ echo 'foo bar \\'
598 594 > foo * \ (glob)
599 595 > EOF
600 596
601 597 $ run-tests.py --with-hg=`which hg` test-glob-backslash.t
602 598 .
603 599 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
604 600
605 601 $ rm -f test-glob-backslash.t
606 602
607 603 Test reusability for third party tools
608 604 ======================================
609 605
610 606 $ mkdir "$TESTTMP"/anothertests
611 607 $ cd "$TESTTMP"/anothertests
612 608
613 609 test that `run-tests.py` can execute hghave, even if it is not run in
614 610 the Mercurial source tree.
615 611
616 612 $ cat > test-hghave.t <<EOF
617 613 > #require true
618 614 > $ echo foo
619 615 > foo
620 616 > EOF
621 617 $ run-tests.py $HGTEST_RUN_TESTS_PURE test-hghave.t
622 618 .
623 619 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
624 620
625 621 test that RUNTESTDIR refers to the directory in which the currently
626 622 running `run-tests.py` is placed.
627 623
628 624 $ cat > test-runtestdir.t <<EOF
629 625 > - $TESTDIR, in which test-run-tests.t is placed
630 626 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
631 627 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
632 628 >
633 629 > #if windows
634 630 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
635 631 > #else
636 632 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
637 633 > #endif
638 634 > $ test "\$RUNTESTDIR" = "$TESTDIR"
639 635 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py
640 636 > #!/usr/bin/env python
641 637 > #
642 638 > # check-code - a style and portability checker for Mercurial
643 639 > EOF
644 640 $ run-tests.py $HGTEST_RUN_TESTS_PURE test-runtestdir.t
645 641 .
646 642 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
647 643
648 644 #if execbit
649 645
650 646 test that TESTDIR is referenced in PATH
651 647
652 648 $ cat > custom-command.sh <<EOF
653 649 > #!/bin/sh
654 650 > echo "hello world"
655 651 > EOF
656 652 $ chmod +x custom-command.sh
657 653 $ cat > test-testdir-path.t <<EOF
658 654 > $ custom-command.sh
659 655 > hello world
660 656 > EOF
661 657 $ run-tests.py $HGTEST_RUN_TESTS_PURE test-testdir-path.t
662 658 .
663 659 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
664 660
665 661 #endif
666 662
667 663 test support for --allow-slow-tests
668 664 $ cat > test-very-slow-test.t <<EOF
669 665 > #require slow
670 666 > $ echo pass
671 667 > pass
672 668 > EOF
673 669 $ run-tests.py $HGTEST_RUN_TESTS_PURE test-very-slow-test.t
674 670 s
675 671 Skipped test-very-slow-test.t: skipped
676 672 # Ran 0 tests, 1 skipped, 0 warned, 0 failed.
677 673 $ run-tests.py $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
678 674 .
679 675 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
General Comments 0
You need to be logged in to leave comments. Login now