run-tests.py: add RUNTESTDIR to refer `tests` of Mercurial...
FUJIWARA Katsunori
r25729:57dfadc4 default
@@ -1,2227 +1,2229 @@
1 1 #!/usr/bin/env python
2 2 #
3 3 # run-tests.py - Run a set of tests on Mercurial
4 4 #
5 5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 # Modifying this script is tricky because it has many modes:
11 11 # - serial (default) vs parallel (-jN, N > 1)
12 12 # - no coverage (default) vs coverage (-c, -C, -s)
13 13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 14 # - tests are a mix of shell scripts and Python scripts
15 15 #
16 16 # If you change this script, it is recommended that you ensure you
17 17 # haven't broken it by running it in various modes with a representative
18 18 # sample of test scripts. For example:
19 19 #
20 20 # 1) serial, no coverage, temp install:
21 21 # ./run-tests.py test-s*
22 22 # 2) serial, no coverage, local hg:
23 23 # ./run-tests.py --local test-s*
24 24 # 3) serial, coverage, temp install:
25 25 # ./run-tests.py -c test-s*
26 26 # 4) serial, coverage, local hg:
27 27 # ./run-tests.py -c --local test-s* # unsupported
28 28 # 5) parallel, no coverage, temp install:
29 29 # ./run-tests.py -j2 test-s*
30 30 # 6) parallel, no coverage, local hg:
31 31 # ./run-tests.py -j2 --local test-s*
32 32 # 7) parallel, coverage, temp install:
33 33 # ./run-tests.py -j2 -c test-s* # currently broken
34 34 # 8) parallel, coverage, local install:
35 35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 36 # 9) parallel, custom tmp dir:
37 37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 38 #
39 39 # (You could use any subset of the tests: test-s* happens to match
40 40 # enough that it's worth doing parallel runs, few enough that it
41 41 # completes fairly quickly, includes both shell and Python scripts, and
42 42 # includes some scripts that run daemon processes.)
43 43
44 44 from __future__ import print_function
45 45
46 46 from distutils import version
47 47 import difflib
48 48 import errno
49 49 import optparse
50 50 import os
51 51 import shutil
52 52 import subprocess
53 53 import signal
54 54 import socket
55 55 import sys
56 56 import tempfile
57 57 import time
58 58 import random
59 59 import re
60 60 import threading
61 61 import killdaemons as killmod
62 62 try:
63 63 import Queue as queue
64 64 except ImportError:
65 65 import queue
66 66 from xml.dom import minidom
67 67 import unittest
68 68
69 69 osenvironb = getattr(os, 'environb', os.environ)
70 70
71 71 try:
72 72 import json
73 73 except ImportError:
74 74 try:
75 75 import simplejson as json
76 76 except ImportError:
77 77 json = None
78 78
79 79 processlock = threading.Lock()
80 80
81 81 if sys.version_info > (3, 5, 0):
82 82 PYTHON3 = True
83 83 xrange = range # we use xrange in one place, and we'd rather not use range
84 84 def _bytespath(p):
85 85 return p.encode('utf-8')
86 86
87 87 def _strpath(p):
88 88 return p.decode('utf-8')
89 89
90 90 elif sys.version_info >= (3, 0, 0):
91 91 print('%s is only supported on Python 3.5+ and 2.6-2.7, not %s' %
92 92 (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
93 93     sys.exit(70) # EX_SOFTWARE from `man 3 sysexits`
94 94 else:
95 95 PYTHON3 = False
96 96
97 97 # In python 2.x, path operations are generally done using
98 98 # bytestrings by default, so we don't have to do any extra
99 99 # fiddling there. We define the wrapper functions anyway just to
100 100 # help keep code consistent between platforms.
101 101 def _bytespath(p):
102 102 return p
103 103
104 104 _strpath = _bytespath
105 105
106 106 # For Windows support
107 107 wifexited = getattr(os, "WIFEXITED", lambda x: False)
108 108
109 109 def checkportisavailable(port):
110 110 """return true if a port seems free to bind on localhost"""
111 111 try:
112 112 s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
113 113 s.bind(('localhost', port))
114 114 s.close()
115 115 return True
116 116 except socket.error as exc:
117 117 if not exc.errno == errno.EADDRINUSE:
118 118 raise
119 119 return False
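# Illustrative sketch (not part of the runner's own flow): each Test
# reserves three consecutive ports (HGPORT, HGPORT1, HGPORT2), so a free
# range starting at the default port 20059 could be probed like this:
#
#     if all(checkportisavailable(20059 + i) for i in range(3)):
#         print('default port range looks free')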
120 120
121 121 closefds = os.name == 'posix'
122 122 def Popen4(cmd, wd, timeout, env=None):
123 123 processlock.acquire()
124 124 p = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=wd, env=env,
125 125 close_fds=closefds,
126 126 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
127 127 stderr=subprocess.STDOUT)
128 128 processlock.release()
129 129
130 130 p.fromchild = p.stdout
131 131 p.tochild = p.stdin
132 132 p.childerr = p.stderr
133 133
134 134 p.timeout = False
135 135 if timeout:
136 136 def t():
137 137 start = time.time()
138 138 while time.time() - start < timeout and p.returncode is None:
139 139 time.sleep(.1)
140 140 p.timeout = True
141 141 if p.returncode is None:
142 142 terminate(p)
143 143 threading.Thread(target=t).start()
144 144
145 145 return p
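# Rough sketch of the watchdog behaviour above (command, directory and
# timeout are made-up values): a process that outlives its timeout is
# terminated and flagged, which _runcommand() later reports as 'timeout'.
#
#     p = Popen4(b'sleep 10', b'/tmp', timeout=2)
#     p.wait()            # returns once the watchdog sends SIGTERM
#     assert p.timeout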
146 146
147 147 PYTHON = _bytespath(sys.executable.replace('\\', '/'))
148 148 IMPL_PATH = b'PYTHONPATH'
149 149 if 'java' in sys.platform:
150 150 IMPL_PATH = b'JYTHONPATH'
151 151
152 152 defaults = {
153 153 'jobs': ('HGTEST_JOBS', 1),
154 154 'timeout': ('HGTEST_TIMEOUT', 180),
155 155 'port': ('HGTEST_PORT', 20059),
156 156 'shell': ('HGTEST_SHELL', 'sh'),
157 157 }
158 158
159 159 def parselistfiles(files, listtype, warn=True):
160 160 entries = dict()
161 161 for filename in files:
162 162 try:
163 163 path = os.path.expanduser(os.path.expandvars(filename))
164 164 f = open(path, "rb")
165 165 except IOError as err:
166 166 if err.errno != errno.ENOENT:
167 167 raise
168 168 if warn:
169 169 print("warning: no such %s file: %s" % (listtype, filename))
170 170 continue
171 171
172 172 for line in f.readlines():
173 173 line = line.split(b'#', 1)[0].strip()
174 174 if line:
175 175 entries[line] = filename
176 176
177 177 f.close()
178 178 return entries
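# For reference, a --blacklist/--whitelist file holds one test name per
# line and '#' starts a comment. A hypothetical file 'black.list' with
#
#     test-convert.t      # too slow on this machine
#     test-largefiles.t
#
# makes parselistfiles(['black.list'], 'blacklist') return
# {b'test-convert.t': 'black.list', b'test-largefiles.t': 'black.list'}.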
179 179
180 180 def getparser():
181 181 """Obtain the OptionParser used by the CLI."""
182 182 parser = optparse.OptionParser("%prog [options] [tests]")
183 183
184 184 # keep these sorted
185 185 parser.add_option("--blacklist", action="append",
186 186 help="skip tests listed in the specified blacklist file")
187 187 parser.add_option("--whitelist", action="append",
188 188 help="always run tests listed in the specified whitelist file")
189 189 parser.add_option("--changed", type="string",
190 190 help="run tests that are changed in parent rev or working directory")
191 191 parser.add_option("-C", "--annotate", action="store_true",
192 192 help="output files annotated with coverage")
193 193 parser.add_option("-c", "--cover", action="store_true",
194 194 help="print a test coverage report")
195 195 parser.add_option("-d", "--debug", action="store_true",
196 196 help="debug mode: write output of test scripts to console"
197 197 " rather than capturing and diffing it (disables timeout)")
198 198 parser.add_option("-f", "--first", action="store_true",
199 199 help="exit on the first test failure")
200 200 parser.add_option("-H", "--htmlcov", action="store_true",
201 201 help="create an HTML report of the coverage of the files")
202 202 parser.add_option("-i", "--interactive", action="store_true",
203 203 help="prompt to accept changed output")
204 204 parser.add_option("-j", "--jobs", type="int",
205 205 help="number of jobs to run in parallel"
206 206 " (default: $%s or %d)" % defaults['jobs'])
207 207 parser.add_option("--keep-tmpdir", action="store_true",
208 208 help="keep temporary directory after running tests")
209 209 parser.add_option("-k", "--keywords",
210 210 help="run tests matching keywords")
211 211 parser.add_option("-l", "--local", action="store_true",
212 212 help="shortcut for --with-hg=<testdir>/../hg")
213 213 parser.add_option("--loop", action="store_true",
214 214 help="loop tests repeatedly")
215 215 parser.add_option("--runs-per-test", type="int", dest="runs_per_test",
216 216 help="run each test N times (default=1)", default=1)
217 217 parser.add_option("-n", "--nodiff", action="store_true",
218 218 help="skip showing test changes")
219 219 parser.add_option("-p", "--port", type="int",
220 220 help="port on which servers should listen"
221 221 " (default: $%s or %d)" % defaults['port'])
222 222 parser.add_option("--compiler", type="string",
223 223 help="compiler to build with")
224 224 parser.add_option("--pure", action="store_true",
225 225 help="use pure Python code instead of C extensions")
226 226 parser.add_option("-R", "--restart", action="store_true",
227 227 help="restart at last error")
228 228 parser.add_option("-r", "--retest", action="store_true",
229 229 help="retest failed tests")
230 230 parser.add_option("-S", "--noskips", action="store_true",
231 231                       help="don't report skipped tests verbosely")
232 232 parser.add_option("--shell", type="string",
233 233 help="shell to use (default: $%s or %s)" % defaults['shell'])
234 234 parser.add_option("-t", "--timeout", type="int",
235 235 help="kill errant tests after TIMEOUT seconds"
236 236 " (default: $%s or %d)" % defaults['timeout'])
237 237 parser.add_option("--time", action="store_true",
238 238 help="time how long each test takes")
239 239 parser.add_option("--json", action="store_true",
240 240 help="store test result data in 'report.json' file")
241 241 parser.add_option("--tmpdir", type="string",
242 242 help="run tests in the given temporary directory"
243 243 " (implies --keep-tmpdir)")
244 244 parser.add_option("-v", "--verbose", action="store_true",
245 245 help="output verbose messages")
246 246 parser.add_option("--xunit", type="string",
247 247 help="record xunit results at specified path")
248 248 parser.add_option("--view", type="string",
249 249 help="external diff viewer")
250 250 parser.add_option("--with-hg", type="string",
251 251 metavar="HG",
252 252 help="test using specified hg script rather than a "
253 253 "temporary installation")
254 254 parser.add_option("-3", "--py3k-warnings", action="store_true",
255 255 help="enable Py3k warnings on Python 2.6+")
256 256 parser.add_option('--extra-config-opt', action="append",
257 257 help='set the given config opt in the test hgrc')
258 258 parser.add_option('--random', action="store_true",
259 259 help='run tests in random order')
260 260 parser.add_option('--profile-runner', action='store_true',
261 261 help='run statprof on run-tests')
262 262
263 263 for option, (envvar, default) in defaults.items():
264 264 defaults[option] = type(default)(os.environ.get(envvar, default))
265 265 parser.set_defaults(**defaults)
266 266
267 267 return parser
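# Hedged sketch of how the two helpers above fit together (roughly the
# flow the runner itself follows; the test name is made up):
#
#     parser = getparser()
#     options, args = parseargs(['--local', '-j', '2', 'test-example.t'],
#                               parser)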
268 268
269 269 def parseargs(args, parser):
270 270 """Parse arguments with our OptionParser and validate results."""
271 271 (options, args) = parser.parse_args(args)
272 272
273 273 # jython is always pure
274 274 if 'java' in sys.platform or '__pypy__' in sys.modules:
275 275 options.pure = True
276 276
277 277 if options.with_hg:
278 278 options.with_hg = os.path.expanduser(options.with_hg)
279 279 if not (os.path.isfile(options.with_hg) and
280 280 os.access(options.with_hg, os.X_OK)):
281 281 parser.error('--with-hg must specify an executable hg script')
282 282 if not os.path.basename(options.with_hg) == 'hg':
283 283 sys.stderr.write('warning: --with-hg should specify an hg script\n')
284 284 if options.local:
285 285 testdir = os.path.dirname(_bytespath(os.path.realpath(sys.argv[0])))
286 286 hgbin = os.path.join(os.path.dirname(testdir), b'hg')
287 287 if os.name != 'nt' and not os.access(hgbin, os.X_OK):
288 288 parser.error('--local specified, but %r not found or not executable'
289 289 % hgbin)
290 290 options.with_hg = hgbin
291 291
292 292 options.anycoverage = options.cover or options.annotate or options.htmlcov
293 293 if options.anycoverage:
294 294 try:
295 295 import coverage
296 296 covver = version.StrictVersion(coverage.__version__).version
297 297 if covver < (3, 3):
298 298 parser.error('coverage options require coverage 3.3 or later')
299 299 except ImportError:
300 300 parser.error('coverage options now require the coverage package')
301 301
302 302 if options.anycoverage and options.local:
303 303 # this needs some path mangling somewhere, I guess
304 304 parser.error("sorry, coverage options do not work when --local "
305 305 "is specified")
306 306
307 307 if options.anycoverage and options.with_hg:
308 308 parser.error("sorry, coverage options do not work when --with-hg "
309 309 "is specified")
310 310
311 311 global verbose
312 312 if options.verbose:
313 313 verbose = ''
314 314
315 315 if options.tmpdir:
316 316 options.tmpdir = os.path.expanduser(options.tmpdir)
317 317
318 318 if options.jobs < 1:
319 319 parser.error('--jobs must be positive')
320 320 if options.interactive and options.debug:
321 321 parser.error("-i/--interactive and -d/--debug are incompatible")
322 322 if options.debug:
323 323 if options.timeout != defaults['timeout']:
324 324 sys.stderr.write(
325 325 'warning: --timeout option ignored with --debug\n')
326 326 options.timeout = 0
327 327 if options.py3k_warnings:
328 328 if PYTHON3:
329 329 parser.error(
330 330 '--py3k-warnings can only be used on Python 2.6 and 2.7')
331 331 if options.blacklist:
332 332 options.blacklist = parselistfiles(options.blacklist, 'blacklist')
333 333 if options.whitelist:
334 334 options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
335 335 else:
336 336 options.whitelisted = {}
337 337
338 338 return (options, args)
339 339
340 340 def rename(src, dst):
341 341     """Like os.rename(), but trades atomicity and friendliness to open
342 342     files for the ability to overwrite an existing destination.
343 343 """
344 344 shutil.copy(src, dst)
345 345 os.remove(src)
346 346
347 347 _unified_diff = difflib.unified_diff
348 348 if PYTHON3:
349 349 import functools
350 350 _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
351 351
352 352 def getdiff(expected, output, ref, err):
353 353 servefail = False
354 354 lines = []
355 355 for line in _unified_diff(expected, output, ref, err):
356 356 if line.startswith(b'+++') or line.startswith(b'---'):
357 357 line = line.replace(b'\\', b'/')
358 358 if line.endswith(b' \n'):
359 359 line = line[:-2] + b'\n'
360 360 lines.append(line)
361 361 if not servefail and line.startswith(
362 362 b'+ abort: child process failed to start'):
363 363 servefail = True
364 364
365 365 return servefail, lines
366 366
367 367 verbose = False
368 368 def vlog(*msg):
369 369 """Log only when in verbose mode."""
370 370 if verbose is False:
371 371 return
372 372
373 373 return log(*msg)
374 374
375 375 # Bytes that break XML even in a CDATA block: control characters 0-31
376 376 # sans \t, \n and \r
377 377 CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")
378 378
379 379 def cdatasafe(data):
380 380 """Make a string safe to include in a CDATA block.
381 381
382 382 Certain control characters are illegal in a CDATA block, and
383 383 there's no way to include a ]]> in a CDATA either. This function
384 384 replaces illegal bytes with ? and adds a space between the ]] so
385 385 that it won't break the CDATA block.
386 386 """
387 387 return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>')
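# For instance (illustrative input): a stray BEL byte becomes '?' and a
# ']]>' sequence is split, keeping the CDATA sections of the xunit report
# well-formed:
#
#     cdatasafe(b'ping\x07]]>')  ==  b'ping?] ]>'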
388 388
389 389 def log(*msg):
390 390 """Log something to stdout.
391 391
392 392 Arguments are strings to print.
393 393 """
394 394 with iolock:
395 395 if verbose:
396 396 print(verbose, end=' ')
397 397 for m in msg:
398 398 print(m, end=' ')
399 399 print()
400 400 sys.stdout.flush()
401 401
402 402 def terminate(proc):
403 403 """Terminate subprocess (with fallback for Python versions < 2.6)"""
404 404 vlog('# Terminating process %d' % proc.pid)
405 405 try:
406 406 getattr(proc, 'terminate', lambda : os.kill(proc.pid, signal.SIGTERM))()
407 407 except OSError:
408 408 pass
409 409
410 410 def killdaemons(pidfile):
411 411 return killmod.killdaemons(pidfile, tryhard=False, remove=True,
412 412 logfn=vlog)
413 413
414 414 class Test(unittest.TestCase):
415 415 """Encapsulates a single, runnable test.
416 416
417 417 While this class conforms to the unittest.TestCase API, it differs in that
418 418 instances need to be instantiated manually. (Typically, unittest.TestCase
419 419 classes are instantiated automatically by scanning modules.)
420 420 """
421 421
422 422 # Status code reserved for skipped tests (used by hghave).
423 423 SKIPPED_STATUS = 80
424 424
425 425 def __init__(self, path, tmpdir, keeptmpdir=False,
426 426 debug=False,
427 427 timeout=defaults['timeout'],
428 428 startport=defaults['port'], extraconfigopts=None,
429 429 py3kwarnings=False, shell=None):
430 430 """Create a test from parameters.
431 431
432 432 path is the full path to the file defining the test.
433 433
434 434 tmpdir is the main temporary directory to use for this test.
435 435
436 436 keeptmpdir determines whether to keep the test's temporary directory
437 437 after execution. It defaults to removal (False).
438 438
439 439 debug mode will make the test execute verbosely, with unfiltered
440 440 output.
441 441
442 442 timeout controls the maximum run time of the test. It is ignored when
443 443 debug is True.
444 444
445 445 startport controls the starting port number to use for this test. Each
446 446 test will reserve 3 port numbers for execution. It is the caller's
447 447 responsibility to allocate a non-overlapping port range to Test
448 448 instances.
449 449
450 450 extraconfigopts is an iterable of extra hgrc config options. Values
451 451 must have the form "key=value" (something understood by hgrc). Values
452 452 of the form "foo.key=value" will result in "[foo] key=value".
453 453
454 454 py3kwarnings enables Py3k warnings.
455 455
456 456 shell is the shell to execute tests in.
457 457 """
458 458 self.path = path
459 459 self.bname = os.path.basename(path)
460 460 self.name = _strpath(self.bname)
461 461 self._testdir = os.path.dirname(path)
462 462 self.errpath = os.path.join(self._testdir, b'%s.err' % self.bname)
463 463
464 464 self._threadtmp = tmpdir
465 465 self._keeptmpdir = keeptmpdir
466 466 self._debug = debug
467 467 self._timeout = timeout
468 468 self._startport = startport
469 469 self._extraconfigopts = extraconfigopts or []
470 470 self._py3kwarnings = py3kwarnings
471 471 self._shell = _bytespath(shell)
472 472
473 473 self._aborted = False
474 474 self._daemonpids = []
475 475 self._finished = None
476 476 self._ret = None
477 477 self._out = None
478 478 self._skipped = None
479 479 self._testtmp = None
480 480
481 481 # If we're not in --debug mode and reference output file exists,
482 482 # check test output against it.
483 483 if debug:
484 484 self._refout = None # to match "out is None"
485 485 elif os.path.exists(self.refpath):
486 486 f = open(self.refpath, 'rb')
487 487 self._refout = f.read().splitlines(True)
488 488 f.close()
489 489 else:
490 490 self._refout = []
491 491
492 492 # needed to get base class __repr__ running
493 493 @property
494 494 def _testMethodName(self):
495 495 return self.name
496 496
497 497 def __str__(self):
498 498 return self.name
499 499
500 500 def shortDescription(self):
501 501 return self.name
502 502
503 503 def setUp(self):
504 504 """Tasks to perform before run()."""
505 505 self._finished = False
506 506 self._ret = None
507 507 self._out = None
508 508 self._skipped = None
509 509
510 510 try:
511 511 os.mkdir(self._threadtmp)
512 512 except OSError as e:
513 513 if e.errno != errno.EEXIST:
514 514 raise
515 515
516 516 self._testtmp = os.path.join(self._threadtmp,
517 517 os.path.basename(self.path))
518 518 os.mkdir(self._testtmp)
519 519
520 520 # Remove any previous output files.
521 521 if os.path.exists(self.errpath):
522 522 try:
523 523 os.remove(self.errpath)
524 524 except OSError as e:
525 525 # We might have raced another test to clean up a .err
526 526 # file, so ignore ENOENT when removing a previous .err
527 527 # file.
528 528 if e.errno != errno.ENOENT:
529 529 raise
530 530
531 531 def run(self, result):
532 532 """Run this test and report results against a TestResult instance."""
533 533 # This function is extremely similar to unittest.TestCase.run(). Once
534 534 # we require Python 2.7 (or at least its version of unittest), this
535 535 # function can largely go away.
536 536 self._result = result
537 537 result.startTest(self)
538 538 try:
539 539 try:
540 540 self.setUp()
541 541 except (KeyboardInterrupt, SystemExit):
542 542 self._aborted = True
543 543 raise
544 544 except Exception:
545 545 result.addError(self, sys.exc_info())
546 546 return
547 547
548 548 success = False
549 549 try:
550 550 self.runTest()
551 551 except KeyboardInterrupt:
552 552 self._aborted = True
553 553 raise
554 554 except SkipTest as e:
555 555 result.addSkip(self, str(e))
556 556 # The base class will have already counted this as a
557 557 # test we "ran", but we want to exclude skipped tests
558 558                 # from the count of tests run.
559 559 result.testsRun -= 1
560 560 except IgnoreTest as e:
561 561 result.addIgnore(self, str(e))
562 562 # As with skips, ignores also should be excluded from
563 563 # the number of tests executed.
564 564 result.testsRun -= 1
565 565 except WarnTest as e:
566 566 result.addWarn(self, str(e))
567 567 except self.failureException as e:
568 568 # This differs from unittest in that we don't capture
569 569 # the stack trace. This is for historical reasons and
570 570 # this decision could be revisited in the future,
571 571 # especially for PythonTest instances.
572 572 if result.addFailure(self, str(e)):
573 573 success = True
574 574 except Exception:
575 575 result.addError(self, sys.exc_info())
576 576 else:
577 577 success = True
578 578
579 579 try:
580 580 self.tearDown()
581 581 except (KeyboardInterrupt, SystemExit):
582 582 self._aborted = True
583 583 raise
584 584 except Exception:
585 585 result.addError(self, sys.exc_info())
586 586 success = False
587 587
588 588 if success:
589 589 result.addSuccess(self)
590 590 finally:
591 591 result.stopTest(self, interrupted=self._aborted)
592 592
593 593 def runTest(self):
594 594 """Run this test instance.
595 595
596 596 This will return a tuple describing the result of the test.
597 597 """
598 598 env = self._getenv()
599 599 self._daemonpids.append(env['DAEMON_PIDS'])
600 600 self._createhgrc(env['HGRCPATH'])
601 601
602 602 vlog('# Test', self.name)
603 603
604 604 ret, out = self._run(env)
605 605 self._finished = True
606 606 self._ret = ret
607 607 self._out = out
608 608
609 609 def describe(ret):
610 610 if ret < 0:
611 611 return 'killed by signal: %d' % -ret
612 612 return 'returned error code %d' % ret
613 613
614 614 self._skipped = False
615 615
616 616 if ret == self.SKIPPED_STATUS:
617 617 if out is None: # Debug mode, nothing to parse.
618 618 missing = ['unknown']
619 619 failed = None
620 620 else:
621 621 missing, failed = TTest.parsehghaveoutput(out)
622 622
623 623 if not missing:
624 624 missing = ['skipped']
625 625
626 626 if failed:
627 627                 self.fail('hghave failed checking for %s' % failed[-1])
628 628 else:
629 629 self._skipped = True
630 630 raise SkipTest(missing[-1])
631 631 elif ret == 'timeout':
632 632 self.fail('timed out')
633 633 elif ret is False:
634 634 raise WarnTest('no result code from test')
635 635 elif out != self._refout:
636 636 # Diff generation may rely on written .err file.
637 637 if (ret != 0 or out != self._refout) and not self._skipped \
638 638 and not self._debug:
639 639 f = open(self.errpath, 'wb')
640 640 for line in out:
641 641 f.write(line)
642 642 f.close()
643 643
644 644 # The result object handles diff calculation for us.
645 645 if self._result.addOutputMismatch(self, ret, out, self._refout):
646 646 # change was accepted, skip failing
647 647 return
648 648
649 649 if ret:
650 650 msg = 'output changed and ' + describe(ret)
651 651 else:
652 652 msg = 'output changed'
653 653
654 654 self.fail(msg)
655 655 elif ret:
656 656 self.fail(describe(ret))
657 657
658 658 def tearDown(self):
659 659 """Tasks to perform after run()."""
660 660 for entry in self._daemonpids:
661 661 killdaemons(entry)
662 662 self._daemonpids = []
663 663
664 664 if not self._keeptmpdir:
665 665 shutil.rmtree(self._testtmp, True)
666 666 shutil.rmtree(self._threadtmp, True)
667 667
668 668 if (self._ret != 0 or self._out != self._refout) and not self._skipped \
669 669 and not self._debug and self._out:
670 670 f = open(self.errpath, 'wb')
671 671 for line in self._out:
672 672 f.write(line)
673 673 f.close()
674 674
675 675 vlog("# Ret was:", self._ret, '(%s)' % self.name)
676 676
677 677 def _run(self, env):
678 678 # This should be implemented in child classes to run tests.
679 679 raise SkipTest('unknown test type')
680 680
681 681 def abort(self):
682 682 """Terminate execution of this test."""
683 683 self._aborted = True
684 684
685 685 def _getreplacements(self):
686 686 """Obtain a mapping of text replacements to apply to test output.
687 687
688 688 Test output needs to be normalized so it can be compared to expected
689 689 output. This function defines how some of that normalization will
690 690 occur.
691 691 """
692 692 r = [
693 693 (br':%d\b' % self._startport, b':$HGPORT'),
694 694 (br':%d\b' % (self._startport + 1), b':$HGPORT1'),
695 695 (br':%d\b' % (self._startport + 2), b':$HGPORT2'),
696 696 (br'(?m)^(saved backup bundle to .*\.hg)( \(glob\))?$',
697 697 br'\1 (glob)'),
698 698 ]
699 699
700 700 if os.name == 'nt':
701 701 r.append(
702 702 (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
703 703 c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
704 704 for c in self._testtmp), b'$TESTTMP'))
705 705 else:
706 706 r.append((re.escape(self._testtmp), b'$TESTTMP'))
707 707
708 708 return r
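    # As an illustration (with the default start port 20059), the first
    # replacement rewrites server output such as
    #     listening at http://localhost:20059/
    # into
    #     listening at http://localhost:$HGPORT/
    # so .t expectations can be written independently of the port range a
    # particular run was assigned.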
709 709
710 710 def _getenv(self):
711 711 """Obtain environment variables to use during test execution."""
712 712 env = os.environ.copy()
713 713 env['TESTTMP'] = self._testtmp
714 714 env['HOME'] = self._testtmp
715 715 env["HGPORT"] = str(self._startport)
716 716 env["HGPORT1"] = str(self._startport + 1)
717 717 env["HGPORT2"] = str(self._startport + 2)
718 718 env["HGRCPATH"] = os.path.join(self._threadtmp, b'.hgrc')
719 719 env["DAEMON_PIDS"] = os.path.join(self._threadtmp, b'daemon.pids')
720 720 env["HGEDITOR"] = ('"' + sys.executable + '"'
721 721 + ' -c "import sys; sys.exit(0)"')
722 722 env["HGMERGE"] = "internal:merge"
723 723 env["HGUSER"] = "test"
724 724 env["HGENCODING"] = "ascii"
725 725 env["HGENCODINGMODE"] = "strict"
726 726
727 727 # Reset some environment variables to well-known values so that
728 728 # the tests produce repeatable output.
729 729 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
730 730 env['TZ'] = 'GMT'
731 731 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
732 732 env['COLUMNS'] = '80'
733 733 env['TERM'] = 'xterm'
734 734
735 735 for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
736 736 'NO_PROXY').split():
737 737 if k in env:
738 738 del env[k]
739 739
740 740 # unset env related to hooks
741 741 for k in env.keys():
742 742 if k.startswith('HG_'):
743 743 del env[k]
744 744
745 745 return env
746 746
747 747 def _createhgrc(self, path):
748 748 """Create an hgrc file for this test."""
749 749 hgrc = open(path, 'wb')
750 750 hgrc.write(b'[ui]\n')
751 751 hgrc.write(b'slash = True\n')
752 752 hgrc.write(b'interactive = False\n')
753 753 hgrc.write(b'mergemarkers = detailed\n')
754 754 hgrc.write(b'promptecho = True\n')
755 755 hgrc.write(b'[defaults]\n')
756 756 hgrc.write(b'backout = -d "0 0"\n')
757 757 hgrc.write(b'commit = -d "0 0"\n')
758 758 hgrc.write(b'shelve = --date "0 0"\n')
759 759 hgrc.write(b'tag = -d "0 0"\n')
760 760 hgrc.write(b'[devel]\n')
761 761 hgrc.write(b'all-warnings = true\n')
762 762 hgrc.write(b'[largefiles]\n')
763 763 hgrc.write(b'usercache = %s\n' %
764 764 (os.path.join(self._testtmp, b'.cache/largefiles')))
765 765
766 766 for opt in self._extraconfigopts:
767 767 section, key = opt.split('.', 1)
768 768 assert '=' in key, ('extra config opt %s must '
769 769 'have an = for assignment' % opt)
770 770 hgrc.write(b'[%s]\n%s\n' % (section, key))
771 771 hgrc.close()
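    # Example of the --extra-config-opt handling just above (hypothetical
    # option): passing 'web.allow_push=*' ends up in the generated hgrc as
    #
    #     [web]
    #     allow_push=*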
772 772
773 773 def fail(self, msg):
774 774 # unittest differentiates between errored and failed.
775 775 # Failed is denoted by AssertionError (by default at least).
776 776 raise AssertionError(msg)
777 777
778 778 def _runcommand(self, cmd, env, normalizenewlines=False):
779 779 """Run command in a sub-process, capturing the output (stdout and
780 780 stderr).
781 781
782 782 Return a tuple (exitcode, output). output is None in debug mode.
783 783 """
784 784 if self._debug:
785 785 proc = subprocess.Popen(cmd, shell=True, cwd=self._testtmp,
786 786 env=env)
787 787 ret = proc.wait()
788 788 return (ret, None)
789 789
790 790 proc = Popen4(cmd, self._testtmp, self._timeout, env)
791 791 def cleanup():
792 792 terminate(proc)
793 793 ret = proc.wait()
794 794 if ret == 0:
795 795 ret = signal.SIGTERM << 8
796 796 killdaemons(env['DAEMON_PIDS'])
797 797 return ret
798 798
799 799 output = ''
800 800 proc.tochild.close()
801 801
802 802 try:
803 803 output = proc.fromchild.read()
804 804 except KeyboardInterrupt:
805 805 vlog('# Handling keyboard interrupt')
806 806 cleanup()
807 807 raise
808 808
809 809 ret = proc.wait()
810 810 if wifexited(ret):
811 811 ret = os.WEXITSTATUS(ret)
812 812
813 813 if proc.timeout:
814 814 ret = 'timeout'
815 815
816 816 if ret:
817 817 killdaemons(env['DAEMON_PIDS'])
818 818
819 819 for s, r in self._getreplacements():
820 820 output = re.sub(s, r, output)
821 821
822 822 if normalizenewlines:
823 823 output = output.replace('\r\n', '\n')
824 824
825 825 return ret, output.splitlines(True)
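    # The (exitcode, output) pairs produced above are what runTest()
    # consumes: a clean run yields (0, [b'...\n', ...]), a run killed by
    # the watchdog yields ('timeout', [...]), and --debug mode yields
    # (ret, None) because nothing was captured.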
826 826
827 827 class PythonTest(Test):
828 828 """A Python-based test."""
829 829
830 830 @property
831 831 def refpath(self):
832 832 return os.path.join(self._testdir, b'%s.out' % self.bname)
833 833
834 834 def _run(self, env):
835 835 py3kswitch = self._py3kwarnings and b' -3' or b''
836 836 cmd = b'%s%s "%s"' % (PYTHON, py3kswitch, self.path)
837 837 vlog("# Running", cmd)
838 838 normalizenewlines = os.name == 'nt'
839 839 result = self._runcommand(cmd, env,
840 840 normalizenewlines=normalizenewlines)
841 841 if self._aborted:
842 842 raise KeyboardInterrupt()
843 843
844 844 return result
845 845
846 846 # This script may want to drop globs from lines matching these patterns on
847 847 # Windows, but check-code.py wants a glob on these lines unconditionally. Don't
848 848 # warn if that is the case for anything matching these lines.
849 849 checkcodeglobpats = [
850 850 re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
851 851 re.compile(br'^moving \S+/.*[^)]$'),
852 852 re.compile(br'^pulling from \$TESTTMP/.*[^)]$')
853 853 ]
854 854
855 855 bchr = chr
856 856 if PYTHON3:
857 857 bchr = lambda x: bytes([x])
858 858
859 859 class TTest(Test):
860 860 """A "t test" is a test backed by a .t file."""
861 861
862 862 SKIPPED_PREFIX = 'skipped: '
863 863 FAILED_PREFIX = 'hghave check failed: '
864 864 NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search
865 865
866 866 ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
867 867 ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
868 868 ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
869 869
870 870 @property
871 871 def refpath(self):
872 872 return os.path.join(self._testdir, self.bname)
873 873
874 874 def _run(self, env):
875 875 f = open(self.path, 'rb')
876 876 lines = f.readlines()
877 877 f.close()
878 878
879 879 salt, script, after, expected = self._parsetest(lines)
880 880
881 881 # Write out the generated script.
882 882 fname = b'%s.sh' % self._testtmp
883 883 f = open(fname, 'wb')
884 884 for l in script:
885 885 f.write(l)
886 886 f.close()
887 887
888 888 cmd = b'%s "%s"' % (self._shell, fname)
889 889 vlog("# Running", cmd)
890 890
891 891 exitcode, output = self._runcommand(cmd, env)
892 892
893 893 if self._aborted:
894 894 raise KeyboardInterrupt()
895 895
896 896 # Do not merge output if skipped. Return hghave message instead.
897 897 # Similarly, with --debug, output is None.
898 898 if exitcode == self.SKIPPED_STATUS or output is None:
899 899 return exitcode, output
900 900
901 901 return self._processoutput(exitcode, output, salt, after, expected)
902 902
903 903 def _hghave(self, reqs):
904 904 # TODO do something smarter when all other uses of hghave are gone.
905 905 runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
906 906 tdir = runtestdir.replace(b'\\', b'/')
907 907 proc = Popen4(b'%s -c "%s/hghave %s"' %
908 908 (self._shell, tdir, b' '.join(reqs)),
909 909 self._testtmp, 0, self._getenv())
910 910 stdout, stderr = proc.communicate()
911 911 ret = proc.wait()
912 912 if wifexited(ret):
913 913 ret = os.WEXITSTATUS(ret)
914 914 if ret == 2:
915 915 print(stdout)
916 916 sys.exit(1)
917 917
918 918 return ret == 0
919 919
920 920 def _parsetest(self, lines):
921 921 # We generate a shell script which outputs unique markers to line
922 922 # up script results with our source. These markers include input
923 923 # line number and the last return code.
924 924 salt = b"SALT%d" % time.time()
925 925 def addsalt(line, inpython):
926 926 if inpython:
927 927 script.append(b'%s %d 0\n' % (salt, line))
928 928 else:
929 929 script.append(b'echo %s %d $?\n' % (salt, line))
930 930
931 931 script = []
932 932
933 933 # After we run the shell script, we re-unify the script output
934 934 # with non-active parts of the source, with synchronization by our
935 935 # SALT line number markers. The after table contains the non-active
936 936 # components, ordered by line number.
937 937 after = {}
938 938
939 939 # Expected shell script output.
940 940 expected = {}
941 941
942 942 pos = prepos = -1
943 943
944 944 # True or False when in a true or false conditional section
945 945 skipping = None
946 946
947 947 # We keep track of whether or not we're in a Python block so we
948 948 # can generate the surrounding doctest magic.
949 949 inpython = False
950 950
951 951 if self._debug:
952 952 script.append(b'set -x\n')
953 953 if os.getenv('MSYSTEM'):
954 954 script.append(b'alias pwd="pwd -W"\n')
955 955
956 956 for n, l in enumerate(lines):
957 957 if not l.endswith(b'\n'):
958 958 l += b'\n'
959 959 if l.startswith(b'#require'):
960 960 lsplit = l.split()
961 961 if len(lsplit) < 2 or lsplit[0] != b'#require':
962 962                     after.setdefault(pos, []).append(b' !!! invalid #require\n')
963 963 if not self._hghave(lsplit[1:]):
964 964 script = [b"exit 80\n"]
965 965 break
966 966 after.setdefault(pos, []).append(l)
967 967 elif l.startswith(b'#if'):
968 968 lsplit = l.split()
969 969 if len(lsplit) < 2 or lsplit[0] != b'#if':
970 970                     after.setdefault(pos, []).append(b' !!! invalid #if\n')
971 971 if skipping is not None:
972 972                     after.setdefault(pos, []).append(b' !!! nested #if\n')
973 973 skipping = not self._hghave(lsplit[1:])
974 974 after.setdefault(pos, []).append(l)
975 975 elif l.startswith(b'#else'):
976 976 if skipping is None:
977 977                     after.setdefault(pos, []).append(b' !!! missing #if\n')
978 978 skipping = not skipping
979 979 after.setdefault(pos, []).append(l)
980 980 elif l.startswith(b'#endif'):
981 981 if skipping is None:
982 982                     after.setdefault(pos, []).append(b' !!! missing #if\n')
983 983 skipping = None
984 984 after.setdefault(pos, []).append(l)
985 985 elif skipping:
986 986 after.setdefault(pos, []).append(l)
987 987 elif l.startswith(b' >>> '): # python inlines
988 988 after.setdefault(pos, []).append(l)
989 989 prepos = pos
990 990 pos = n
991 991 if not inpython:
992 992 # We've just entered a Python block. Add the header.
993 993 inpython = True
994 994 addsalt(prepos, False) # Make sure we report the exit code.
995 995 script.append(b'%s -m heredoctest <<EOF\n' % PYTHON)
996 996 addsalt(n, True)
997 997 script.append(l[2:])
998 998 elif l.startswith(b' ... '): # python inlines
999 999 after.setdefault(prepos, []).append(l)
1000 1000 script.append(l[2:])
1001 1001 elif l.startswith(b' $ '): # commands
1002 1002 if inpython:
1003 1003 script.append(b'EOF\n')
1004 1004 inpython = False
1005 1005 after.setdefault(pos, []).append(l)
1006 1006 prepos = pos
1007 1007 pos = n
1008 1008 addsalt(n, False)
1009 1009 cmd = l[4:].split()
1010 1010 if len(cmd) == 2 and cmd[0] == b'cd':
1011 1011 l = b' $ cd %s || exit 1\n' % cmd[1]
1012 1012 script.append(l[4:])
1013 1013 elif l.startswith(b' > '): # continuations
1014 1014 after.setdefault(prepos, []).append(l)
1015 1015 script.append(l[4:])
1016 1016 elif l.startswith(b' '): # results
1017 1017 # Queue up a list of expected results.
1018 1018 expected.setdefault(pos, []).append(l[2:])
1019 1019 else:
1020 1020 if inpython:
1021 1021 script.append(b'EOF\n')
1022 1022 inpython = False
1023 1023 # Non-command/result. Queue up for merged output.
1024 1024 after.setdefault(pos, []).append(l)
1025 1025
1026 1026 if inpython:
1027 1027 script.append(b'EOF\n')
1028 1028 if skipping is not None:
1029 1029             after.setdefault(pos, []).append(b' !!! missing #endif\n')
1030 1030 addsalt(n + 1, False)
1031 1031
1032 1032 return salt, script, after, expected
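    # Rough sketch of the transformation performed above (the salt value
    # is made up; real salts embed a timestamp):
    #
    #     .t source line          generated shell script
    #       $ echo hi       ->      echo SALT42 <lineno> $?
    #                                echo hi
    #
    # Each command is preceded by an echo of the salt, its source line
    # number and the previous command's exit status, so _processoutput()
    # can fold the captured output back onto the right source lines.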
1033 1033
1034 1034 def _processoutput(self, exitcode, output, salt, after, expected):
1035 1035 # Merge the script output back into a unified test.
1036 1036 warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
1037 1037 if exitcode != 0:
1038 1038 warnonly = 3
1039 1039
1040 1040 pos = -1
1041 1041 postout = []
1042 1042 for l in output:
1043 1043 lout, lcmd = l, None
1044 1044 if salt in l:
1045 1045 lout, lcmd = l.split(salt, 1)
1046 1046
1047 1047 while lout:
1048 1048 if not lout.endswith(b'\n'):
1049 1049 lout += b' (no-eol)\n'
1050 1050
1051 1051 # Find the expected output at the current position.
1052 1052 el = None
1053 1053 if expected.get(pos, None):
1054 1054 el = expected[pos].pop(0)
1055 1055
1056 1056 r = TTest.linematch(el, lout)
1057 1057 if isinstance(r, str):
1058 1058 if r == '+glob':
1059 1059 lout = el[:-1] + ' (glob)\n'
1060 1060 r = '' # Warn only this line.
1061 1061 elif r == '-glob':
1062 1062 lout = ''.join(el.rsplit(' (glob)', 1))
1063 1063 r = '' # Warn only this line.
1064 1064 elif r == "retry":
1065 1065 postout.append(b' ' + el)
1066 1066 continue
1067 1067 else:
1068 1068 log('\ninfo, unknown linematch result: %r\n' % r)
1069 1069 r = False
1070 1070 if r:
1071 1071 postout.append(b' ' + el)
1072 1072 else:
1073 1073 if self.NEEDESCAPE(lout):
1074 1074 lout = TTest._stringescape(b'%s (esc)\n' %
1075 1075 lout.rstrip(b'\n'))
1076 1076 postout.append(b' ' + lout) # Let diff deal with it.
1077 1077 if r != '': # If line failed.
1078 1078 warnonly = 3 # for sure not
1079 1079 elif warnonly == 1: # Is "not yet" and line is warn only.
1080 1080 warnonly = 2 # Yes do warn.
1081 1081 break
1082 1082
1083 1083 # clean up any optional leftovers
1084 1084 while expected.get(pos, None):
1085 1085 el = expected[pos].pop(0)
1086 1086 if not el.endswith(" (?)\n"):
1087 1087 expected[pos].insert(0, el)
1088 1088 break
1089 1089 postout.append(b' ' + el)
1090 1090
1091 1091 if lcmd:
1092 1092 # Add on last return code.
1093 1093 ret = int(lcmd.split()[1])
1094 1094 if ret != 0:
1095 1095 postout.append(b' [%d]\n' % ret)
1096 1096 if pos in after:
1097 1097 # Merge in non-active test bits.
1098 1098 postout += after.pop(pos)
1099 1099 pos = int(lcmd.split()[0])
1100 1100
1101 1101 if pos in after:
1102 1102 postout += after.pop(pos)
1103 1103
1104 1104 if warnonly == 2:
1105 1105 exitcode = False # Set exitcode to warned.
1106 1106
1107 1107 return exitcode, postout
1108 1108
1109 1109 @staticmethod
1110 1110 def rematch(el, l):
1111 1111 try:
1112 1112 # use \Z to ensure that the regex matches to the end of the string
1113 1113 if os.name == 'nt':
1114 1114 return re.match(el + br'\r?\n\Z', l)
1115 1115 return re.match(el + br'\n\Z', l)
1116 1116 except re.error:
1117 1117 # el is an invalid regex
1118 1118 return False
1119 1119
1120 1120 @staticmethod
1121 1121 def globmatch(el, l):
1122 1122 # The only supported special characters are * and ? plus / which also
1123 1123 # matches \ on windows. Escaping of these characters is supported.
1124 1124 if el + b'\n' == l:
1125 1125 if os.altsep:
1126 1126 # matching on "/" is not needed for this line
1127 1127 for pat in checkcodeglobpats:
1128 1128 if pat.match(el):
1129 1129 return True
1130 1130 return b'-glob'
1131 1131 return True
1132 1132 i, n = 0, len(el)
1133 1133 res = b''
1134 1134 while i < n:
1135 1135 c = el[i:i + 1]
1136 1136 i += 1
1137 1137 if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
1138 1138 res += el[i - 1:i + 1]
1139 1139 i += 1
1140 1140 elif c == b'*':
1141 1141 res += b'.*'
1142 1142 elif c == b'?':
1143 1143 res += b'.'
1144 1144 elif c == b'/' and os.altsep:
1145 1145 res += b'[/\\\\]'
1146 1146 else:
1147 1147 res += re.escape(c)
1148 1148 return TTest.rematch(res, l)
1149 1149
1150 1150 @staticmethod
1151 1151 def linematch(el, l):
1152 1152 retry = False
1153 1153 if el == l: # perfect match (fast)
1154 1154 return True
1155 1155 if el:
1156 1156 if el.endswith(" (?)\n"):
1157 1157 retry = "retry"
1158 1158 el = el[:-5] + "\n"
1159 1159 if el.endswith(b" (esc)\n"):
1160 1160 if PYTHON3:
1161 1161 el = el[:-7].decode('unicode_escape') + '\n'
1162 1162 el = el.encode('utf-8')
1163 1163 else:
1164 1164 el = el[:-7].decode('string-escape') + '\n'
1165 1165 if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
1166 1166 return True
1167 1167 if el.endswith(b" (re)\n"):
1168 1168 return TTest.rematch(el[:-6], l) or retry
1169 1169 if el.endswith(b" (glob)\n"):
1170 1170 # ignore '(glob)' added to l by 'replacements'
1171 1171 if l.endswith(b" (glob)\n"):
1172 1172 l = l[:-8] + b"\n"
1173 1173 return TTest.globmatch(el[:-8], l)
1174 1174 if os.altsep and l.replace(b'\\', b'/') == el:
1175 1175 return b'+glob'
1176 1176 return retry
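    # A couple of matches the helpers above accept (illustrative lines,
    # not taken from any real test):
    #
    #     expected .t line:  listening at port \d+ (re)
    #     actual output:     listening at port 1234         -> matches
    #
    #     expected .t line:  moving * to $TESTTMP/dst (glob)
    #     actual output:     moving a.txt to $TESTTMP/dst   -> matches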
1177 1177
1178 1178 @staticmethod
1179 1179 def parsehghaveoutput(lines):
1180 1180 '''Parse hghave log lines.
1181 1181
1182 1182 Return tuple of lists (missing, failed):
1183 1183 * the missing/unknown features
1184 1184 * the features for which existence check failed'''
1185 1185 missing = []
1186 1186 failed = []
1187 1187 for line in lines:
1188 1188 if line.startswith(TTest.SKIPPED_PREFIX):
1189 1189 line = line.splitlines()[0]
1190 1190 missing.append(line[len(TTest.SKIPPED_PREFIX):])
1191 1191 elif line.startswith(TTest.FAILED_PREFIX):
1192 1192 line = line.splitlines()[0]
1193 1193 failed.append(line[len(TTest.FAILED_PREFIX):])
1194 1194
1195 1195 return missing, failed
1196 1196
1197 1197 @staticmethod
1198 1198 def _escapef(m):
1199 1199 return TTest.ESCAPEMAP[m.group(0)]
1200 1200
1201 1201 @staticmethod
1202 1202 def _stringescape(s):
1203 1203 return TTest.ESCAPESUB(TTest._escapef, s)
1204 1204
1205 1205 iolock = threading.RLock()
1206 1206
1207 1207 class SkipTest(Exception):
1208 1208 """Raised to indicate that a test is to be skipped."""
1209 1209
1210 1210 class IgnoreTest(Exception):
1211 1211 """Raised to indicate that a test is to be ignored."""
1212 1212
1213 1213 class WarnTest(Exception):
1214 1214 """Raised to indicate that a test warned."""
1215 1215
1216 1216 class TestResult(unittest._TextTestResult):
1217 1217 """Holds results when executing via unittest."""
1218 1218 # Don't worry too much about accessing the non-public _TextTestResult.
1219 1219 # It is relatively common in Python testing tools.
1220 1220 def __init__(self, options, *args, **kwargs):
1221 1221 super(TestResult, self).__init__(*args, **kwargs)
1222 1222
1223 1223 self._options = options
1224 1224
1225 1225 # unittest.TestResult didn't have skipped until 2.7. We need to
1226 1226 # polyfill it.
1227 1227 self.skipped = []
1228 1228
1229 1229 # We have a custom "ignored" result that isn't present in any Python
1230 1230 # unittest implementation. It is very similar to skipped. It may make
1231 1231 # sense to map it into skip some day.
1232 1232 self.ignored = []
1233 1233
1234 1234 # We have a custom "warned" result that isn't present in any Python
1235 1235 # unittest implementation. It is very similar to failed. It may make
1236 1236 # sense to map it into fail some day.
1237 1237 self.warned = []
1238 1238
1239 1239 self.times = []
1240 1240 self._firststarttime = None
1241 1241 # Data stored for the benefit of generating xunit reports.
1242 1242 self.successes = []
1243 1243 self.faildata = {}
1244 1244
1245 1245 def addFailure(self, test, reason):
1246 1246 self.failures.append((test, reason))
1247 1247
1248 1248 if self._options.first:
1249 1249 self.stop()
1250 1250 else:
1251 1251 with iolock:
1252 1252 if not self._options.nodiff:
1253 1253 self.stream.write('\nERROR: %s output changed\n' % test)
1254 1254
1255 1255 self.stream.write('!')
1256 1256 self.stream.flush()
1257 1257
1258 1258 def addSuccess(self, test):
1259 1259 with iolock:
1260 1260 super(TestResult, self).addSuccess(test)
1261 1261 self.successes.append(test)
1262 1262
1263 1263 def addError(self, test, err):
1264 1264 super(TestResult, self).addError(test, err)
1265 1265 if self._options.first:
1266 1266 self.stop()
1267 1267
1268 1268 # Polyfill.
1269 1269 def addSkip(self, test, reason):
1270 1270 self.skipped.append((test, reason))
1271 1271 with iolock:
1272 1272 if self.showAll:
1273 1273 self.stream.writeln('skipped %s' % reason)
1274 1274 else:
1275 1275 self.stream.write('s')
1276 1276 self.stream.flush()
1277 1277
1278 1278 def addIgnore(self, test, reason):
1279 1279 self.ignored.append((test, reason))
1280 1280 with iolock:
1281 1281 if self.showAll:
1282 1282 self.stream.writeln('ignored %s' % reason)
1283 1283 else:
1284 1284 if reason not in ('not retesting', "doesn't match keyword"):
1285 1285 self.stream.write('i')
1286 1286 else:
1287 1287 self.testsRun += 1
1288 1288 self.stream.flush()
1289 1289
1290 1290 def addWarn(self, test, reason):
1291 1291 self.warned.append((test, reason))
1292 1292
1293 1293 if self._options.first:
1294 1294 self.stop()
1295 1295
1296 1296 with iolock:
1297 1297 if self.showAll:
1298 1298 self.stream.writeln('warned %s' % reason)
1299 1299 else:
1300 1300 self.stream.write('~')
1301 1301 self.stream.flush()
1302 1302
1303 1303 def addOutputMismatch(self, test, ret, got, expected):
1304 1304 """Record a mismatch in test output for a particular test."""
1305 1305 if self.shouldStop:
1306 1306 # don't print, some other test case already failed and
1307 1307 # printed, we're just stale and probably failed due to our
1308 1308 # temp dir getting cleaned up.
1309 1309 return
1310 1310
1311 1311 accepted = False
1312 1312 failed = False
1313 1313 lines = []
1314 1314
1315 1315 with iolock:
1316 1316 if self._options.nodiff:
1317 1317 pass
1318 1318 elif self._options.view:
1319 1319 v = self._options.view
1320 1320 if PYTHON3:
1321 1321 v = _bytespath(v)
1322 1322 os.system(b"%s %s %s" %
1323 1323 (v, test.refpath, test.errpath))
1324 1324 else:
1325 1325 servefail, lines = getdiff(expected, got,
1326 1326 test.refpath, test.errpath)
1327 1327 if servefail:
1328 1328 self.addFailure(
1329 1329 test,
1330 1330 'server failed to start (HGPORT=%s)' % test._startport)
1331 1331 else:
1332 1332 self.stream.write('\n')
1333 1333 for line in lines:
1334 1334 if PYTHON3:
1335 1335 self.stream.flush()
1336 1336 self.stream.buffer.write(line)
1337 1337 self.stream.buffer.flush()
1338 1338 else:
1339 1339 self.stream.write(line)
1340 1340 self.stream.flush()
1341 1341
1342 1342 # handle interactive prompt without releasing iolock
1343 1343 if self._options.interactive:
1344 1344 self.stream.write('Accept this change? [n] ')
1345 1345 answer = sys.stdin.readline().strip()
1346 1346 if answer.lower() in ('y', 'yes'):
1347 1347 if test.name.endswith('.t'):
1348 1348 rename(test.errpath, test.path)
1349 1349 else:
1350 1350 rename(test.errpath, '%s.out' % test.path)
1351 1351 accepted = True
1352 1352 if not accepted and not failed:
1353 1353 self.faildata[test.name] = b''.join(lines)
1354 1354
1355 1355 return accepted
1356 1356
1357 1357 def startTest(self, test):
1358 1358 super(TestResult, self).startTest(test)
1359 1359
1360 1360         # os.times() returns the user and system CPU time consumed by
1361 1361         # this process and its children, along with the elapsed real
1362 1362         # time. The child and elapsed fields are only meaningful on
1363 1363         # Unix-like systems, not on Windows.
1364 1364 test.started = os.times()
1365 1365 if self._firststarttime is None: # thread racy but irrelevant
1366 1366 self._firststarttime = test.started[4]
1367 1367
1368 1368 def stopTest(self, test, interrupted=False):
1369 1369 super(TestResult, self).stopTest(test)
1370 1370
1371 1371 test.stopped = os.times()
1372 1372
1373 1373 starttime = test.started
1374 1374 endtime = test.stopped
1375 1375 origin = self._firststarttime
1376 1376 self.times.append((test.name,
1377 1377 endtime[2] - starttime[2], # user space CPU time
1378 1378 endtime[3] - starttime[3], # sys space CPU time
1379 1379 endtime[4] - starttime[4], # real time
1380 1380 starttime[4] - origin, # start date in run context
1381 1381 endtime[4] - origin, # end date in run context
1382 1382 ))
1383 1383
1384 1384 if interrupted:
1385 1385 with iolock:
1386 1386 self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
1387 1387 test.name, self.times[-1][3]))
1388 1388
1389 1389 class TestSuite(unittest.TestSuite):
1390 1390 """Custom unittest TestSuite that knows how to execute Mercurial tests."""
1391 1391
1392 1392 def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
1393 1393 retest=False, keywords=None, loop=False, runs_per_test=1,
1394 1394 loadtest=None,
1395 1395 *args, **kwargs):
1396 1396 """Create a new instance that can run tests with a configuration.
1397 1397
1398 1398 testdir specifies the directory where tests are executed from. This
1399 1399 is typically the ``tests`` directory from Mercurial's source
1400 1400 repository.
1401 1401
1402 1402 jobs specifies the number of jobs to run concurrently. Each test
1403 1403 executes on its own thread. Tests actually spawn new processes, so
1404 1404 state mutation should not be an issue.
1405 1405
1406 1406 whitelist and blacklist denote tests that have been whitelisted and
1407 1407 blacklisted, respectively. These arguments don't belong in TestSuite.
1408 1408 Instead, whitelist and blacklist should be handled by the thing that
1409 1409 populates the TestSuite with tests. They are present to preserve
1410 1410 backwards compatible behavior which reports skipped tests as part
1411 1411 of the results.
1412 1412
1413 1413 retest denotes whether to retest failed tests. This arguably belongs
1414 1414 outside of TestSuite.
1415 1415
1416 1416 keywords denotes key words that will be used to filter which tests
1417 1417 to execute. This arguably belongs outside of TestSuite.
1418 1418
1419 1419 loop denotes whether to loop over tests forever.
1420 1420 """
1421 1421 super(TestSuite, self).__init__(*args, **kwargs)
1422 1422
1423 1423 self._jobs = jobs
1424 1424 self._whitelist = whitelist
1425 1425 self._blacklist = blacklist
1426 1426 self._retest = retest
1427 1427 self._keywords = keywords
1428 1428 self._loop = loop
1429 1429 self._runs_per_test = runs_per_test
1430 1430 self._loadtest = loadtest
1431 1431
1432 1432 def run(self, result):
1433 1433 # We have a number of filters that need to be applied. We do this
1434 1434 # here instead of inside Test because it makes the running logic for
1435 1435 # Test simpler.
1436 1436 tests = []
1437 1437 num_tests = [0]
1438 1438 for test in self._tests:
1439 1439 def get():
1440 1440 num_tests[0] += 1
1441 1441 if getattr(test, 'should_reload', False):
1442 1442 return self._loadtest(test.bname, num_tests[0])
1443 1443 return test
1444 1444 if not os.path.exists(test.path):
1445 1445 result.addSkip(test, "Doesn't exist")
1446 1446 continue
1447 1447
1448 1448 if not (self._whitelist and test.name in self._whitelist):
1449 1449 if self._blacklist and test.bname in self._blacklist:
1450 1450 result.addSkip(test, 'blacklisted')
1451 1451 continue
1452 1452
1453 1453 if self._retest and not os.path.exists(test.errpath):
1454 1454 result.addIgnore(test, 'not retesting')
1455 1455 continue
1456 1456
1457 1457 if self._keywords:
1458 1458 f = open(test.path, 'rb')
1459 1459 t = f.read().lower() + test.bname.lower()
1460 1460 f.close()
1461 1461 ignored = False
1462 1462 for k in self._keywords.lower().split():
1463 1463 if k not in t:
1464 1464 result.addIgnore(test, "doesn't match keyword")
1465 1465 ignored = True
1466 1466 break
1467 1467
1468 1468 if ignored:
1469 1469 continue
1470 1470 for _ in xrange(self._runs_per_test):
1471 1471 tests.append(get())
1472 1472
1473 1473 runtests = list(tests)
1474 1474 done = queue.Queue()
1475 1475 running = 0
1476 1476
1477 1477 def job(test, result):
1478 1478 try:
1479 1479 test(result)
1480 1480 done.put(None)
1481 1481 except KeyboardInterrupt:
1482 1482 pass
1483 1483 except: # re-raises
1484 1484 done.put(('!', test, 'run-test raised an error, see traceback'))
1485 1485 raise
1486 1486
1487 1487 stoppedearly = False
1488 1488
1489 1489 try:
1490 1490 while tests or running:
1491 1491 if not done.empty() or running == self._jobs or not tests:
1492 1492 try:
1493 1493 done.get(True, 1)
1494 1494 running -= 1
1495 1495 if result and result.shouldStop:
1496 1496 stoppedearly = True
1497 1497 break
1498 1498 except queue.Empty:
1499 1499 continue
1500 1500 if tests and not running == self._jobs:
1501 1501 test = tests.pop(0)
1502 1502 if self._loop:
1503 1503 if getattr(test, 'should_reload', False):
1504 1504 num_tests[0] += 1
1505 1505 tests.append(
1506 1506 self._loadtest(test.name, num_tests[0]))
1507 1507 else:
1508 1508 tests.append(test)
1509 1509 t = threading.Thread(target=job, name=test.name,
1510 1510 args=(test, result))
1511 1511 t.start()
1512 1512 running += 1
1513 1513
1514 1514 # If we stop early we still need to wait on started tests to
1515 1515 # finish. Otherwise, there is a race between the test completing
1516 1516 # and the test's cleanup code running. This could result in the
1517 1517             # test reporting incorrect results.
1518 1518 if stoppedearly:
1519 1519 while running:
1520 1520 try:
1521 1521 done.get(True, 1)
1522 1522 running -= 1
1523 1523 except queue.Empty:
1524 1524 continue
1525 1525 except KeyboardInterrupt:
1526 1526 for test in runtests:
1527 1527 test.abort()
1528 1528
1529 1529 return result
1530 1530
1531 1531 class TextTestRunner(unittest.TextTestRunner):
1532 1532 """Custom unittest test runner that uses appropriate settings."""
1533 1533
1534 1534 def __init__(self, runner, *args, **kwargs):
1535 1535 super(TextTestRunner, self).__init__(*args, **kwargs)
1536 1536
1537 1537 self._runner = runner
1538 1538
1539 1539 def run(self, test):
1540 1540 result = TestResult(self._runner.options, self.stream,
1541 1541 self.descriptions, self.verbosity)
1542 1542
1543 1543 test(result)
1544 1544
1545 1545 failed = len(result.failures)
1546 1546 warned = len(result.warned)
1547 1547 skipped = len(result.skipped)
1548 1548 ignored = len(result.ignored)
1549 1549
1550 1550 with iolock:
1551 1551 self.stream.writeln('')
1552 1552
1553 1553 if not self._runner.options.noskips:
1554 1554 for test, msg in result.skipped:
1555 1555 self.stream.writeln('Skipped %s: %s' % (test.name, msg))
1556 1556 for test, msg in result.warned:
1557 1557 self.stream.writeln('Warned %s: %s' % (test.name, msg))
1558 1558 for test, msg in result.failures:
1559 1559 self.stream.writeln('Failed %s: %s' % (test.name, msg))
1560 1560 for test, msg in result.errors:
1561 1561 self.stream.writeln('Errored %s: %s' % (test.name, msg))
1562 1562
1563 1563 if self._runner.options.xunit:
1564 1564 xuf = open(self._runner.options.xunit, 'wb')
1565 1565 try:
1566 1566 timesd = dict((t[0], t[3]) for t in result.times)
1567 1567 doc = minidom.Document()
1568 1568 s = doc.createElement('testsuite')
1569 1569 s.setAttribute('name', 'run-tests')
1570 1570 s.setAttribute('tests', str(result.testsRun))
1571 1571 s.setAttribute('errors', "0") # TODO
1572 1572 s.setAttribute('failures', str(failed))
1573 1573 s.setAttribute('skipped', str(skipped + ignored))
1574 1574 doc.appendChild(s)
1575 1575 for tc in result.successes:
1576 1576 t = doc.createElement('testcase')
1577 1577 t.setAttribute('name', tc.name)
1578 1578 t.setAttribute('time', '%.3f' % timesd[tc.name])
1579 1579 s.appendChild(t)
1580 1580 for tc, err in sorted(result.faildata.items()):
1581 1581 t = doc.createElement('testcase')
1582 1582 t.setAttribute('name', tc)
1583 1583 t.setAttribute('time', '%.3f' % timesd[tc])
1584 1584 # createCDATASection expects a unicode or it will
1585 1585 # convert using default conversion rules, which will
1586 1586 # fail if string isn't ASCII.
1587 1587 err = cdatasafe(err).decode('utf-8', 'replace')
1588 1588 cd = doc.createCDATASection(err)
1589 1589 t.appendChild(cd)
1590 1590 s.appendChild(t)
1591 1591 xuf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
1592 1592 finally:
1593 1593 xuf.close()
1594 1594
1595 1595 if self._runner.options.json:
1596 1596 if json is None:
1597 1597 raise ImportError("json module not installed")
1598 1598 jsonpath = os.path.join(self._runner._testdir, 'report.json')
1599 1599 fp = open(jsonpath, 'w')
1600 1600 try:
1601 1601 timesd = {}
1602 1602 for tdata in result.times:
1603 1603 test = tdata[0]
1604 1604 timesd[test] = tdata[1:]
1605 1605
1606 1606 outcome = {}
1607 1607 groups = [('success', ((tc, None)
1608 1608 for tc in result.successes)),
1609 1609 ('failure', result.failures),
1610 1610 ('skip', result.skipped)]
1611 1611 for res, testcases in groups:
1612 1612 for tc, __ in testcases:
1613 1613 tres = {'result': res,
1614 1614 'time': ('%0.3f' % timesd[tc.name][2]),
1615 1615 'cuser': ('%0.3f' % timesd[tc.name][0]),
1616 1616 'csys': ('%0.3f' % timesd[tc.name][1]),
1617 1617 'start': ('%0.3f' % timesd[tc.name][3]),
1618 1618 'end': ('%0.3f' % timesd[tc.name][4])}
1619 1619 outcome[tc.name] = tres
1620 1620 jsonout = json.dumps(outcome, sort_keys=True, indent=4)
1621 1621 fp.writelines(("testreport =", jsonout))
1622 1622 finally:
1623 1623 fp.close()
1624 1624
1625 1625 self._runner._checkhglib('Tested')
1626 1626
1627 1627 self.stream.writeln(
1628 1628 '# Ran %d tests, %d skipped, %d warned, %d failed.'
1629 1629 % (result.testsRun,
1630 1630 skipped + ignored, warned, failed))
1631 1631 if failed:
1632 1632 self.stream.writeln('python hash seed: %s' %
1633 1633 os.environ['PYTHONHASHSEED'])
1634 1634 if self._runner.options.time:
1635 1635 self.printtimes(result.times)
1636 1636
1637 1637 return result
1638 1638
1639 1639 def printtimes(self, times):
1640 1640 # iolock held by run
1641 1641 self.stream.writeln('# Producing time report')
1642 1642 times.sort(key=lambda t: (t[3]))
1643 1643 cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
1644 1644 self.stream.writeln('%-7s %-7s %-7s %-7s %-7s %s' %
1645 1645 ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
1646 1646 for tdata in times:
1647 1647 test = tdata[0]
1648 1648 cuser, csys, real, start, end = tdata[1:6]
1649 1649 self.stream.writeln(cols % (start, end, cuser, csys, real, test))
1650 1650
1651 1651 class TestRunner(object):
1652 1652 """Holds context for executing tests.
1653 1653
1654 1654 Tests rely on a lot of state. This object holds it for them.
1655 1655 """
1656 1656
1657 1657 # Programs required to run tests.
1658 1658 REQUIREDTOOLS = [
1659 1659 os.path.basename(_bytespath(sys.executable)),
1660 1660 b'diff',
1661 1661 b'grep',
1662 1662 b'unzip',
1663 1663 b'gunzip',
1664 1664 b'bunzip2',
1665 1665 b'sed',
1666 1666 ]
1667 1667
1668 1668 # Maps file extensions to test class.
1669 1669 TESTTYPES = [
1670 1670 (b'.py', PythonTest),
1671 1671 (b'.t', TTest),
1672 1672 ]
1673 1673
1674 1674 def __init__(self):
1675 1675 self.options = None
1676 1676 self._hgroot = None
1677 1677 self._testdir = None
1678 1678 self._hgtmp = None
1679 1679 self._installdir = None
1680 1680 self._bindir = None
1681 1681 self._tmpbinddir = None
1682 1682 self._pythondir = None
1683 1683 self._coveragefile = None
1684 1684 self._createdfiles = []
1685 1685 self._hgpath = None
1686 1686 self._portoffset = 0
1687 1687 self._ports = {}
1688 1688
1689 1689 def run(self, args, parser=None):
1690 1690 """Run the test suite."""
1691 1691 oldmask = os.umask(0o22)
1692 1692 try:
1693 1693 parser = parser or getparser()
1694 1694 options, args = parseargs(args, parser)
1695 1695 # positional arguments are paths to test files to run, so
1696 1696 # we make sure they're all bytestrings
1697 1697 args = [_bytespath(a) for a in args]
1698 1698 self.options = options
1699 1699
1700 1700 self._checktools()
1701 1701 tests = self.findtests(args)
1702 1702 if options.profile_runner:
1703 1703 import statprof
1704 1704 statprof.start()
1705 1705 result = self._run(tests)
1706 1706 if options.profile_runner:
1707 1707 statprof.stop()
1708 1708 statprof.display()
1709 1709 return result
1710 1710
1711 1711 finally:
1712 1712 os.umask(oldmask)
1713 1713
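
As a hedged illustration of how this entry point is meant to be driven (the test name below is hypothetical), a wrapper script could construct a runner and hand it an argv-style list, mirroring the `__main__` block at the bottom of this file:

    import sys

    runner = TestRunner()
    # positional arguments are paths to test files, as in ./run-tests.py test-example.t
    sys.exit(runner.run(['test-example.t']))
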
1714 1714 def _run(self, tests):
1715 1715 if self.options.random:
1716 1716 random.shuffle(tests)
1717 1717 else:
1718 1718 # keywords for slow tests
1719 1719 slow = {b'svn': 10,
1720 1720 b'gendoc': 10,
1721 1721 b'check-code-hg': 100,
1722 1722 }
1723 1723 def sortkey(f):
1724 1724 # run largest tests first, as they tend to take the longest
1725 1725 try:
1726 1726 val = -os.stat(f).st_size
1727 1727 except OSError as e:
1728 1728 if e.errno != errno.ENOENT:
1729 1729 raise
1730 1730 return -1e9 # file does not exist, report it early
1731 1731 for kw, mul in slow.items():
1732 1732 if kw in f:
1733 1733 val *= mul
1734 1734 return val
1735 1735 tests.sort(key=sortkey)
1736 1736
1737 1737 self._testdir = osenvironb[b'TESTDIR'] = getattr(
1738 1738 os, 'getcwdb', os.getcwd)()
1739 1739
1740 1740 if 'PYTHONHASHSEED' not in os.environ:
1741 1741 # use a random python hash seed all the time
1742 1742 # we do the randomness ourself to know what seed is used
1743 1743 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
1744 1744
1745 1745 if self.options.tmpdir:
1746 1746 self.options.keep_tmpdir = True
1747 1747 tmpdir = _bytespath(self.options.tmpdir)
1748 1748 if os.path.exists(tmpdir):
1749 1749 # Meaning of tmpdir has changed since 1.3: we used to create
1750 1750 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
1751 1751 # tmpdir already exists.
1752 1752 print("error: temp dir %r already exists" % tmpdir)
1753 1753 return 1
1754 1754
1755 1755 # Automatically removing tmpdir sounds convenient, but could
1756 1756 # really annoy anyone in the habit of using "--tmpdir=/tmp"
1757 1757 # or "--tmpdir=$HOME".
1758 1758 #vlog("# Removing temp dir", tmpdir)
1759 1759 #shutil.rmtree(tmpdir)
1760 1760 os.makedirs(tmpdir)
1761 1761 else:
1762 1762 d = None
1763 1763 if os.name == 'nt':
1764 1764 # without this, we get the default temp dir location, but
1765 1765 # in all lowercase, which causes trouble with paths (issue3490)
1766 1766 d = osenvironb.get(b'TMP', None)
1767 1767 tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)
1768 1768
1769 1769 self._hgtmp = osenvironb[b'HGTMP'] = (
1770 1770 os.path.realpath(tmpdir))
1771 1771
1772 1772 if self.options.with_hg:
1773 1773 self._installdir = None
1774 1774 whg = self.options.with_hg
1775 1775 # If --with-hg is not specified, we have bytes already,
1776 1776 # but if it was specified in python3 we get a str, so we
1777 1777 # have to encode it back into a bytes.
1778 1778 if PYTHON3:
1779 1779 if not isinstance(whg, bytes):
1780 1780 whg = _bytespath(whg)
1781 1781 self._bindir = os.path.dirname(os.path.realpath(whg))
1782 1782 assert isinstance(self._bindir, bytes)
1783 1783 self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
1784 1784 os.makedirs(self._tmpbindir)
1785 1785
1786 1786 # This looks redundant with how Python initializes sys.path from
1787 1787 # the location of the script being executed. Needed because the
1788 1788 # "hg" specified by --with-hg is not the only Python script
1789 1789 # executed in the test suite that needs to import 'mercurial'
1790 1790 # ... which means it's not really redundant at all.
1791 1791 self._pythondir = self._bindir
1792 1792 else:
1793 1793 self._installdir = os.path.join(self._hgtmp, b"install")
1794 1794 self._bindir = osenvironb[b"BINDIR"] = \
1795 1795 os.path.join(self._installdir, b"bin")
1796 1796 self._tmpbindir = self._bindir
1797 1797 self._pythondir = os.path.join(self._installdir, b"lib", b"python")
1798 1798
1799 1799 osenvironb[b"BINDIR"] = self._bindir
1800 1800 osenvironb[b"PYTHON"] = PYTHON
1801 1801
1802 1802 fileb = _bytespath(__file__)
1803 1803 runtestdir = os.path.abspath(os.path.dirname(fileb))
1804 osenvironb[b'RUNTESTDIR'] = runtestdir
1804 1805 if PYTHON3:
1805 1806 sepb = _bytespath(os.pathsep)
1806 1807 else:
1807 1808 sepb = os.pathsep
1808 1809 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
1809 1810 if os.path.islink(__file__):
1810 1811 # test helper will likely be at the end of the symlink
1811 1812 realfile = os.path.realpath(fileb)
1812 1813 realdir = os.path.abspath(os.path.dirname(realfile))
1813 1814 path.insert(2, realdir)
1814 1815 if self._tmpbindir != self._bindir:
1815 1816 path = [self._tmpbindir] + path
1816 1817 osenvironb[b"PATH"] = sepb.join(path)
1817 1818
1818 1819 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
1819 1820 # can run .../tests/run-tests.py test-foo where test-foo
1820 1821 # adds an extension to HGRC. Also include the run-tests.py directory to
1821 1822 # import modules like heredoctest.
1822 1823 pypath = [self._pythondir, self._testdir, runtestdir]
1823 1824 # We have to augment PYTHONPATH, rather than simply replacing
1824 1825 # it, in case external libraries are only available via current
1825 1826 # PYTHONPATH. (In particular, the Subversion bindings on OS X
1826 1827 # are in /opt/subversion.)
1827 1828 oldpypath = osenvironb.get(IMPL_PATH)
1828 1829 if oldpypath:
1829 1830 pypath.append(oldpypath)
1830 1831 osenvironb[IMPL_PATH] = sepb.join(pypath)
1831 1832
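
To make the comment above concrete, here is a hedged sketch (module name and contents hypothetical) of the kind of out-of-tree helper that becomes importable only because TESTDIR and the run-tests.py directory are added to PYTHONPATH for test child processes:

    # myext.py, kept next to test-foo.t in an out-of-tree $TESTDIR and
    # enabled from the test via an HGRC [extensions] entry (names assumed)
    def uisetup(ui):
        # runs when hg loads the extension; the import succeeds in tests
        # because $TESTDIR is on PYTHONPATH as set up above
        ui.note('myext loaded\n')
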
1832 1833 if self.options.pure:
1833 1834 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
1834 1835
1835 1836 self._coveragefile = os.path.join(self._testdir, b'.coverage')
1836 1837
1837 1838 vlog("# Using TESTDIR", self._testdir)
1839 vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
1838 1840 vlog("# Using HGTMP", self._hgtmp)
1839 1841 vlog("# Using PATH", os.environ["PATH"])
1840 1842 vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
1841 1843
1842 1844 try:
1843 1845 return self._runtests(tests) or 0
1844 1846 finally:
1845 1847 time.sleep(.1)
1846 1848 self._cleanup()
1847 1849
1848 1850 def findtests(self, args):
1849 1851 """Finds possible test files from arguments.
1850 1852
1851 1853 If you wish to inject custom tests into the test harness, this would
1852 1854 be a good function to monkeypatch or override in a derived class.
1853 1855 """
1854 1856 if not args:
1855 1857 if self.options.changed:
1856 1858 proc = Popen4('hg st --rev "%s" -man0 .' %
1857 1859 self.options.changed, None, 0)
1858 1860 stdout, stderr = proc.communicate()
1859 1861 args = stdout.strip(b'\0').split(b'\0')
1860 1862 else:
1861 1863 args = os.listdir(b'.')
1862 1864
1863 1865 return [t for t in args
1864 1866 if os.path.basename(t).startswith(b'test-')
1865 1867 and (t.endswith(b'.py') or t.endswith(b'.t'))]
1866 1868
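
Since the docstring explicitly invites overriding findtests, a minimal sketch of such a derived class (class and test names hypothetical) might look like:

    class CustomRunner(TestRunner):
        """Hypothetical runner that injects extra, externally discovered tests."""
        def findtests(self, args):
            tests = super(CustomRunner, self).findtests(args)
            # append a test that lives outside the default discovery rules
            tests.append(b'test-vendored-extra.t')
            return tests
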
1867 1869 def _runtests(self, tests):
1868 1870 try:
1869 1871 if self._installdir:
1870 1872 self._installhg()
1871 1873 self._checkhglib("Testing")
1872 1874 else:
1873 1875 self._usecorrectpython()
1874 1876
1875 1877 if self.options.restart:
1876 1878 orig = list(tests)
1877 1879 while tests:
1878 1880 if os.path.exists(tests[0] + ".err"):
1879 1881 break
1880 1882 tests.pop(0)
1881 1883 if not tests:
1882 1884 print("running all tests")
1883 1885 tests = orig
1884 1886
1885 1887 tests = [self._gettest(t, i) for i, t in enumerate(tests)]
1886 1888
1887 1889 failed = False
1888 1890 warned = False
1889 1891 kws = self.options.keywords
1890 1892 if kws is not None and PYTHON3:
1891 1893 kws = kws.encode('utf-8')
1892 1894
1893 1895 suite = TestSuite(self._testdir,
1894 1896 jobs=self.options.jobs,
1895 1897 whitelist=self.options.whitelisted,
1896 1898 blacklist=self.options.blacklist,
1897 1899 retest=self.options.retest,
1898 1900 keywords=kws,
1899 1901 loop=self.options.loop,
1900 1902 runs_per_test=self.options.runs_per_test,
1901 1903 tests=tests, loadtest=self._gettest)
1902 1904 verbosity = 1
1903 1905 if self.options.verbose:
1904 1906 verbosity = 2
1905 1907 runner = TextTestRunner(self, verbosity=verbosity)
1906 1908 result = runner.run(suite)
1907 1909
1908 1910 if result.failures:
1909 1911 failed = True
1910 1912 if result.warned:
1911 1913 warned = True
1912 1914
1913 1915 if self.options.anycoverage:
1914 1916 self._outputcoverage()
1915 1917 except KeyboardInterrupt:
1916 1918 failed = True
1917 1919 print("\ninterrupted!")
1918 1920
1919 1921 if failed:
1920 1922 return 1
1921 1923 if warned:
1922 1924 return 80
1923 1925
1924 1926 def _getport(self, count):
1925 1927 port = self._ports.get(count) # do we have a cached entry?
1926 1928 if port is None:
1927 1929 port = self.options.port + self._portoffset
1928 1930 portneeded = 3
1929 1931 # after 100 tries we just give up and let the test report the failure
1930 1932 for tries in xrange(100):
1931 1933 allfree = True
1932 1934 for idx in xrange(portneeded):
1933 1935 if not checkportisavailable(port + idx):
1934 1936 allfree = False
1935 1937 break
1936 1938 self._portoffset += portneeded
1937 1939 if allfree:
1938 1940 break
1939 1941 self._ports[count] = port
1940 1942 return port
1941 1943
1942 1944 def _gettest(self, test, count):
1943 1945 """Obtain a Test by looking at its filename.
1944 1946
1945 1947 Returns a Test instance. The Test may not be runnable if it doesn't
1946 1948 map to a known type.
1947 1949 """
1948 1950 lctest = test.lower()
1949 1951 testcls = Test
1950 1952
1951 1953 for ext, cls in self.TESTTYPES:
1952 1954 if lctest.endswith(ext):
1953 1955 testcls = cls
1954 1956 break
1955 1957
1956 1958 refpath = os.path.join(self._testdir, test)
1957 1959 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
1958 1960
1959 1961 t = testcls(refpath, tmpdir,
1960 1962 keeptmpdir=self.options.keep_tmpdir,
1961 1963 debug=self.options.debug,
1962 1964 timeout=self.options.timeout,
1963 1965 startport=self._getport(count),
1964 1966 extraconfigopts=self.options.extra_config_opt,
1965 1967 py3kwarnings=self.options.py3k_warnings,
1966 1968 shell=self.options.shell)
1967 1969 t.should_reload = True
1968 1970 return t
1969 1971
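
Similarly, because _gettest resolves the test class through TESTTYPES, a derived runner could (as a hedged sketch, names hypothetical) map an additional extension to its own Test subclass:

    class ShTest(Test):
        """Hypothetical Test subclass for plain shell-script tests."""

    class ShAwareRunner(TestRunner):
        # consulted by _gettest when mapping a filename to a test class;
        # note that findtests above would also need to accept the new suffix
        TESTTYPES = TestRunner.TESTTYPES + [(b'.sh', ShTest)]
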
1970 1972 def _cleanup(self):
1971 1973 """Clean up state from this test invocation."""
1972 1974
1973 1975 if self.options.keep_tmpdir:
1974 1976 return
1975 1977
1976 1978 vlog("# Cleaning up HGTMP", self._hgtmp)
1977 1979 shutil.rmtree(self._hgtmp, True)
1978 1980 for f in self._createdfiles:
1979 1981 try:
1980 1982 os.remove(f)
1981 1983 except OSError:
1982 1984 pass
1983 1985
1984 1986 def _usecorrectpython(self):
1985 1987 """Configure the environment to use the appropriate Python in tests."""
1986 1988 # Tests must use the same interpreter as us or bad things will happen.
1987 1989 pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
1988 1990 if getattr(os, 'symlink', None):
1989 1991 vlog("# Making python executable in test path a symlink to '%s'" %
1990 1992 sys.executable)
1991 1993 mypython = os.path.join(self._tmpbindir, pyexename)
1992 1994 try:
1993 1995 if os.readlink(mypython) == sys.executable:
1994 1996 return
1995 1997 os.unlink(mypython)
1996 1998 except OSError as err:
1997 1999 if err.errno != errno.ENOENT:
1998 2000 raise
1999 2001 if self._findprogram(pyexename) != sys.executable:
2000 2002 try:
2001 2003 os.symlink(sys.executable, mypython)
2002 2004 self._createdfiles.append(mypython)
2003 2005 except OSError as err:
2004 2006 # child processes may race, which is harmless
2005 2007 if err.errno != errno.EEXIST:
2006 2008 raise
2007 2009 else:
2008 2010 exedir, exename = os.path.split(sys.executable)
2009 2011 vlog("# Modifying search path to find %s as %s in '%s'" %
2010 2012 (exename, pyexename, exedir))
2011 2013 path = os.environ['PATH'].split(os.pathsep)
2012 2014 while exedir in path:
2013 2015 path.remove(exedir)
2014 2016 os.environ['PATH'] = os.pathsep.join([exedir] + path)
2015 2017 if not self._findprogram(pyexename):
2016 2018 print("WARNING: Cannot find %s in search path" % pyexename)
2017 2019
2018 2020 def _installhg(self):
2019 2021 """Install hg into the test environment.
2020 2022
2021 2023 This will also configure hg with the appropriate testing settings.
2022 2024 """
2023 2025 vlog("# Performing temporary installation of HG")
2024 2026 installerrs = os.path.join(b"tests", b"install.err")
2025 2027 compiler = ''
2026 2028 if self.options.compiler:
2027 2029 compiler = '--compiler ' + self.options.compiler
2028 2030 if self.options.pure:
2029 2031 pure = b"--pure"
2030 2032 else:
2031 2033 pure = b""
2032 2034 py3 = ''
2033 2035
2034 2036 # Run installer in hg root
2035 2037 script = os.path.realpath(sys.argv[0])
2036 2038 exe = sys.executable
2037 2039 if PYTHON3:
2038 2040 py3 = b'--c2to3'
2039 2041 compiler = _bytespath(compiler)
2040 2042 script = _bytespath(script)
2041 2043 exe = _bytespath(exe)
2042 2044 hgroot = os.path.dirname(os.path.dirname(script))
2043 2045 self._hgroot = hgroot
2044 2046 os.chdir(hgroot)
2045 2047 nohome = b'--home=""'
2046 2048 if os.name == 'nt':
2047 2049 # The --home="" trick works only on OS where os.sep == '/'
2048 2050 # because of a distutils convert_path() fast-path. Avoid it at
2049 2051 # least on Windows for now, deal with .pydistutils.cfg bugs
2050 2052 # when they happen.
2051 2053 nohome = b''
2052 2054 cmd = (b'%(exe)s setup.py %(py3)s %(pure)s clean --all'
2053 2055 b' build %(compiler)s --build-base="%(base)s"'
2054 2056 b' install --force --prefix="%(prefix)s"'
2055 2057 b' --install-lib="%(libdir)s"'
2056 2058 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
2057 2059 % {b'exe': exe, b'py3': py3, b'pure': pure,
2058 2060 b'compiler': compiler,
2059 2061 b'base': os.path.join(self._hgtmp, b"build"),
2060 2062 b'prefix': self._installdir, b'libdir': self._pythondir,
2061 2063 b'bindir': self._bindir,
2062 2064 b'nohome': nohome, b'logfile': installerrs})
2063 2065
2064 2066 # setuptools requires install directories to exist.
2065 2067 def makedirs(p):
2066 2068 try:
2067 2069 os.makedirs(p)
2068 2070 except OSError as e:
2069 2071 if e.errno != errno.EEXIST:
2070 2072 raise
2071 2073 makedirs(self._pythondir)
2072 2074 makedirs(self._bindir)
2073 2075
2074 2076 vlog("# Running", cmd)
2075 2077 if os.system(cmd) == 0:
2076 2078 if not self.options.verbose:
2077 2079 os.remove(installerrs)
2078 2080 else:
2079 2081 f = open(installerrs, 'rb')
2080 2082 for line in f:
2081 2083 if PYTHON3:
2082 2084 sys.stdout.buffer.write(line)
2083 2085 else:
2084 2086 sys.stdout.write(line)
2085 2087 f.close()
2086 2088 sys.exit(1)
2087 2089 os.chdir(self._testdir)
2088 2090
2089 2091 self._usecorrectpython()
2090 2092
2091 2093 if self.options.py3k_warnings and not self.options.anycoverage:
2092 2094 vlog("# Updating hg command to enable Py3k Warnings switch")
2093 2095 f = open(os.path.join(self._bindir, 'hg'), 'rb')
2094 2096 lines = [line.rstrip() for line in f]
2095 2097 lines[0] += ' -3'
2096 2098 f.close()
2097 2099 f = open(os.path.join(self._bindir, 'hg'), 'wb')
2098 2100 for line in lines:
2099 2101 f.write(line + '\n')
2100 2102 f.close()
2101 2103
2102 2104 hgbat = os.path.join(self._bindir, b'hg.bat')
2103 2105 if os.path.isfile(hgbat):
2104 2106 # hg.bat expects to be put in bin/scripts while run-tests.py
2105 2107 # installation layout puts it in bin/ directly. Fix it
2106 2108 f = open(hgbat, 'rb')
2107 2109 data = f.read()
2108 2110 f.close()
2109 2111 if b'"%~dp0..\python" "%~dp0hg" %*' in data:
2110 2112 data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
2111 2113 b'"%~dp0python" "%~dp0hg" %*')
2112 2114 f = open(hgbat, 'wb')
2113 2115 f.write(data)
2114 2116 f.close()
2115 2117 else:
2116 2118 print('WARNING: cannot fix hg.bat reference to python.exe')
2117 2119
2118 2120 if self.options.anycoverage:
2119 2121 custom = os.path.join(self._testdir, 'sitecustomize.py')
2120 2122 target = os.path.join(self._pythondir, 'sitecustomize.py')
2121 2123 vlog('# Installing coverage trigger to %s' % target)
2122 2124 shutil.copyfile(custom, target)
2123 2125 rc = os.path.join(self._testdir, '.coveragerc')
2124 2126 vlog('# Installing coverage rc to %s' % rc)
2125 2127 os.environ['COVERAGE_PROCESS_START'] = rc
2126 2128 covdir = os.path.join(self._installdir, '..', 'coverage')
2127 2129 try:
2128 2130 os.mkdir(covdir)
2129 2131 except OSError as e:
2130 2132 if e.errno != errno.EEXIST:
2131 2133 raise
2132 2134
2133 2135 os.environ['COVERAGE_DIR'] = covdir
2134 2136
2135 2137 def _checkhglib(self, verb):
2136 2138 """Ensure that the 'mercurial' package imported by python is
2137 2139 the one we expect it to be. If not, print a warning to stderr."""
2138 2140 if ((self._bindir == self._pythondir) and
2139 2141 (self._bindir != self._tmpbindir)):
2140 2142 # The pythondir has been inferred from --with-hg flag.
2141 2143 # We cannot expect anything sensible here.
2142 2144 return
2143 2145 expecthg = os.path.join(self._pythondir, b'mercurial')
2144 2146 actualhg = self._gethgpath()
2145 2147 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
2146 2148 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
2147 2149 ' (expected %s)\n'
2148 2150 % (verb, actualhg, expecthg))
2149 2151 def _gethgpath(self):
2150 2152 """Return the path to the mercurial package that is actually found by
2151 2153 the current Python interpreter."""
2152 2154 if self._hgpath is not None:
2153 2155 return self._hgpath
2154 2156
2155 2157 cmd = b'%s -c "import mercurial; print (mercurial.__path__[0])"'
2156 2158 cmd = cmd % PYTHON
2157 2159 if PYTHON3:
2158 2160 cmd = _strpath(cmd)
2159 2161 pipe = os.popen(cmd)
2160 2162 try:
2161 2163 self._hgpath = _bytespath(pipe.read().strip())
2162 2164 finally:
2163 2165 pipe.close()
2164 2166
2165 2167 return self._hgpath
2166 2168
2167 2169 def _outputcoverage(self):
2168 2170 """Produce code coverage output."""
2169 2171 from coverage import coverage
2170 2172
2171 2173 vlog('# Producing coverage report')
2172 2174 # chdir is the easiest way to get short, relative paths in the
2173 2175 # output.
2174 2176 os.chdir(self._hgroot)
2175 2177 covdir = os.path.join(self._installdir, '..', 'coverage')
2176 2178 cov = coverage(data_file=os.path.join(covdir, 'cov'))
2177 2179
2178 2180 # Map install directory paths back to source directory.
2179 2181 cov.config.paths['srcdir'] = ['.', self._pythondir]
2180 2182
2181 2183 cov.combine()
2182 2184
2183 2185 omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
2184 2186 cov.report(ignore_errors=True, omit=omit)
2185 2187
2186 2188 if self.options.htmlcov:
2187 2189 htmldir = os.path.join(self._testdir, 'htmlcov')
2188 2190 cov.html_report(directory=htmldir, omit=omit)
2189 2191 if self.options.annotate:
2190 2192 adir = os.path.join(self._testdir, 'annotated')
2191 2193 if not os.path.isdir(adir):
2192 2194 os.mkdir(adir)
2193 2195 cov.annotate(directory=adir, omit=omit)
2194 2196
2195 2197 def _findprogram(self, program):
2196 2198 """Search PATH for a executable program"""
2197 2199 dpb = _bytespath(os.defpath)
2198 2200 sepb = _bytespath(os.pathsep)
2199 2201 for p in osenvironb.get(b'PATH', dpb).split(sepb):
2200 2202 name = os.path.join(p, program)
2201 2203 if os.name == 'nt' or os.access(name, os.X_OK):
2202 2204 return name
2203 2205 return None
2204 2206
2205 2207 def _checktools(self):
2206 2208 """Ensure tools required to run tests are present."""
2207 2209 for p in self.REQUIREDTOOLS:
2208 2210 if os.name == 'nt' and not p.endswith('.exe'):
2209 2211 p += '.exe'
2210 2212 found = self._findprogram(p)
2211 2213 if found:
2212 2214 vlog("# Found prerequisite", p, "at", found)
2213 2215 else:
2214 2216 print("WARNING: Did not find prerequisite tool: %s " % p)
2215 2217
2216 2218 if __name__ == '__main__':
2217 2219 runner = TestRunner()
2218 2220
2219 2221 try:
2220 2222 import msvcrt
2221 2223 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
2222 2224 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
2223 2225 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
2224 2226 except ImportError:
2225 2227 pass
2226 2228
2227 2229 sys.exit(runner.run(sys.argv[1:]))
@@ -1,585 +1,604 b''
1 1 This file tests the behavior of run-tests.py itself.
2 2
3 3 Avoid interference from actual test env:
4 4
5 5 $ unset HGTEST_JOBS
6 6 $ unset HGTEST_TIMEOUT
7 7 $ unset HGTEST_PORT
8 8 $ unset HGTEST_SHELL
9 9
10 10 Smoke test
11 11 ============
12 12
13 13 $ run-tests.py $HGTEST_RUN_TESTS_PURE
14 14
15 15 # Ran 0 tests, 0 skipped, 0 warned, 0 failed.
16 16
17 17 a successful test
18 18 =======================
19 19
20 20 $ cat > test-success.t << EOF
21 21 > $ echo babar
22 22 > babar
23 23 > $ echo xyzzy
24 24 > never happens (?)
25 25 > xyzzy
26 26 > nor this (?)
27 27 > EOF
28 28
29 29 $ run-tests.py --with-hg=`which hg`
30 30 .
31 31 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
32 32
33 33 failing test
34 34 ==================
35 35
36 36 $ cat > test-failure.t << EOF
37 37 > $ echo babar
38 38 > rataxes
39 39 > This is a noop statement so that
40 40 > this test is still more bytes than success.
41 41 > EOF
42 42
43 43 >>> fh = open('test-failure-unicode.t', 'wb')
44 44 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
45 45 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
46 46
47 47 $ run-tests.py --with-hg=`which hg`
48 48
49 49 --- $TESTTMP/test-failure.t
50 50 +++ $TESTTMP/test-failure.t.err
51 51 @@ -1,4 +1,4 @@
52 52 $ echo babar
53 53 - rataxes
54 54 + babar
55 55 This is a noop statement so that
56 56 this test is still more bytes than success.
57 57
58 58 ERROR: test-failure.t output changed
59 59 !.
60 60 --- $TESTTMP/test-failure-unicode.t
61 61 +++ $TESTTMP/test-failure-unicode.t.err
62 62 @@ -1,2 +1,2 @@
63 63 $ echo babar\xce\xb1 (esc)
64 64 - l\xce\xb5\xce\xb5t (esc)
65 65 + babar\xce\xb1 (esc)
66 66
67 67 ERROR: test-failure-unicode.t output changed
68 68 !
69 69 Failed test-failure.t: output changed
70 70 Failed test-failure-unicode.t: output changed
71 71 # Ran 3 tests, 0 skipped, 0 warned, 2 failed.
72 72 python hash seed: * (glob)
73 73 [1]
74 74
75 75 test --xunit support
76 76 $ run-tests.py --with-hg=`which hg` --xunit=xunit.xml
77 77
78 78 --- $TESTTMP/test-failure.t
79 79 +++ $TESTTMP/test-failure.t.err
80 80 @@ -1,4 +1,4 @@
81 81 $ echo babar
82 82 - rataxes
83 83 + babar
84 84 This is a noop statement so that
85 85 this test is still more bytes than success.
86 86
87 87 ERROR: test-failure.t output changed
88 88 !.
89 89 --- $TESTTMP/test-failure-unicode.t
90 90 +++ $TESTTMP/test-failure-unicode.t.err
91 91 @@ -1,2 +1,2 @@
92 92 $ echo babar\xce\xb1 (esc)
93 93 - l\xce\xb5\xce\xb5t (esc)
94 94 + babar\xce\xb1 (esc)
95 95
96 96 ERROR: test-failure-unicode.t output changed
97 97 !
98 98 Failed test-failure.t: output changed
99 99 Failed test-failure-unicode.t: output changed
100 100 # Ran 3 tests, 0 skipped, 0 warned, 2 failed.
101 101 python hash seed: * (glob)
102 102 [1]
103 103 $ cat xunit.xml
104 104 <?xml version="1.0" encoding="utf-8"?>
105 105 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
106 106 <testcase name="test-success.t" time="*"/> (glob)
107 107 <testcase name="test-failure-unicode.t" time="*"> (glob)
108 108 <![CDATA[--- $TESTTMP/test-failure-unicode.t
109 109 +++ $TESTTMP/test-failure-unicode.t.err
110 110 @@ -1,2 +1,2 @@
111 111 $ echo babar\xce\xb1 (esc)
112 112 - l\xce\xb5\xce\xb5t (esc)
113 113 + babar\xce\xb1 (esc)
114 114 ]]> </testcase>
115 115 <testcase name="test-failure.t" time="*"> (glob)
116 116 <![CDATA[--- $TESTTMP/test-failure.t
117 117 +++ $TESTTMP/test-failure.t.err
118 118 @@ -1,4 +1,4 @@
119 119 $ echo babar
120 120 - rataxes
121 121 + babar
122 122 This is a noop statement so that
123 123 this test is still more bytes than success.
124 124 ]]> </testcase>
125 125 </testsuite>
126 126
127 127 $ rm test-failure-unicode.t
128 128
129 129 test for --retest
130 130 ====================
131 131
132 132 $ run-tests.py --with-hg=`which hg` --retest
133 133
134 134 --- $TESTTMP/test-failure.t
135 135 +++ $TESTTMP/test-failure.t.err
136 136 @@ -1,4 +1,4 @@
137 137 $ echo babar
138 138 - rataxes
139 139 + babar
140 140 This is a noop statement so that
141 141 this test is still more bytes than success.
142 142
143 143 ERROR: test-failure.t output changed
144 144 !
145 145 Failed test-failure.t: output changed
146 146 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
147 147 python hash seed: * (glob)
148 148 [1]
149 149
150 150 Selecting Tests To Run
151 151 ======================
152 152
153 153 successful
154 154
155 155 $ run-tests.py --with-hg=`which hg` test-success.t
156 156 .
157 157 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
158 158
159 159 success w/ keyword
160 160 $ run-tests.py --with-hg=`which hg` -k xyzzy
161 161 .
162 162 # Ran 2 tests, 1 skipped, 0 warned, 0 failed.
163 163
164 164 failed
165 165
166 166 $ run-tests.py --with-hg=`which hg` test-failure.t
167 167
168 168 --- $TESTTMP/test-failure.t
169 169 +++ $TESTTMP/test-failure.t.err
170 170 @@ -1,4 +1,4 @@
171 171 $ echo babar
172 172 - rataxes
173 173 + babar
174 174 This is a noop statement so that
175 175 this test is still more bytes than success.
176 176
177 177 ERROR: test-failure.t output changed
178 178 !
179 179 Failed test-failure.t: output changed
180 180 # Ran 1 tests, 0 skipped, 0 warned, 1 failed.
181 181 python hash seed: * (glob)
182 182 [1]
183 183
184 184 failure w/ keyword
185 185 $ run-tests.py --with-hg=`which hg` -k rataxes
186 186
187 187 --- $TESTTMP/test-failure.t
188 188 +++ $TESTTMP/test-failure.t.err
189 189 @@ -1,4 +1,4 @@
190 190 $ echo babar
191 191 - rataxes
192 192 + babar
193 193 This is a noop statement so that
194 194 this test is still more bytes than success.
195 195
196 196 ERROR: test-failure.t output changed
197 197 !
198 198 Failed test-failure.t: output changed
199 199 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
200 200 python hash seed: * (glob)
201 201 [1]
202 202
203 203 Verify that when a process fails to start we show a useful message
204 204 ==================================================================
205 205 NOTE: there is currently a bug where this shows "2 failed" even though
206 206 it's actually the same test being reported for failure twice.
207 207
208 208 $ cat > test-serve-fail.t <<EOF
209 209 > $ echo 'abort: child process failed to start blah'
210 210 > EOF
211 211 $ run-tests.py --with-hg=`which hg` test-serve-fail.t
212 212
213 213 ERROR: test-serve-fail.t output changed
214 214 !
215 215 ERROR: test-serve-fail.t output changed
216 216 !
217 217 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
218 218 Failed test-serve-fail.t: output changed
219 219 # Ran 1 tests, 0 skipped, 0 warned, 2 failed.
220 220 python hash seed: * (glob)
221 221 [1]
222 222 $ rm test-serve-fail.t
223 223
224 224 Running In Debug Mode
225 225 ======================
226 226
227 227 $ run-tests.py --with-hg=`which hg` --debug 2>&1 | grep -v pwd
228 228 + echo *SALT* 0 0 (glob)
229 229 *SALT* 0 0 (glob)
230 230 + echo babar
231 231 babar
232 232 + echo *SALT* 4 0 (glob)
233 233 *SALT* 4 0 (glob)
234 234 .+ echo *SALT* 0 0 (glob)
235 235 *SALT* 0 0 (glob)
236 236 + echo babar
237 237 babar
238 238 + echo *SALT* 2 0 (glob)
239 239 *SALT* 2 0 (glob)
240 240 + echo xyzzy
241 241 xyzzy
242 242 + echo *SALT* 6 0 (glob)
243 243 *SALT* 6 0 (glob)
244 244 .
245 245 # Ran 2 tests, 0 skipped, 0 warned, 0 failed.
246 246
247 247 Parallel runs
248 248 ==============
249 249
250 250 (duplicate the failing test to get predictable output)
251 251 $ cp test-failure.t test-failure-copy.t
252 252
253 253 $ run-tests.py --with-hg=`which hg` --jobs 2 test-failure*.t -n
254 254 !!
255 255 Failed test-failure*.t: output changed (glob)
256 256 Failed test-failure*.t: output changed (glob)
257 257 # Ran 2 tests, 0 skipped, 0 warned, 2 failed.
258 258 python hash seed: * (glob)
259 259 [1]
260 260
261 261 failures in parallel with --first should only print one failure
262 262 >>> f = open('test-nothing.t', 'w')
263 263 >>> f.write('foo\n' * 1024) and None
264 264 >>> f.write(' $ sleep 1') and None
265 265 $ run-tests.py --with-hg=`which hg` --jobs 2 --first
266 266
267 267 --- $TESTTMP/test-failure*.t (glob)
268 268 +++ $TESTTMP/test-failure*.t.err (glob)
269 269 @@ -1,4 +1,4 @@
270 270 $ echo babar
271 271 - rataxes
272 272 + babar
273 273 This is a noop statement so that
274 274 this test is still more bytes than success.
275 275
276 276 Failed test-failure*.t: output changed (glob)
277 277 Failed test-nothing.t: output changed
278 278 # Ran 2 tests, 0 skipped, 0 warned, 2 failed.
279 279 python hash seed: * (glob)
280 280 [1]
281 281
282 282
283 283 (delete the duplicated test file)
284 284 $ rm test-failure-copy.t test-nothing.t
285 285
286 286
287 287 Interactive run
288 288 ===============
289 289
290 290 (backup the failing test)
291 291 $ cp test-failure.t backup
292 292
293 293 Refuse the fix
294 294
295 295 $ echo 'n' | run-tests.py --with-hg=`which hg` -i
296 296
297 297 --- $TESTTMP/test-failure.t
298 298 +++ $TESTTMP/test-failure.t.err
299 299 @@ -1,4 +1,4 @@
300 300 $ echo babar
301 301 - rataxes
302 302 + babar
303 303 This is a noop statement so that
304 304 this test is still more bytes than success.
305 305 Accept this change? [n]
306 306 ERROR: test-failure.t output changed
307 307 !.
308 308 Failed test-failure.t: output changed
309 309 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
310 310 python hash seed: * (glob)
311 311 [1]
312 312
313 313 $ cat test-failure.t
314 314 $ echo babar
315 315 rataxes
316 316 This is a noop statement so that
317 317 this test is still more bytes than success.
318 318
319 319 Interactive with custom view
320 320
321 321 $ echo 'n' | run-tests.py --with-hg=`which hg` -i --view echo
322 322 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
323 323 Accept this change? [n]* (glob)
324 324 ERROR: test-failure.t output changed
325 325 !.
326 326 Failed test-failure.t: output changed
327 327 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
328 328 python hash seed: * (glob)
329 329 [1]
330 330
331 331 View the fix
332 332
333 333 $ echo 'y' | run-tests.py --with-hg=`which hg` --view echo
334 334 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
335 335
336 336 ERROR: test-failure.t output changed
337 337 !.
338 338 Failed test-failure.t: output changed
339 339 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
340 340 python hash seed: * (glob)
341 341 [1]
342 342
343 343 Accept the fix
344 344
345 345 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
346 346 $ echo " saved backup bundle to \$TESTTMP/foo.hg" >> test-failure.t
347 347 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
348 348 $ echo " saved backup bundle to \$TESTTMP/foo.hg (glob)" >> test-failure.t
349 349 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
350 350 $ echo " saved backup bundle to \$TESTTMP/*.hg (glob)" >> test-failure.t
351 351 $ echo 'y' | run-tests.py --with-hg=`which hg` -i 2>&1 | \
352 352 > sed -e 's,(glob)$,&<,g'
353 353
354 354 --- $TESTTMP/test-failure.t
355 355 +++ $TESTTMP/test-failure.t.err
356 356 @@ -1,9 +1,9 @@
357 357 $ echo babar
358 358 - rataxes
359 359 + babar
360 360 This is a noop statement so that
361 361 this test is still more bytes than success.
362 362 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
363 363 - saved backup bundle to $TESTTMP/foo.hg
364 364 + saved backup bundle to $TESTTMP/foo.hg (glob)<
365 365 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
366 366 saved backup bundle to $TESTTMP/foo.hg (glob)<
367 367 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
368 368 Accept this change? [n] ..
369 369 # Ran 2 tests, 0 skipped, 0 warned, 0 failed.
370 370
371 371 $ sed -e 's,(glob)$,&<,g' test-failure.t
372 372 $ echo babar
373 373 babar
374 374 This is a noop statement so that
375 375 this test is still more bytes than success.
376 376 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
377 377 saved backup bundle to $TESTTMP/foo.hg (glob)<
378 378 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
379 379 saved backup bundle to $TESTTMP/foo.hg (glob)<
380 380 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
381 381 saved backup bundle to $TESTTMP/*.hg (glob)<
382 382
383 383 (reinstall)
384 384 $ mv backup test-failure.t
385 385
386 386 No Diff
387 387 ===============
388 388
389 389 $ run-tests.py --with-hg=`which hg` --nodiff
390 390 !.
391 391 Failed test-failure.t: output changed
392 392 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
393 393 python hash seed: * (glob)
394 394 [1]
395 395
396 396 test for --time
397 397 ==================
398 398
399 399 $ run-tests.py --with-hg=`which hg` test-success.t --time
400 400 .
401 401 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
402 402 # Producing time report
403 403 start end cuser csys real Test
404 404 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
405 405
406 406 test for --time with --job enabled
407 407 ====================================
408 408
409 409 $ run-tests.py --with-hg=`which hg` test-success.t --time --jobs 2
410 410 .
411 411 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
412 412 # Producing time report
413 413 start end cuser csys real Test
414 414 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
415 415
416 416 Skips
417 417 ================
418 418 $ cat > test-skip.t <<EOF
419 419 > $ echo xyzzy
420 420 > #require false
421 421 > EOF
422 422 $ run-tests.py --with-hg=`which hg` --nodiff
423 423 !.s
424 424 Skipped test-skip.t: skipped
425 425 Failed test-failure.t: output changed
426 426 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
427 427 python hash seed: * (glob)
428 428 [1]
429 429
430 430 $ run-tests.py --with-hg=`which hg` --keyword xyzzy
431 431 .s
432 432 Skipped test-skip.t: skipped
433 433 # Ran 2 tests, 2 skipped, 0 warned, 0 failed.
434 434
435 435 Skips with xml
436 436 $ run-tests.py --with-hg=`which hg` --keyword xyzzy \
437 437 > --xunit=xunit.xml
438 438 .s
439 439 Skipped test-skip.t: skipped
440 440 # Ran 2 tests, 2 skipped, 0 warned, 0 failed.
441 441 $ cat xunit.xml
442 442 <?xml version="1.0" encoding="utf-8"?>
443 443 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
444 444 <testcase name="test-success.t" time="*"/> (glob)
445 445 </testsuite>
446 446
447 447 Missing skips or blacklisted skips don't count as executed:
448 448 $ echo test-failure.t > blacklist
449 449 $ run-tests.py --with-hg=`which hg` --blacklist=blacklist \
450 450 > test-failure.t test-bogus.t
451 451 ss
452 452 Skipped test-bogus.t: Doesn't exist
453 453 Skipped test-failure.t: blacklisted
454 454 # Ran 0 tests, 2 skipped, 0 warned, 0 failed.
455 455
456 456 #if json
457 457
458 458 test for --json
459 459 ==================
460 460
461 461 $ run-tests.py --with-hg=`which hg` --json
462 462
463 463 --- $TESTTMP/test-failure.t
464 464 +++ $TESTTMP/test-failure.t.err
465 465 @@ -1,4 +1,4 @@
466 466 $ echo babar
467 467 - rataxes
468 468 + babar
469 469 This is a noop statement so that
470 470 this test is still more bytes than success.
471 471
472 472 ERROR: test-failure.t output changed
473 473 !.s
474 474 Skipped test-skip.t: skipped
475 475 Failed test-failure.t: output changed
476 476 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
477 477 python hash seed: * (glob)
478 478 [1]
479 479
480 480 $ cat report.json
481 481 testreport ={
482 482 "test-failure.t": [\{] (re)
483 483 "csys": "\s*[\d\.]{4,5}", ? (re)
484 484 "cuser": "\s*[\d\.]{4,5}", ? (re)
485 485 "end": "\s*[\d\.]{4,5}", ? (re)
486 486 "result": "failure", ? (re)
487 487 "start": "\s*[\d\.]{4,5}", ? (re)
488 488 "time": "\s*[\d\.]{4,5}" (re)
489 489 }, ? (re)
490 490 "test-skip.t": {
491 491 "csys": "\s*[\d\.]{4,5}", ? (re)
492 492 "cuser": "\s*[\d\.]{4,5}", ? (re)
493 493 "end": "\s*[\d\.]{4,5}", ? (re)
494 494 "result": "skip", ? (re)
495 495 "start": "\s*[\d\.]{4,5}", ? (re)
496 496 "time": "\s*[\d\.]{4,5}" (re)
497 497 }, ? (re)
498 498 "test-success.t": [\{] (re)
499 499 "csys": "\s*[\d\.]{4,5}", ? (re)
500 500 "cuser": "\s*[\d\.]{4,5}", ? (re)
501 501 "end": "\s*[\d\.]{4,5}", ? (re)
502 502 "result": "success", ? (re)
503 503 "start": "\s*[\d\.]{4,5}", ? (re)
504 504 "time": "\s*[\d\.]{4,5}" (re)
505 505 }
506 506 } (no-eol)
507 507
508 508 Test that a failed test accepted through interactive mode is properly reported:
509 509
510 510 $ cp test-failure.t backup
511 511 $ echo y | run-tests.py --with-hg=`which hg` --json -i
512 512
513 513 --- $TESTTMP/test-failure.t
514 514 +++ $TESTTMP/test-failure.t.err
515 515 @@ -1,4 +1,4 @@
516 516 $ echo babar
517 517 - rataxes
518 518 + babar
519 519 This is a noop statement so that
520 520 this test is still more bytes than success.
521 521 Accept this change? [n] ..s
522 522 Skipped test-skip.t: skipped
523 523 # Ran 2 tests, 1 skipped, 0 warned, 0 failed.
524 524
525 525 $ cat report.json
526 526 testreport ={
527 527 "test-failure.t": [\{] (re)
528 528 "csys": "\s*[\d\.]{4,5}", ? (re)
529 529 "cuser": "\s*[\d\.]{4,5}", ? (re)
530 530 "end": "\s*[\d\.]{4,5}", ? (re)
531 531 "result": "success", ? (re)
532 532 "start": "\s*[\d\.]{4,5}", ? (re)
533 533 "time": "\s*[\d\.]{4,5}" (re)
534 534 }, ? (re)
535 535 "test-skip.t": {
536 536 "csys": "\s*[\d\.]{4,5}", ? (re)
537 537 "cuser": "\s*[\d\.]{4,5}", ? (re)
538 538 "end": "\s*[\d\.]{4,5}", ? (re)
539 539 "result": "skip", ? (re)
540 540 "start": "\s*[\d\.]{4,5}", ? (re)
541 541 "time": "\s*[\d\.]{4,5}" (re)
542 542 }, ? (re)
543 543 "test-success.t": [\{] (re)
544 544 "csys": "\s*[\d\.]{4,5}", ? (re)
545 545 "cuser": "\s*[\d\.]{4,5}", ? (re)
546 546 "end": "\s*[\d\.]{4,5}", ? (re)
547 547 "result": "success", ? (re)
548 548 "start": "\s*[\d\.]{4,5}", ? (re)
549 549 "time": "\s*[\d\.]{4,5}" (re)
550 550 }
551 551 } (no-eol)
552 552 $ mv backup test-failure.t
553 553
554 554 #endif
555 555
556 556 backslash at the end of a line with glob matching is handled properly
557 557
558 558 $ cat > test-glob-backslash.t << EOF
559 559 > $ echo 'foo bar \\'
560 560 > foo * \ (glob)
561 561 > EOF
562 562
563 563 $ run-tests.py --with-hg=`which hg` test-glob-backslash.t
564 564 .
565 565 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
566 566
567 567 $ rm -f test-glob-backslash.t
568 568
569 569 Test reusability for third party tools
570 570 ======================================
571 571
572 572 $ mkdir "$TESTTMP"/anothertests
573 573 $ cd "$TESTTMP"/anothertests
574 574
575 575 test that `run-tests.py` can execute hghave, even if it is not run in the
576 576 Mercurial source tree.
577 577
578 578 $ cat > test-hghave.t <<EOF
579 579 > #require true
580 580 > $ echo foo
581 581 > foo
582 582 > EOF
583 583 $ run-tests.py test-hghave.t
584 584 .
585 585 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
586
587 test that RUNTESTDIR refers to the directory in which the currently
588 running `run-tests.py` is placed.
589
590 $ cat > test-runtestdir.t <<EOF
591 > - $TESTDIR, in which test-run-tests.t is placed
592 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
593 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
594 >
595 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
596 > $ test "\$RUNTESTDIR" = "$TESTDIR"
597 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py
598 > #!/usr/bin/env python
599 > #
600 > # check-code - a style and portability checker for Mercurial
601 > EOF
602 $ run-tests.py test-runtestdir.t
603 .
604 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
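
The two sections above are what third-party suites rely on: run-tests.py and hghave can be invoked from outside the Mercurial source tree, with RUNTESTDIR pointing back at Mercurial's own tests directory. A hedged sketch (paths and test name hypothetical) of driving the harness from another project's test wrapper:

    import subprocess
    import sys

    # location of a Mercurial checkout is an assumption of this sketch
    RUN_TESTS = '/path/to/mercurial/tests/run-tests.py'

    # run this project's own .t tests through Mercurial's harness
    ret = subprocess.call([sys.executable, RUN_TESTS, 'test-thirdparty-foo.t'])
    sys.exit(ret)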